mirror of https://github.com/AetherDroid/android_kernel_samsung_on5xelte.git
synced 2025-09-08 01:08:03 -04:00

commit f6dfaef42e: Fixed MTP to work with TWRP
50820 changed files with 20846062 additions and 0 deletions
drivers/s390/cio/Makefile (new file, 16 additions)
@@ -0,0 +1,16 @@
#
# Makefile for the S/390 common i/o drivers
#

obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o idset.o isc.o \
        fcx.o itcw.o crw.o ccwreq.o
ccw_device-objs += device.o device_fsm.o device_ops.o
ccw_device-objs += device_id.o device_pgid.o device_status.o
obj-y += ccw_device.o cmf.o
obj-$(CONFIG_CHSC_SCH) += chsc_sch.o
obj-$(CONFIG_EADM_SCH) += eadm_sch.o
obj-$(CONFIG_SCM_BUS) += scm.o
obj-$(CONFIG_CCWGROUP) += ccwgroup.o

qdio-objs := qdio_main.o qdio_thinint.o qdio_debug.o qdio_setup.o
obj-$(CONFIG_QDIO) += qdio.o
drivers/s390/cio/airq.c (new file, 275 additions)
@@ -0,0 +1,275 @@
/*
 * Support for adapter interruptions
 *
 * Copyright IBM Corp. 1999, 2007
 * Author(s): Ingo Adlung <adlung@de.ibm.com>
 *            Cornelia Huck <cornelia.huck@de.ibm.com>
 *            Arnd Bergmann <arndb@de.ibm.com>
 *            Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
 */

#include <linux/init.h>
#include <linux/irq.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/slab.h>

#include <asm/airq.h>
#include <asm/isc.h>

#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"

static DEFINE_SPINLOCK(airq_lists_lock);
static struct hlist_head airq_lists[MAX_ISC+1];

/**
 * register_adapter_interrupt() - register adapter interrupt handler
 * @airq: pointer to adapter interrupt descriptor
 *
 * Returns 0 on success, -EINVAL on invalid input, or -ENOMEM if the
 * local summary indicator could not be allocated.
 */
int register_adapter_interrupt(struct airq_struct *airq)
{
        char dbf_txt[32];

        if (!airq->handler || airq->isc > MAX_ISC)
                return -EINVAL;
        if (!airq->lsi_ptr) {
                airq->lsi_ptr = kzalloc(1, GFP_KERNEL);
                if (!airq->lsi_ptr)
                        return -ENOMEM;
                airq->flags |= AIRQ_PTR_ALLOCATED;
        }
        if (!airq->lsi_mask)
                airq->lsi_mask = 0xff;
        snprintf(dbf_txt, sizeof(dbf_txt), "rairq:%p", airq);
        CIO_TRACE_EVENT(4, dbf_txt);
        isc_register(airq->isc);
        spin_lock(&airq_lists_lock);
        hlist_add_head_rcu(&airq->list, &airq_lists[airq->isc]);
        spin_unlock(&airq_lists_lock);
        return 0;
}
EXPORT_SYMBOL(register_adapter_interrupt);

/**
 * unregister_adapter_interrupt - unregister adapter interrupt handler
 * @airq: pointer to adapter interrupt descriptor
 */
void unregister_adapter_interrupt(struct airq_struct *airq)
{
        char dbf_txt[32];

        if (hlist_unhashed(&airq->list))
                return;
        snprintf(dbf_txt, sizeof(dbf_txt), "urairq:%p", airq);
        CIO_TRACE_EVENT(4, dbf_txt);
        spin_lock(&airq_lists_lock);
        hlist_del_rcu(&airq->list);
        spin_unlock(&airq_lists_lock);
        synchronize_rcu();
        isc_unregister(airq->isc);
        if (airq->flags & AIRQ_PTR_ALLOCATED) {
                kfree(airq->lsi_ptr);
                airq->lsi_ptr = NULL;
                airq->flags &= ~AIRQ_PTR_ALLOCATED;
        }
}
EXPORT_SYMBOL(unregister_adapter_interrupt);

static irqreturn_t do_airq_interrupt(int irq, void *dummy)
{
        struct tpi_info *tpi_info;
        struct airq_struct *airq;
        struct hlist_head *head;

        set_cpu_flag(CIF_NOHZ_DELAY);
        tpi_info = (struct tpi_info *) &get_irq_regs()->int_code;
        head = &airq_lists[tpi_info->isc];
        rcu_read_lock();
        hlist_for_each_entry_rcu(airq, head, list)
                if ((*airq->lsi_ptr & airq->lsi_mask) != 0)
                        airq->handler(airq);
        rcu_read_unlock();

        return IRQ_HANDLED;
}

static struct irqaction airq_interrupt = {
        .name    = "AIO",
        .handler = do_airq_interrupt,
};

void __init init_airq_interrupts(void)
{
        irq_set_chip_and_handler(THIN_INTERRUPT,
                                 &dummy_irq_chip, handle_percpu_irq);
        setup_irq(THIN_INTERRUPT, &airq_interrupt);
}

/**
 * airq_iv_create - create an interrupt vector
 * @bits: number of bits in the interrupt vector
 * @flags: allocation flags
 *
 * Returns a pointer to an interrupt vector structure, or NULL on
 * allocation failure.
 */
struct airq_iv *airq_iv_create(unsigned long bits, unsigned long flags)
{
        struct airq_iv *iv;
        unsigned long size;

        iv = kzalloc(sizeof(*iv), GFP_KERNEL);
        if (!iv)
                goto out;
        iv->bits = bits;
        size = BITS_TO_LONGS(bits) * sizeof(unsigned long);
        iv->vector = kzalloc(size, GFP_KERNEL);
        if (!iv->vector)
                goto out_free;
        if (flags & AIRQ_IV_ALLOC) {
                iv->avail = kmalloc(size, GFP_KERNEL);
                if (!iv->avail)
                        goto out_free;
                memset(iv->avail, 0xff, size);
                iv->end = 0;
        } else
                iv->end = bits;
        if (flags & AIRQ_IV_BITLOCK) {
                iv->bitlock = kzalloc(size, GFP_KERNEL);
                if (!iv->bitlock)
                        goto out_free;
        }
        if (flags & AIRQ_IV_PTR) {
                size = bits * sizeof(unsigned long);
                iv->ptr = kzalloc(size, GFP_KERNEL);
                if (!iv->ptr)
                        goto out_free;
        }
        if (flags & AIRQ_IV_DATA) {
                size = bits * sizeof(unsigned int);
                iv->data = kzalloc(size, GFP_KERNEL);
                if (!iv->data)
                        goto out_free;
        }
        spin_lock_init(&iv->lock);
        return iv;

out_free:
        kfree(iv->ptr);
        kfree(iv->bitlock);
        kfree(iv->avail);
        kfree(iv->vector);
        kfree(iv);
out:
        return NULL;
}
EXPORT_SYMBOL(airq_iv_create);

/**
 * airq_iv_release - release an interrupt vector
 * @iv: pointer to interrupt vector structure
 */
void airq_iv_release(struct airq_iv *iv)
{
        kfree(iv->data);
        kfree(iv->ptr);
        kfree(iv->bitlock);
        kfree(iv->vector);
        kfree(iv->avail);
        kfree(iv);
}
EXPORT_SYMBOL(airq_iv_release);

/**
 * airq_iv_alloc - allocate irq bits from an interrupt vector
 * @iv: pointer to an interrupt vector structure
 * @num: number of consecutive irq bits to allocate
 *
 * Returns the bit number of the first irq in the allocated block of irqs,
 * or -1UL if no bit is available or the AIRQ_IV_ALLOC flag has not been
 * specified
 */
unsigned long airq_iv_alloc(struct airq_iv *iv, unsigned long num)
{
        unsigned long bit, i, flags;

        if (!iv->avail || num == 0)
                return -1UL;
        spin_lock_irqsave(&iv->lock, flags);
        bit = find_first_bit_inv(iv->avail, iv->bits);
        while (bit + num <= iv->bits) {
                for (i = 1; i < num; i++)
                        if (!test_bit_inv(bit + i, iv->avail))
                                break;
                if (i >= num) {
                        /* Found a suitable block of irqs */
                        for (i = 0; i < num; i++)
                                clear_bit_inv(bit + i, iv->avail);
                        if (bit + num >= iv->end)
                                iv->end = bit + num + 1;
                        break;
                }
                bit = find_next_bit_inv(iv->avail, iv->bits, bit + i + 1);
        }
        if (bit + num > iv->bits)
                bit = -1UL;
        spin_unlock_irqrestore(&iv->lock, flags);
        return bit;
}
EXPORT_SYMBOL(airq_iv_alloc);

/**
 * airq_iv_free - free irq bits of an interrupt vector
 * @iv: pointer to interrupt vector structure
 * @bit: number of the first irq bit to free
 * @num: number of consecutive irq bits to free
 */
void airq_iv_free(struct airq_iv *iv, unsigned long bit, unsigned long num)
{
        unsigned long i, flags;

        if (!iv->avail || num == 0)
                return;
        spin_lock_irqsave(&iv->lock, flags);
        for (i = 0; i < num; i++) {
                /* Clear (possibly left over) interrupt bit */
                clear_bit_inv(bit + i, iv->vector);
                /* Make the bit positions available again */
                set_bit_inv(bit + i, iv->avail);
        }
        if (bit + num >= iv->end) {
                /* Find new end of bit-field */
                while (iv->end > 0 && !test_bit_inv(iv->end - 1, iv->avail))
                        iv->end--;
        }
        spin_unlock_irqrestore(&iv->lock, flags);
}
EXPORT_SYMBOL(airq_iv_free);

/**
 * airq_iv_scan - scan interrupt vector for non-zero bits
 * @iv: pointer to interrupt vector structure
 * @start: bit number to start the search
 * @end: bit number to end the search
 *
 * Returns the bit number of the next non-zero interrupt bit, or
 * -1UL if the scan completed without finding any more non-zero bits.
 */
unsigned long airq_iv_scan(struct airq_iv *iv, unsigned long start,
                           unsigned long end)
{
        unsigned long bit;

        /* Find non-zero bit starting from 'ivs->next'. */
        bit = find_next_bit_inv(iv->vector, end, start);
        if (bit >= end)
                return -1UL;
        clear_bit_inv(bit, iv->vector);
        return bit;
}
EXPORT_SYMBOL(airq_iv_scan);
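For orientation, here is a minimal, hypothetical sketch of how a driver might use the registration API above. The handler body, the ISC value, and all example_* names are illustrative assumptions, not part of this commit:

#include <linux/init.h>
#include <asm/airq.h>

/* Assumed ISC value, for illustration only. */
#define EXAMPLE_ISC 5

/* Called from do_airq_interrupt() above whenever the indicator byte
 * selected by lsi_ptr/lsi_mask is non-zero. */
static void example_airq_handler(struct airq_struct *airq)
{
        /* scan device indicators, schedule follow-up work, ... */
}

static struct airq_struct example_airq = {
        .handler = example_airq_handler,
        .isc     = EXAMPLE_ISC,
        /* .lsi_ptr left NULL: register_adapter_interrupt() allocates a
         * one-byte indicator and sets AIRQ_PTR_ALLOCATED for us. */
};

static int __init example_init(void)
{
        return register_adapter_interrupt(&example_airq);
}

static void __exit example_exit(void)
{
        unregister_adapter_interrupt(&example_airq);
}

Note that registration both allocates the local summary indicator (when none is supplied) and registers the ISC; unregister_adapter_interrupt() undoes the two in the reverse order after an RCU grace period.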
drivers/s390/cio/blacklist.c (new file, 421 additions)
@@ -0,0 +1,421 @@
/*
 * S/390 common I/O routines -- blacklisting of specific devices
 *
 * Copyright IBM Corp. 1999, 2013
 * Author(s): Ingo Adlung (adlung@de.ibm.com)
 *            Cornelia Huck (cornelia.huck@de.ibm.com)
 *            Arnd Bergmann (arndb@de.ibm.com)
 */

#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/ctype.h>
#include <linux/device.h>

#include <asm/uaccess.h>
#include <asm/cio.h>
#include <asm/ipl.h>

#include "blacklist.h"
#include "cio.h"
#include "cio_debug.h"
#include "css.h"
#include "device.h"

/*
 * "Blacklisting" of certain devices:
 * Device numbers given on the command line as cio_ignore=... won't be known
 * to Linux.
 *
 * These can be single devices or ranges of devices
 */

/* 65536 bits for each set to indicate if a devno is blacklisted or not */
#define __BL_DEV_WORDS ((__MAX_SUBCHANNEL + (8*sizeof(long) - 1)) / \
                         (8*sizeof(long)))
static unsigned long bl_dev[__MAX_SSID + 1][__BL_DEV_WORDS];
typedef enum {add, free} range_action;

/*
 * Function: blacklist_range
 * (Un-)blacklist the devices from-to
 */
static int blacklist_range(range_action action, unsigned int from_ssid,
                           unsigned int to_ssid, unsigned int from,
                           unsigned int to, int msgtrigger)
{
        if ((from_ssid > to_ssid) || ((from_ssid == to_ssid) && (from > to))) {
                if (msgtrigger)
                        pr_warning("0.%x.%04x to 0.%x.%04x is not a valid "
                                   "range for cio_ignore\n", from_ssid, from,
                                   to_ssid, to);

                return 1;
        }

        while ((from_ssid < to_ssid) || ((from_ssid == to_ssid) &&
               (from <= to))) {
                if (action == add)
                        set_bit(from, bl_dev[from_ssid]);
                else
                        clear_bit(from, bl_dev[from_ssid]);
                from++;
                if (from > __MAX_SUBCHANNEL) {
                        from_ssid++;
                        from = 0;
                }
        }

        return 0;
}

static int pure_hex(char **cp, unsigned int *val, int min_digit,
                    int max_digit, int max_val)
{
        int diff;

        diff = 0;
        *val = 0;

        while (diff <= max_digit) {
                int value = hex_to_bin(**cp);

                if (value < 0)
                        break;
                *val = *val * 16 + value;
                (*cp)++;
                diff++;
        }

        if ((diff < min_digit) || (diff > max_digit) || (*val > max_val))
                return 1;

        return 0;
}

static int parse_busid(char *str, unsigned int *cssid, unsigned int *ssid,
                       unsigned int *devno, int msgtrigger)
{
        char *str_work;
        int val, rc, ret;

        rc = 1;

        if (*str == '\0')
                goto out;

        /* old style */
        str_work = str;
        val = simple_strtoul(str, &str_work, 16);

        if (*str_work == '\0') {
                if (val <= __MAX_SUBCHANNEL) {
                        *devno = val;
                        *ssid = 0;
                        *cssid = 0;
                        rc = 0;
                }
                goto out;
        }

        /* new style */
        str_work = str;
        ret = pure_hex(&str_work, cssid, 1, 2, __MAX_CSSID);
        if (ret || (str_work[0] != '.'))
                goto out;
        str_work++;
        ret = pure_hex(&str_work, ssid, 1, 1, __MAX_SSID);
        if (ret || (str_work[0] != '.'))
                goto out;
        str_work++;
        ret = pure_hex(&str_work, devno, 4, 4, __MAX_SUBCHANNEL);
        if (ret || (str_work[0] != '\0'))
                goto out;

        rc = 0;
out:
        if (rc && msgtrigger)
                pr_warning("%s is not a valid device for the cio_ignore "
                           "kernel parameter\n", str);

        return rc;
}

static int blacklist_parse_parameters(char *str, range_action action,
                                      int msgtrigger)
{
        unsigned int from_cssid, to_cssid, from_ssid, to_ssid, from, to;
        int rc, totalrc;
        char *parm;
        range_action ra;

        totalrc = 0;

        while ((parm = strsep(&str, ","))) {
                rc = 0;
                ra = action;
                if (*parm == '!') {
                        if (ra == add)
                                ra = free;
                        else
                                ra = add;
                        parm++;
                }
                if (strcmp(parm, "all") == 0) {
                        from_cssid = 0;
                        from_ssid = 0;
                        from = 0;
                        to_cssid = __MAX_CSSID;
                        to_ssid = __MAX_SSID;
                        to = __MAX_SUBCHANNEL;
                } else if (strcmp(parm, "ipldev") == 0) {
                        if (ipl_info.type == IPL_TYPE_CCW) {
                                from_cssid = 0;
                                from_ssid = ipl_info.data.ccw.dev_id.ssid;
                                from = ipl_info.data.ccw.dev_id.devno;
                        } else if (ipl_info.type == IPL_TYPE_FCP ||
                                   ipl_info.type == IPL_TYPE_FCP_DUMP) {
                                from_cssid = 0;
                                from_ssid = ipl_info.data.fcp.dev_id.ssid;
                                from = ipl_info.data.fcp.dev_id.devno;
                        } else {
                                continue;
                        }
                        to_cssid = from_cssid;
                        to_ssid = from_ssid;
                        to = from;
                } else if (strcmp(parm, "condev") == 0) {
                        if (console_devno == -1)
                                continue;

                        from_cssid = to_cssid = 0;
                        from_ssid = to_ssid = 0;
                        from = to = console_devno;
                } else {
                        rc = parse_busid(strsep(&parm, "-"), &from_cssid,
                                         &from_ssid, &from, msgtrigger);
                        if (!rc) {
                                if (parm != NULL)
                                        rc = parse_busid(parm, &to_cssid,
                                                         &to_ssid, &to,
                                                         msgtrigger);
                                else {
                                        to_cssid = from_cssid;
                                        to_ssid = from_ssid;
                                        to = from;
                                }
                        }
                }
                if (!rc) {
                        rc = blacklist_range(ra, from_ssid, to_ssid, from, to,
                                             msgtrigger);
                        if (rc)
                                totalrc = -EINVAL;
                } else
                        totalrc = -EINVAL;
        }

        return totalrc;
}

static int __init
blacklist_setup (char *str)
{
        CIO_MSG_EVENT(6, "Reading blacklist parameters\n");
        if (blacklist_parse_parameters(str, add, 1))
                return 0;
        return 1;
}

__setup ("cio_ignore=", blacklist_setup);

/* Checking if devices are blacklisted */

/*
 * Function: is_blacklisted
 * Returns 1 if the given device number can be found in the blacklist,
 * otherwise 0.
 * Used by validate_subchannel()
 */
int
is_blacklisted (int ssid, int devno)
{
        return test_bit (devno, bl_dev[ssid]);
}

#ifdef CONFIG_PROC_FS
/*
 * Function: blacklist_parse_proc_parameters
 * parse the stuff which is piped to /proc/cio_ignore
 */
static int blacklist_parse_proc_parameters(char *buf)
{
        int rc;
        char *parm;

        parm = strsep(&buf, " ");

        if (strcmp("free", parm) == 0) {
                rc = blacklist_parse_parameters(buf, free, 0);
                css_schedule_eval_all_unreg(0);
        } else if (strcmp("add", parm) == 0)
                rc = blacklist_parse_parameters(buf, add, 0);
        else if (strcmp("purge", parm) == 0)
                return ccw_purge_blacklisted();
        else
                return -EINVAL;

        return rc;
}

/* Iterator struct for all devices. */
struct ccwdev_iter {
        int devno;
        int ssid;
        int in_range;
};

static void *
cio_ignore_proc_seq_start(struct seq_file *s, loff_t *offset)
{
        struct ccwdev_iter *iter = s->private;

        if (*offset >= (__MAX_SUBCHANNEL + 1) * (__MAX_SSID + 1))
                return NULL;
        memset(iter, 0, sizeof(*iter));
        iter->ssid = *offset / (__MAX_SUBCHANNEL + 1);
        iter->devno = *offset % (__MAX_SUBCHANNEL + 1);
        return iter;
}

static void
cio_ignore_proc_seq_stop(struct seq_file *s, void *it)
{
}

static void *
cio_ignore_proc_seq_next(struct seq_file *s, void *it, loff_t *offset)
{
        struct ccwdev_iter *iter;

        if (*offset >= (__MAX_SUBCHANNEL + 1) * (__MAX_SSID + 1))
                return NULL;
        iter = it;
        if (iter->devno == __MAX_SUBCHANNEL) {
                iter->devno = 0;
                iter->ssid++;
                if (iter->ssid > __MAX_SSID)
                        return NULL;
        } else
                iter->devno++;
        (*offset)++;
        return iter;
}

static int
cio_ignore_proc_seq_show(struct seq_file *s, void *it)
{
        struct ccwdev_iter *iter;

        iter = it;
        if (!is_blacklisted(iter->ssid, iter->devno))
                /* Not blacklisted, nothing to output. */
                return 0;
        if (!iter->in_range) {
                /* First device in range. */
                if ((iter->devno == __MAX_SUBCHANNEL) ||
                    !is_blacklisted(iter->ssid, iter->devno + 1))
                        /* Singular device. */
                        return seq_printf(s, "0.%x.%04x\n",
                                          iter->ssid, iter->devno);
                iter->in_range = 1;
                return seq_printf(s, "0.%x.%04x-", iter->ssid, iter->devno);
        }
        if ((iter->devno == __MAX_SUBCHANNEL) ||
            !is_blacklisted(iter->ssid, iter->devno + 1)) {
                /* Last device in range. */
                iter->in_range = 0;
                return seq_printf(s, "0.%x.%04x\n", iter->ssid, iter->devno);
        }
        return 0;
}

static ssize_t
cio_ignore_write(struct file *file, const char __user *user_buf,
                 size_t user_len, loff_t *offset)
{
        char *buf;
        ssize_t rc, ret, i;

        if (*offset)
                return -EINVAL;
        if (user_len > 65536)
                user_len = 65536;
        buf = vzalloc(user_len + 1); /* maybe better use the stack? */
        if (buf == NULL)
                return -ENOMEM;

        if (strncpy_from_user (buf, user_buf, user_len) < 0) {
                rc = -EFAULT;
                goto out_free;
        }

        i = user_len - 1;
        while ((i >= 0) && (isspace(buf[i]) || (buf[i] == 0))) {
                buf[i] = '\0';
                i--;
        }
        ret = blacklist_parse_proc_parameters(buf);
        if (ret)
                rc = ret;
        else
                rc = user_len;

out_free:
        vfree (buf);
        return rc;
}

static const struct seq_operations cio_ignore_proc_seq_ops = {
        .start = cio_ignore_proc_seq_start,
        .stop  = cio_ignore_proc_seq_stop,
        .next  = cio_ignore_proc_seq_next,
        .show  = cio_ignore_proc_seq_show,
};

static int
cio_ignore_proc_open(struct inode *inode, struct file *file)
{
        return seq_open_private(file, &cio_ignore_proc_seq_ops,
                                sizeof(struct ccwdev_iter));
}

static const struct file_operations cio_ignore_proc_fops = {
        .open    = cio_ignore_proc_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release_private,
        .write   = cio_ignore_write,
};

static int
cio_ignore_proc_init (void)
{
        struct proc_dir_entry *entry;

        entry = proc_create("cio_ignore", S_IFREG | S_IRUGO | S_IWUSR, NULL,
                            &cio_ignore_proc_fops);
        if (!entry)
                return -ENOENT;
        return 0;
}

__initcall (cio_ignore_proc_init);

#endif /* CONFIG_PROC_FS */
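For reference, parse_busid() above accepts both the old-style bare device number (e.g. "0042") and the new-style cssid.ssid.devno triple (e.g. "0.0.0042"), and blacklist_parse_parameters() additionally understands ranges joined by "-", "!" for negation, and the keywords all, ipldev, and condev. A plausible kernel command line would therefore be cio_ignore=all,!0.0.0042,0.0.1000-0.0.1fff; at runtime the same grammar is accepted through /proc/cio_ignore with a leading "add", "free", or "purge" verb, for example "free 0.0.1000-0.0.1fff". The device numbers here are illustrative, not taken from this commit.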
drivers/s390/cio/blacklist.h (new file, 6 additions)
@@ -0,0 +1,6 @@
#ifndef S390_BLACKLIST_H
#define S390_BLACKLIST_H

extern int is_blacklisted (int ssid, int devno);

#endif
drivers/s390/cio/ccwgroup.c (new file, 641 additions)
@@ -0,0 +1,641 @@
/*
 * bus driver for ccwgroup
 *
 * Copyright IBM Corp. 2002, 2012
 *
 * Author(s): Arnd Bergmann (arndb@de.ibm.com)
 *            Cornelia Huck (cornelia.huck@de.ibm.com)
 */
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/ctype.h>
#include <linux/dcache.h>

#include <asm/cio.h>
#include <asm/ccwdev.h>
#include <asm/ccwgroup.h>

#include "device.h"

#define CCW_BUS_ID_SIZE 10

/* In Linux 2.4, we had a channel device layer called "chandev"
 * that did all sorts of obscure stuff for networking devices.
 * This is another driver that serves as a replacement for just
 * one of its functions, namely the translation of single subchannels
 * to devices that use multiple subchannels.
 */

static struct bus_type ccwgroup_bus_type;

static void __ccwgroup_remove_symlinks(struct ccwgroup_device *gdev)
{
        int i;
        char str[8];

        for (i = 0; i < gdev->count; i++) {
                sprintf(str, "cdev%d", i);
                sysfs_remove_link(&gdev->dev.kobj, str);
                sysfs_remove_link(&gdev->cdev[i]->dev.kobj, "group_device");
        }
}

/*
 * Remove references from ccw devices to ccw group device and from
 * ccw group device to ccw devices.
 */
static void __ccwgroup_remove_cdev_refs(struct ccwgroup_device *gdev)
{
        struct ccw_device *cdev;
        int i;

        for (i = 0; i < gdev->count; i++) {
                cdev = gdev->cdev[i];
                if (!cdev)
                        continue;
                spin_lock_irq(cdev->ccwlock);
                dev_set_drvdata(&cdev->dev, NULL);
                spin_unlock_irq(cdev->ccwlock);
                gdev->cdev[i] = NULL;
                put_device(&cdev->dev);
        }
}

/**
 * ccwgroup_set_online() - enable a ccwgroup device
 * @gdev: target ccwgroup device
 *
 * This function attempts to put the ccwgroup device into the online state.
 * Returns:
 *  %0 on success and a negative error value on failure.
 */
int ccwgroup_set_online(struct ccwgroup_device *gdev)
{
        struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);
        int ret = -EINVAL;

        if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0)
                return -EAGAIN;
        if (gdev->state == CCWGROUP_ONLINE)
                goto out;
        if (gdrv->set_online)
                ret = gdrv->set_online(gdev);
        if (ret)
                goto out;

        gdev->state = CCWGROUP_ONLINE;
out:
        atomic_set(&gdev->onoff, 0);
        return ret;
}
EXPORT_SYMBOL(ccwgroup_set_online);

/**
 * ccwgroup_set_offline() - disable a ccwgroup device
 * @gdev: target ccwgroup device
 *
 * This function attempts to put the ccwgroup device into the offline state.
 * Returns:
 *  %0 on success and a negative error value on failure.
 */
int ccwgroup_set_offline(struct ccwgroup_device *gdev)
{
        struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);
        int ret = -EINVAL;

        if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0)
                return -EAGAIN;
        if (gdev->state == CCWGROUP_OFFLINE)
                goto out;
        if (gdrv->set_offline)
                ret = gdrv->set_offline(gdev);
        if (ret)
                goto out;

        gdev->state = CCWGROUP_OFFLINE;
out:
        atomic_set(&gdev->onoff, 0);
        return ret;
}
EXPORT_SYMBOL(ccwgroup_set_offline);

static ssize_t ccwgroup_online_store(struct device *dev,
                                     struct device_attribute *attr,
                                     const char *buf, size_t count)
{
        struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
        unsigned long value;
        int ret;

        device_lock(dev);
        if (!dev->driver) {
                ret = -EINVAL;
                goto out;
        }

        ret = kstrtoul(buf, 0, &value);
        if (ret)
                goto out;

        if (value == 1)
                ret = ccwgroup_set_online(gdev);
        else if (value == 0)
                ret = ccwgroup_set_offline(gdev);
        else
                ret = -EINVAL;
out:
        device_unlock(dev);
        return (ret == 0) ? count : ret;
}

static ssize_t ccwgroup_online_show(struct device *dev,
                                    struct device_attribute *attr,
                                    char *buf)
{
        struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
        int online;

        online = (gdev->state == CCWGROUP_ONLINE) ? 1 : 0;

        return scnprintf(buf, PAGE_SIZE, "%d\n", online);
}

/*
 * Provide an 'ungroup' attribute so the user can remove group devices no
 * longer needed or accidentally created. Saves memory :)
 */
static void ccwgroup_ungroup(struct ccwgroup_device *gdev)
{
        mutex_lock(&gdev->reg_mutex);
        if (device_is_registered(&gdev->dev)) {
                __ccwgroup_remove_symlinks(gdev);
                device_unregister(&gdev->dev);
                __ccwgroup_remove_cdev_refs(gdev);
        }
        mutex_unlock(&gdev->reg_mutex);
}

static ssize_t ccwgroup_ungroup_store(struct device *dev,
                                      struct device_attribute *attr,
                                      const char *buf, size_t count)
{
        struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
        int rc = 0;

        /* Prevent concurrent online/offline processing and ungrouping. */
        if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0)
                return -EAGAIN;
        if (gdev->state != CCWGROUP_OFFLINE) {
                rc = -EINVAL;
                goto out;
        }

        if (device_remove_file_self(dev, attr))
                ccwgroup_ungroup(gdev);
        else
                rc = -ENODEV;
out:
        if (rc) {
                /* Release onoff "lock" when ungrouping failed. */
                atomic_set(&gdev->onoff, 0);
                return rc;
        }
        return count;
}
static DEVICE_ATTR(ungroup, 0200, NULL, ccwgroup_ungroup_store);
static DEVICE_ATTR(online, 0644, ccwgroup_online_show, ccwgroup_online_store);

static struct attribute *ccwgroup_attrs[] = {
        &dev_attr_online.attr,
        &dev_attr_ungroup.attr,
        NULL,
};
static struct attribute_group ccwgroup_attr_group = {
        .attrs = ccwgroup_attrs,
};
static const struct attribute_group *ccwgroup_attr_groups[] = {
        &ccwgroup_attr_group,
        NULL,
};

static void ccwgroup_ungroup_workfn(struct work_struct *work)
{
        struct ccwgroup_device *gdev =
                container_of(work, struct ccwgroup_device, ungroup_work);

        ccwgroup_ungroup(gdev);
        put_device(&gdev->dev);
}

static void ccwgroup_release(struct device *dev)
{
        kfree(to_ccwgroupdev(dev));
}

static int __ccwgroup_create_symlinks(struct ccwgroup_device *gdev)
{
        char str[8];
        int i, rc;

        for (i = 0; i < gdev->count; i++) {
                rc = sysfs_create_link(&gdev->cdev[i]->dev.kobj,
                                       &gdev->dev.kobj, "group_device");
                if (rc) {
                        for (--i; i >= 0; i--)
                                sysfs_remove_link(&gdev->cdev[i]->dev.kobj,
                                                  "group_device");
                        return rc;
                }
        }
        for (i = 0; i < gdev->count; i++) {
                sprintf(str, "cdev%d", i);
                rc = sysfs_create_link(&gdev->dev.kobj,
                                       &gdev->cdev[i]->dev.kobj, str);
                if (rc) {
                        for (--i; i >= 0; i--) {
                                sprintf(str, "cdev%d", i);
                                sysfs_remove_link(&gdev->dev.kobj, str);
                        }
                        for (i = 0; i < gdev->count; i++)
                                sysfs_remove_link(&gdev->cdev[i]->dev.kobj,
                                                  "group_device");
                        return rc;
                }
        }
        return 0;
}

static int __get_next_id(const char **buf, struct ccw_dev_id *id)
{
        unsigned int cssid, ssid, devno;
        int ret = 0, len;
        char *start, *end;

        start = (char *)*buf;
        end = strchr(start, ',');
        if (!end) {
                /* Last entry. Strip trailing newline, if applicable. */
                end = strchr(start, '\n');
                if (end)
                        *end = '\0';
                len = strlen(start) + 1;
        } else {
                len = end - start + 1;
                end++;
        }
        if (len <= CCW_BUS_ID_SIZE) {
                if (sscanf(start, "%2x.%1x.%04x", &cssid, &ssid, &devno) != 3)
                        ret = -EINVAL;
        } else
                ret = -EINVAL;

        if (!ret) {
                id->ssid = ssid;
                id->devno = devno;
        }
        *buf = end;
        return ret;
}

/**
 * ccwgroup_create_dev() - create and register a ccw group device
 * @parent: parent device for the new device
 * @gdrv: driver for the new group device
 * @num_devices: number of slave devices
 * @buf: buffer containing comma separated bus ids of slave devices
 *
 * Create and register a new ccw group device as a child of @parent. Slave
 * devices are obtained from the list of bus ids given in @buf.
 * Returns:
 *  %0 on success and an error code on failure.
 * Context:
 *  non-atomic
 */
int ccwgroup_create_dev(struct device *parent, struct ccwgroup_driver *gdrv,
                        int num_devices, const char *buf)
{
        struct ccwgroup_device *gdev;
        struct ccw_dev_id dev_id;
        int rc, i;

        gdev = kzalloc(sizeof(*gdev) + num_devices * sizeof(gdev->cdev[0]),
                       GFP_KERNEL);
        if (!gdev)
                return -ENOMEM;

        atomic_set(&gdev->onoff, 0);
        mutex_init(&gdev->reg_mutex);
        mutex_lock(&gdev->reg_mutex);
        INIT_WORK(&gdev->ungroup_work, ccwgroup_ungroup_workfn);
        gdev->count = num_devices;
        gdev->dev.bus = &ccwgroup_bus_type;
        gdev->dev.parent = parent;
        gdev->dev.release = ccwgroup_release;
        device_initialize(&gdev->dev);

        for (i = 0; i < num_devices && buf; i++) {
                rc = __get_next_id(&buf, &dev_id);
                if (rc != 0)
                        goto error;
                gdev->cdev[i] = get_ccwdev_by_dev_id(&dev_id);
                /*
                 * All devices have to be of the same type in
                 * order to be grouped.
                 */
                if (!gdev->cdev[i] || !gdev->cdev[i]->drv ||
                    gdev->cdev[i]->drv != gdev->cdev[0]->drv ||
                    gdev->cdev[i]->id.driver_info !=
                    gdev->cdev[0]->id.driver_info) {
                        rc = -EINVAL;
                        goto error;
                }
                /* Don't allow a device to belong to more than one group. */
                spin_lock_irq(gdev->cdev[i]->ccwlock);
                if (dev_get_drvdata(&gdev->cdev[i]->dev)) {
                        spin_unlock_irq(gdev->cdev[i]->ccwlock);
                        rc = -EINVAL;
                        goto error;
                }
                dev_set_drvdata(&gdev->cdev[i]->dev, gdev);
                spin_unlock_irq(gdev->cdev[i]->ccwlock);
        }
        /* Check for sufficient number of bus ids. */
        if (i < num_devices) {
                rc = -EINVAL;
                goto error;
        }
        /* Check for trailing stuff. */
        if (i == num_devices && strlen(buf) > 0) {
                rc = -EINVAL;
                goto error;
        }

        dev_set_name(&gdev->dev, "%s", dev_name(&gdev->cdev[0]->dev));
        gdev->dev.groups = ccwgroup_attr_groups;

        if (gdrv) {
                gdev->dev.driver = &gdrv->driver;
                rc = gdrv->setup ? gdrv->setup(gdev) : 0;
                if (rc)
                        goto error;
        }
        rc = device_add(&gdev->dev);
        if (rc)
                goto error;
        rc = __ccwgroup_create_symlinks(gdev);
        if (rc) {
                device_del(&gdev->dev);
                goto error;
        }
        mutex_unlock(&gdev->reg_mutex);
        return 0;
error:
        for (i = 0; i < num_devices; i++)
                if (gdev->cdev[i]) {
                        spin_lock_irq(gdev->cdev[i]->ccwlock);
                        if (dev_get_drvdata(&gdev->cdev[i]->dev) == gdev)
                                dev_set_drvdata(&gdev->cdev[i]->dev, NULL);
                        spin_unlock_irq(gdev->cdev[i]->ccwlock);
                        put_device(&gdev->cdev[i]->dev);
                        gdev->cdev[i] = NULL;
                }
        mutex_unlock(&gdev->reg_mutex);
        put_device(&gdev->dev);
        return rc;
}
EXPORT_SYMBOL(ccwgroup_create_dev);

static int ccwgroup_notifier(struct notifier_block *nb, unsigned long action,
                             void *data)
{
        struct ccwgroup_device *gdev = to_ccwgroupdev(data);

        if (action == BUS_NOTIFY_UNBIND_DRIVER) {
                get_device(&gdev->dev);
                schedule_work(&gdev->ungroup_work);
        }

        return NOTIFY_OK;
}

static struct notifier_block ccwgroup_nb = {
        .notifier_call = ccwgroup_notifier
};

static int __init init_ccwgroup(void)
{
        int ret;

        ret = bus_register(&ccwgroup_bus_type);
        if (ret)
                return ret;

        ret = bus_register_notifier(&ccwgroup_bus_type, &ccwgroup_nb);
        if (ret)
                bus_unregister(&ccwgroup_bus_type);

        return ret;
}

static void __exit cleanup_ccwgroup(void)
{
        bus_unregister_notifier(&ccwgroup_bus_type, &ccwgroup_nb);
        bus_unregister(&ccwgroup_bus_type);
}

module_init(init_ccwgroup);
module_exit(cleanup_ccwgroup);

/************************** driver stuff ******************************/

static int ccwgroup_remove(struct device *dev)
{
        struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
        struct ccwgroup_driver *gdrv = to_ccwgroupdrv(dev->driver);

        if (!dev->driver)
                return 0;
        if (gdrv->remove)
                gdrv->remove(gdev);

        return 0;
}

static void ccwgroup_shutdown(struct device *dev)
{
        struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
        struct ccwgroup_driver *gdrv = to_ccwgroupdrv(dev->driver);

        if (!dev->driver)
                return;
        if (gdrv->shutdown)
                gdrv->shutdown(gdev);
}

static int ccwgroup_pm_prepare(struct device *dev)
{
        struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
        struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);

        /* Fail while device is being set online/offline. */
        if (atomic_read(&gdev->onoff))
                return -EAGAIN;

        if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE)
                return 0;

        return gdrv->prepare ? gdrv->prepare(gdev) : 0;
}

static void ccwgroup_pm_complete(struct device *dev)
{
        struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
        struct ccwgroup_driver *gdrv = to_ccwgroupdrv(dev->driver);

        if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE)
                return;

        if (gdrv->complete)
                gdrv->complete(gdev);
}

static int ccwgroup_pm_freeze(struct device *dev)
{
        struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
        struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);

        if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE)
                return 0;

        return gdrv->freeze ? gdrv->freeze(gdev) : 0;
}

static int ccwgroup_pm_thaw(struct device *dev)
{
        struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
        struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);

        if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE)
                return 0;

        return gdrv->thaw ? gdrv->thaw(gdev) : 0;
}

static int ccwgroup_pm_restore(struct device *dev)
{
        struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
        struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);

        if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE)
                return 0;

        return gdrv->restore ? gdrv->restore(gdev) : 0;
}

static const struct dev_pm_ops ccwgroup_pm_ops = {
        .prepare = ccwgroup_pm_prepare,
        .complete = ccwgroup_pm_complete,
        .freeze = ccwgroup_pm_freeze,
        .thaw = ccwgroup_pm_thaw,
        .restore = ccwgroup_pm_restore,
};

static struct bus_type ccwgroup_bus_type = {
        .name     = "ccwgroup",
        .remove   = ccwgroup_remove,
        .shutdown = ccwgroup_shutdown,
        .pm       = &ccwgroup_pm_ops,
};

/**
 * ccwgroup_driver_register() - register a ccw group driver
 * @cdriver: driver to be registered
 *
 * This function is mainly a wrapper around driver_register().
 */
int ccwgroup_driver_register(struct ccwgroup_driver *cdriver)
{
        /* register our new driver with the core */
        cdriver->driver.bus = &ccwgroup_bus_type;

        return driver_register(&cdriver->driver);
}
EXPORT_SYMBOL(ccwgroup_driver_register);

static int __ccwgroup_match_all(struct device *dev, void *data)
{
        return 1;
}

/**
 * ccwgroup_driver_unregister() - deregister a ccw group driver
 * @cdriver: driver to be deregistered
 *
 * This function is mainly a wrapper around driver_unregister().
 */
void ccwgroup_driver_unregister(struct ccwgroup_driver *cdriver)
{
        struct device *dev;

        /* We don't want ccwgroup devices to live longer than their driver. */
        while ((dev = driver_find_device(&cdriver->driver, NULL, NULL,
                                         __ccwgroup_match_all))) {
                struct ccwgroup_device *gdev = to_ccwgroupdev(dev);

                ccwgroup_ungroup(gdev);
                put_device(dev);
        }
        driver_unregister(&cdriver->driver);
}
EXPORT_SYMBOL(ccwgroup_driver_unregister);

/**
 * ccwgroup_probe_ccwdev() - probe function for slave devices
 * @cdev: ccw device to be probed
 *
 * This is a dummy probe function for ccw devices that are slave devices in
 * a ccw group device.
 * Returns:
 *  always %0
 */
int ccwgroup_probe_ccwdev(struct ccw_device *cdev)
{
        return 0;
}
EXPORT_SYMBOL(ccwgroup_probe_ccwdev);

/**
 * ccwgroup_remove_ccwdev() - remove function for slave devices
 * @cdev: ccw device to be removed
 *
 * This is a remove function for ccw devices that are slave devices in a ccw
 * group device. It sets the ccw device offline and also deregisters the
 * embedding ccw group device.
 */
void ccwgroup_remove_ccwdev(struct ccw_device *cdev)
{
        struct ccwgroup_device *gdev;

        /* Ignore offlining errors, device is gone anyway. */
        ccw_device_set_offline(cdev);
        /* If one of its devices is gone, the whole group is done for. */
        spin_lock_irq(cdev->ccwlock);
        gdev = dev_get_drvdata(&cdev->dev);
        if (!gdev) {
                spin_unlock_irq(cdev->ccwlock);
                return;
        }
        /* Get ccwgroup device reference for local processing. */
        get_device(&gdev->dev);
        spin_unlock_irq(cdev->ccwlock);
        /* Unregister group device. */
        ccwgroup_ungroup(gdev);
        /* Release ccwgroup device reference for local processing. */
        put_device(&gdev->dev);
}
EXPORT_SYMBOL(ccwgroup_remove_ccwdev);
MODULE_LICENSE("GPL");
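As a rough, hypothetical sketch of the call side (all example_* names are illustrative assumptions, not from this commit): a driver-level "group" sysfs attribute would forward the comma-separated bus ids written by userspace to ccwgroup_create_dev(), which parses them via __get_next_id() above:

static struct ccwgroup_driver example_gdrv;  /* assumed registered via
                                                ccwgroup_driver_register() */
static struct device *example_parent;        /* assumed parent device */

/* Backing store function for a hypothetical driver "group" attribute. */
static ssize_t example_group_store(struct device_driver *ddrv,
                                   const char *buf, size_t count)
{
        /* buf is expected to look like "0.0.f500,0.0.f501,0.0.f502\n";
         * three slave devices of the same type are grouped together. */
        int rc = ccwgroup_create_dev(example_parent, &example_gdrv, 3, buf);

        return rc ? rc : count;
}

The resulting group device then exposes the online and ungroup attributes defined above, and is torn down automatically when any slave device or the driver goes away.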
drivers/s390/cio/ccwreq.c (new file, 367 additions)
@@ -0,0 +1,367 @@
/*
 * Handling of internal CCW device requests.
 *
 * Copyright IBM Corp. 2009, 2011
 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
 */

#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/types.h>
#include <linux/err.h>
#include <asm/ccwdev.h>
#include <asm/cio.h>

#include "io_sch.h"
#include "cio.h"
#include "device.h"
#include "cio_debug.h"

/**
 * lpm_adjust - adjust path mask
 * @lpm: path mask to adjust
 * @mask: mask of available paths
 *
 * Shift @lpm right until @lpm and @mask have at least one bit in common or
 * until @lpm is zero. Return the resulting lpm.
 */
int lpm_adjust(int lpm, int mask)
{
        while (lpm && ((lpm & mask) == 0))
                lpm >>= 1;
        return lpm;
}

/*
 * Adjust path mask to use next path and reset retry count. Return resulting
 * path mask.
 */
static u16 ccwreq_next_path(struct ccw_device *cdev)
{
        struct ccw_request *req = &cdev->private->req;

        if (!req->singlepath) {
                req->mask = 0;
                goto out;
        }
        req->retries = req->maxretries;
        req->mask = lpm_adjust(req->mask >> 1, req->lpm);
out:
        return req->mask;
}

/*
 * Clean up device state and report to callback.
 */
static void ccwreq_stop(struct ccw_device *cdev, int rc)
{
        struct ccw_request *req = &cdev->private->req;

        if (req->done)
                return;
        req->done = 1;
        ccw_device_set_timeout(cdev, 0);
        memset(&cdev->private->irb, 0, sizeof(struct irb));
        if (rc && rc != -ENODEV && req->drc)
                rc = req->drc;
        req->callback(cdev, req->data, rc);
}

/*
 * (Re-)Start the operation until retries and paths are exhausted.
 */
static void ccwreq_do(struct ccw_device *cdev)
{
        struct ccw_request *req = &cdev->private->req;
        struct subchannel *sch = to_subchannel(cdev->dev.parent);
        struct ccw1 *cp = req->cp;
        int rc = -EACCES;

        while (req->mask) {
                if (req->retries-- == 0) {
                        /* Retries exhausted, try next path. */
                        ccwreq_next_path(cdev);
                        continue;
                }
                /* Perform start function. */
                memset(&cdev->private->irb, 0, sizeof(struct irb));
                rc = cio_start(sch, cp, (u8) req->mask);
                if (rc == 0) {
                        /* I/O started successfully. */
                        ccw_device_set_timeout(cdev, req->timeout);
                        return;
                }
                if (rc == -ENODEV) {
                        /* Permanent device error. */
                        break;
                }
                if (rc == -EACCES) {
                        /* Permanent path error. */
                        ccwreq_next_path(cdev);
                        continue;
                }
                /* Temporary improper status. */
                rc = cio_clear(sch);
                if (rc)
                        break;
                return;
        }
        ccwreq_stop(cdev, rc);
}

/**
 * ccw_request_start - perform I/O request
 * @cdev: ccw device
 *
 * Perform the I/O request specified by cdev->req.
 */
void ccw_request_start(struct ccw_device *cdev)
{
        struct ccw_request *req = &cdev->private->req;

        if (req->singlepath) {
                /* Try all paths twice to counter link flapping. */
                req->mask = 0x8080;
        } else
                req->mask = req->lpm;

        req->retries = req->maxretries;
        req->mask = lpm_adjust(req->mask, req->lpm);
        req->drc = 0;
        req->done = 0;
        req->cancel = 0;
        if (!req->mask)
                goto out_nopath;
        ccwreq_do(cdev);
        return;

out_nopath:
        ccwreq_stop(cdev, -EACCES);
}

/**
 * ccw_request_cancel - cancel running I/O request
 * @cdev: ccw device
 *
 * Cancel the I/O request specified by cdev->req. Return non-zero if request
 * has already finished, zero otherwise.
 */
int ccw_request_cancel(struct ccw_device *cdev)
{
        struct subchannel *sch = to_subchannel(cdev->dev.parent);
        struct ccw_request *req = &cdev->private->req;
        int rc;

        if (req->done)
                return 1;
        req->cancel = 1;
        rc = cio_clear(sch);
        if (rc)
                ccwreq_stop(cdev, rc);
        return 0;
}

/*
 * Return the status of the internal I/O started on the specified ccw device.
 * Perform BASIC SENSE if required.
 */
static enum io_status ccwreq_status(struct ccw_device *cdev, struct irb *lcirb)
{
        struct irb *irb = &cdev->private->irb;
        struct cmd_scsw *scsw = &irb->scsw.cmd;
        enum uc_todo todo;

        /* Perform BASIC SENSE if needed. */
        if (ccw_device_accumulate_and_sense(cdev, lcirb))
                return IO_RUNNING;
        /* Check for halt/clear interrupt. */
        if (scsw->fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC))
                return IO_KILLED;
        /* Check for path error. */
        if (scsw->cc == 3 || scsw->pno)
                return IO_PATH_ERROR;
        /* Handle BASIC SENSE data. */
        if (irb->esw.esw0.erw.cons) {
                CIO_TRACE_EVENT(2, "sensedata");
                CIO_HEX_EVENT(2, &cdev->private->dev_id,
                              sizeof(struct ccw_dev_id));
                CIO_HEX_EVENT(2, &cdev->private->irb.ecw, SENSE_MAX_COUNT);
                /* Check for command reject. */
                if (irb->ecw[0] & SNS0_CMD_REJECT)
                        return IO_REJECTED;
                /* Ask the driver what to do */
                if (cdev->drv && cdev->drv->uc_handler) {
                        todo = cdev->drv->uc_handler(cdev, lcirb);
                        CIO_TRACE_EVENT(2, "uc_response");
                        CIO_HEX_EVENT(2, &todo, sizeof(todo));
                        switch (todo) {
                        case UC_TODO_RETRY:
                                return IO_STATUS_ERROR;
                        case UC_TODO_RETRY_ON_NEW_PATH:
                                return IO_PATH_ERROR;
                        case UC_TODO_STOP:
                                return IO_REJECTED;
                        default:
                                return IO_STATUS_ERROR;
                        }
                }
                /* Assume that unexpected SENSE data implies an error. */
                return IO_STATUS_ERROR;
        }
        /* Check for channel errors. */
        if (scsw->cstat != 0)
                return IO_STATUS_ERROR;
        /* Check for device errors. */
        if (scsw->dstat & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END))
                return IO_STATUS_ERROR;
        /* Check for final state. */
        if (!(scsw->dstat & DEV_STAT_DEV_END))
                return IO_RUNNING;
        /* Check for other improper status. */
        if (scsw->cc == 1 && (scsw->stctl & SCSW_STCTL_ALERT_STATUS))
                return IO_STATUS_ERROR;
        return IO_DONE;
}

/*
 * Log ccw request status.
 */
static void ccwreq_log_status(struct ccw_device *cdev, enum io_status status)
{
        struct ccw_request *req = &cdev->private->req;
        struct {
                struct ccw_dev_id dev_id;
                u16 retries;
                u8 lpm;
                u8 status;
        }  __attribute__ ((packed)) data;
        data.dev_id  = cdev->private->dev_id;
        data.retries = req->retries;
        data.lpm     = (u8) req->mask;
        data.status  = (u8) status;
        CIO_TRACE_EVENT(2, "reqstat");
        CIO_HEX_EVENT(2, &data, sizeof(data));
}

/**
 * ccw_request_handler - interrupt handler for I/O request procedure.
 * @cdev: ccw device
 *
 * Handle interrupt during I/O request procedure.
 */
void ccw_request_handler(struct ccw_device *cdev)
{
        struct irb *irb = this_cpu_ptr(&cio_irb);
        struct ccw_request *req = &cdev->private->req;
        enum io_status status;
        int rc = -EOPNOTSUPP;

        /* Check status of I/O request. */
        status = ccwreq_status(cdev, irb);
        if (req->filter)
                status = req->filter(cdev, req->data, irb, status);
        if (status != IO_RUNNING)
                ccw_device_set_timeout(cdev, 0);
        if (status != IO_DONE && status != IO_RUNNING)
                ccwreq_log_status(cdev, status);
        switch (status) {
        case IO_DONE:
                break;
        case IO_RUNNING:
                return;
        case IO_REJECTED:
                goto err;
        case IO_PATH_ERROR:
                goto out_next_path;
        case IO_STATUS_ERROR:
                goto out_restart;
        case IO_KILLED:
                /* Check if request was cancelled on purpose. */
                if (req->cancel) {
                        rc = -EIO;
                        goto err;
                }
                goto out_restart;
        }
        /* Check back with request initiator. */
        if (!req->check)
                goto out;
        switch (req->check(cdev, req->data)) {
        case 0:
                break;
        case -EAGAIN:
                goto out_restart;
        case -EACCES:
                goto out_next_path;
        default:
                goto err;
        }
out:
        ccwreq_stop(cdev, 0);
        return;

out_next_path:
        /* Try next path and restart I/O. */
        if (!ccwreq_next_path(cdev)) {
                rc = -EACCES;
                goto err;
        }
out_restart:
        /* Restart. */
        ccwreq_do(cdev);
        return;
err:
        ccwreq_stop(cdev, rc);
}


/**
 * ccw_request_timeout - timeout handler for I/O request procedure
 * @cdev: ccw device
 *
 * Handle timeout during I/O request procedure.
 */
void ccw_request_timeout(struct ccw_device *cdev)
{
        struct subchannel *sch = to_subchannel(cdev->dev.parent);
        struct ccw_request *req = &cdev->private->req;
        int rc = -ENODEV, chp;

        if (cio_update_schib(sch))
                goto err;

        for (chp = 0; chp < 8; chp++) {
                if ((0x80 >> chp) & sch->schib.pmcw.lpum)
                        pr_warning("%s: No interrupt was received within %lus "
                                   "(CS=%02x, DS=%02x, CHPID=%x.%02x)\n",
                                   dev_name(&cdev->dev), req->timeout / HZ,
                                   scsw_cstat(&sch->schib.scsw),
                                   scsw_dstat(&sch->schib.scsw),
                                   sch->schid.cssid,
                                   sch->schib.pmcw.chpid[chp]);
        }

        if (!ccwreq_next_path(cdev)) {
                /* set the final return code for this request */
                req->drc = -ETIME;
        }
        rc = cio_clear(sch);
        if (rc)
                goto err;
        return;

err:
        ccwreq_stop(cdev, rc);
}

/**
 * ccw_request_notoper - notoper handler for I/O request procedure
 * @cdev: ccw device
 *
 * Handle notoper during I/O request procedure.
 */
void ccw_request_notoper(struct ccw_device *cdev)
{
        ccwreq_stop(cdev, -ENODEV);
}
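For reference, a minimal sketch of an internal caller. This API is used only inside the CIO layer, and all values and example_* names below are assumptions for illustration: the caller fills cdev->private->req and hands it to ccw_request_start(), which drives the retry and path-selection logic above and reports completion through the callback.

static void example_req_callback(struct ccw_device *cdev, void *data, int rc)
{
        /* rc is 0 on success, or the final (possibly drc-overridden) error */
}

static void example_start_request(struct ccw_device *cdev, struct ccw1 *cp)
{
        struct ccw_request *req = &cdev->private->req;

        memset(req, 0, sizeof(*req));
        req->cp         = cp;                   /* channel program to start */
        req->timeout    = 10 * HZ;              /* assumed timeout */
        req->maxretries = 5;                    /* assumed retries per path */
        req->lpm        = 0x80;                 /* assumed: first path only */
        req->singlepath = 1;
        req->callback   = example_req_callback;
        ccw_request_start(cdev);
}

With singlepath set, ccw_request_start() seeds req->mask with 0x8080 so that every available path is tried twice before the request is failed with -EACCES.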
drivers/s390/cio/chp.c (new file, 792 additions)
|
@ -0,0 +1,792 @@
|
|||
/*
|
||||
* Copyright IBM Corp. 1999, 2010
|
||||
* Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
|
||||
* Arnd Bergmann (arndb@de.ibm.com)
|
||||
* Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
|
||||
*/
|
||||
|
||||
#include <linux/bug.h>
|
||||
#include <linux/workqueue.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/export.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/jiffies.h>
|
||||
#include <linux/wait.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/slab.h>
|
||||
#include <asm/chpid.h>
|
||||
#include <asm/sclp.h>
|
||||
#include <asm/crw.h>
|
||||
|
||||
#include "cio.h"
|
||||
#include "css.h"
|
||||
#include "ioasm.h"
|
||||
#include "cio_debug.h"
|
||||
#include "chp.h"
|
||||
|
||||
#define to_channelpath(device) container_of(device, struct channel_path, dev)
|
||||
#define CHP_INFO_UPDATE_INTERVAL 1*HZ
|
||||
|
||||
enum cfg_task_t {
|
||||
cfg_none,
|
||||
cfg_configure,
|
||||
cfg_deconfigure
|
||||
};
|
||||
|
||||
/* Map for pending configure tasks. */
|
||||
static enum cfg_task_t chp_cfg_task[__MAX_CSSID + 1][__MAX_CHPID + 1];
|
||||
static DEFINE_MUTEX(cfg_lock);
|
||||
static int cfg_busy;
|
||||
|
||||
/* Map for channel-path status. */
|
||||
static struct sclp_chp_info chp_info;
|
||||
static DEFINE_MUTEX(info_lock);
|
||||
|
||||
/* Time after which channel-path status may be outdated. */
|
||||
static unsigned long chp_info_expires;
|
||||
|
||||
/* Workqueue to perform pending configure tasks. */
|
||||
static struct workqueue_struct *chp_wq;
|
||||
static struct work_struct cfg_work;
|
||||
|
||||
/* Wait queue for configure completion events. */
|
||||
static wait_queue_head_t cfg_wait_queue;
|
||||
|
||||
/* Set vary state for given chpid. */
|
||||
static void set_chp_logically_online(struct chp_id chpid, int onoff)
|
||||
{
|
||||
chpid_to_chp(chpid)->state = onoff;
|
||||
}
|
||||
|
||||
/* On success return 0 if channel-path is varied offline, 1 if it is varied
|
||||
* online. Return -ENODEV if channel-path is not registered. */
|
||||
int chp_get_status(struct chp_id chpid)
|
||||
{
|
||||
return (chpid_to_chp(chpid) ? chpid_to_chp(chpid)->state : -ENODEV);
|
||||
}
|
||||
|
||||
/**
|
||||
* chp_get_sch_opm - return opm for subchannel
|
||||
* @sch: subchannel
|
||||
*
|
||||
* Calculate and return the operational path mask (opm) based on the chpids
|
||||
* used by the subchannel and the status of the associated channel-paths.
|
||||
*/
|
||||
u8 chp_get_sch_opm(struct subchannel *sch)
|
||||
{
|
||||
struct chp_id chpid;
|
||||
int opm;
|
||||
int i;
|
||||
|
||||
opm = 0;
|
||||
chp_id_init(&chpid);
|
||||
for (i = 0; i < 8; i++) {
|
||||
opm <<= 1;
|
||||
chpid.id = sch->schib.pmcw.chpid[i];
|
||||
if (chp_get_status(chpid) != 0)
|
||||
opm |= 1;
|
||||
}
|
||||
return opm;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(chp_get_sch_opm);
|
||||
|
||||
/**
|
||||
* chp_is_registered - check if a channel-path is registered
|
||||
* @chpid: channel-path ID
|
||||
*
|
||||
* Return non-zero if a channel-path with the given chpid is registered,
|
||||
* zero otherwise.
|
||||
*/
|
||||
int chp_is_registered(struct chp_id chpid)
|
||||
{
|
||||
return chpid_to_chp(chpid) != NULL;
|
||||
}
|
||||
|
||||
/*
|
||||
* Function: s390_vary_chpid
|
||||
* Varies the specified chpid online or offline
|
||||
*/
|
||||
static int s390_vary_chpid(struct chp_id chpid, int on)
|
||||
{
|
||||
char dbf_text[15];
|
||||
int status;
|
||||
|
||||
sprintf(dbf_text, on?"varyon%x.%02x":"varyoff%x.%02x", chpid.cssid,
|
||||
chpid.id);
|
||||
CIO_TRACE_EVENT(2, dbf_text);
|
||||
|
||||
status = chp_get_status(chpid);
|
||||
if (!on && !status)
|
||||
return 0;
|
||||
|
||||
set_chp_logically_online(chpid, on);
|
||||
chsc_chp_vary(chpid, on);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Channel measurement related functions
|
||||
*/
|
||||
static ssize_t chp_measurement_chars_read(struct file *filp,
|
||||
struct kobject *kobj,
|
||||
struct bin_attribute *bin_attr,
|
||||
char *buf, loff_t off, size_t count)
|
||||
{
|
||||
struct channel_path *chp;
|
||||
struct device *device;
|
||||
|
||||
device = container_of(kobj, struct device, kobj);
|
||||
chp = to_channelpath(device);
|
||||
if (!chp->cmg_chars)
|
||||
return 0;
|
||||
|
||||
return memory_read_from_buffer(buf, count, &off,
|
||||
chp->cmg_chars, sizeof(struct cmg_chars));
|
||||
}
|
||||
|
||||
static struct bin_attribute chp_measurement_chars_attr = {
|
||||
.attr = {
|
||||
.name = "measurement_chars",
|
||||
.mode = S_IRUSR,
|
||||
},
|
||||
.size = sizeof(struct cmg_chars),
|
||||
.read = chp_measurement_chars_read,
|
||||
};
|
||||
|
||||
static void chp_measurement_copy_block(struct cmg_entry *buf,
|
||||
struct channel_subsystem *css,
|
||||
struct chp_id chpid)
|
||||
{
|
||||
void *area;
|
||||
struct cmg_entry *entry, reference_buf;
|
||||
int idx;
|
||||
|
||||
if (chpid.id < 128) {
|
||||
area = css->cub_addr1;
|
||||
idx = chpid.id;
|
||||
} else {
|
||||
area = css->cub_addr2;
|
||||
idx = chpid.id - 128;
|
||||
}
|
||||
entry = area + (idx * sizeof(struct cmg_entry));
	do {
		memcpy(buf, entry, sizeof(*entry));
		memcpy(&reference_buf, entry, sizeof(*entry));
	} while (reference_buf.values[0] != buf->values[0]);
}

static ssize_t chp_measurement_read(struct file *filp, struct kobject *kobj,
				    struct bin_attribute *bin_attr,
				    char *buf, loff_t off, size_t count)
{
	struct channel_path *chp;
	struct channel_subsystem *css;
	struct device *device;
	unsigned int size;

	device = container_of(kobj, struct device, kobj);
	chp = to_channelpath(device);
	css = to_css(chp->dev.parent);

	size = sizeof(struct cmg_entry);

	/* Only allow single reads. */
	if (off || count < size)
		return 0;
	chp_measurement_copy_block((struct cmg_entry *)buf, css, chp->chpid);
	count = size;
	return count;
}

static struct bin_attribute chp_measurement_attr = {
	.attr = {
		.name = "measurement",
		.mode = S_IRUSR,
	},
	.size = sizeof(struct cmg_entry),
	.read = chp_measurement_read,
};

void chp_remove_cmg_attr(struct channel_path *chp)
{
	device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr);
	device_remove_bin_file(&chp->dev, &chp_measurement_attr);
}

int chp_add_cmg_attr(struct channel_path *chp)
{
	int ret;

	ret = device_create_bin_file(&chp->dev, &chp_measurement_chars_attr);
	if (ret)
		return ret;
	ret = device_create_bin_file(&chp->dev, &chp_measurement_attr);
	if (ret)
		device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr);
	return ret;
}

/*
 * Files for the channel path entries.
 */
static ssize_t chp_status_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct channel_path *chp = to_channelpath(dev);
	int status;

	mutex_lock(&chp->lock);
	status = chp->state;
	mutex_unlock(&chp->lock);

	return status ? sprintf(buf, "online\n") : sprintf(buf, "offline\n");
}

static ssize_t chp_status_write(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct channel_path *cp = to_channelpath(dev);
	char cmd[10];
	int num_args;
	int error;

	num_args = sscanf(buf, "%5s", cmd);
	if (!num_args)
		return count;

	if (!strncasecmp(cmd, "on", 2) || !strcmp(cmd, "1")) {
		mutex_lock(&cp->lock);
		error = s390_vary_chpid(cp->chpid, 1);
		mutex_unlock(&cp->lock);
	} else if (!strncasecmp(cmd, "off", 3) || !strcmp(cmd, "0")) {
		mutex_lock(&cp->lock);
		error = s390_vary_chpid(cp->chpid, 0);
		mutex_unlock(&cp->lock);
	} else
		error = -EINVAL;

	return error < 0 ? error : count;
}

static DEVICE_ATTR(status, 0644, chp_status_show, chp_status_write);

static ssize_t chp_configure_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct channel_path *cp;
	int status;

	cp = to_channelpath(dev);
	status = chp_info_get_status(cp->chpid);
	if (status < 0)
		return status;

	return snprintf(buf, PAGE_SIZE, "%d\n", status);
}

static int cfg_wait_idle(void);

static ssize_t chp_configure_write(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct channel_path *cp;
	int val;
	char delim;

	if (sscanf(buf, "%d %c", &val, &delim) != 1)
		return -EINVAL;
	if (val != 0 && val != 1)
		return -EINVAL;
	cp = to_channelpath(dev);
	chp_cfg_schedule(cp->chpid, val);
	cfg_wait_idle();

	return count;
}

static DEVICE_ATTR(configure, 0644, chp_configure_show, chp_configure_write);

static ssize_t chp_type_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct channel_path *chp = to_channelpath(dev);
	u8 type;

	mutex_lock(&chp->lock);
	type = chp->desc.desc;
	mutex_unlock(&chp->lock);
	return sprintf(buf, "%x\n", type);
}

static DEVICE_ATTR(type, 0444, chp_type_show, NULL);

static ssize_t chp_cmg_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct channel_path *chp = to_channelpath(dev);

	if (!chp)
		return 0;
	if (chp->cmg == -1) /* channel measurements not available */
		return sprintf(buf, "unknown\n");
	return sprintf(buf, "%x\n", chp->cmg);
}

static DEVICE_ATTR(cmg, 0444, chp_cmg_show, NULL);

static ssize_t chp_shared_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct channel_path *chp = to_channelpath(dev);

	if (!chp)
		return 0;
	if (chp->shared == -1) /* channel measurements not available */
		return sprintf(buf, "unknown\n");
	return sprintf(buf, "%x\n", chp->shared);
}

static DEVICE_ATTR(shared, 0444, chp_shared_show, NULL);

static ssize_t chp_chid_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct channel_path *chp = to_channelpath(dev);
	ssize_t rc;

	mutex_lock(&chp->lock);
	if (chp->desc_fmt1.flags & 0x10)
		rc = sprintf(buf, "%04x\n", chp->desc_fmt1.chid);
	else
		rc = 0;
	mutex_unlock(&chp->lock);

	return rc;
}
static DEVICE_ATTR(chid, 0444, chp_chid_show, NULL);

static ssize_t chp_chid_external_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct channel_path *chp = to_channelpath(dev);
	ssize_t rc;

	mutex_lock(&chp->lock);
	if (chp->desc_fmt1.flags & 0x10)
		rc = sprintf(buf, "%x\n", chp->desc_fmt1.flags & 0x8 ? 1 : 0);
	else
		rc = 0;
	mutex_unlock(&chp->lock);

	return rc;
}
static DEVICE_ATTR(chid_external, 0444, chp_chid_external_show, NULL);

static struct attribute *chp_attrs[] = {
	&dev_attr_status.attr,
	&dev_attr_configure.attr,
	&dev_attr_type.attr,
	&dev_attr_cmg.attr,
	&dev_attr_shared.attr,
	&dev_attr_chid.attr,
	&dev_attr_chid_external.attr,
	NULL,
};
static struct attribute_group chp_attr_group = {
	.attrs = chp_attrs,
};
static const struct attribute_group *chp_attr_groups[] = {
	&chp_attr_group,
	NULL,
};

static void chp_release(struct device *dev)
{
	struct channel_path *cp;

	cp = to_channelpath(dev);
	kfree(cp);
}

/**
 * chp_update_desc - update channel-path description
 * @chp: channel-path
 *
 * Update the channel-path description of the specified channel-path.
 * Return zero on success, non-zero otherwise.
 */
int chp_update_desc(struct channel_path *chp)
{
	int rc;

	rc = chsc_determine_base_channel_path_desc(chp->chpid, &chp->desc);
	if (rc)
		return rc;

	rc = chsc_determine_fmt1_channel_path_desc(chp->chpid, &chp->desc_fmt1);

	return rc;
}

/**
 * chp_new - register a new channel-path
 * @chpid: channel-path ID
 *
 * Create and register data structure representing new channel-path. Return
 * zero on success, non-zero otherwise.
 */
int chp_new(struct chp_id chpid)
{
	struct channel_path *chp;
	int ret;

	if (chp_is_registered(chpid))
		return 0;
	chp = kzalloc(sizeof(struct channel_path), GFP_KERNEL);
	if (!chp)
		return -ENOMEM;

	/* fill in status, etc. */
	chp->chpid = chpid;
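	/* A new channel path starts out logically online; the sysfs
	 * "status" attribute can vary it off later. */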
	chp->state = 1;
	chp->dev.parent = &channel_subsystems[chpid.cssid]->device;
	chp->dev.groups = chp_attr_groups;
	chp->dev.release = chp_release;
	mutex_init(&chp->lock);

	/* Obtain channel path description and fill it in. */
	ret = chp_update_desc(chp);
	if (ret)
		goto out_free;
	if ((chp->desc.flags & 0x80) == 0) {
		ret = -ENODEV;
		goto out_free;
	}
	/* Get channel-measurement characteristics. */
	if (css_chsc_characteristics.scmc && css_chsc_characteristics.secm) {
		ret = chsc_get_channel_measurement_chars(chp);
		if (ret)
			goto out_free;
	} else {
		chp->cmg = -1;
	}
	dev_set_name(&chp->dev, "chp%x.%02x", chpid.cssid, chpid.id);

	/* make it known to the system */
	ret = device_register(&chp->dev);
	if (ret) {
		CIO_MSG_EVENT(0, "Could not register chp%x.%02x: %d\n",
			      chpid.cssid, chpid.id, ret);
		put_device(&chp->dev);
		goto out;
	}
	mutex_lock(&channel_subsystems[chpid.cssid]->mutex);
	if (channel_subsystems[chpid.cssid]->cm_enabled) {
		ret = chp_add_cmg_attr(chp);
		if (ret) {
			device_unregister(&chp->dev);
			mutex_unlock(&channel_subsystems[chpid.cssid]->mutex);
			goto out;
		}
	}
	channel_subsystems[chpid.cssid]->chps[chpid.id] = chp;
	mutex_unlock(&channel_subsystems[chpid.cssid]->mutex);
	goto out;
out_free:
	kfree(chp);
out:
	return ret;
}

/**
 * chp_get_chp_desc - return newly allocated channel-path description
 * @chpid: channel-path ID
 *
 * On success return a newly allocated copy of the channel-path description
 * data associated with the given channel-path ID. Return %NULL on error.
 */
struct channel_path_desc *chp_get_chp_desc(struct chp_id chpid)
{
	struct channel_path *chp;
	struct channel_path_desc *desc;

	chp = chpid_to_chp(chpid);
	if (!chp)
		return NULL;
	desc = kmalloc(sizeof(struct channel_path_desc), GFP_KERNEL);
	if (!desc)
		return NULL;

	mutex_lock(&chp->lock);
	memcpy(desc, &chp->desc, sizeof(struct channel_path_desc));
	mutex_unlock(&chp->lock);
	return desc;
}

/**
 * chp_process_crw - process channel-path status change
 * @crw0: channel report-word to handle
 * @crw1: second channel-report word (always NULL)
 * @overflow: crw overflow indication
 *
 * Handle channel-report-words indicating that the status of a channel-path
 * has changed.
 */
static void chp_process_crw(struct crw *crw0, struct crw *crw1,
			    int overflow)
{
	struct chp_id chpid;

	if (overflow) {
		css_schedule_eval_all();
		return;
	}
	CIO_CRW_EVENT(2, "CRW reports slct=%d, oflw=%d, "
		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
		      crw0->erc, crw0->rsid);
	/*
	 * Check for solicited machine checks. These are
	 * created by reset channel path and need not be
	 * handled here.
	 */
	if (crw0->slct) {
		CIO_CRW_EVENT(2, "solicited machine check for "
			      "channel path %02X\n", crw0->rsid);
		return;
	}
	chp_id_init(&chpid);
	chpid.id = crw0->rsid;
	switch (crw0->erc) {
	case CRW_ERC_IPARM: /* Path has come. */
		if (!chp_is_registered(chpid))
			chp_new(chpid);
		chsc_chp_online(chpid);
		break;
	case CRW_ERC_PERRI: /* Path has gone. */
	case CRW_ERC_PERRN:
		chsc_chp_offline(chpid);
		break;
	default:
		CIO_CRW_EVENT(2, "Don't know how to handle erc=%x\n",
			      crw0->erc);
	}
}

int chp_ssd_get_mask(struct chsc_ssd_info *ssd, struct chp_link *link)
{
	int i;
	int mask;

	for (i = 0; i < 8; i++) {
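		/* path_mask and fla_valid_mask are MSB-first: bit
		 * (0x80 >> i) corresponds to ssd->chpid[i]. */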
		mask = 0x80 >> i;
		if (!(ssd->path_mask & mask))
			continue;
		if (!chp_id_is_equal(&ssd->chpid[i], &link->chpid))
			continue;
		if ((ssd->fla_valid_mask & mask) &&
		    ((ssd->fla[i] & link->fla_mask) != link->fla))
			continue;
		return mask;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(chp_ssd_get_mask);

static inline int info_bit_num(struct chp_id id)
{
	return id.id + id.cssid * (__MAX_CHPID + 1);
}

/* Force chp_info refresh on next call to info_update(). */
static void info_expire(void)
{
	mutex_lock(&info_lock);
	chp_info_expires = jiffies - 1;
	mutex_unlock(&info_lock);
}

/* Ensure that chp_info is up-to-date. */
static int info_update(void)
{
	int rc;

	mutex_lock(&info_lock);
	rc = 0;
	if (time_after(jiffies, chp_info_expires)) {
		/* Data is too old, update. */
		rc = sclp_chp_read_info(&chp_info);
		chp_info_expires = jiffies + CHP_INFO_UPDATE_INTERVAL;
	}
	mutex_unlock(&info_lock);

	return rc;
}

/**
 * chp_info_get_status - retrieve configure status of a channel-path
 * @chpid: channel-path ID
 *
 * On success, return 0 for standby, 1 for configured, 2 for reserved,
 * 3 for not recognized. Return negative error code on error.
 */
int chp_info_get_status(struct chp_id chpid)
{
	int rc;
	int bit;

	rc = info_update();
	if (rc)
		return rc;

	bit = info_bit_num(chpid);
	mutex_lock(&info_lock);
	if (!chp_test_bit(chp_info.recognized, bit))
		rc = CHP_STATUS_NOT_RECOGNIZED;
	else if (chp_test_bit(chp_info.configured, bit))
		rc = CHP_STATUS_CONFIGURED;
	else if (chp_test_bit(chp_info.standby, bit))
		rc = CHP_STATUS_STANDBY;
	else
		rc = CHP_STATUS_RESERVED;
	mutex_unlock(&info_lock);

	return rc;
}

/* Return configure task for chpid. */
static enum cfg_task_t cfg_get_task(struct chp_id chpid)
{
	return chp_cfg_task[chpid.cssid][chpid.id];
}

/* Set configure task for chpid. */
static void cfg_set_task(struct chp_id chpid, enum cfg_task_t cfg)
{
	chp_cfg_task[chpid.cssid][chpid.id] = cfg;
}

/* Perform one configure/deconfigure request. Reschedule work function until
 * last request. */
static void cfg_func(struct work_struct *work)
{
	struct chp_id chpid;
	enum cfg_task_t t;
	int rc;

	mutex_lock(&cfg_lock);
	t = cfg_none;
	chp_id_for_each(&chpid) {
		t = cfg_get_task(chpid);
		if (t != cfg_none) {
			cfg_set_task(chpid, cfg_none);
			break;
		}
	}
	mutex_unlock(&cfg_lock);
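	/*
	 * Handle at most one pending task per invocation; the work item is
	 * requeued below until no tasks remain, at which point waiters on
	 * cfg_wait_queue are woken up.
	 */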
	switch (t) {
	case cfg_configure:
		rc = sclp_chp_configure(chpid);
		if (rc)
			CIO_MSG_EVENT(2, "chp: sclp_chp_configure(%x.%02x)="
				      "%d\n", chpid.cssid, chpid.id, rc);
		else {
			info_expire();
			chsc_chp_online(chpid);
		}
		break;
	case cfg_deconfigure:
		rc = sclp_chp_deconfigure(chpid);
		if (rc)
			CIO_MSG_EVENT(2, "chp: sclp_chp_deconfigure(%x.%02x)="
				      "%d\n", chpid.cssid, chpid.id, rc);
		else {
			info_expire();
			chsc_chp_offline(chpid);
		}
		break;
	case cfg_none:
		/* Get updated information after last change. */
		info_update();
		mutex_lock(&cfg_lock);
		cfg_busy = 0;
		mutex_unlock(&cfg_lock);
		wake_up_interruptible(&cfg_wait_queue);
		return;
	}
	queue_work(chp_wq, &cfg_work);
}

/**
 * chp_cfg_schedule - schedule chpid configuration request
 * @chpid: channel-path ID
 * @configure: Non-zero for configure, zero for deconfigure
 *
 * Schedule a channel-path configuration/deconfiguration request.
 */
void chp_cfg_schedule(struct chp_id chpid, int configure)
{
	CIO_MSG_EVENT(2, "chp_cfg_sched%x.%02x=%d\n", chpid.cssid, chpid.id,
		      configure);
	mutex_lock(&cfg_lock);
	cfg_set_task(chpid, configure ? cfg_configure : cfg_deconfigure);
	cfg_busy = 1;
	mutex_unlock(&cfg_lock);
	queue_work(chp_wq, &cfg_work);
}

/**
 * chp_cfg_cancel_deconfigure - cancel chpid deconfiguration request
 * @chpid: channel-path ID
 *
 * Cancel an active channel-path deconfiguration request if it has not yet
 * been performed.
 */
void chp_cfg_cancel_deconfigure(struct chp_id chpid)
{
	CIO_MSG_EVENT(2, "chp_cfg_cancel:%x.%02x\n", chpid.cssid, chpid.id);
	mutex_lock(&cfg_lock);
	if (cfg_get_task(chpid) == cfg_deconfigure)
		cfg_set_task(chpid, cfg_none);
	mutex_unlock(&cfg_lock);
}

static int cfg_wait_idle(void)
{
	if (wait_event_interruptible(cfg_wait_queue, !cfg_busy))
		return -ERESTARTSYS;
	return 0;
}

static int __init chp_init(void)
{
	struct chp_id chpid;
	int ret;

	ret = crw_register_handler(CRW_RSC_CPATH, chp_process_crw);
	if (ret)
		return ret;
	chp_wq = create_singlethread_workqueue("cio_chp");
	if (!chp_wq) {
		crw_unregister_handler(CRW_RSC_CPATH);
		return -ENOMEM;
	}
	INIT_WORK(&cfg_work, cfg_func);
	init_waitqueue_head(&cfg_wait_queue);
	if (info_update())
		return 0;
	/* Register available channel-paths. */
	chp_id_for_each(&chpid) {
		if (chp_info_get_status(chpid) != CHP_STATUS_NOT_RECOGNIZED)
			chp_new(chpid);
	}

	return 0;
}

subsys_initcall(chp_init);
72
drivers/s390/cio/chp.h
Normal file
@@ -0,0 +1,72 @@
/*
 * Copyright IBM Corp. 2007, 2010
 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
 */

#ifndef S390_CHP_H
#define S390_CHP_H S390_CHP_H

#include <linux/types.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <asm/chpid.h>
#include "chsc.h"
#include "css.h"

#define CHP_STATUS_STANDBY		0
#define CHP_STATUS_CONFIGURED		1
#define CHP_STATUS_RESERVED		2
#define CHP_STATUS_NOT_RECOGNIZED	3

#define CHP_ONLINE 0
#define CHP_OFFLINE 1
#define CHP_VARY_ON 2
#define CHP_VARY_OFF 3

struct chp_link {
	struct chp_id chpid;
	u32 fla_mask;
	u16 fla;
};

static inline int chp_test_bit(u8 *bitmap, int num)
{
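	/* Bits are numbered MSB-first within each byte of the bitmap. */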
	int byte = num >> 3;
	int mask = 128 >> (num & 7);

	return (bitmap[byte] & mask) ? 1 : 0;
}

struct channel_path {
	struct device dev;
	struct chp_id chpid;
	struct mutex lock; /* Serialize access to below members. */
	int state;
	struct channel_path_desc desc;
	struct channel_path_desc_fmt1 desc_fmt1;
	/* Channel-measurement related stuff: */
	int cmg;
	int shared;
	void *cmg_chars;
};

/* Return channel_path struct for given chpid. */
static inline struct channel_path *chpid_to_chp(struct chp_id chpid)
{
	return channel_subsystems[chpid.cssid]->chps[chpid.id];
}

int chp_get_status(struct chp_id chpid);
u8 chp_get_sch_opm(struct subchannel *sch);
int chp_is_registered(struct chp_id chpid);
struct channel_path_desc *chp_get_chp_desc(struct chp_id chpid);
void chp_remove_cmg_attr(struct channel_path *chp);
int chp_add_cmg_attr(struct channel_path *chp);
int chp_update_desc(struct channel_path *chp);
int chp_new(struct chp_id chpid);
void chp_cfg_schedule(struct chp_id chpid, int configure);
void chp_cfg_cancel_deconfigure(struct chp_id chpid);
int chp_info_get_status(struct chp_id chpid);
int chp_ssd_get_mask(struct chsc_ssd_info *, struct chp_link *);
#endif /* S390_CHP_H */
1250
drivers/s390/cio/chsc.c
Normal file
File diff suppressed because it is too large
238
drivers/s390/cio/chsc.h
Normal file
@@ -0,0 +1,238 @@
#ifndef S390_CHSC_H
#define S390_CHSC_H

#include <linux/types.h>
#include <linux/device.h>
#include <asm/css_chars.h>
#include <asm/chpid.h>
#include <asm/chsc.h>
#include <asm/schid.h>
#include <asm/qdio.h>

#define CHSC_SDA_OC_MSS 0x2

#define NR_MEASUREMENT_CHARS 5
struct cmg_chars {
	u32 values[NR_MEASUREMENT_CHARS];
} __attribute__ ((packed));

#define NR_MEASUREMENT_ENTRIES 8
struct cmg_entry {
	u32 values[NR_MEASUREMENT_ENTRIES];
} __attribute__ ((packed));

struct channel_path_desc_fmt1 {
	u8 flags;
	u8 lsn;
	u8 desc;
	u8 chpid;
	u32:24;
	u8 chpp;
	u32 unused[2];
	u16 chid;
	u32:16;
	u16 mdc;
	u16:13;
	u8 r:1;
	u8 s:1;
	u8 f:1;
	u32 zeros[2];
} __attribute__ ((packed));

struct channel_path;

struct css_chsc_char {
	u64 res;
	u64 : 20;
	u32 secm : 1; /* bit 84 */
	u32 : 1;
	u32 scmc : 1; /* bit 86 */
	u32 : 20;
	u32 scssc : 1;  /* bit 107 */
	u32 scsscf : 1; /* bit 108 */
	u32:7;
	u32 pnso:1; /* bit 116 */
	u32:11;
} __attribute__ ((packed));

extern struct css_chsc_char css_chsc_characteristics;

struct chsc_ssd_info {
	u8 path_mask;
	u8 fla_valid_mask;
	struct chp_id chpid[8];
	u16 fla[8];
};

struct chsc_ssqd_area {
	struct chsc_header request;
	u16:10;
	u8 ssid:2;
	u8 fmt:4;
	u16 first_sch;
	u16:16;
	u16 last_sch;
	u32:32;
	struct chsc_header response;
	u32:32;
	struct qdio_ssqd_desc qdio_ssqd;
} __packed;

struct chsc_scssc_area {
	struct chsc_header request;
	u16 operation_code;
	u16:16;
	u32:32;
	u32:32;
	u64 summary_indicator_addr;
	u64 subchannel_indicator_addr;
	u32 ks:4;
	u32 kc:4;
	u32:21;
	u32 isc:3;
	u32 word_with_d_bit;
	u32:32;
	struct subchannel_id schid;
	u32 reserved[1004];
	struct chsc_header response;
	u32:32;
} __packed;

struct chsc_scpd {
	struct chsc_header request;
	u32:2;
	u32 m:1;
	u32 c:1;
	u32 fmt:4;
	u32 cssid:8;
	u32:4;
	u32 rfmt:4;
	u32 first_chpid:8;
	u32:24;
	u32 last_chpid:8;
	u32 zeroes1;
	struct chsc_header response;
	u8 data[PAGE_SIZE - 20];
} __attribute__ ((packed));


extern int chsc_get_ssd_info(struct subchannel_id schid,
			     struct chsc_ssd_info *ssd);
extern int chsc_determine_css_characteristics(void);
extern int chsc_init(void);
extern void chsc_init_cleanup(void);

extern int chsc_enable_facility(int);
struct channel_subsystem;
extern int chsc_secm(struct channel_subsystem *, int);
int __chsc_do_secm(struct channel_subsystem *css, int enable);

int chsc_chp_vary(struct chp_id chpid, int on);
int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt,
				     int c, int m, void *page);
int chsc_determine_base_channel_path_desc(struct chp_id chpid,
					  struct channel_path_desc *desc);
int chsc_determine_fmt1_channel_path_desc(struct chp_id chpid,
					  struct channel_path_desc_fmt1 *desc);
void chsc_chp_online(struct chp_id chpid);
void chsc_chp_offline(struct chp_id chpid);
int chsc_get_channel_measurement_chars(struct channel_path *chp);
int chsc_ssqd(struct subchannel_id schid, struct chsc_ssqd_area *ssqd);
int chsc_sadc(struct subchannel_id schid, struct chsc_scssc_area *scssc,
	      u64 summary_indicator_addr, u64 subchannel_indicator_addr);
int chsc_error_from_response(int response);

int chsc_siosl(struct subchannel_id schid);

/* Functions and definitions to query storage-class memory. */
struct sale {
	u64 sa;
	u32 p:4;
	u32 op_state:4;
	u32 data_state:4;
	u32 rank:4;
	u32 r:1;
	u32:7;
	u32 rid:8;
	u32:32;
} __packed;

struct chsc_scm_info {
	struct chsc_header request;
	u32:32;
	u64 reqtok;
	u32 reserved1[4];
	struct chsc_header response;
	u64:56;
	u8 rq;
	u32 mbc;
	u64 msa;
	u16 is;
	u16 mmc;
	u32 mci;
	u64 nr_scm_ini;
	u64 nr_scm_unini;
	u32 reserved2[10];
	u64 restok;
	struct sale scmal[248];
} __packed;

int chsc_scm_info(struct chsc_scm_info *scm_area, u64 token);

struct chsc_brinfo_resume_token {
	u64 t1;
	u64 t2;
} __packed;

struct chsc_brinfo_naihdr {
	struct chsc_brinfo_resume_token resume_token;
	u32:32;
	u32 instance;
	u32:24;
	u8 naids;
	u32 reserved[3];
} __packed;

struct chsc_pnso_area {
	struct chsc_header request;
	u8:2;
	u8 m:1;
	u8:5;
	u8:2;
	u8 ssid:2;
	u8 fmt:4;
	u16 sch;
	u8:8;
	u8 cssid;
	u16:16;
	u8 oc;
	u32:24;
	struct chsc_brinfo_resume_token resume_token;
	u32 n:1;
	u32:31;
	u32 reserved[3];
	struct chsc_header response;
	u32:32;
	struct chsc_brinfo_naihdr naihdr;
	union {
		struct qdio_brinfo_entry_l3_ipv6 l3_ipv6[0];
		struct qdio_brinfo_entry_l3_ipv4 l3_ipv4[0];
		struct qdio_brinfo_entry_l2 l2[0];
	} entries;
} __packed;

int chsc_pnso_brinfo(struct subchannel_id schid,
		     struct chsc_pnso_area *brinfo_area,
		     struct chsc_brinfo_resume_token resume_token,
		     int cnc);

#ifdef CONFIG_SCM_BUS
int scm_update_information(void);
int scm_process_availability_information(void);
#else /* CONFIG_SCM_BUS */
static inline int scm_update_information(void) { return 0; }
static inline int scm_process_availability_information(void) { return 0; }
#endif /* CONFIG_SCM_BUS */


#endif
1017
drivers/s390/cio/chsc_sch.c
Normal file
File diff suppressed because it is too large
13
drivers/s390/cio/chsc_sch.h
Normal file
@@ -0,0 +1,13 @@
#ifndef _CHSC_SCH_H
#define _CHSC_SCH_H

struct chsc_request {
	struct completion completion;
	struct irb irb;
};

struct chsc_private {
	struct chsc_request *request;
};

#endif
1022
drivers/s390/cio/cio.c
Normal file
File diff suppressed because it is too large
137
drivers/s390/cio/cio.h
Normal file
@@ -0,0 +1,137 @@
#ifndef S390_CIO_H
#define S390_CIO_H

#include <linux/mutex.h>
#include <linux/device.h>
#include <linux/mod_devicetable.h>
#include <asm/chpid.h>
#include <asm/cio.h>
#include <asm/fcx.h>
#include <asm/schid.h>
#include "chsc.h"

/*
 * path management control word
 */
struct pmcw {
	u32 intparm;	/* interruption parameter */
	u32 qf : 1;	/* qdio facility */
	u32 w : 1;
	u32 isc : 3;	/* interruption subclass */
	u32 res5 : 3;	/* reserved zeros */
	u32 ena : 1;	/* enabled */
	u32 lm : 2;	/* limit mode */
	u32 mme : 2;	/* measurement-mode enable */
	u32 mp : 1;	/* multipath mode */
	u32 tf : 1;	/* timing facility */
	u32 dnv : 1;	/* device number valid */
	u32 dev : 16;	/* device number */
	u8 lpm;		/* logical path mask */
	u8 pnom;	/* path not operational mask */
	u8 lpum;	/* last path used mask */
	u8 pim;		/* path installed mask */
	u16 mbi;	/* measurement-block index */
	u8 pom;		/* path operational mask */
	u8 pam;		/* path available mask */
	u8 chpid[8];	/* CHPID 0-7 (if available) */
	u32 unused1 : 8; /* reserved zeros */
	u32 st : 3;	/* subchannel type */
	u32 unused2 : 18; /* reserved zeros */
	u32 mbfc : 1;	/* measurement block format control */
	u32 xmwme : 1;	/* extended measurement word mode enable */
	u32 csense : 1;	/* concurrent sense; can be enabled ...*/
			/* ... per MSCH, however, if facility */
			/* ... is not installed, this results */
			/* ... in an operand exception.       */
} __attribute__ ((packed));

/* Target SCHIB configuration. */
struct schib_config {
	u64 mba;
	u32 intparm;
	u16 mbi;
	u32 isc:3;
	u32 ena:1;
	u32 mme:2;
	u32 mp:1;
	u32 csense:1;
	u32 mbfc:1;
} __attribute__ ((packed));

/*
 * subchannel information block
 */
struct schib {
	struct pmcw pmcw;	/* path management control word */
	union scsw scsw;	/* subchannel status word */
	__u64 mba;		/* measurement block address */
	__u8 mda[4];		/* model dependent area */
} __attribute__ ((packed,aligned(4)));

/*
 * When rescheduled, todo's with higher values will overwrite those
 * with lower values.
 */
enum sch_todo {
	SCH_TODO_NOTHING,
	SCH_TODO_EVAL,
	SCH_TODO_UNREG,
};

/* subchannel data structure used by I/O subroutines */
struct subchannel {
	struct subchannel_id schid;
	spinlock_t *lock;	/* subchannel lock */
	struct mutex reg_mutex;
	enum {
		SUBCHANNEL_TYPE_IO = 0,
		SUBCHANNEL_TYPE_CHSC = 1,
		SUBCHANNEL_TYPE_MSG = 2,
		SUBCHANNEL_TYPE_ADM = 3,
	} st;			/* subchannel type */
	__u8 vpm;		/* verified path mask */
	__u8 lpm;		/* logical path mask */
	__u8 opm;		/* operational path mask */
	struct schib schib;	/* subchannel information block */
	int isc;		/* desired interruption subclass */
	struct chsc_ssd_info ssd_info;	/* subchannel description */
	struct device dev;	/* entry in device tree */
	struct css_driver *driver;
	enum sch_todo todo;
	struct work_struct todo_work;
	struct schib_config config;
} __attribute__ ((aligned(8)));

DECLARE_PER_CPU(struct irb, cio_irb);

#define to_subchannel(n) container_of(n, struct subchannel, dev)

extern int cio_validate_subchannel(struct subchannel *, struct subchannel_id);
extern int cio_enable_subchannel(struct subchannel *, u32);
extern int cio_disable_subchannel(struct subchannel *);
extern int cio_cancel(struct subchannel *);
extern int cio_clear(struct subchannel *);
extern int cio_resume(struct subchannel *);
extern int cio_halt(struct subchannel *);
extern int cio_start(struct subchannel *, struct ccw1 *, __u8);
extern int cio_start_key(struct subchannel *, struct ccw1 *, __u8, __u8);
extern int cio_set_options(struct subchannel *, int);
extern int cio_update_schib(struct subchannel *sch);
extern int cio_commit_config(struct subchannel *sch);

int cio_tm_start_key(struct subchannel *sch, struct tcw *tcw, u8 lpm, u8 key);
int cio_tm_intrg(struct subchannel *sch);

/* Use with care. */
#ifdef CONFIG_CCW_CONSOLE
extern struct subchannel *cio_probe_console(void);
extern int cio_is_console(struct subchannel_id);
extern void cio_register_early_subchannels(void);
extern void cio_tsch(struct subchannel *sch);
#else
#define cio_is_console(schid) 0
static inline void cio_register_early_subchannels(void) {}
#endif

#endif
34
drivers/s390/cio/cio_debug.h
Normal file
@@ -0,0 +1,34 @@
#ifndef CIO_DEBUG_H
#define CIO_DEBUG_H

#include <asm/debug.h>

/* for use of debug feature */
extern debug_info_t *cio_debug_msg_id;
extern debug_info_t *cio_debug_trace_id;
extern debug_info_t *cio_debug_crw_id;

#define CIO_TRACE_EVENT(imp, txt) do { \
		debug_text_event(cio_debug_trace_id, imp, txt); \
	} while (0)

#define CIO_MSG_EVENT(imp, args...) do { \
		debug_sprintf_event(cio_debug_msg_id, imp , ##args); \
	} while (0)

#define CIO_CRW_EVENT(imp, args...) do { \
		debug_sprintf_event(cio_debug_crw_id, imp , ##args); \
	} while (0)

static inline void CIO_HEX_EVENT(int level, void *data, int length)
{
	if (unlikely(!cio_debug_trace_id))
		return;
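	/* debug_event() stores at most buf_size bytes per record, so long
	 * buffers are split across consecutive trace records. */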
	while (length > 0) {
		debug_event(cio_debug_trace_id, level, data, length);
		length -= cio_debug_trace_id->buf_size;
		data += cio_debug_trace_id->buf_size;
	}
}

#endif
1347
drivers/s390/cio/cmf.c
Normal file
File diff suppressed because it is too large
161
drivers/s390/cio/crw.c
Normal file
@@ -0,0 +1,161 @@
/*
 * Channel report handling code
 *
 * Copyright IBM Corp. 2000, 2009
 * Author(s): Ingo Adlung <adlung@de.ibm.com>,
 *	      Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *	      Cornelia Huck <cornelia.huck@de.ibm.com>,
 *	      Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/mutex.h>
#include <linux/kthread.h>
#include <linux/init.h>
#include <linux/wait.h>
#include <asm/crw.h>
#include <asm/ctl_reg.h>

static DEFINE_MUTEX(crw_handler_mutex);
static crw_handler_t crw_handlers[NR_RSCS];
static atomic_t crw_nr_req = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(crw_handler_wait_q);

/**
 * crw_register_handler() - register a channel report word handler
 * @rsc: reporting source code to handle
 * @handler: handler to be registered
 *
 * Returns %0 on success and a negative error value otherwise.
 */
int crw_register_handler(int rsc, crw_handler_t handler)
{
	int rc = 0;

	if ((rsc < 0) || (rsc >= NR_RSCS))
		return -EINVAL;
	mutex_lock(&crw_handler_mutex);
	if (crw_handlers[rsc])
		rc = -EBUSY;
	else
		crw_handlers[rsc] = handler;
	mutex_unlock(&crw_handler_mutex);
	return rc;
}

/**
 * crw_unregister_handler() - unregister a channel report word handler
 * @rsc: reporting source code to handle
 */
void crw_unregister_handler(int rsc)
{
	if ((rsc < 0) || (rsc >= NR_RSCS))
		return;
	mutex_lock(&crw_handler_mutex);
	crw_handlers[rsc] = NULL;
	mutex_unlock(&crw_handler_mutex);
}

/*
 * Retrieve CRWs and call function to handle event.
 */
static int crw_collect_info(void *unused)
{
	struct crw crw[2];
	int ccode, signal;
	unsigned int chain;

repeat:
	signal = wait_event_interruptible(crw_handler_wait_q,
					  atomic_read(&crw_nr_req) > 0);
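	/*
	 * If the wait was interrupted by a signal rather than by new work,
	 * bump the request counter ourselves so the atomic_dec_and_test()
	 * at the end of the loop stays balanced.
	 */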
	if (unlikely(signal))
		atomic_inc(&crw_nr_req);
	chain = 0;
	while (1) {
		crw_handler_t handler;

		if (unlikely(chain > 1)) {
			struct crw tmp_crw;

			printk(KERN_WARNING"%s: Code does not support more "
			       "than two chained crws; please report to "
			       "linux390@de.ibm.com!\n", __func__);
			ccode = stcrw(&tmp_crw);
			printk(KERN_WARNING"%s: crw reports slct=%d, oflw=%d, "
			       "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
			       __func__, tmp_crw.slct, tmp_crw.oflw,
			       tmp_crw.chn, tmp_crw.rsc, tmp_crw.anc,
			       tmp_crw.erc, tmp_crw.rsid);
			printk(KERN_WARNING"%s: This was crw number %x in the "
			       "chain\n", __func__, chain);
			if (ccode != 0)
				break;
			chain = tmp_crw.chn ? chain + 1 : 0;
			continue;
		}
		ccode = stcrw(&crw[chain]);
		if (ccode != 0)
			break;
		printk(KERN_DEBUG "crw_info : CRW reports slct=%d, oflw=%d, "
		       "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
		       crw[chain].slct, crw[chain].oflw, crw[chain].chn,
		       crw[chain].rsc, crw[chain].anc, crw[chain].erc,
		       crw[chain].rsid);
		/* Check for overflows. */
		if (crw[chain].oflw) {
			int i;

			pr_debug("%s: crw overflow detected!\n", __func__);
			mutex_lock(&crw_handler_mutex);
			for (i = 0; i < NR_RSCS; i++) {
				if (crw_handlers[i])
					crw_handlers[i](NULL, NULL, 1);
			}
			mutex_unlock(&crw_handler_mutex);
			chain = 0;
			continue;
		}
		if (crw[0].chn && !chain) {
			chain++;
			continue;
		}
		mutex_lock(&crw_handler_mutex);
		handler = crw_handlers[crw[chain].rsc];
		if (handler)
			handler(&crw[0], chain ? &crw[1] : NULL, 0);
		mutex_unlock(&crw_handler_mutex);
		/* chain is always 0 or 1 here. */
		chain = crw[chain].chn ? chain + 1 : 0;
	}
	if (atomic_dec_and_test(&crw_nr_req))
		wake_up(&crw_handler_wait_q);
	goto repeat;
	return 0;
}

void crw_handle_channel_report(void)
{
	atomic_inc(&crw_nr_req);
	wake_up(&crw_handler_wait_q);
}

void crw_wait_for_channel_report(void)
{
	crw_handle_channel_report();
	wait_event(crw_handler_wait_q, atomic_read(&crw_nr_req) == 0);
}

/*
 * Machine checks for the channel subsystem must be enabled
 * after the channel subsystem is initialized
 */
static int __init crw_machine_check_init(void)
{
	struct task_struct *task;

	task = kthread_run(crw_collect_info, NULL, "kmcheck");
	if (IS_ERR(task))
		return PTR_ERR(task);
	ctl_set_bit(14, 28);	/* enable channel report MCH */
	return 0;
}
device_initcall(crw_machine_check_init);
1294
drivers/s390/cio/css.c
Normal file
File diff suppressed because it is too large
146
drivers/s390/cio/css.h
Normal file
@@ -0,0 +1,146 @@
#ifndef _CSS_H
#define _CSS_H

#include <linux/mutex.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <linux/device.h>
#include <linux/types.h>

#include <asm/cio.h>
#include <asm/chpid.h>
#include <asm/schid.h>

#include "cio.h"

/*
 * path grouping stuff
 */
#define SPID_FUNC_SINGLE_PATH	0x00
#define SPID_FUNC_MULTI_PATH	0x80
#define SPID_FUNC_ESTABLISH	0x00
#define SPID_FUNC_RESIGN	0x40
#define SPID_FUNC_DISBAND	0x20

#define SNID_STATE1_RESET	0
#define SNID_STATE1_UNGROUPED	2
#define SNID_STATE1_GROUPED	3

#define SNID_STATE2_NOT_RESVD	0
#define SNID_STATE2_RESVD_ELSE	2
#define SNID_STATE2_RESVD_SELF	3

#define SNID_STATE3_MULTI_PATH	1
#define SNID_STATE3_SINGLE_PATH	0

struct path_state {
	__u8 state1 : 2;	/* path state value 1 */
	__u8 state2 : 2;	/* path state value 2 */
	__u8 state3 : 1;	/* path state value 3 */
	__u8 resvd : 3;		/* reserved */
} __attribute__ ((packed));

struct extended_cssid {
	u8 version;
	u8 cssid;
} __attribute__ ((packed));

struct pgid {
	union {
		__u8 fc;	/* SPID function code */
		struct path_state ps;	/* SNID path state */
	} __attribute__ ((packed)) inf;
	union {
		__u32 cpu_addr : 16;	/* CPU address */
		struct extended_cssid ext_cssid;
	} __attribute__ ((packed)) pgid_high;
	__u32 cpu_id : 24;	/* CPU identification */
	__u32 cpu_model : 16;	/* CPU model */
	__u32 tod_high;		/* high word TOD clock */
} __attribute__ ((packed));

struct subchannel;
struct chp_link;
/**
 * struct css_driver - device driver for subchannels
 * @subchannel_type: subchannel type supported by this driver
 * @drv: embedded device driver structure
 * @irq: called on interrupts
 * @chp_event: called for events affecting a channel path
 * @sch_event: called for events affecting the subchannel
 * @probe: function called on probe
 * @remove: function called on remove
 * @shutdown: called at device shutdown
 * @prepare: prepare for pm state transition
 * @complete: undo work done in @prepare
 * @freeze: callback for freezing during hibernation snapshotting
 * @thaw: undo work done in @freeze
 * @restore: callback for restoring after hibernation
 * @settle: wait for asynchronous work to finish
 */
struct css_driver {
	struct css_device_id *subchannel_type;
	struct device_driver drv;
	void (*irq)(struct subchannel *);
	int (*chp_event)(struct subchannel *, struct chp_link *, int);
	int (*sch_event)(struct subchannel *, int);
	int (*probe)(struct subchannel *);
	int (*remove)(struct subchannel *);
	void (*shutdown)(struct subchannel *);
	int (*prepare) (struct subchannel *);
	void (*complete) (struct subchannel *);
	int (*freeze)(struct subchannel *);
	int (*thaw) (struct subchannel *);
	int (*restore)(struct subchannel *);
	int (*settle)(void);
};

#define to_cssdriver(n) container_of(n, struct css_driver, drv)

extern int css_driver_register(struct css_driver *);
extern void css_driver_unregister(struct css_driver *);

extern void css_sch_device_unregister(struct subchannel *);
extern int css_register_subchannel(struct subchannel *);
extern struct subchannel *css_alloc_subchannel(struct subchannel_id);
extern struct subchannel *get_subchannel_by_schid(struct subchannel_id);
extern int css_init_done;
extern int max_ssid;
int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
			       int (*fn_unknown)(struct subchannel_id,
						 void *), void *data);
extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *);
void css_update_ssd_info(struct subchannel *sch);

struct channel_subsystem {
	u8 cssid;
	int valid;
	struct channel_path *chps[__MAX_CHPID + 1];
	struct device device;
	struct pgid global_pgid;
	struct mutex mutex;
	/* channel measurement related */
	int cm_enabled;
	void *cub_addr1;
	void *cub_addr2;
	/* for orphaned ccw devices */
	struct subchannel *pseudo_subchannel;
};
#define to_css(dev) container_of(dev, struct channel_subsystem, device)

extern struct channel_subsystem *channel_subsystems[];

/* Helper functions to build lists for the slow path. */
void css_schedule_eval(struct subchannel_id schid);
void css_schedule_eval_all(void);
void css_schedule_eval_all_unreg(unsigned long delay);
int css_complete_work(void);

int sch_is_pseudo_sch(struct subchannel *);
struct schib;
int css_sch_is_valid(struct schib *);

extern struct workqueue_struct *cio_work_q;
void css_wait_for_slow_path(void);
void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo);
#endif
2161
drivers/s390/cio/device.c
Normal file
File diff suppressed because it is too large
150
drivers/s390/cio/device.h
Normal file
@@ -0,0 +1,150 @@
#ifndef S390_DEVICE_H
#define S390_DEVICE_H

#include <asm/ccwdev.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/notifier.h>
#include <linux/kernel_stat.h>
#include "io_sch.h"

/*
 * states of the device statemachine
 */
enum dev_state {
	DEV_STATE_NOT_OPER,
	DEV_STATE_SENSE_PGID,
	DEV_STATE_SENSE_ID,
	DEV_STATE_OFFLINE,
	DEV_STATE_VERIFY,
	DEV_STATE_ONLINE,
	DEV_STATE_W4SENSE,
	DEV_STATE_DISBAND_PGID,
	DEV_STATE_BOXED,
	/* states to wait for i/o completion before doing something */
	DEV_STATE_TIMEOUT_KILL,
	DEV_STATE_QUIESCE,
	/* special states for devices gone not operational */
	DEV_STATE_DISCONNECTED,
	DEV_STATE_DISCONNECTED_SENSE_ID,
	DEV_STATE_CMFCHANGE,
	DEV_STATE_CMFUPDATE,
	DEV_STATE_STEAL_LOCK,
	/* last element! */
	NR_DEV_STATES
};

/*
 * asynchronous events of the device statemachine
 */
enum dev_event {
	DEV_EVENT_NOTOPER,
	DEV_EVENT_INTERRUPT,
	DEV_EVENT_TIMEOUT,
	DEV_EVENT_VERIFY,
	/* last element! */
	NR_DEV_EVENTS
};

struct ccw_device;

/*
 * action called through jumptable
 */
typedef void (fsm_func_t)(struct ccw_device *, enum dev_event);
extern fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS];

static inline void
dev_fsm_event(struct ccw_device *cdev, enum dev_event dev_event)
{
	int state = cdev->private->state;
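	/*
	 * Attribute the interrupt to the driver's statistics class while
	 * online; otherwise count it as common I/O, except during CMF
	 * state changes, which are not accounted here.
	 */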
	if (dev_event == DEV_EVENT_INTERRUPT) {
		if (state == DEV_STATE_ONLINE)
			inc_irq_stat(cdev->private->int_class);
		else if (state != DEV_STATE_CMFCHANGE &&
			 state != DEV_STATE_CMFUPDATE)
			inc_irq_stat(IRQIO_CIO);
	}
	dev_jumptable[state][dev_event](cdev, dev_event);
}

/*
 * Delivers 1 if the device state is final.
 */
static inline int
dev_fsm_final_state(struct ccw_device *cdev)
{
	return (cdev->private->state == DEV_STATE_NOT_OPER ||
		cdev->private->state == DEV_STATE_OFFLINE ||
		cdev->private->state == DEV_STATE_ONLINE ||
		cdev->private->state == DEV_STATE_BOXED);
}

int __init io_subchannel_init(void);

void io_subchannel_recog_done(struct ccw_device *cdev);
void io_subchannel_init_config(struct subchannel *sch);

int ccw_device_cancel_halt_clear(struct ccw_device *);

int ccw_device_is_orphan(struct ccw_device *);

void ccw_device_recognition(struct ccw_device *);
int ccw_device_online(struct ccw_device *);
int ccw_device_offline(struct ccw_device *);
void ccw_device_update_sense_data(struct ccw_device *);
int ccw_device_test_sense_data(struct ccw_device *);
void ccw_device_schedule_sch_unregister(struct ccw_device *);
int ccw_purge_blacklisted(void);
void ccw_device_sched_todo(struct ccw_device *cdev, enum cdev_todo todo);
struct ccw_device *get_ccwdev_by_dev_id(struct ccw_dev_id *dev_id);

/* Function prototypes for device status and basic sense stuff. */
void ccw_device_accumulate_irb(struct ccw_device *, struct irb *);
void ccw_device_accumulate_basic_sense(struct ccw_device *, struct irb *);
int ccw_device_accumulate_and_sense(struct ccw_device *, struct irb *);
int ccw_device_do_sense(struct ccw_device *, struct irb *);

/* Function prototype for internal request handling. */
int lpm_adjust(int lpm, int mask);
void ccw_request_start(struct ccw_device *);
int ccw_request_cancel(struct ccw_device *cdev);
void ccw_request_handler(struct ccw_device *cdev);
void ccw_request_timeout(struct ccw_device *cdev);
void ccw_request_notoper(struct ccw_device *cdev);

/* Function prototypes for sense id stuff. */
void ccw_device_sense_id_start(struct ccw_device *);
void ccw_device_sense_id_done(struct ccw_device *, int);

/* Function prototypes for path grouping stuff. */
void ccw_device_verify_start(struct ccw_device *);
void ccw_device_verify_done(struct ccw_device *, int);

void ccw_device_disband_start(struct ccw_device *);
void ccw_device_disband_done(struct ccw_device *, int);

void ccw_device_stlck_start(struct ccw_device *, void *, void *, void *);
void ccw_device_stlck_done(struct ccw_device *, void *, int);

int ccw_device_call_handler(struct ccw_device *);

int ccw_device_stlck(struct ccw_device *);

/* Helper function for machine check handling. */
void ccw_device_trigger_reprobe(struct ccw_device *);
void ccw_device_kill_io(struct ccw_device *);
int ccw_device_notify(struct ccw_device *, int);
void ccw_device_set_disconnected(struct ccw_device *cdev);
void ccw_device_set_notoper(struct ccw_device *cdev);

void ccw_device_set_timeout(struct ccw_device *, int);

/* Channel measurement facility related */
void retry_set_schib(struct ccw_device *cdev);
void cmf_retry_copy_block(struct ccw_device *);
int cmf_reenable(struct ccw_device *);
int ccw_set_cmf(struct ccw_device *cdev, int enable);
extern struct device_attribute dev_attr_cmb_enable;
#endif
1117
drivers/s390/cio/device_fsm.c
Normal file
File diff suppressed because it is too large
222
drivers/s390/cio/device_id.c
Normal file
@@ -0,0 +1,222 @@
/*
 * CCW device SENSE ID I/O handling.
 *
 * Copyright IBM Corp. 2002, 2009
 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
 *	      Martin Schwidefsky <schwidefsky@de.ibm.com>
 *	      Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <asm/ccwdev.h>
#include <asm/setup.h>
#include <asm/cio.h>
#include <asm/diag.h>

#include "cio.h"
#include "cio_debug.h"
#include "device.h"
#include "io_sch.h"

#define SENSE_ID_RETRIES	256
#define SENSE_ID_TIMEOUT	(10 * HZ)
#define SENSE_ID_MIN_LEN	4
#define SENSE_ID_BASIC_LEN	7

/**
 * diag210_to_senseid - convert diag 0x210 data to sense id information
 * @senseid: sense id
 * @diag: diag 0x210 data
 *
 * Return 0 on success, non-zero otherwise.
 */
static int diag210_to_senseid(struct senseid *senseid, struct diag210 *diag)
{
	static struct {
		int class, type, cu_type;
	} vm_devices[] = {
		{ 0x08, 0x01, 0x3480 },
		{ 0x08, 0x02, 0x3430 },
		{ 0x08, 0x10, 0x3420 },
		{ 0x08, 0x42, 0x3424 },
		{ 0x08, 0x44, 0x9348 },
		{ 0x08, 0x81, 0x3490 },
		{ 0x08, 0x82, 0x3422 },
		{ 0x10, 0x41, 0x1403 },
		{ 0x10, 0x42, 0x3211 },
		{ 0x10, 0x43, 0x3203 },
		{ 0x10, 0x45, 0x3800 },
		{ 0x10, 0x47, 0x3262 },
		{ 0x10, 0x48, 0x3820 },
		{ 0x10, 0x49, 0x3800 },
		{ 0x10, 0x4a, 0x4245 },
		{ 0x10, 0x4b, 0x4248 },
		{ 0x10, 0x4d, 0x3800 },
		{ 0x10, 0x4e, 0x3820 },
		{ 0x10, 0x4f, 0x3820 },
		{ 0x10, 0x82, 0x2540 },
		{ 0x10, 0x84, 0x3525 },
		{ 0x20, 0x81, 0x2501 },
		{ 0x20, 0x82, 0x2540 },
		{ 0x20, 0x84, 0x3505 },
		{ 0x40, 0x01, 0x3278 },
		{ 0x40, 0x04, 0x3277 },
		{ 0x40, 0x80, 0x2250 },
		{ 0x40, 0xc0, 0x5080 },
		{ 0x80, 0x00, 0x3215 },
	};
	int i;

	/* Special case for osa devices. */
	if (diag->vrdcvcla == 0x02 && diag->vrdcvtyp == 0x20) {
		senseid->cu_type = 0x3088;
		senseid->cu_model = 0x60;
		senseid->reserved = 0xff;
		return 0;
	}
	for (i = 0; i < ARRAY_SIZE(vm_devices); i++) {
		if (diag->vrdcvcla == vm_devices[i].class &&
		    diag->vrdcvtyp == vm_devices[i].type) {
			senseid->cu_type = vm_devices[i].cu_type;
			senseid->reserved = 0xff;
			return 0;
		}
	}

	return -ENODEV;
}

/**
 * diag210_get_dev_info - retrieve device information via diag 0x210
 * @cdev: ccw device
 *
 * Returns zero on success, non-zero otherwise.
 */
static int diag210_get_dev_info(struct ccw_device *cdev)
{
	struct ccw_dev_id *dev_id = &cdev->private->dev_id;
	struct senseid *senseid = &cdev->private->senseid;
	struct diag210 diag_data;
	int rc;

	if (dev_id->ssid != 0)
		return -ENODEV;
	memset(&diag_data, 0, sizeof(diag_data));
	diag_data.vrdcdvno = dev_id->devno;
	diag_data.vrdclen = sizeof(diag_data);
	rc = diag210(&diag_data);
	CIO_TRACE_EVENT(4, "diag210");
	CIO_HEX_EVENT(4, &rc, sizeof(rc));
	CIO_HEX_EVENT(4, &diag_data, sizeof(diag_data));
	if (rc != 0 && rc != 2)
		goto err_failed;
	if (diag210_to_senseid(senseid, &diag_data))
		goto err_unknown;
	return 0;

err_unknown:
	CIO_MSG_EVENT(0, "snsid: device 0.%x.%04x: unknown diag210 data\n",
		      dev_id->ssid, dev_id->devno);
	return -ENODEV;
err_failed:
	CIO_MSG_EVENT(0, "snsid: device 0.%x.%04x: diag210 failed (rc=%d)\n",
		      dev_id->ssid, dev_id->devno, rc);
	return -ENODEV;
}

/*
 * Initialize SENSE ID data.
 */
static void snsid_init(struct ccw_device *cdev)
{
	cdev->private->flags.esid = 0;
	memset(&cdev->private->senseid, 0, sizeof(cdev->private->senseid));
	cdev->private->senseid.cu_type = 0xffff;
}

/*
 * Check for complete SENSE ID data.
 */
static int snsid_check(struct ccw_device *cdev, void *data)
{
	struct cmd_scsw *scsw = &cdev->private->irb.scsw.cmd;
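	/* scsw->count holds the residual count, so this yields the number
	 * of SENSE ID bytes actually transferred. */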
|
||||
int len = sizeof(struct senseid) - scsw->count;
|
||||
|
||||
/* Check for incomplete SENSE ID data. */
|
||||
if (len < SENSE_ID_MIN_LEN)
|
||||
goto out_restart;
|
||||
if (cdev->private->senseid.cu_type == 0xffff)
|
||||
goto out_restart;
|
||||
/* Check for incompatible SENSE ID data. */
|
||||
if (cdev->private->senseid.reserved != 0xff)
|
||||
return -EOPNOTSUPP;
|
||||
/* Check for extended-identification information. */
|
||||
if (len > SENSE_ID_BASIC_LEN)
|
||||
cdev->private->flags.esid = 1;
|
||||
return 0;
|
||||
|
||||
out_restart:
|
||||
snsid_init(cdev);
|
||||
return -EAGAIN;
|
||||
}

/*
 * Process SENSE ID request result.
 */
static void snsid_callback(struct ccw_device *cdev, void *data, int rc)
{
	struct ccw_dev_id *id = &cdev->private->dev_id;
	struct senseid *senseid = &cdev->private->senseid;
	int vm = 0;

	if (rc && MACHINE_IS_VM) {
		/* Try diag 0x210 fallback on z/VM. */
		snsid_init(cdev);
		if (diag210_get_dev_info(cdev) == 0) {
			rc = 0;
			vm = 1;
		}
	}
	CIO_MSG_EVENT(2, "snsid: device 0.%x.%04x: rc=%d %04x/%02x "
		      "%04x/%02x%s\n", id->ssid, id->devno, rc,
		      senseid->cu_type, senseid->cu_model, senseid->dev_type,
		      senseid->dev_model, vm ? " (diag210)" : "");
	ccw_device_sense_id_done(cdev, rc);
}

/**
 * ccw_device_sense_id_start - perform SENSE ID
 * @cdev: ccw device
 *
 * Execute a SENSE ID channel program on @cdev to update its sense id
 * information. When finished, call ccw_device_sense_id_done with a
 * return code specifying the result.
 */
void ccw_device_sense_id_start(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;
	struct ccw1 *cp = cdev->private->iccws;

	CIO_TRACE_EVENT(4, "snsid");
	CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
	/* Data setup. */
	snsid_init(cdev);
	/* Channel program setup. */
	cp->cmd_code = CCW_CMD_SENSE_ID;
	cp->cda = (u32) (addr_t) &cdev->private->senseid;
	cp->count = sizeof(struct senseid);
	cp->flags = CCW_FLAG_SLI;
	/* Request setup. */
	memset(req, 0, sizeof(*req));
	req->cp = cp;
	req->timeout = SENSE_ID_TIMEOUT;
	req->maxretries = SENSE_ID_RETRIES;
	req->lpm = sch->schib.pmcw.pam & sch->opm;
	req->check = snsid_check;
	req->callback = snsid_callback;
	ccw_request_start(cdev);
}
799
drivers/s390/cio/device_ops.c
Normal file
@@ -0,0 +1,799 @@
/*
 * Copyright IBM Corp. 2002, 2009
 *
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *	      Cornelia Huck (cornelia.huck@de.ibm.com)
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/completion.h>

#include <asm/ccwdev.h>
#include <asm/idals.h>
#include <asm/chpid.h>
#include <asm/fcx.h>

#include "cio.h"
#include "cio_debug.h"
#include "css.h"
#include "chsc.h"
#include "device.h"
#include "chp.h"
/**
 * ccw_device_set_options_mask() - set some options and unset the rest
 * @cdev: device for which the options are to be set
 * @flags: options to be set
 *
 * All flags specified in @flags are set, all flags not specified in @flags
 * are cleared.
 * Returns:
 *  %0 on success, -%EINVAL on an invalid flag combination.
 */
int ccw_device_set_options_mask(struct ccw_device *cdev, unsigned long flags)
{
	/*
	 * The flag usage is mutually exclusive ...
	 */
	if ((flags & CCWDEV_EARLY_NOTIFICATION) &&
	    (flags & CCWDEV_REPORT_ALL))
		return -EINVAL;
	cdev->private->options.fast = (flags & CCWDEV_EARLY_NOTIFICATION) != 0;
	cdev->private->options.repall = (flags & CCWDEV_REPORT_ALL) != 0;
	cdev->private->options.pgroup = (flags & CCWDEV_DO_PATHGROUP) != 0;
	cdev->private->options.force = (flags & CCWDEV_ALLOW_FORCE) != 0;
	cdev->private->options.mpath = (flags & CCWDEV_DO_MULTIPATH) != 0;
	return 0;
}

/**
 * ccw_device_set_options() - set some options
 * @cdev: device for which the options are to be set
 * @flags: options to be set
 *
 * All flags specified in @flags are set, the remainder is left untouched.
 * Returns:
 *  %0 on success, -%EINVAL if an invalid flag combination would ensue.
 */
int ccw_device_set_options(struct ccw_device *cdev, unsigned long flags)
{
	/*
	 * The flag usage is mutually exclusive ...
	 */
	if (((flags & CCWDEV_EARLY_NOTIFICATION) &&
	     (flags & CCWDEV_REPORT_ALL)) ||
	    ((flags & CCWDEV_EARLY_NOTIFICATION) &&
	     cdev->private->options.repall) ||
	    ((flags & CCWDEV_REPORT_ALL) &&
	     cdev->private->options.fast))
		return -EINVAL;
	cdev->private->options.fast |= (flags & CCWDEV_EARLY_NOTIFICATION) != 0;
	cdev->private->options.repall |= (flags & CCWDEV_REPORT_ALL) != 0;
	cdev->private->options.pgroup |= (flags & CCWDEV_DO_PATHGROUP) != 0;
	cdev->private->options.force |= (flags & CCWDEV_ALLOW_FORCE) != 0;
	cdev->private->options.mpath |= (flags & CCWDEV_DO_MULTIPATH) != 0;
	return 0;
}

/**
 * ccw_device_clear_options() - clear some options
 * @cdev: device for which the options are to be cleared
 * @flags: options to be cleared
 *
 * All flags specified in @flags are cleared, the remainder is left untouched.
 */
void ccw_device_clear_options(struct ccw_device *cdev, unsigned long flags)
{
	cdev->private->options.fast &= (flags & CCWDEV_EARLY_NOTIFICATION) == 0;
	cdev->private->options.repall &= (flags & CCWDEV_REPORT_ALL) == 0;
	cdev->private->options.pgroup &= (flags & CCWDEV_DO_PATHGROUP) == 0;
	cdev->private->options.force &= (flags & CCWDEV_ALLOW_FORCE) == 0;
	cdev->private->options.mpath &= (flags & CCWDEV_DO_MULTIPATH) == 0;
}
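
/*
 * Editor's sketch, not part of the original file: a driver typically sets
 * its options once before starting I/O, e.g. from its set_online callback.
 * Minimal hedged example for a hypothetical driver "foo":
 */
#if 0	/* sketch only, not compiled */
static int foo_set_options(struct ccw_device *cdev)
{
	/* Request path grouping and multipathing; clear all other options. */
	return ccw_device_set_options_mask(cdev, CCWDEV_DO_PATHGROUP |
					   CCWDEV_DO_MULTIPATH);
}
#endif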

/**
 * ccw_device_is_pathgroup - determine if paths to this device are grouped
 * @cdev: ccw device
 *
 * Return non-zero if there is a path group, zero otherwise.
 */
int ccw_device_is_pathgroup(struct ccw_device *cdev)
{
	return cdev->private->flags.pgroup;
}
EXPORT_SYMBOL(ccw_device_is_pathgroup);

/**
 * ccw_device_is_multipath - determine if device is operating in multipath mode
 * @cdev: ccw device
 *
 * Return non-zero if device is operating in multipath mode, zero otherwise.
 */
int ccw_device_is_multipath(struct ccw_device *cdev)
{
	return cdev->private->flags.mpath;
}
EXPORT_SYMBOL(ccw_device_is_multipath);

/**
 * ccw_device_clear() - terminate I/O request processing
 * @cdev: target ccw device
 * @intparm: interruption parameter; value is only used if no I/O is
 *	     outstanding, otherwise the intparm associated with the I/O
 *	     request is returned
 *
 * ccw_device_clear() calls csch on @cdev's subchannel.
 * Returns:
 *  %0 on success,
 *  -%ENODEV on device not operational,
 *  -%EINVAL on invalid device state.
 * Context:
 *  Interrupts disabled, ccw device lock held
 */
int ccw_device_clear(struct ccw_device *cdev, unsigned long intparm)
{
	struct subchannel *sch;
	int ret;

	if (!cdev || !cdev->dev.parent)
		return -ENODEV;
	sch = to_subchannel(cdev->dev.parent);
	if (!sch->schib.pmcw.ena)
		return -EINVAL;
	if (cdev->private->state == DEV_STATE_NOT_OPER)
		return -ENODEV;
	if (cdev->private->state != DEV_STATE_ONLINE &&
	    cdev->private->state != DEV_STATE_W4SENSE)
		return -EINVAL;

	ret = cio_clear(sch);
	if (ret == 0)
		cdev->private->intparm = intparm;
	return ret;
}

/**
 * ccw_device_start_key() - start a s390 channel program with key
 * @cdev: target ccw device
 * @cpa: logical start address of channel program
 * @intparm: user specific interruption parameter; will be presented back to
 *	     @cdev's interrupt handler. Allows a device driver to associate
 *	     the interrupt with a particular I/O request.
 * @lpm: defines the channel path to be used for a specific I/O request. A
 *	 value of 0 will make cio use the opm.
 * @key: storage key to be used for the I/O
 * @flags: additional flags; defines the action to be performed for I/O
 *	   processing.
 *
 * Start a S/390 channel program. When the interrupt arrives, the
 * IRQ handler is called, either immediately, delayed (dev-end missing,
 * or sense required) or never (no IRQ handler registered).
 * Returns:
 *  %0, if the operation was successful;
 *  -%EBUSY, if the device is busy, or status pending;
 *  -%EACCES, if no path specified in @lpm is operational;
 *  -%ENODEV, if the device is not operational.
 * Context:
 *  Interrupts disabled, ccw device lock held
 */
int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
			 unsigned long intparm, __u8 lpm, __u8 key,
			 unsigned long flags)
{
	struct subchannel *sch;
	int ret;

	if (!cdev || !cdev->dev.parent)
		return -ENODEV;
	sch = to_subchannel(cdev->dev.parent);
	if (!sch->schib.pmcw.ena)
		return -EINVAL;
	if (cdev->private->state == DEV_STATE_NOT_OPER)
		return -ENODEV;
	if (cdev->private->state == DEV_STATE_VERIFY) {
		/* Remember to fake irb when finished. */
		if (!cdev->private->flags.fake_irb) {
			cdev->private->flags.fake_irb = FAKE_CMD_IRB;
			cdev->private->intparm = intparm;
			return 0;
		} else
			/* There's already a fake I/O around. */
			return -EBUSY;
	}
	if (cdev->private->state != DEV_STATE_ONLINE ||
	    ((sch->schib.scsw.cmd.stctl & SCSW_STCTL_PRIM_STATUS) &&
	     !(sch->schib.scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS)) ||
	    cdev->private->flags.doverify)
		return -EBUSY;
	ret = cio_set_options(sch, flags);
	if (ret)
		return ret;
	/* Adjust requested path mask to exclude unusable paths. */
	if (lpm) {
		lpm &= sch->lpm;
		if (lpm == 0)
			return -EACCES;
	}
	ret = cio_start_key(sch, cpa, lpm, key);
	switch (ret) {
	case 0:
		cdev->private->intparm = intparm;
		break;
	case -EACCES:
	case -ENODEV:
		dev_fsm_event(cdev, DEV_EVENT_VERIFY);
		break;
	}
	return ret;
}
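
/*
 * Editor's sketch, not part of the original file: callers must hold the
 * ccw device lock with interrupts disabled when starting I/O. A minimal
 * hedged example issuing a NOP channel program, assuming the driver owns
 * a DMA-capable struct ccw1 and uses 0 as intparm:
 */
#if 0	/* sketch only, not compiled */
static int foo_issue_nop(struct ccw_device *cdev, struct ccw1 *ccw)
{
	unsigned long flags;
	int rc;

	ccw->cmd_code = CCW_CMD_NOOP;
	ccw->flags = CCW_FLAG_SLI;
	ccw->count = 0;
	ccw->cda = 0;
	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
	rc = ccw_device_start(cdev, ccw, 0 /* intparm */, 0 /* any path */, 0);
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	return rc;	/* completion is reported via cdev->handler */
}
#endif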

/**
 * ccw_device_start_timeout_key() - start a s390 channel program with timeout and key
 * @cdev: target ccw device
 * @cpa: logical start address of channel program
 * @intparm: user specific interruption parameter; will be presented back to
 *	     @cdev's interrupt handler. Allows a device driver to associate
 *	     the interrupt with a particular I/O request.
 * @lpm: defines the channel path to be used for a specific I/O request. A
 *	 value of 0 will make cio use the opm.
 * @key: storage key to be used for the I/O
 * @flags: additional flags; defines the action to be performed for I/O
 *	   processing.
 * @expires: timeout value in jiffies
 *
 * Start a S/390 channel program. When the interrupt arrives, the
 * IRQ handler is called, either immediately, delayed (dev-end missing,
 * or sense required) or never (no IRQ handler registered).
 * This function notifies the device driver if the channel program has not
 * completed during the time specified by @expires. If a timeout occurs, the
 * channel program is terminated via xsch, hsch or csch, and the device's
 * interrupt handler will be called with an irb containing ERR_PTR(-%ETIMEDOUT).
 * Returns:
 *  %0, if the operation was successful;
 *  -%EBUSY, if the device is busy, or status pending;
 *  -%EACCES, if no path specified in @lpm is operational;
 *  -%ENODEV, if the device is not operational.
 * Context:
 *  Interrupts disabled, ccw device lock held
 */
int ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa,
				 unsigned long intparm, __u8 lpm, __u8 key,
				 unsigned long flags, int expires)
{
	int ret;

	if (!cdev)
		return -ENODEV;
	ccw_device_set_timeout(cdev, expires);
	ret = ccw_device_start_key(cdev, cpa, intparm, lpm, key, flags);
	if (ret != 0)
		ccw_device_set_timeout(cdev, 0);
	return ret;
}

/**
 * ccw_device_start() - start a s390 channel program
 * @cdev: target ccw device
 * @cpa: logical start address of channel program
 * @intparm: user specific interruption parameter; will be presented back to
 *	     @cdev's interrupt handler. Allows a device driver to associate
 *	     the interrupt with a particular I/O request.
 * @lpm: defines the channel path to be used for a specific I/O request. A
 *	 value of 0 will make cio use the opm.
 * @flags: additional flags; defines the action to be performed for I/O
 *	   processing.
 *
 * Start a S/390 channel program. When the interrupt arrives, the
 * IRQ handler is called, either immediately, delayed (dev-end missing,
 * or sense required) or never (no IRQ handler registered).
 * Returns:
 *  %0, if the operation was successful;
 *  -%EBUSY, if the device is busy, or status pending;
 *  -%EACCES, if no path specified in @lpm is operational;
 *  -%ENODEV, if the device is not operational.
 * Context:
 *  Interrupts disabled, ccw device lock held
 */
int ccw_device_start(struct ccw_device *cdev, struct ccw1 *cpa,
		     unsigned long intparm, __u8 lpm, unsigned long flags)
{
	return ccw_device_start_key(cdev, cpa, intparm, lpm,
				    PAGE_DEFAULT_KEY, flags);
}

/**
 * ccw_device_start_timeout() - start a s390 channel program with timeout
 * @cdev: target ccw device
 * @cpa: logical start address of channel program
 * @intparm: user specific interruption parameter; will be presented back to
 *	     @cdev's interrupt handler. Allows a device driver to associate
 *	     the interrupt with a particular I/O request.
 * @lpm: defines the channel path to be used for a specific I/O request. A
 *	 value of 0 will make cio use the opm.
 * @flags: additional flags; defines the action to be performed for I/O
 *	   processing.
 * @expires: timeout value in jiffies
 *
 * Start a S/390 channel program. When the interrupt arrives, the
 * IRQ handler is called, either immediately, delayed (dev-end missing,
 * or sense required) or never (no IRQ handler registered).
 * This function notifies the device driver if the channel program has not
 * completed during the time specified by @expires. If a timeout occurs, the
 * channel program is terminated via xsch, hsch or csch, and the device's
 * interrupt handler will be called with an irb containing ERR_PTR(-%ETIMEDOUT).
 * Returns:
 *  %0, if the operation was successful;
 *  -%EBUSY, if the device is busy, or status pending;
 *  -%EACCES, if no path specified in @lpm is operational;
 *  -%ENODEV, if the device is not operational.
 * Context:
 *  Interrupts disabled, ccw device lock held
 */
int ccw_device_start_timeout(struct ccw_device *cdev, struct ccw1 *cpa,
			     unsigned long intparm, __u8 lpm,
			     unsigned long flags, int expires)
{
	return ccw_device_start_timeout_key(cdev, cpa, intparm, lpm,
					    PAGE_DEFAULT_KEY, flags,
					    expires);
}

/**
 * ccw_device_halt() - halt I/O request processing
 * @cdev: target ccw device
 * @intparm: interruption parameter; value is only used if no I/O is
 *	     outstanding, otherwise the intparm associated with the I/O
 *	     request is returned
 *
 * ccw_device_halt() calls hsch on @cdev's subchannel.
 * Returns:
 *  %0 on success,
 *  -%ENODEV on device not operational,
 *  -%EINVAL on invalid device state,
 *  -%EBUSY on device busy or interrupt pending.
 * Context:
 *  Interrupts disabled, ccw device lock held
 */
int ccw_device_halt(struct ccw_device *cdev, unsigned long intparm)
{
	struct subchannel *sch;
	int ret;

	if (!cdev || !cdev->dev.parent)
		return -ENODEV;
	sch = to_subchannel(cdev->dev.parent);
	if (!sch->schib.pmcw.ena)
		return -EINVAL;
	if (cdev->private->state == DEV_STATE_NOT_OPER)
		return -ENODEV;
	if (cdev->private->state != DEV_STATE_ONLINE &&
	    cdev->private->state != DEV_STATE_W4SENSE)
		return -EINVAL;

	ret = cio_halt(sch);
	if (ret == 0)
		cdev->private->intparm = intparm;
	return ret;
}

/**
 * ccw_device_resume() - resume channel program execution
 * @cdev: target ccw device
 *
 * ccw_device_resume() calls rsch on @cdev's subchannel.
 * Returns:
 *  %0 on success,
 *  -%ENODEV on device not operational,
 *  -%EINVAL on invalid device state,
 *  -%EBUSY on device busy or interrupt pending.
 * Context:
 *  Interrupts disabled, ccw device lock held
 */
int ccw_device_resume(struct ccw_device *cdev)
{
	struct subchannel *sch;

	if (!cdev || !cdev->dev.parent)
		return -ENODEV;
	sch = to_subchannel(cdev->dev.parent);
	if (!sch->schib.pmcw.ena)
		return -EINVAL;
	if (cdev->private->state == DEV_STATE_NOT_OPER)
		return -ENODEV;
	if (cdev->private->state != DEV_STATE_ONLINE ||
	    !(sch->schib.scsw.cmd.actl & SCSW_ACTL_SUSPENDED))
		return -EINVAL;
	return cio_resume(sch);
}
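
/*
 * Editor's sketch, not part of the original file: to cancel an in-flight
 * request a driver usually tries the mildest termination first and
 * escalates, all under the ccw device lock. Hedged example:
 */
#if 0	/* sketch only, not compiled */
static void foo_cancel_io(struct ccw_device *cdev)
{
	unsigned long flags;

	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
	if (ccw_device_halt(cdev, 0 /* intparm */))
		ccw_device_clear(cdev, 0);	/* csch as last resort */
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
}
#endif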

/*
 * Pass interrupt to device driver.
 */
int
ccw_device_call_handler(struct ccw_device *cdev)
{
	unsigned int stctl;
	int ending_status;

	/*
	 * we allow for the device action handler if:
	 *  - we received ending status
	 *  - the action handler requested to see all interrupts
	 *  - we received an intermediate status
	 *  - fast notification was requested (primary status)
	 *  - unsolicited interrupts
	 */
	stctl = scsw_stctl(&cdev->private->irb.scsw);
	ending_status = (stctl & SCSW_STCTL_SEC_STATUS) ||
		(stctl == (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)) ||
		(stctl == SCSW_STCTL_STATUS_PEND);
	if (!ending_status &&
	    !cdev->private->options.repall &&
	    !(stctl & SCSW_STCTL_INTER_STATUS) &&
	    !(cdev->private->options.fast &&
	      (stctl & SCSW_STCTL_PRIM_STATUS)))
		return 0;

	/* Clear pending timers for device driver initiated I/O. */
	if (ending_status)
		ccw_device_set_timeout(cdev, 0);
	/*
	 * Now we are ready to call the device driver interrupt handler.
	 */
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      &cdev->private->irb);

	/*
	 * Clear the old and now useless interrupt response block.
	 */
	memset(&cdev->private->irb, 0, sizeof(struct irb));

	return 1;
}

/**
 * ccw_device_get_ciw() - Search for CIW command in extended sense data.
 * @cdev: ccw device to inspect
 * @ct: command type to look for
 *
 * During SenseID, command information words (CIWs) describing special
 * commands available to the device may have been stored in the extended
 * sense data. This function searches for CIWs of a specified command
 * type in the extended sense data.
 * Returns:
 *  %NULL if no extended sense data has been stored or if no CIW of the
 *  specified command type could be found,
 *  else a pointer to the CIW of the specified command type.
 */
struct ciw *ccw_device_get_ciw(struct ccw_device *cdev, __u32 ct)
{
	int ciw_cnt;

	if (cdev->private->flags.esid == 0)
		return NULL;
	for (ciw_cnt = 0; ciw_cnt < MAX_CIWS; ciw_cnt++)
		if (cdev->private->senseid.ciw[ciw_cnt].ct == ct)
			return cdev->private->senseid.ciw + ciw_cnt;
	return NULL;
}
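
/*
 * Editor's sketch, not part of the original file: a driver can look up a
 * special command stored in the extended SENSE ID data, e.g. the
 * read-configuration-data command, and fall back if none was reported:
 */
#if 0	/* sketch only, not compiled */
static int foo_get_rcd_cmd(struct ccw_device *cdev, __u8 *cmd, __u16 *count)
{
	struct ciw *ciw = ccw_device_get_ciw(cdev, CIW_TYPE_RCD);

	if (!ciw)
		return -EOPNOTSUPP;	/* no RCD CIW in sense data */
	*cmd = ciw->cmd;
	*count = ciw->count;
	return 0;
}
#endif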

/**
 * ccw_device_get_path_mask() - get currently available paths
 * @cdev: ccw device to be queried
 * Returns:
 *  %0 if no subchannel for the device is available,
 *  else the mask of currently available paths for the ccw device's subchannel.
 */
__u8 ccw_device_get_path_mask(struct ccw_device *cdev)
{
	struct subchannel *sch;

	if (!cdev->dev.parent)
		return 0;

	sch = to_subchannel(cdev->dev.parent);
	return sch->lpm;
}
struct stlck_data {
	struct completion done;
	int rc;
};

void ccw_device_stlck_done(struct ccw_device *cdev, void *data, int rc)
{
	struct stlck_data *sdata = data;

	sdata->rc = rc;
	complete(&sdata->done);
}

/*
 * Perform unconditional reserve + release.
 */
int ccw_device_stlck(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct stlck_data data;
	u8 *buffer;
	int rc;

	/* Check if steal lock operation is valid for this device. */
	if (cdev->drv) {
		if (!cdev->private->options.force)
			return -EINVAL;
	}
	buffer = kzalloc(64, GFP_DMA | GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;
	init_completion(&data.done);
	data.rc = -EIO;
	spin_lock_irq(sch->lock);
	rc = cio_enable_subchannel(sch, (u32) (addr_t) sch);
	if (rc)
		goto out_unlock;
	/* Perform operation. */
	cdev->private->state = DEV_STATE_STEAL_LOCK;
	ccw_device_stlck_start(cdev, &data, &buffer[0], &buffer[32]);
	spin_unlock_irq(sch->lock);
	/* Wait for operation to finish. */
	if (wait_for_completion_interruptible(&data.done)) {
		/* Got a signal. */
		spin_lock_irq(sch->lock);
		ccw_request_cancel(cdev);
		spin_unlock_irq(sch->lock);
		wait_for_completion(&data.done);
	}
	rc = data.rc;
	/* Check results. */
	spin_lock_irq(sch->lock);
	cio_disable_subchannel(sch);
	cdev->private->state = DEV_STATE_BOXED;
out_unlock:
	spin_unlock_irq(sch->lock);
	kfree(buffer);

	return rc;
}

/**
 * ccw_device_get_chp_desc - return newly allocated channel-path descriptor
 * @cdev: device to obtain the descriptor for
 * @chp_idx: index of the channel path
 *
 * On success return a newly allocated copy of the channel-path description
 * data associated with the given channel path. Return %NULL on error.
 */
struct channel_path_desc *ccw_device_get_chp_desc(struct ccw_device *cdev,
						  int chp_idx)
{
	struct subchannel *sch;
	struct chp_id chpid;

	sch = to_subchannel(cdev->dev.parent);
	chp_id_init(&chpid);
	chpid.id = sch->schib.pmcw.chpid[chp_idx];
	return chp_get_chp_desc(chpid);
}

/**
 * ccw_device_get_id - obtain a ccw device id
 * @cdev: device to obtain the id for
 * @dev_id: where to fill in the values
 */
void ccw_device_get_id(struct ccw_device *cdev, struct ccw_dev_id *dev_id)
{
	*dev_id = cdev->private->dev_id;
}
EXPORT_SYMBOL(ccw_device_get_id);

/**
 * ccw_device_tm_start_key - perform start function
 * @cdev: ccw device on which to perform the start function
 * @tcw: transport-command word to be started
 * @intparm: user defined parameter to be passed to the interrupt handler
 * @lpm: mask of paths to use
 * @key: storage key to use for storage access
 *
 * Start the tcw on the given ccw device. Return zero on success, non-zero
 * otherwise.
 */
int ccw_device_tm_start_key(struct ccw_device *cdev, struct tcw *tcw,
			    unsigned long intparm, u8 lpm, u8 key)
{
	struct subchannel *sch;
	int rc;

	sch = to_subchannel(cdev->dev.parent);
	if (!sch->schib.pmcw.ena)
		return -EINVAL;
	if (cdev->private->state == DEV_STATE_VERIFY) {
		/* Remember to fake irb when finished. */
		if (!cdev->private->flags.fake_irb) {
			cdev->private->flags.fake_irb = FAKE_TM_IRB;
			cdev->private->intparm = intparm;
			return 0;
		} else
			/* There's already a fake I/O around. */
			return -EBUSY;
	}
	if (cdev->private->state != DEV_STATE_ONLINE)
		return -EIO;
	/* Adjust requested path mask to exclude unusable paths. */
	if (lpm) {
		lpm &= sch->lpm;
		if (lpm == 0)
			return -EACCES;
	}
	rc = cio_tm_start_key(sch, tcw, lpm, key);
	if (rc == 0)
		cdev->private->intparm = intparm;
	return rc;
}
EXPORT_SYMBOL(ccw_device_tm_start_key);

/**
 * ccw_device_tm_start_timeout_key - perform start function
 * @cdev: ccw device on which to perform the start function
 * @tcw: transport-command word to be started
 * @intparm: user defined parameter to be passed to the interrupt handler
 * @lpm: mask of paths to use
 * @key: storage key to use for storage access
 * @expires: time span in jiffies after which to abort request
 *
 * Start the tcw on the given ccw device. Return zero on success, non-zero
 * otherwise.
 */
int ccw_device_tm_start_timeout_key(struct ccw_device *cdev, struct tcw *tcw,
				    unsigned long intparm, u8 lpm, u8 key,
				    int expires)
{
	int ret;

	ccw_device_set_timeout(cdev, expires);
	ret = ccw_device_tm_start_key(cdev, tcw, intparm, lpm, key);
	if (ret != 0)
		ccw_device_set_timeout(cdev, 0);
	return ret;
}
EXPORT_SYMBOL(ccw_device_tm_start_timeout_key);

/**
 * ccw_device_tm_start - perform start function
 * @cdev: ccw device on which to perform the start function
 * @tcw: transport-command word to be started
 * @intparm: user defined parameter to be passed to the interrupt handler
 * @lpm: mask of paths to use
 *
 * Start the tcw on the given ccw device. Return zero on success, non-zero
 * otherwise.
 */
int ccw_device_tm_start(struct ccw_device *cdev, struct tcw *tcw,
			unsigned long intparm, u8 lpm)
{
	return ccw_device_tm_start_key(cdev, tcw, intparm, lpm,
				       PAGE_DEFAULT_KEY);
}
EXPORT_SYMBOL(ccw_device_tm_start);

/**
 * ccw_device_tm_start_timeout - perform start function
 * @cdev: ccw device on which to perform the start function
 * @tcw: transport-command word to be started
 * @intparm: user defined parameter to be passed to the interrupt handler
 * @lpm: mask of paths to use
 * @expires: time span in jiffies after which to abort request
 *
 * Start the tcw on the given ccw device. Return zero on success, non-zero
 * otherwise.
 */
int ccw_device_tm_start_timeout(struct ccw_device *cdev, struct tcw *tcw,
				unsigned long intparm, u8 lpm, int expires)
{
	return ccw_device_tm_start_timeout_key(cdev, tcw, intparm, lpm,
					       PAGE_DEFAULT_KEY, expires);
}
EXPORT_SYMBOL(ccw_device_tm_start_timeout);
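
/*
 * Editor's sketch, not part of the original file: transport-mode I/O is
 * started with a transport-command word instead of a ccw chain. Assuming
 * the driver has already built a valid tcw (e.g. via the itcw helpers),
 * starting it looks much like command-mode I/O:
 */
#if 0	/* sketch only, not compiled */
static int foo_start_tcw(struct ccw_device *cdev, struct tcw *tcw)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
	/* 10s deadline; the request is terminated on expiry. */
	rc = ccw_device_tm_start_timeout(cdev, tcw, 0 /* intparm */,
					 0 /* any path */, 10 * HZ);
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	return rc;
}
#endif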

/**
 * ccw_device_get_mdc - accumulate max data count
 * @cdev: ccw device for which the max data count is accumulated
 * @mask: mask of paths to use
 *
 * Return the number of 64K-bytes blocks all paths at least support
 * for a transport command. Return values <= 0 indicate failures.
 */
int ccw_device_get_mdc(struct ccw_device *cdev, u8 mask)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct channel_path *chp;
	struct chp_id chpid;
	int mdc = 0, i;

	/* Adjust requested path mask to exclude varied off paths. */
	if (mask)
		mask &= sch->lpm;
	else
		mask = sch->lpm;

	chp_id_init(&chpid);
	for (i = 0; i < 8; i++) {
		if (!(mask & (0x80 >> i)))
			continue;
		chpid.id = sch->schib.pmcw.chpid[i];
		chp = chpid_to_chp(chpid);
		if (!chp)
			continue;

		mutex_lock(&chp->lock);
		if (!chp->desc_fmt1.f) {
			mutex_unlock(&chp->lock);
			return 0;
		}
		if (!chp->desc_fmt1.r)
			mdc = 1;
		mdc = mdc ? min_t(int, mdc, chp->desc_fmt1.mdc) :
			    chp->desc_fmt1.mdc;
		mutex_unlock(&chp->lock);
	}

	return mdc;
}
EXPORT_SYMBOL(ccw_device_get_mdc);
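
/*
 * Editor's sketch, not part of the original file: the returned value is the
 * minimum mdc over all usable paths, in units of 64K bytes. Worked example,
 * purely illustrative: with two paths reporting mdc values of 4 and 2,
 * ccw_device_get_mdc() yields 2, i.e. a transport command may move at most
 * 2 * 64K = 128K bytes:
 */
#if 0	/* sketch only, not compiled */
static size_t foo_max_transfer(struct ccw_device *cdev)
{
	int mdc = ccw_device_get_mdc(cdev, 0 /* all usable paths */);

	return mdc > 0 ? (size_t) mdc * 65536 : 0;
}
#endif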

/**
 * ccw_device_tm_intrg - perform interrogate function
 * @cdev: ccw device on which to perform the interrogate function
 *
 * Perform an interrogate function on the given ccw device. Return zero on
 * success, non-zero otherwise.
 */
int ccw_device_tm_intrg(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	if (!sch->schib.pmcw.ena)
		return -EINVAL;
	if (cdev->private->state != DEV_STATE_ONLINE)
		return -EIO;
	if (!scsw_is_tm(&sch->schib.scsw) ||
	    !(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_START_PEND))
		return -EINVAL;
	return cio_tm_intrg(sch);
}
EXPORT_SYMBOL(ccw_device_tm_intrg);

/**
 * ccw_device_get_schid - obtain a subchannel id
 * @cdev: device to obtain the id for
 * @schid: where to fill in the values
 */
void ccw_device_get_schid(struct ccw_device *cdev, struct subchannel_id *schid)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	*schid = sch->schid;
}
EXPORT_SYMBOL_GPL(ccw_device_get_schid);

MODULE_LICENSE("GPL");
EXPORT_SYMBOL(ccw_device_set_options_mask);
EXPORT_SYMBOL(ccw_device_set_options);
EXPORT_SYMBOL(ccw_device_clear_options);
EXPORT_SYMBOL(ccw_device_clear);
EXPORT_SYMBOL(ccw_device_halt);
EXPORT_SYMBOL(ccw_device_resume);
EXPORT_SYMBOL(ccw_device_start_timeout);
EXPORT_SYMBOL(ccw_device_start);
EXPORT_SYMBOL(ccw_device_start_timeout_key);
EXPORT_SYMBOL(ccw_device_start_key);
EXPORT_SYMBOL(ccw_device_get_ciw);
EXPORT_SYMBOL(ccw_device_get_path_mask);
EXPORT_SYMBOL_GPL(ccw_device_get_chp_desc);
669
drivers/s390/cio/device_pgid.c
Normal file
@@ -0,0 +1,669 @@
/*
 * CCW device PGID and path verification I/O handling.
 *
 * Copyright IBM Corp. 2002, 2009
 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
 *	      Martin Schwidefsky <schwidefsky@de.ibm.com>
 *	      Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/bitops.h>
#include <asm/ccwdev.h>
#include <asm/cio.h>

#include "cio.h"
#include "cio_debug.h"
#include "device.h"
#include "io_sch.h"

#define PGID_RETRIES	256
#define PGID_TIMEOUT	(10 * HZ)

static void verify_start(struct ccw_device *cdev);

/*
 * Process path verification data and report result.
 */
static void verify_done(struct ccw_device *cdev, int rc)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_dev_id *id = &cdev->private->dev_id;
	int mpath = cdev->private->flags.mpath;
	int pgroup = cdev->private->flags.pgroup;

	if (rc)
		goto out;
	/* Ensure consistent multipathing state at device and channel. */
	if (sch->config.mp != mpath) {
		sch->config.mp = mpath;
		rc = cio_commit_config(sch);
	}
out:
	CIO_MSG_EVENT(2, "vrfy: device 0.%x.%04x: rc=%d pgroup=%d mpath=%d "
		      "vpm=%02x\n", id->ssid, id->devno, rc, pgroup, mpath,
		      sch->vpm);
	ccw_device_verify_done(cdev, rc);
}

/*
 * Create channel program to perform a NOOP.
 */
static void nop_build_cp(struct ccw_device *cdev)
{
	struct ccw_request *req = &cdev->private->req;
	struct ccw1 *cp = cdev->private->iccws;

	cp->cmd_code = CCW_CMD_NOOP;
	cp->cda = 0;
	cp->count = 0;
	cp->flags = CCW_FLAG_SLI;
	req->cp = cp;
}

/*
 * Perform NOOP on a single path.
 */
static void nop_do(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;

	req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam & sch->opm &
			      ~cdev->private->path_noirq_mask);
	if (!req->lpm)
		goto out_nopath;
	nop_build_cp(cdev);
	ccw_request_start(cdev);
	return;

out_nopath:
	verify_done(cdev, sch->vpm ? 0 : -EACCES);
}

/*
 * Adjust NOOP I/O status.
 */
static enum io_status nop_filter(struct ccw_device *cdev, void *data,
				 struct irb *irb, enum io_status status)
{
	/* Only subchannel status might indicate a path error. */
	if (status == IO_STATUS_ERROR && irb->scsw.cmd.cstat == 0)
		return IO_DONE;
	return status;
}

/*
 * Process NOOP request result for a single path.
 */
static void nop_callback(struct ccw_device *cdev, void *data, int rc)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;

	switch (rc) {
	case 0:
		sch->vpm |= req->lpm;
		break;
	case -ETIME:
		cdev->private->path_noirq_mask |= req->lpm;
		break;
	case -EACCES:
		cdev->private->path_notoper_mask |= req->lpm;
		break;
	default:
		goto err;
	}
	/* Continue on the next path. */
	req->lpm >>= 1;
	nop_do(cdev);
	return;

err:
	verify_done(cdev, rc);
}
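
/*
 * Editor's sketch, not part of the original file: path masks are 8-bit
 * values with the most significant bit denoting the first channel path.
 * The verification code walks them by starting at 0x80 and shifting right
 * after each path, with lpm_adjust() skipping masked-out paths. Purely
 * illustrative loop over a mask:
 */
#if 0	/* sketch only, not compiled */
static void foo_walk_paths(u8 pam)
{
	u8 lpm;
	int i;

	for (lpm = 0x80, i = 0; i < 8; lpm >>= 1, i++)
		if (pam & lpm)
			;	/* path i is installed; e.g. issue NOP here */
}
#endif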

/*
 * Create channel program to perform SET PGID on a single path.
 */
static void spid_build_cp(struct ccw_device *cdev, u8 fn)
{
	struct ccw_request *req = &cdev->private->req;
	struct ccw1 *cp = cdev->private->iccws;
	int i = 8 - ffs(req->lpm);
	struct pgid *pgid = &cdev->private->pgid[i];

	pgid->inf.fc = fn;
	cp->cmd_code = CCW_CMD_SET_PGID;
	cp->cda = (u32) (addr_t) pgid;
	cp->count = sizeof(*pgid);
	cp->flags = CCW_FLAG_SLI;
	req->cp = cp;
}

static void pgid_wipeout_callback(struct ccw_device *cdev, void *data, int rc)
{
	if (rc) {
		/* We don't know the path groups' state. Abort. */
		verify_done(cdev, rc);
		return;
	}
	/*
	 * Path groups have been reset. Restart path verification but
	 * leave paths in path_noirq_mask out.
	 */
	cdev->private->flags.pgid_unknown = 0;
	verify_start(cdev);
}

/*
 * Reset pathgroups and restart path verification, leave unusable paths out.
 */
static void pgid_wipeout_start(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_dev_id *id = &cdev->private->dev_id;
	struct ccw_request *req = &cdev->private->req;
	u8 fn;

	CIO_MSG_EVENT(2, "wipe: device 0.%x.%04x: pvm=%02x nim=%02x\n",
		      id->ssid, id->devno, cdev->private->pgid_valid_mask,
		      cdev->private->path_noirq_mask);

	/* Initialize request data. */
	memset(req, 0, sizeof(*req));
	req->timeout = PGID_TIMEOUT;
	req->maxretries = PGID_RETRIES;
	req->lpm = sch->schib.pmcw.pam;
	req->callback = pgid_wipeout_callback;
	fn = SPID_FUNC_DISBAND;
	if (cdev->private->flags.mpath)
		fn |= SPID_FUNC_MULTI_PATH;
	spid_build_cp(cdev, fn);
	ccw_request_start(cdev);
}

/*
 * Perform establish/resign SET PGID on a single path.
 */
static void spid_do(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;
	u8 fn;

	/* Use next available path that is not already in correct state. */
	req->lpm = lpm_adjust(req->lpm, cdev->private->pgid_todo_mask);
	if (!req->lpm)
		goto out_nopath;
	/* Channel program setup. */
	if (req->lpm & sch->opm)
		fn = SPID_FUNC_ESTABLISH;
	else
		fn = SPID_FUNC_RESIGN;
	if (cdev->private->flags.mpath)
		fn |= SPID_FUNC_MULTI_PATH;
	spid_build_cp(cdev, fn);
	ccw_request_start(cdev);
	return;

out_nopath:
	if (cdev->private->flags.pgid_unknown) {
		/* At least one SPID could be partially done. */
		pgid_wipeout_start(cdev);
		return;
	}
	verify_done(cdev, sch->vpm ? 0 : -EACCES);
}

/*
 * Process SET PGID request result for a single path.
 */
static void spid_callback(struct ccw_device *cdev, void *data, int rc)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;

	switch (rc) {
	case 0:
		sch->vpm |= req->lpm & sch->opm;
		break;
	case -ETIME:
		cdev->private->flags.pgid_unknown = 1;
		cdev->private->path_noirq_mask |= req->lpm;
		break;
	case -EACCES:
		cdev->private->path_notoper_mask |= req->lpm;
		break;
	case -EOPNOTSUPP:
		if (cdev->private->flags.mpath) {
			/* Try without multipathing. */
			cdev->private->flags.mpath = 0;
			goto out_restart;
		}
		/* Try without pathgrouping. */
		cdev->private->flags.pgroup = 0;
		goto out_restart;
	default:
		goto err;
	}
	req->lpm >>= 1;
	spid_do(cdev);
	return;

out_restart:
	verify_start(cdev);
	return;
err:
	verify_done(cdev, rc);
}

static void spid_start(struct ccw_device *cdev)
{
	struct ccw_request *req = &cdev->private->req;

	/* Initialize request data. */
	memset(req, 0, sizeof(*req));
	req->timeout = PGID_TIMEOUT;
	req->maxretries = PGID_RETRIES;
	req->lpm = 0x80;
	req->singlepath = 1;
	req->callback = spid_callback;
	spid_do(cdev);
}

static int pgid_is_reset(struct pgid *p)
{
	char *c;

	for (c = (char *)p + 1; c < (char *)(p + 1); c++) {
		if (*c != 0)
			return 0;
	}
	return 1;
}

static int pgid_cmp(struct pgid *p1, struct pgid *p2)
{
	return memcmp((char *) p1 + 1, (char *) p2 + 1,
		      sizeof(struct pgid) - 1);
}

/*
 * Determine pathgroup state from PGID data.
 */
static void pgid_analyze(struct ccw_device *cdev, struct pgid **p,
			 int *mismatch, u8 *reserved, u8 *reset)
{
	struct pgid *pgid = &cdev->private->pgid[0];
	struct pgid *first = NULL;
	int lpm;
	int i;

	*mismatch = 0;
	*reserved = 0;
	*reset = 0;
	for (i = 0, lpm = 0x80; i < 8; i++, pgid++, lpm >>= 1) {
		if ((cdev->private->pgid_valid_mask & lpm) == 0)
			continue;
		if (pgid->inf.ps.state2 == SNID_STATE2_RESVD_ELSE)
			*reserved |= lpm;
		if (pgid_is_reset(pgid)) {
			*reset |= lpm;
			continue;
		}
		if (!first) {
			first = pgid;
			continue;
		}
		if (pgid_cmp(pgid, first) != 0)
			*mismatch = 1;
	}
	if (!first)
		first = &channel_subsystems[0]->global_pgid;
	*p = first;
}

static u8 pgid_to_donepm(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct pgid *pgid;
	int i;
	int lpm;
	u8 donepm = 0;

	/* Set bits for paths which are already in the target state. */
	for (i = 0; i < 8; i++) {
		lpm = 0x80 >> i;
		if ((cdev->private->pgid_valid_mask & lpm) == 0)
			continue;
		pgid = &cdev->private->pgid[i];
		if (sch->opm & lpm) {
			if (pgid->inf.ps.state1 != SNID_STATE1_GROUPED)
				continue;
		} else {
			if (pgid->inf.ps.state1 != SNID_STATE1_UNGROUPED)
				continue;
		}
		if (cdev->private->flags.mpath) {
			if (pgid->inf.ps.state3 != SNID_STATE3_MULTI_PATH)
				continue;
		} else {
			if (pgid->inf.ps.state3 != SNID_STATE3_SINGLE_PATH)
				continue;
		}
		donepm |= lpm;
	}

	return donepm;
}

static void pgid_fill(struct ccw_device *cdev, struct pgid *pgid)
{
	int i;

	for (i = 0; i < 8; i++)
		memcpy(&cdev->private->pgid[i], pgid, sizeof(struct pgid));
}

/*
 * Process SENSE PGID data and report result.
 */
static void snid_done(struct ccw_device *cdev, int rc)
{
	struct ccw_dev_id *id = &cdev->private->dev_id;
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct pgid *pgid;
	int mismatch = 0;
	u8 reserved = 0;
	u8 reset = 0;
	u8 donepm;

	if (rc)
		goto out;
	pgid_analyze(cdev, &pgid, &mismatch, &reserved, &reset);
	if (reserved == cdev->private->pgid_valid_mask)
		rc = -EUSERS;
	else if (mismatch)
		rc = -EOPNOTSUPP;
	else {
		donepm = pgid_to_donepm(cdev);
		sch->vpm = donepm & sch->opm;
		cdev->private->pgid_reset_mask |= reset;
		cdev->private->pgid_todo_mask &=
			~(donepm | cdev->private->path_noirq_mask);
		pgid_fill(cdev, pgid);
	}
out:
	CIO_MSG_EVENT(2, "snid: device 0.%x.%04x: rc=%d pvm=%02x vpm=%02x "
		      "todo=%02x mism=%d rsvd=%02x reset=%02x\n", id->ssid,
		      id->devno, rc, cdev->private->pgid_valid_mask, sch->vpm,
		      cdev->private->pgid_todo_mask, mismatch, reserved, reset);
	switch (rc) {
	case 0:
		if (cdev->private->flags.pgid_unknown) {
			pgid_wipeout_start(cdev);
			return;
		}
		/* Anything left to do? */
		if (cdev->private->pgid_todo_mask == 0) {
			verify_done(cdev, sch->vpm == 0 ? -EACCES : 0);
			return;
		}
		/* Perform path-grouping. */
		spid_start(cdev);
		break;
	case -EOPNOTSUPP:
		/* Path-grouping not supported. */
		cdev->private->flags.pgroup = 0;
		cdev->private->flags.mpath = 0;
		verify_start(cdev);
		break;
	default:
		verify_done(cdev, rc);
	}
}
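
/*
 * Editor's note, not part of the original file: snid_done() drives the
 * verification state machine. Summarized decision table, as implemented
 * above:
 *
 *	all paths reserved elsewhere	-> rc = -EUSERS (device in use)
 *	PGIDs disagree between paths	-> rc = -EOPNOTSUPP (mismatch)
 *	rc == 0, unknown PGID state	-> wipe out path groups, restart
 *	rc == 0, nothing left to do	-> verify_done()
 *	rc == 0, paths left to group	-> spid_start()
 *	rc == -EOPNOTSUPP		-> retry without path grouping
 */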

/*
 * Create channel program to perform a SENSE PGID on a single path.
 */
static void snid_build_cp(struct ccw_device *cdev)
{
	struct ccw_request *req = &cdev->private->req;
	struct ccw1 *cp = cdev->private->iccws;
	int i = 8 - ffs(req->lpm);

	/* Channel program setup. */
	cp->cmd_code = CCW_CMD_SENSE_PGID;
	cp->cda = (u32) (addr_t) &cdev->private->pgid[i];
	cp->count = sizeof(struct pgid);
	cp->flags = CCW_FLAG_SLI;
	req->cp = cp;
}

/*
 * Perform SENSE PGID on a single path.
 */
static void snid_do(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;
	int ret;

	req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam &
			      ~cdev->private->path_noirq_mask);
	if (!req->lpm)
		goto out_nopath;
	snid_build_cp(cdev);
	ccw_request_start(cdev);
	return;

out_nopath:
	if (cdev->private->pgid_valid_mask)
		ret = 0;
	else if (cdev->private->path_noirq_mask)
		ret = -ETIME;
	else
		ret = -EACCES;
	snid_done(cdev, ret);
}

/*
 * Process SENSE PGID request result for single path.
 */
static void snid_callback(struct ccw_device *cdev, void *data, int rc)
{
	struct ccw_request *req = &cdev->private->req;

	switch (rc) {
	case 0:
		cdev->private->pgid_valid_mask |= req->lpm;
		break;
	case -ETIME:
		cdev->private->flags.pgid_unknown = 1;
		cdev->private->path_noirq_mask |= req->lpm;
		break;
	case -EACCES:
		cdev->private->path_notoper_mask |= req->lpm;
		break;
	default:
		goto err;
	}
	/* Continue on the next path. */
	req->lpm >>= 1;
	snid_do(cdev);
	return;

err:
	snid_done(cdev, rc);
}

/*
 * Perform path verification.
 */
static void verify_start(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;
	struct ccw_dev_id *devid = &cdev->private->dev_id;

	sch->vpm = 0;
	sch->lpm = sch->schib.pmcw.pam;

	/* Initialize PGID data. */
	memset(cdev->private->pgid, 0, sizeof(cdev->private->pgid));
	cdev->private->pgid_valid_mask = 0;
	cdev->private->pgid_todo_mask = sch->schib.pmcw.pam;
	cdev->private->path_notoper_mask = 0;

	/* Initialize request data. */
	memset(req, 0, sizeof(*req));
	req->timeout = PGID_TIMEOUT;
	req->maxretries = PGID_RETRIES;
	req->lpm = 0x80;
	req->singlepath = 1;
	if (cdev->private->flags.pgroup) {
		CIO_TRACE_EVENT(4, "snid");
		CIO_HEX_EVENT(4, devid, sizeof(*devid));
		req->callback = snid_callback;
		snid_do(cdev);
	} else {
		CIO_TRACE_EVENT(4, "nop");
		CIO_HEX_EVENT(4, devid, sizeof(*devid));
		req->filter = nop_filter;
		req->callback = nop_callback;
		nop_do(cdev);
	}
}

/**
 * ccw_device_verify_start - perform path verification
 * @cdev: ccw device
 *
 * Perform an I/O on each available channel path to @cdev to determine which
 * paths are operational. The resulting path mask is stored in sch->vpm.
 * If device options specify pathgrouping, establish a pathgroup for the
 * operational paths. When finished, call ccw_device_verify_done with a
 * return code specifying the result.
 */
void ccw_device_verify_start(struct ccw_device *cdev)
{
	CIO_TRACE_EVENT(4, "vrfy");
	CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
	/*
	 * Initialize pathgroup and multipath state with target values.
	 * They may change in the course of path verification.
	 */
	cdev->private->flags.pgroup = cdev->private->options.pgroup;
	cdev->private->flags.mpath = cdev->private->options.mpath;
	cdev->private->flags.doverify = 0;
	cdev->private->path_noirq_mask = 0;
	verify_start(cdev);
}
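
/*
 * Editor's note, not part of the original file: overall verification flow
 * as implemented in this file, assuming path grouping is enabled:
 *
 *	ccw_device_verify_start()
 *	  -> verify_start()
 *	       -> snid_do()/snid_callback()	SENSE PGID on each path
 *	       -> snid_done()			analyze PGID data
 *	            -> spid_start()/spid_do()	SET PGID where still needed
 *	            -> verify_done()		report result via
 *						ccw_device_verify_done()
 *
 * With path grouping disabled, nop_do()/nop_callback() probe each path
 * with a NOP channel program instead.
 */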

/*
 * Process disband SET PGID request result.
 */
static void disband_callback(struct ccw_device *cdev, void *data, int rc)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_dev_id *id = &cdev->private->dev_id;

	if (rc)
		goto out;
	/* Ensure consistent multipathing state at device and channel. */
	cdev->private->flags.mpath = 0;
	if (sch->config.mp) {
		sch->config.mp = 0;
		rc = cio_commit_config(sch);
	}
out:
	CIO_MSG_EVENT(0, "disb: device 0.%x.%04x: rc=%d\n", id->ssid, id->devno,
		      rc);
	ccw_device_disband_done(cdev, rc);
}

/**
 * ccw_device_disband_start - disband pathgroup
 * @cdev: ccw device
 *
 * Execute a SET PGID channel program on @cdev to disband a previously
 * established pathgroup. When finished, call ccw_device_disband_done with
 * a return code specifying the result.
 */
void ccw_device_disband_start(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;
	u8 fn;

	CIO_TRACE_EVENT(4, "disb");
	CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
	/* Request setup. */
	memset(req, 0, sizeof(*req));
	req->timeout = PGID_TIMEOUT;
	req->maxretries = PGID_RETRIES;
	req->lpm = sch->schib.pmcw.pam & sch->opm;
	req->singlepath = 1;
	req->callback = disband_callback;
	fn = SPID_FUNC_DISBAND;
	if (cdev->private->flags.mpath)
		fn |= SPID_FUNC_MULTI_PATH;
	spid_build_cp(cdev, fn);
	ccw_request_start(cdev);
}

static void stlck_build_cp(struct ccw_device *cdev, void *buf1, void *buf2)
{
	struct ccw_request *req = &cdev->private->req;
	struct ccw1 *cp = cdev->private->iccws;

	cp[0].cmd_code = CCW_CMD_STLCK;
	cp[0].cda = (u32) (addr_t) buf1;
	cp[0].count = 32;
	cp[0].flags = CCW_FLAG_CC;
	cp[1].cmd_code = CCW_CMD_RELEASE;
	cp[1].cda = (u32) (addr_t) buf2;
	cp[1].count = 32;
	cp[1].flags = 0;
	req->cp = cp;
}

static void stlck_callback(struct ccw_device *cdev, void *data, int rc)
{
	ccw_device_stlck_done(cdev, data, rc);
}

/**
 * ccw_device_stlck_start - perform unconditional release
 * @cdev: ccw device
 * @data: data pointer to be passed to ccw_device_stlck_done
 * @buf1: data pointer used in channel program
 * @buf2: data pointer used in channel program
 *
 * Execute a channel program on @cdev to release an existing PGID reservation.
 * When finished, call ccw_device_stlck_done with a return code specifying the
 * result.
 */
void ccw_device_stlck_start(struct ccw_device *cdev, void *data, void *buf1,
			    void *buf2)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct ccw_request *req = &cdev->private->req;

	CIO_TRACE_EVENT(4, "stlck");
	CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
	/* Request setup. */
	memset(req, 0, sizeof(*req));
	req->timeout = PGID_TIMEOUT;
	req->maxretries = PGID_RETRIES;
	req->lpm = sch->schib.pmcw.pam & sch->opm;
	req->data = data;
	req->callback = stlck_callback;
	stlck_build_cp(cdev, buf1, buf2);
	ccw_request_start(cdev);
}
397
drivers/s390/cio/device_status.c
Normal file
@@ -0,0 +1,397 @@
/*
 * Copyright IBM Corp. 2002
 * Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
 *	      Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 * Status accumulation and basic sense functions.
 */

#include <linux/module.h>
#include <linux/init.h>

#include <asm/ccwdev.h>
#include <asm/cio.h>

#include "cio.h"
#include "cio_debug.h"
#include "css.h"
#include "device.h"
#include "ioasm.h"
#include "io_sch.h"

/*
 * Check for any kind of channel or interface control check but don't
 * issue the message for the console device
 */
static void
ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb)
{
	char dbf_text[15];

	if (!scsw_is_valid_cstat(&irb->scsw) ||
	    !(scsw_cstat(&irb->scsw) & (SCHN_STAT_CHN_DATA_CHK |
	      SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK)))
		return;
	CIO_MSG_EVENT(0, "Channel-Check or Interface-Control-Check "
		      "received"
		      " ... device %04x on subchannel 0.%x.%04x, dev_stat "
		      ": %02X sch_stat : %02X\n",
		      cdev->private->dev_id.devno, cdev->private->schid.ssid,
		      cdev->private->schid.sch_no,
		      scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw));
	sprintf(dbf_text, "chk%x", cdev->private->schid.sch_no);
	CIO_TRACE_EVENT(0, dbf_text);
	CIO_HEX_EVENT(0, irb, sizeof(struct irb));
}

/*
 * Some paths became not operational (pno bit in scsw is set).
 */
static void
ccw_device_path_notoper(struct ccw_device *cdev)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	if (cio_update_schib(sch))
		goto doverify;

	CIO_MSG_EVENT(0, "%s(0.%x.%04x) - path(s) %02x are "
		      "not operational \n", __func__,
		      sch->schid.ssid, sch->schid.sch_no,
		      sch->schib.pmcw.pnom);

	sch->lpm &= ~sch->schib.pmcw.pnom;
doverify:
	cdev->private->flags.doverify = 1;
}

/*
 * Copy valid bits from the extended control word to device irb.
 */
static void
ccw_device_accumulate_ecw(struct ccw_device *cdev, struct irb *irb)
{
	/*
	 * Copy extended control bit if it is valid... yes there
	 * are conditions that have to be met for the extended control
	 * bit to have meaning. Sick.
	 */
	cdev->private->irb.scsw.cmd.ectl = 0;
	if ((irb->scsw.cmd.stctl & SCSW_STCTL_ALERT_STATUS) &&
	    !(irb->scsw.cmd.stctl & SCSW_STCTL_INTER_STATUS))
		cdev->private->irb.scsw.cmd.ectl = irb->scsw.cmd.ectl;
	/* Check if extended control word is valid. */
	if (!cdev->private->irb.scsw.cmd.ectl)
		return;
	/* Copy concurrent sense / model dependent information. */
	memcpy(&cdev->private->irb.ecw, irb->ecw, sizeof(irb->ecw));
}

/*
 * Check if extended status word is valid.
 */
static int
ccw_device_accumulate_esw_valid(struct irb *irb)
{
	if (!irb->scsw.cmd.eswf &&
	    (irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND))
		return 0;
	if (irb->scsw.cmd.stctl ==
	    (SCSW_STCTL_INTER_STATUS|SCSW_STCTL_STATUS_PEND) &&
	    !(irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED))
		return 0;
	return 1;
}

/*
 * Copy valid bits from the extended status word to device irb.
 */
static void
ccw_device_accumulate_esw(struct ccw_device *cdev, struct irb *irb)
{
	struct irb *cdev_irb;
	struct sublog *cdev_sublog, *sublog;

	if (!ccw_device_accumulate_esw_valid(irb))
		return;

	cdev_irb = &cdev->private->irb;

	/* Copy last path used mask. */
	cdev_irb->esw.esw1.lpum = irb->esw.esw1.lpum;

	/* Copy subchannel logout information if esw is of format 0. */
	if (irb->scsw.cmd.eswf) {
		cdev_sublog = &cdev_irb->esw.esw0.sublog;
		sublog = &irb->esw.esw0.sublog;
		/* Copy extended status flags. */
		cdev_sublog->esf = sublog->esf;
		/*
		 * Copy fields that have a meaning for channel data check
		 * channel control check and interface control check.
		 */
		if (irb->scsw.cmd.cstat & (SCHN_STAT_CHN_DATA_CHK |
					   SCHN_STAT_CHN_CTRL_CHK |
					   SCHN_STAT_INTF_CTRL_CHK)) {
			/* Copy ancillary report bit. */
			cdev_sublog->arep = sublog->arep;
			/* Copy field-validity-flags. */
			cdev_sublog->fvf = sublog->fvf;
			/* Copy storage access code. */
			cdev_sublog->sacc = sublog->sacc;
			/* Copy termination code. */
			cdev_sublog->termc = sublog->termc;
			/* Copy sequence code. */
			cdev_sublog->seqc = sublog->seqc;
		}
		/* Copy device status check. */
		cdev_sublog->devsc = sublog->devsc;
		/* Copy secondary error. */
		cdev_sublog->serr = sublog->serr;
		/* Copy i/o-error alert. */
		cdev_sublog->ioerr = sublog->ioerr;
		/* Copy channel path timeout bit. */
		if (irb->scsw.cmd.cstat & SCHN_STAT_INTF_CTRL_CHK)
			cdev_irb->esw.esw0.erw.cpt = irb->esw.esw0.erw.cpt;
		/* Copy failing storage address validity flag. */
		cdev_irb->esw.esw0.erw.fsavf = irb->esw.esw0.erw.fsavf;
		if (cdev_irb->esw.esw0.erw.fsavf) {
			/* ... and copy the failing storage address. */
			memcpy(cdev_irb->esw.esw0.faddr, irb->esw.esw0.faddr,
			       sizeof(irb->esw.esw0.faddr));
			/* ... and copy the failing storage address format. */
			cdev_irb->esw.esw0.erw.fsaf = irb->esw.esw0.erw.fsaf;
		}
		/* Copy secondary ccw address validity bit. */
		cdev_irb->esw.esw0.erw.scavf = irb->esw.esw0.erw.scavf;
		if (irb->esw.esw0.erw.scavf)
			/* ... and copy the secondary ccw address. */
			cdev_irb->esw.esw0.saddr = irb->esw.esw0.saddr;

	}
	/* FIXME: DCTI for format 2? */

	/* Copy authorization bit. */
	cdev_irb->esw.esw0.erw.auth = irb->esw.esw0.erw.auth;
	/* Copy path verification required flag. */
	cdev_irb->esw.esw0.erw.pvrf = irb->esw.esw0.erw.pvrf;
	if (irb->esw.esw0.erw.pvrf)
		cdev->private->flags.doverify = 1;
	/* Copy concurrent sense bit. */
	cdev_irb->esw.esw0.erw.cons = irb->esw.esw0.erw.cons;
	if (irb->esw.esw0.erw.cons)
		cdev_irb->esw.esw0.erw.scnt = irb->esw.esw0.erw.scnt;
}
/*
|
||||
* Accumulate status from irb to devstat.
|
||||
*/
|
||||
void
|
||||
ccw_device_accumulate_irb(struct ccw_device *cdev, struct irb *irb)
|
||||
{
|
||||
struct irb *cdev_irb;
|
||||
|
||||
/*
|
||||
* Check if the status pending bit is set in stctl.
|
||||
* If not, the remaining bit have no meaning and we must ignore them.
|
||||
* The esw is not meaningful as well...
|
||||
*/
|
||||
if (!(scsw_stctl(&irb->scsw) & SCSW_STCTL_STATUS_PEND))
|
||||
return;
|
||||
|
||||
/* Check for channel checks and interface control checks. */
|
||||
ccw_device_msg_control_check(cdev, irb);
|
||||
|
||||
/* Check for path not operational. */
|
||||
if (scsw_is_valid_pno(&irb->scsw) && scsw_pno(&irb->scsw))
|
||||
ccw_device_path_notoper(cdev);
|
||||
/* No irb accumulation for transport mode irbs. */
|
||||
if (scsw_is_tm(&irb->scsw)) {
|
||||
memcpy(&cdev->private->irb, irb, sizeof(struct irb));
|
||||
return;
|
||||
}
|
||||
/*
|
||||
* Don't accumulate unsolicited interrupts.
|
||||
*/
|
||||
if (!scsw_is_solicited(&irb->scsw))
|
||||
return;
|
||||
|
||||
cdev_irb = &cdev->private->irb;
|
||||
|
||||
/*
|
||||
* If the clear function had been performed, all formerly pending
|
||||
* status at the subchannel has been cleared and we must not pass
|
||||
* intermediate accumulated status to the device driver.
|
||||
*/
|
||||
if (irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC)
|
||||
memset(&cdev->private->irb, 0, sizeof(struct irb));
|
||||
|
||||
/* Copy bits which are valid only for the start function. */
|
||||
if (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) {
|
||||
/* Copy key. */
|
||||
cdev_irb->scsw.cmd.key = irb->scsw.cmd.key;
|
||||
/* Copy suspend control bit. */
|
||||
cdev_irb->scsw.cmd.sctl = irb->scsw.cmd.sctl;
|
||||
/* Accumulate deferred condition code. */
|
||||
cdev_irb->scsw.cmd.cc |= irb->scsw.cmd.cc;
|
||||
/* Copy ccw format bit. */
|
||||
cdev_irb->scsw.cmd.fmt = irb->scsw.cmd.fmt;
|
||||
/* Copy prefetch bit. */
|
||||
cdev_irb->scsw.cmd.pfch = irb->scsw.cmd.pfch;
|
||||
/* Copy initial-status-interruption-control. */
|
||||
cdev_irb->scsw.cmd.isic = irb->scsw.cmd.isic;
|
||||
/* Copy address limit checking control. */
|
||||
cdev_irb->scsw.cmd.alcc = irb->scsw.cmd.alcc;
|
||||
/* Copy suppress suspend bit. */
|
||||
cdev_irb->scsw.cmd.ssi = irb->scsw.cmd.ssi;
|
||||
}
|
||||
|
||||
/* Take care of the extended control bit and extended control word. */
|
||||
ccw_device_accumulate_ecw(cdev, irb);
|
||||
|
||||
/* Accumulate function control. */
|
||||
cdev_irb->scsw.cmd.fctl |= irb->scsw.cmd.fctl;
|
||||
/* Copy activity control. */
|
||||
cdev_irb->scsw.cmd.actl = irb->scsw.cmd.actl;
|
||||
/* Accumulate status control. */
|
||||
cdev_irb->scsw.cmd.stctl |= irb->scsw.cmd.stctl;
|
||||
/*
|
||||
* Copy ccw address if it is valid. This is a bit simplified
|
||||
* but should be close enough for all practical purposes.
|
||||
*/
|
||||
if ((irb->scsw.cmd.stctl & SCSW_STCTL_PRIM_STATUS) ||
|
||||
((irb->scsw.cmd.stctl ==
|
||||
(SCSW_STCTL_INTER_STATUS|SCSW_STCTL_STATUS_PEND)) &&
|
||||
(irb->scsw.cmd.actl & SCSW_ACTL_DEVACT) &&
|
||||
(irb->scsw.cmd.actl & SCSW_ACTL_SCHACT)) ||
|
||||
(irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED))
|
||||
cdev_irb->scsw.cmd.cpa = irb->scsw.cmd.cpa;
|
||||
/* Accumulate device status, but not the device busy flag. */
|
||||
cdev_irb->scsw.cmd.dstat &= ~DEV_STAT_BUSY;
|
||||
/* dstat is not always valid. */
|
||||
if (irb->scsw.cmd.stctl &
|
||||
(SCSW_STCTL_PRIM_STATUS | SCSW_STCTL_SEC_STATUS
|
||||
| SCSW_STCTL_INTER_STATUS | SCSW_STCTL_ALERT_STATUS))
|
||||
cdev_irb->scsw.cmd.dstat |= irb->scsw.cmd.dstat;
|
||||
/* Accumulate subchannel status. */
|
||||
cdev_irb->scsw.cmd.cstat |= irb->scsw.cmd.cstat;
|
||||
/* Copy residual count if it is valid. */
|
||||
if ((irb->scsw.cmd.stctl & SCSW_STCTL_PRIM_STATUS) &&
|
||||
(irb->scsw.cmd.cstat & ~(SCHN_STAT_PCI | SCHN_STAT_INCORR_LEN))
|
||||
== 0)
|
||||
cdev_irb->scsw.cmd.count = irb->scsw.cmd.count;
|
||||
|
||||
/* Take care of bits in the extended status word. */
|
||||
ccw_device_accumulate_esw(cdev, irb);
|
||||
|
||||
/*
|
||||
* Check whether we must issue a SENSE CCW ourselves if there is no
|
||||
* concurrent sense facility installed for the subchannel.
|
||||
* No sense is required if no delayed sense is pending
|
||||
* and we did not get a unit check without sense information.
|
||||
*
|
||||
* Note: We should check for ioinfo[irq]->flags.consns but VM
|
||||
* violates the ESA/390 architecture and doesn't present an
|
||||
* operand exception for virtual devices without concurrent
|
||||
* sense facility available/supported when enabling the
|
||||
* concurrent sense facility.
|
||||
*/
|
||||
if ((cdev_irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) &&
|
||||
!(cdev_irb->esw.esw0.erw.cons))
|
||||
cdev->private->flags.dosense = 1;
|
||||
}
|
||||
|
||||
/*
|
||||
* Do a basic sense.
|
||||
*/
|
||||
int
|
||||
ccw_device_do_sense(struct ccw_device *cdev, struct irb *irb)
|
||||
{
|
||||
struct subchannel *sch;
|
||||
struct ccw1 *sense_ccw;
|
||||
int rc;
|
||||
|
||||
sch = to_subchannel(cdev->dev.parent);
|
||||
|
||||
/* A sense is required, can we do it now ? */
|
||||
if (scsw_actl(&irb->scsw) & (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT))
|
||||
/*
|
||||
* we received an Unit Check but we have no final
|
||||
* status yet, therefore we must delay the SENSE
|
||||
* processing. We must not report this intermediate
|
||||
* status to the device interrupt handler.
|
||||
*/
|
||||
return -EBUSY;
|
||||
|
||||
/*
|
||||
* We have ending status but no sense information. Do a basic sense.
|
||||
*/
|
||||
sense_ccw = &to_io_private(sch)->sense_ccw;
|
||||
sense_ccw->cmd_code = CCW_CMD_BASIC_SENSE;
|
||||
sense_ccw->cda = (__u32) __pa(cdev->private->irb.ecw);
|
||||
sense_ccw->count = SENSE_MAX_COUNT;
|
||||
sense_ccw->flags = CCW_FLAG_SLI;
|
||||
|
||||
rc = cio_start(sch, sense_ccw, 0xff);
|
||||
if (rc == -ENODEV || rc == -EACCES)
|
||||
dev_fsm_event(cdev, DEV_EVENT_VERIFY);
|
||||
return rc;
|
||||
}
|
||||
|
||||
/*
|
||||
* Add information from basic sense to devstat.
|
||||
*/
|
||||
void
|
||||
ccw_device_accumulate_basic_sense(struct ccw_device *cdev, struct irb *irb)
|
||||
{
|
||||
/*
|
||||
* Check if the status pending bit is set in stctl.
|
||||
* If not, the remaining bit have no meaning and we must ignore them.
|
||||
* The esw is not meaningful as well...
|
||||
*/
|
||||
if (!(scsw_stctl(&irb->scsw) & SCSW_STCTL_STATUS_PEND))
|
||||
return;
|
||||
|
||||
/* Check for channel checks and interface control checks. */
|
||||
ccw_device_msg_control_check(cdev, irb);
|
||||
|
||||
/* Check for path not operational. */
|
||||
if (scsw_is_valid_pno(&irb->scsw) && scsw_pno(&irb->scsw))
|
||||
ccw_device_path_notoper(cdev);
|
||||
|
||||
if (!(irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) &&
|
||||
(irb->scsw.cmd.dstat & DEV_STAT_CHN_END)) {
|
||||
cdev->private->irb.esw.esw0.erw.cons = 1;
|
||||
cdev->private->flags.dosense = 0;
|
||||
}
|
||||
/* Check if path verification is required. */
|
||||
if (ccw_device_accumulate_esw_valid(irb) &&
|
||||
irb->esw.esw0.erw.pvrf)
|
||||
cdev->private->flags.doverify = 1;
|
||||
}
|
||||
|
||||
/*
|
||||
* This function accumulates the status into the private devstat and
|
||||
* starts a basic sense if one is needed.
|
||||
*/
|
||||
int
|
||||
ccw_device_accumulate_and_sense(struct ccw_device *cdev, struct irb *irb)
|
||||
{
|
||||
ccw_device_accumulate_irb(cdev, irb);
|
||||
if ((irb->scsw.cmd.actl & (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) != 0)
|
||||
return -EBUSY;
|
||||
/* Check for basic sense. */
|
||||
if (cdev->private->flags.dosense &&
|
||||
!(irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)) {
|
||||
cdev->private->irb.esw.esw0.erw.cons = 1;
|
||||
cdev->private->flags.dosense = 0;
|
||||
return 0;
|
||||
}
|
||||
if (cdev->private->flags.dosense) {
|
||||
ccw_device_do_sense(cdev, irb);
|
||||
return -EBUSY;
|
||||
}
|
||||
return 0;
|
||||
}
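
Editorial note: the accumulation entry points above are easiest to follow from the caller's side. The sketch below is illustrative only: my_fsm_irq() and its surrounding state machine are hypothetical, while the ccw_device_accumulate_and_sense() call and its return convention come from this file.

static void my_fsm_irq(struct ccw_device *cdev, struct irb *irb)
{
	/*
	 * Merge the new interrupt status into the accumulated irb; a
	 * basic sense is started internally if a unit check arrived
	 * without concurrent sense data.
	 */
	if (ccw_device_accumulate_and_sense(cdev, irb) == -EBUSY)
		return;	/* final status or sense data still outstanding */

	/*
	 * At this point cdev->private->irb holds the accumulated status
	 * and, if a sense was needed, the sense bytes in irb.ecw.
	 */
}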

418
drivers/s390/cio/eadm_sch.c
Normal file
@@ -0,0 +1,418 @@
/*
 * Driver for s390 eadm subchannels
 *
 * Copyright IBM Corp. 2012
 * Author(s): Sebastian Ott <sebott@linux.vnet.ibm.com>
 */

#include <linux/kernel_stat.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/list.h>

#include <asm/css_chars.h>
#include <asm/debug.h>
#include <asm/isc.h>
#include <asm/cio.h>
#include <asm/scsw.h>
#include <asm/eadm.h>

#include "eadm_sch.h"
#include "ioasm.h"
#include "cio.h"
#include "css.h"
#include "orb.h"

MODULE_DESCRIPTION("driver for s390 eadm subchannels");
MODULE_LICENSE("GPL");

#define EADM_TIMEOUT (5 * HZ)
static DEFINE_SPINLOCK(list_lock);
static LIST_HEAD(eadm_list);

static debug_info_t *eadm_debug;

#define EADM_LOG(imp, txt) do {				\
		debug_text_event(eadm_debug, imp, txt);	\
	} while (0)

static void EADM_LOG_HEX(int level, void *data, int length)
{
	if (!debug_level_enabled(eadm_debug, level))
		return;
	while (length > 0) {
		debug_event(eadm_debug, level, data, length);
		length -= eadm_debug->buf_size;
		data += eadm_debug->buf_size;
	}
}

static void orb_init(union orb *orb)
{
	memset(orb, 0, sizeof(union orb));
	orb->eadm.compat1 = 1;
	orb->eadm.compat2 = 1;
	orb->eadm.fmt = 1;
	orb->eadm.x = 1;
}

static int eadm_subchannel_start(struct subchannel *sch, struct aob *aob)
{
	union orb *orb = &get_eadm_private(sch)->orb;
	int cc;

	orb_init(orb);
	orb->eadm.aob = (u32)__pa(aob);
	orb->eadm.intparm = (u32)(addr_t)sch;
	orb->eadm.key = PAGE_DEFAULT_KEY >> 4;

	EADM_LOG(6, "start");
	EADM_LOG_HEX(6, &sch->schid, sizeof(sch->schid));

	cc = ssch(sch->schid, orb);
	switch (cc) {
	case 0:
		sch->schib.scsw.eadm.actl |= SCSW_ACTL_START_PEND;
		break;
	case 1:		/* status pending */
	case 2:		/* busy */
		return -EBUSY;
	case 3:		/* not operational */
		return -ENODEV;
	}
	return 0;
}

static int eadm_subchannel_clear(struct subchannel *sch)
{
	int cc;

	cc = csch(sch->schid);
	if (cc)
		return -ENODEV;

	sch->schib.scsw.eadm.actl |= SCSW_ACTL_CLEAR_PEND;
	return 0;
}

static void eadm_subchannel_timeout(unsigned long data)
{
	struct subchannel *sch = (struct subchannel *) data;

	spin_lock_irq(sch->lock);
	EADM_LOG(1, "timeout");
	EADM_LOG_HEX(1, &sch->schid, sizeof(sch->schid));
	if (eadm_subchannel_clear(sch))
		EADM_LOG(0, "clear failed");
	spin_unlock_irq(sch->lock);
}

static void eadm_subchannel_set_timeout(struct subchannel *sch, int expires)
{
	struct eadm_private *private = get_eadm_private(sch);

	if (expires == 0) {
		del_timer(&private->timer);
		return;
	}
	if (timer_pending(&private->timer)) {
		if (mod_timer(&private->timer, jiffies + expires))
			return;
	}
	private->timer.function = eadm_subchannel_timeout;
	private->timer.data = (unsigned long) sch;
	private->timer.expires = jiffies + expires;
	add_timer(&private->timer);
}

static void eadm_subchannel_irq(struct subchannel *sch)
{
	struct eadm_private *private = get_eadm_private(sch);
	struct eadm_scsw *scsw = &sch->schib.scsw.eadm;
	struct irb *irb = this_cpu_ptr(&cio_irb);
	int error = 0;

	EADM_LOG(6, "irq");
	EADM_LOG_HEX(6, irb, sizeof(*irb));

	inc_irq_stat(IRQIO_ADM);

	if ((scsw->stctl & (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND))
	    && scsw->eswf == 1 && irb->esw.eadm.erw.r)
		error = -EIO;

	if (scsw->fctl & SCSW_FCTL_CLEAR_FUNC)
		error = -ETIMEDOUT;

	eadm_subchannel_set_timeout(sch, 0);

	if (private->state != EADM_BUSY) {
		EADM_LOG(1, "irq unsol");
		EADM_LOG_HEX(1, irb, sizeof(*irb));
		private->state = EADM_NOT_OPER;
		css_sched_sch_todo(sch, SCH_TODO_EVAL);
		return;
	}
	scm_irq_handler((struct aob *)(unsigned long)scsw->aob, error);
	private->state = EADM_IDLE;

	if (private->completion)
		complete(private->completion);
}

static struct subchannel *eadm_get_idle_sch(void)
{
	struct eadm_private *private;
	struct subchannel *sch;
	unsigned long flags;

	spin_lock_irqsave(&list_lock, flags);
	list_for_each_entry(private, &eadm_list, head) {
		sch = private->sch;
		spin_lock(sch->lock);
		if (private->state == EADM_IDLE) {
			private->state = EADM_BUSY;
			list_move_tail(&private->head, &eadm_list);
			spin_unlock(sch->lock);
			spin_unlock_irqrestore(&list_lock, flags);

			return sch;
		}
		spin_unlock(sch->lock);
	}
	spin_unlock_irqrestore(&list_lock, flags);

	return NULL;
}

int eadm_start_aob(struct aob *aob)
{
	struct eadm_private *private;
	struct subchannel *sch;
	unsigned long flags;
	int ret;

	sch = eadm_get_idle_sch();
	if (!sch)
		return -EBUSY;

	spin_lock_irqsave(sch->lock, flags);
	eadm_subchannel_set_timeout(sch, EADM_TIMEOUT);
	ret = eadm_subchannel_start(sch, aob);
	if (!ret)
		goto out_unlock;

	/* Handle start subchannel failure. */
	eadm_subchannel_set_timeout(sch, 0);
	private = get_eadm_private(sch);
	private->state = EADM_NOT_OPER;
	css_sched_sch_todo(sch, SCH_TODO_EVAL);

out_unlock:
	spin_unlock_irqrestore(sch->lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(eadm_start_aob);

static int eadm_subchannel_probe(struct subchannel *sch)
{
	struct eadm_private *private;
	int ret;

	private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
	if (!private)
		return -ENOMEM;

	INIT_LIST_HEAD(&private->head);
	init_timer(&private->timer);

	spin_lock_irq(sch->lock);
	set_eadm_private(sch, private);
	private->state = EADM_IDLE;
	private->sch = sch;
	sch->isc = EADM_SCH_ISC;
	ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
	if (ret) {
		set_eadm_private(sch, NULL);
		spin_unlock_irq(sch->lock);
		kfree(private);
		goto out;
	}
	spin_unlock_irq(sch->lock);

	spin_lock_irq(&list_lock);
	list_add(&private->head, &eadm_list);
	spin_unlock_irq(&list_lock);

	if (dev_get_uevent_suppress(&sch->dev)) {
		dev_set_uevent_suppress(&sch->dev, 0);
		kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
	}
out:
	return ret;
}

static void eadm_quiesce(struct subchannel *sch)
{
	struct eadm_private *private = get_eadm_private(sch);
	DECLARE_COMPLETION_ONSTACK(completion);
	int ret;

	spin_lock_irq(sch->lock);
	if (private->state != EADM_BUSY)
		goto disable;

	if (eadm_subchannel_clear(sch))
		goto disable;

	private->completion = &completion;
	spin_unlock_irq(sch->lock);

	wait_for_completion_io(&completion);

	spin_lock_irq(sch->lock);
	private->completion = NULL;

disable:
	eadm_subchannel_set_timeout(sch, 0);
	do {
		ret = cio_disable_subchannel(sch);
	} while (ret == -EBUSY);

	spin_unlock_irq(sch->lock);
}

static int eadm_subchannel_remove(struct subchannel *sch)
{
	struct eadm_private *private = get_eadm_private(sch);

	spin_lock_irq(&list_lock);
	list_del(&private->head);
	spin_unlock_irq(&list_lock);

	eadm_quiesce(sch);

	spin_lock_irq(sch->lock);
	set_eadm_private(sch, NULL);
	spin_unlock_irq(sch->lock);

	kfree(private);

	return 0;
}

static void eadm_subchannel_shutdown(struct subchannel *sch)
{
	eadm_quiesce(sch);
}

static int eadm_subchannel_freeze(struct subchannel *sch)
{
	return cio_disable_subchannel(sch);
}

static int eadm_subchannel_restore(struct subchannel *sch)
{
	return cio_enable_subchannel(sch, (u32)(unsigned long)sch);
}

/**
 * eadm_subchannel_sch_event - process subchannel event
 * @sch: subchannel
 * @process: non-zero if function is called in process context
 *
 * An unspecified event occurred for this subchannel. Adjust data according
 * to the current operational state of the subchannel. Return zero when the
 * event has been handled sufficiently or -EAGAIN when this function should
 * be called again in process context.
 */
static int eadm_subchannel_sch_event(struct subchannel *sch, int process)
{
	struct eadm_private *private;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(sch->lock, flags);
	if (!device_is_registered(&sch->dev))
		goto out_unlock;

	if (work_pending(&sch->todo_work))
		goto out_unlock;

	if (cio_update_schib(sch)) {
		css_sched_sch_todo(sch, SCH_TODO_UNREG);
		goto out_unlock;
	}
	private = get_eadm_private(sch);
	if (private->state == EADM_NOT_OPER)
		private->state = EADM_IDLE;

out_unlock:
	spin_unlock_irqrestore(sch->lock, flags);

	return ret;
}

static struct css_device_id eadm_subchannel_ids[] = {
	{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_ADM, },
	{ /* end of list */ },
};
MODULE_DEVICE_TABLE(css, eadm_subchannel_ids);

static struct css_driver eadm_subchannel_driver = {
	.drv = {
		.name = "eadm_subchannel",
		.owner = THIS_MODULE,
	},
	.subchannel_type = eadm_subchannel_ids,
	.irq = eadm_subchannel_irq,
	.probe = eadm_subchannel_probe,
	.remove = eadm_subchannel_remove,
	.shutdown = eadm_subchannel_shutdown,
	.sch_event = eadm_subchannel_sch_event,
	.freeze = eadm_subchannel_freeze,
	.thaw = eadm_subchannel_restore,
	.restore = eadm_subchannel_restore,
};

static int __init eadm_sch_init(void)
{
	int ret;

	if (!css_general_characteristics.eadm)
		return -ENXIO;

	eadm_debug = debug_register("eadm_log", 16, 1, 16);
	if (!eadm_debug)
		return -ENOMEM;

	debug_register_view(eadm_debug, &debug_hex_ascii_view);
	debug_set_level(eadm_debug, 2);

	isc_register(EADM_SCH_ISC);
	ret = css_driver_register(&eadm_subchannel_driver);
	if (ret)
		goto cleanup;

	return ret;

cleanup:
	isc_unregister(EADM_SCH_ISC);
	debug_unregister(eadm_debug);
	return ret;
}

static void __exit eadm_sch_exit(void)
{
	css_driver_unregister(&eadm_subchannel_driver);
	isc_unregister(EADM_SCH_ISC);
	debug_unregister(eadm_debug);
}
module_init(eadm_sch_init);
module_exit(eadm_sch_exit);
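
Editorial note: eadm_start_aob() is the only exported entry point of this driver. A hedged sketch of its calling contract follows; submit_aob() is hypothetical (in the real tree the SCM layer plays this role), only eadm_start_aob() and the described error semantics come from the code above.

static int submit_aob(struct aob *aob)
{
	int rc = eadm_start_aob(aob);	/* grabs an idle eadm subchannel */

	/*
	 * -EBUSY means no eadm subchannel was idle; the caller may retry.
	 * On success the result arrives asynchronously via
	 * scm_irq_handler(), with -ETIMEDOUT if the EADM_TIMEOUT-driven
	 * clear kicked in first.
	 */
	return rc;
}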

22
drivers/s390/cio/eadm_sch.h
Normal file
@@ -0,0 +1,22 @@
#ifndef EADM_SCH_H
#define EADM_SCH_H

#include <linux/completion.h>
#include <linux/device.h>
#include <linux/timer.h>
#include <linux/list.h>
#include "orb.h"

struct eadm_private {
	union orb orb;
	enum {EADM_IDLE, EADM_BUSY, EADM_NOT_OPER} state;
	struct completion *completion;
	struct subchannel *sch;
	struct timer_list timer;
	struct list_head head;
} __aligned(8);

#define get_eadm_private(n) ((struct eadm_private *)dev_get_drvdata(&n->dev))
#define set_eadm_private(n, p) (dev_set_drvdata(&n->dev, p))

#endif

350
drivers/s390/cio/fcx.c
Normal file
@@ -0,0 +1,350 @@
/*
 * Functions for assembling fcx enabled I/O control blocks.
 *
 * Copyright IBM Corp. 2008
 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/module.h>
#include <asm/fcx.h>
#include "cio.h"

/**
 * tcw_get_intrg - return pointer to associated interrogate tcw
 * @tcw: pointer to the original tcw
 *
 * Return a pointer to the interrogate tcw associated with the specified tcw
 * or %NULL if there is no associated interrogate tcw.
 */
struct tcw *tcw_get_intrg(struct tcw *tcw)
{
	return (struct tcw *) ((addr_t) tcw->intrg);
}
EXPORT_SYMBOL(tcw_get_intrg);

/**
 * tcw_get_data - return pointer to input/output data associated with tcw
 * @tcw: pointer to the tcw
 *
 * Return the input or output data address specified in the tcw depending
 * on whether the r-bit or the w-bit is set. If neither bit is set, return
 * %NULL.
 */
void *tcw_get_data(struct tcw *tcw)
{
	if (tcw->r)
		return (void *) ((addr_t) tcw->input);
	if (tcw->w)
		return (void *) ((addr_t) tcw->output);
	return NULL;
}
EXPORT_SYMBOL(tcw_get_data);

/**
 * tcw_get_tccb - return pointer to tccb associated with tcw
 * @tcw: pointer to the tcw
 *
 * Return pointer to the tccb associated with this tcw.
 */
struct tccb *tcw_get_tccb(struct tcw *tcw)
{
	return (struct tccb *) ((addr_t) tcw->tccb);
}
EXPORT_SYMBOL(tcw_get_tccb);

/**
 * tcw_get_tsb - return pointer to tsb associated with tcw
 * @tcw: pointer to the tcw
 *
 * Return pointer to the tsb associated with this tcw.
 */
struct tsb *tcw_get_tsb(struct tcw *tcw)
{
	return (struct tsb *) ((addr_t) tcw->tsb);
}
EXPORT_SYMBOL(tcw_get_tsb);

/**
 * tcw_init - initialize tcw data structure
 * @tcw: pointer to the tcw to be initialized
 * @r: initial value of the r-bit
 * @w: initial value of the w-bit
 *
 * Initialize all fields of the specified tcw data structure with zero and
 * fill in the format, flags, r and w fields.
 */
void tcw_init(struct tcw *tcw, int r, int w)
{
	memset(tcw, 0, sizeof(struct tcw));
	tcw->format = TCW_FORMAT_DEFAULT;
	tcw->flags = TCW_FLAGS_TIDAW_FORMAT(TCW_TIDAW_FORMAT_DEFAULT);
	if (r)
		tcw->r = 1;
	if (w)
		tcw->w = 1;
}
EXPORT_SYMBOL(tcw_init);

static inline size_t tca_size(struct tccb *tccb)
{
	return tccb->tcah.tcal - 12;
}

static u32 calc_dcw_count(struct tccb *tccb)
{
	int offset;
	struct dcw *dcw;
	u32 count = 0;
	size_t size;

	size = tca_size(tccb);
	for (offset = 0; offset < size;) {
		dcw = (struct dcw *) &tccb->tca[offset];
		count += dcw->count;
		if (!(dcw->flags & DCW_FLAGS_CC))
			break;
		offset += sizeof(struct dcw) + ALIGN((int) dcw->cd_count, 4);
	}
	return count;
}

static u32 calc_cbc_size(struct tidaw *tidaw, int num)
{
	int i;
	u32 cbc_data;
	u32 cbc_count = 0;
	u64 data_count = 0;

	for (i = 0; i < num; i++) {
		if (tidaw[i].flags & TIDAW_FLAGS_LAST)
			break;
		/* TODO: find out if padding applies to total of data
		 * transferred or data transferred by this tidaw. Assumption:
		 * applies to total. */
		data_count += tidaw[i].count;
		if (tidaw[i].flags & TIDAW_FLAGS_INSERT_CBC) {
			cbc_data = 4 + ALIGN(data_count, 4) - data_count;
			cbc_count += cbc_data;
			data_count += cbc_data;
		}
	}
	return cbc_count;
}

/**
 * tcw_finalize - finalize tcw length fields and tidaw list
 * @tcw: pointer to the tcw
 * @num_tidaws: the number of tidaws used to address input/output data or zero
 * if no tida is used
 *
 * Calculate the input-/output-count and tccbl fields in the tcw, add a
 * tcat to the tccb and terminate the data tidaw list if used.
 *
 * Note: in case input- or output-tida is used, the tidaw-list must be stored
 * in contiguous storage (no ttic). The tcal field in the tccb must be
 * up-to-date.
 */
void tcw_finalize(struct tcw *tcw, int num_tidaws)
{
	struct tidaw *tidaw;
	struct tccb *tccb;
	struct tccb_tcat *tcat;
	u32 count;

	/* Terminate tidaw list. */
	tidaw = tcw_get_data(tcw);
	if (num_tidaws > 0)
		tidaw[num_tidaws - 1].flags |= TIDAW_FLAGS_LAST;
	/* Add tcat to tccb. */
	tccb = tcw_get_tccb(tcw);
	tcat = (struct tccb_tcat *) &tccb->tca[tca_size(tccb)];
	memset(tcat, 0, sizeof(*tcat));
	/* Calculate tcw input/output count and tcat transport count. */
	count = calc_dcw_count(tccb);
	if (tcw->w && (tcw->flags & TCW_FLAGS_OUTPUT_TIDA))
		count += calc_cbc_size(tidaw, num_tidaws);
	if (tcw->r)
		tcw->input_count = count;
	else if (tcw->w)
		tcw->output_count = count;
	tcat->count = ALIGN(count, 4) + 4;
	/* Calculate tccbl. */
	tcw->tccbl = (sizeof(struct tccb) + tca_size(tccb) +
		      sizeof(struct tccb_tcat) - 20) >> 2;
}
EXPORT_SYMBOL(tcw_finalize);

/**
 * tcw_set_intrg - set the interrogate tcw address of a tcw
 * @tcw: the tcw address
 * @intrg_tcw: the address of the interrogate tcw
 *
 * Set the address of the interrogate tcw in the specified tcw.
 */
void tcw_set_intrg(struct tcw *tcw, struct tcw *intrg_tcw)
{
	tcw->intrg = (u32) ((addr_t) intrg_tcw);
}
EXPORT_SYMBOL(tcw_set_intrg);

/**
 * tcw_set_data - set data address and tida flag of a tcw
 * @tcw: the tcw address
 * @data: the data address
 * @use_tidal: zero if the data address specifies a contiguous block of data,
 * non-zero if it specifies a list of tidaws.
 *
 * Set the input/output data address of a tcw (depending on the value of the
 * r-flag and w-flag). If @use_tidal is non-zero, the corresponding tida flag
 * is set as well.
 */
void tcw_set_data(struct tcw *tcw, void *data, int use_tidal)
{
	if (tcw->r) {
		tcw->input = (u64) ((addr_t) data);
		if (use_tidal)
			tcw->flags |= TCW_FLAGS_INPUT_TIDA;
	} else if (tcw->w) {
		tcw->output = (u64) ((addr_t) data);
		if (use_tidal)
			tcw->flags |= TCW_FLAGS_OUTPUT_TIDA;
	}
}
EXPORT_SYMBOL(tcw_set_data);

/**
 * tcw_set_tccb - set tccb address of a tcw
 * @tcw: the tcw address
 * @tccb: the tccb address
 *
 * Set the address of the tccb in the specified tcw.
 */
void tcw_set_tccb(struct tcw *tcw, struct tccb *tccb)
{
	tcw->tccb = (u64) ((addr_t) tccb);
}
EXPORT_SYMBOL(tcw_set_tccb);

/**
 * tcw_set_tsb - set tsb address of a tcw
 * @tcw: the tcw address
 * @tsb: the tsb address
 *
 * Set the address of the tsb in the specified tcw.
 */
void tcw_set_tsb(struct tcw *tcw, struct tsb *tsb)
{
	tcw->tsb = (u64) ((addr_t) tsb);
}
EXPORT_SYMBOL(tcw_set_tsb);

/**
 * tccb_init - initialize tccb
 * @tccb: the tccb address
 * @size: the maximum size of the tccb
 * @sac: the service-action-code to be used
 *
 * Initialize the header of the specified tccb by resetting all values to zero
 * and filling in defaults for format, sac and initial tcal fields.
 */
void tccb_init(struct tccb *tccb, size_t size, u32 sac)
{
	memset(tccb, 0, size);
	tccb->tcah.format = TCCB_FORMAT_DEFAULT;
	tccb->tcah.sac = sac;
	tccb->tcah.tcal = 12;
}
EXPORT_SYMBOL(tccb_init);

/**
 * tsb_init - initialize tsb
 * @tsb: the tsb address
 *
 * Initialize the specified tsb by resetting all values to zero.
 */
void tsb_init(struct tsb *tsb)
{
	memset(tsb, 0, sizeof(*tsb));
}
EXPORT_SYMBOL(tsb_init);

/**
 * tccb_add_dcw - add a dcw to the tccb
 * @tccb: the tccb address
 * @tccb_size: the maximum tccb size
 * @cmd: the dcw command
 * @flags: flags for the dcw
 * @cd: pointer to control data for this dcw or NULL if none is required
 * @cd_count: number of control data bytes for this dcw
 * @count: number of data bytes for this dcw
 *
 * Add a new dcw to the specified tccb by writing the dcw information specified
 * by @cmd, @flags, @cd, @cd_count and @count to the tca of the tccb. Return
 * a pointer to the newly added dcw on success or -%ENOSPC if the new dcw
 * would exceed the available space as defined by @tccb_size.
 *
 * Note: the tcal field of the tccb header will be updated to reflect added
 * content.
 */
struct dcw *tccb_add_dcw(struct tccb *tccb, size_t tccb_size, u8 cmd, u8 flags,
			 void *cd, u8 cd_count, u32 count)
{
	struct dcw *dcw;
	int size;
	int tca_offset;

	/* Check for space. */
	tca_offset = tca_size(tccb);
	size = ALIGN(sizeof(struct dcw) + cd_count, 4);
	if (sizeof(struct tccb_tcah) + tca_offset + size +
	    sizeof(struct tccb_tcat) > tccb_size)
		return ERR_PTR(-ENOSPC);
	/* Add dcw to tca. */
	dcw = (struct dcw *) &tccb->tca[tca_offset];
	memset(dcw, 0, size);
	dcw->cmd = cmd;
	dcw->flags = flags;
	dcw->count = count;
	dcw->cd_count = cd_count;
	if (cd)
		memcpy(&dcw->cd[0], cd, cd_count);
	tccb->tcah.tcal += size;
	return dcw;
}
EXPORT_SYMBOL(tccb_add_dcw);

/**
 * tcw_add_tidaw - add a tidaw to a tcw
 * @tcw: the tcw address
 * @num_tidaws: the current number of tidaws
 * @flags: flags for the new tidaw
 * @addr: address value for the new tidaw
 * @count: count value for the new tidaw
 *
 * Add a new tidaw to the input/output data tidaw-list of the specified tcw
 * (depending on the value of the r-flag and w-flag) and return a pointer to
 * the new tidaw.
 *
 * Note: the tidaw-list is assumed to be contiguous with no ttics. The caller
 * must ensure that there is enough space for the new tidaw. The last-tidaw
 * flag for the last tidaw in the list will be set by tcw_finalize.
 */
struct tidaw *tcw_add_tidaw(struct tcw *tcw, int num_tidaws, u8 flags,
			    void *addr, u32 count)
{
	struct tidaw *tidaw;

	/* Add tidaw to tidaw-list. */
	tidaw = ((struct tidaw *) tcw_get_data(tcw)) + num_tidaws;
	memset(tidaw, 0, sizeof(struct tidaw));
	tidaw->flags = flags;
	tidaw->count = count;
	tidaw->addr = (u64) ((addr_t) addr);
	return tidaw;
}
EXPORT_SYMBOL(tcw_add_tidaw);
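
Editorial note: a minimal assembly sequence using the helpers above, as a hedged sketch. build_write_tcw() is hypothetical, the 0x01 command code is an illustrative device-specific write command, and TCCB_SAC_DEFAULT is assumed to come from asm/fcx.h; real callers usually go through the itcw layer (see itcw.c below).

static void build_write_tcw(struct tcw *tcw, struct tccb *tccb,
			    struct tsb *tsb, void *buf, u32 len)
{
	tcw_init(tcw, 0, 1);		/* write operation: w-bit set */
	tccb_init(tccb, TCCB_MAX_SIZE, TCCB_SAC_DEFAULT);
	/* One dcw, no control data; caller should check for ERR_PTR. */
	tccb_add_dcw(tccb, TCCB_MAX_SIZE, 0x01, 0, NULL, 0, len);
	tcw_set_tccb(tcw, tccb);
	tcw_set_tsb(tcw, tsb);
	tcw_set_data(tcw, buf, 0);	/* contiguous buffer, no tidal */
	tcw_finalize(tcw, 0);		/* no tidaws */
}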

131
drivers/s390/cio/idset.c
Normal file
@@ -0,0 +1,131 @@
/*
 * Copyright IBM Corp. 2007, 2012
 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
 */

#include <linux/vmalloc.h>
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include "idset.h"
#include "css.h"

struct idset {
	int num_ssid;
	int num_id;
	unsigned long bitmap[0];
};

static inline unsigned long bitmap_size(int num_ssid, int num_id)
{
	return BITS_TO_LONGS(num_ssid * num_id) * sizeof(unsigned long);
}

static struct idset *idset_new(int num_ssid, int num_id)
{
	struct idset *set;

	set = vmalloc(sizeof(struct idset) + bitmap_size(num_ssid, num_id));
	if (set) {
		set->num_ssid = num_ssid;
		set->num_id = num_id;
		memset(set->bitmap, 0, bitmap_size(num_ssid, num_id));
	}
	return set;
}

void idset_free(struct idset *set)
{
	vfree(set);
}

void idset_clear(struct idset *set)
{
	memset(set->bitmap, 0, bitmap_size(set->num_ssid, set->num_id));
}

void idset_fill(struct idset *set)
{
	memset(set->bitmap, 0xff, bitmap_size(set->num_ssid, set->num_id));
}

static inline void idset_add(struct idset *set, int ssid, int id)
{
	set_bit(ssid * set->num_id + id, set->bitmap);
}

static inline void idset_del(struct idset *set, int ssid, int id)
{
	clear_bit(ssid * set->num_id + id, set->bitmap);
}

static inline int idset_contains(struct idset *set, int ssid, int id)
{
	return test_bit(ssid * set->num_id + id, set->bitmap);
}

static inline int idset_get_first(struct idset *set, int *ssid, int *id)
{
	int bitnum;

	bitnum = find_first_bit(set->bitmap, set->num_ssid * set->num_id);
	if (bitnum >= set->num_ssid * set->num_id)
		return 0;
	*ssid = bitnum / set->num_id;
	*id = bitnum % set->num_id;
	return 1;
}

struct idset *idset_sch_new(void)
{
	return idset_new(max_ssid + 1, __MAX_SUBCHANNEL + 1);
}

void idset_sch_add(struct idset *set, struct subchannel_id schid)
{
	idset_add(set, schid.ssid, schid.sch_no);
}

void idset_sch_del(struct idset *set, struct subchannel_id schid)
{
	idset_del(set, schid.ssid, schid.sch_no);
}

/* Clear ids starting from @schid up to end of subchannel set. */
void idset_sch_del_subseq(struct idset *set, struct subchannel_id schid)
{
	int pos = schid.ssid * set->num_id + schid.sch_no;

	bitmap_clear(set->bitmap, pos, set->num_id - schid.sch_no);
}

int idset_sch_contains(struct idset *set, struct subchannel_id schid)
{
	return idset_contains(set, schid.ssid, schid.sch_no);
}

int idset_sch_get_first(struct idset *set, struct subchannel_id *schid)
{
	int ssid = 0;
	int id = 0;
	int rc;

	rc = idset_get_first(set, &ssid, &id);
	if (rc) {
		init_subchannel_id(schid);
		schid->ssid = ssid;
		schid->sch_no = id;
	}
	return rc;
}

int idset_is_empty(struct idset *set)
{
	return bitmap_empty(set->bitmap, set->num_ssid * set->num_id);
}

void idset_add_set(struct idset *to, struct idset *from)
{
	int len = min(to->num_ssid * to->num_id, from->num_ssid * from->num_id);

	bitmap_or(to->bitmap, to->bitmap, from->bitmap, len);
}
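
Editorial note: the idset is a bitmap keyed by (ssid, subchannel number). The sketch below shows the typical fill/iterate/delete pattern using only the public helpers declared in idset.h; walk_registered_subchannels() and the loop body are hypothetical.

static void walk_registered_subchannels(struct idset *set)
{
	struct subchannel_id schid;

	idset_fill(set);		/* start with every id present */
	/* ... idset_sch_del() every id that is not of interest ... */
	while (idset_sch_get_first(set, &schid)) {
		/* process schid here, then remove it so the loop advances */
		idset_sch_del(set, schid);
	}
}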

26
drivers/s390/cio/idset.h
Normal file
@@ -0,0 +1,26 @@
/*
 * Copyright IBM Corp. 2007, 2012
 * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
 */

#ifndef S390_IDSET_H
#define S390_IDSET_H S390_IDSET_H

#include <asm/schid.h>

struct idset;

void idset_free(struct idset *set);
void idset_clear(struct idset *set);
void idset_fill(struct idset *set);

struct idset *idset_sch_new(void);
void idset_sch_add(struct idset *set, struct subchannel_id id);
void idset_sch_del(struct idset *set, struct subchannel_id id);
void idset_sch_del_subseq(struct idset *set, struct subchannel_id schid);
int idset_sch_contains(struct idset *set, struct subchannel_id id);
int idset_sch_get_first(struct idset *set, struct subchannel_id *id);
int idset_is_empty(struct idset *set);
void idset_add_set(struct idset *to, struct idset *from);

#endif /* S390_IDSET_H */

217
drivers/s390/cio/io_sch.h
Normal file
@@ -0,0 +1,217 @@
#ifndef S390_IO_SCH_H
#define S390_IO_SCH_H

#include <linux/types.h>
#include <asm/schid.h>
#include <asm/ccwdev.h>
#include <asm/irq.h>
#include "css.h"
#include "orb.h"

struct io_subchannel_private {
	union orb orb;		/* operation request block */
	struct ccw1 sense_ccw;	/* static ccw for sense command */
	struct ccw_device *cdev;/* pointer to the child ccw device */
	struct {
		unsigned int suspend:1;	/* allow suspend */
		unsigned int prefetch:1;/* deny prefetch */
		unsigned int inter:1;	/* suppress intermediate interrupts */
	} __packed options;
} __aligned(8);

#define to_io_private(n) ((struct io_subchannel_private *) \
			  dev_get_drvdata(&(n)->dev))
#define set_io_private(n, p) (dev_set_drvdata(&(n)->dev, p))

static inline struct ccw_device *sch_get_cdev(struct subchannel *sch)
{
	struct io_subchannel_private *priv = to_io_private(sch);
	return priv ? priv->cdev : NULL;
}

static inline void sch_set_cdev(struct subchannel *sch,
				struct ccw_device *cdev)
{
	struct io_subchannel_private *priv = to_io_private(sch);
	if (priv)
		priv->cdev = cdev;
}

#define MAX_CIWS 8

/*
 * Possible status values for a CCW request's I/O.
 */
enum io_status {
	IO_DONE,
	IO_RUNNING,
	IO_STATUS_ERROR,
	IO_PATH_ERROR,
	IO_REJECTED,
	IO_KILLED
};

/**
 * ccw_request - Internal CCW request.
 * @cp: channel program to start
 * @timeout: maximum allowable time in jiffies between start I/O and interrupt
 * @maxretries: number of retries per I/O operation and path
 * @lpm: mask of paths to use
 * @check: optional callback that determines if results are final
 * @filter: optional callback to adjust request status based on IRB data
 * @callback: final callback
 * @data: user-defined pointer passed to all callbacks
 * @singlepath: if set, use only one path from @lpm per start I/O
 * @cancel: non-zero if request was cancelled
 * @done: non-zero if request was finished
 * @mask: current path mask
 * @retries: current number of retries
 * @drc: delayed return code
 */
struct ccw_request {
	struct ccw1 *cp;
	unsigned long timeout;
	u16 maxretries;
	u8 lpm;
	int (*check)(struct ccw_device *, void *);
	enum io_status (*filter)(struct ccw_device *, void *, struct irb *,
				 enum io_status);
	void (*callback)(struct ccw_device *, void *, int);
	void *data;
	unsigned int singlepath:1;
	/* These fields are used internally. */
	unsigned int cancel:1;
	unsigned int done:1;
	u16 mask;
	u16 retries;
	int drc;
} __attribute__((packed));

/*
 * sense-id response buffer layout
 */
struct senseid {
	/* common part */
	u8  reserved;	/* always 0x'FF' */
	u16 cu_type;	/* control unit type */
	u8  cu_model;	/* control unit model */
	u16 dev_type;	/* device type */
	u8  dev_model;	/* device model */
	u8  unused;	/* padding byte */
	/* extended part */
	struct ciw ciw[MAX_CIWS];	/* variable # of CIWs */
} __attribute__ ((packed, aligned(4)));

enum cdev_todo {
	CDEV_TODO_NOTHING,
	CDEV_TODO_ENABLE_CMF,
	CDEV_TODO_REBIND,
	CDEV_TODO_REGISTER,
	CDEV_TODO_UNREG,
	CDEV_TODO_UNREG_EVAL,
};

#define FAKE_CMD_IRB	1
#define FAKE_TM_IRB	2

struct ccw_device_private {
	struct ccw_device *cdev;
	struct subchannel *sch;
	int state;		/* device state */
	atomic_t onoff;
	struct ccw_dev_id dev_id;	/* device id */
	struct subchannel_id schid;	/* subchannel number */
	struct ccw_request req;		/* internal I/O request */
	int iretry;
	u8 pgid_valid_mask;	/* mask of valid PGIDs */
	u8 pgid_todo_mask;	/* mask of PGIDs to be adjusted */
	u8 pgid_reset_mask;	/* mask of PGIDs which were reset */
	u8 path_noirq_mask;	/* mask of paths for which no irq was
				   received */
	u8 path_notoper_mask;	/* mask of paths which were found
				   not operable */
	u8 path_gone_mask;	/* mask of paths, that became unavailable */
	u8 path_new_mask;	/* mask of paths, that became available */
	struct {
		unsigned int fast:1;	/* post with "channel end" */
		unsigned int repall:1;	/* report every interrupt status */
		unsigned int pgroup:1;	/* do path grouping */
		unsigned int force:1;	/* allow forced online */
		unsigned int mpath:1;	/* do multipathing */
	} __attribute__ ((packed)) options;
	struct {
		unsigned int esid:1;	    /* Ext. SenseID supported by HW */
		unsigned int dosense:1;	    /* delayed SENSE required */
		unsigned int doverify:1;    /* delayed path verification */
		unsigned int donotify:1;    /* call notify function */
		unsigned int recog_done:1;  /* dev. recog. complete */
		unsigned int fake_irb:2;    /* deliver faked irb */
		unsigned int resuming:1;    /* recognition while resume */
		unsigned int pgroup:1;	    /* pathgroup is set up */
		unsigned int mpath:1;	    /* multipathing is set up */
		unsigned int pgid_unknown:1;/* unknown pgid state */
		unsigned int initialized:1; /* set if initial reference held */
	} __attribute__((packed)) flags;
	unsigned long intparm;	/* user interruption parameter */
	struct qdio_irq *qdio_data;
	struct irb irb;		/* device status */
	struct senseid senseid;	/* SenseID info */
	struct pgid pgid[8];	/* path group IDs per chpid*/
	struct ccw1 iccws[2];	/* ccws for SNID/SID/SPGID commands */
	struct work_struct todo_work;
	enum cdev_todo todo;
	wait_queue_head_t wait_q;
	struct timer_list timer;
	void *cmb;			/* measurement information */
	struct list_head cmb_list;	/* list of measured devices */
	u64 cmb_start_time;		/* clock value of cmb reset */
	void *cmb_wait;			/* deferred cmb enable/disable */
	enum interruption_class int_class;
};

static inline int rsch(struct subchannel_id schid)
{
	register struct subchannel_id reg1 asm("1") = schid;
	int ccode;

	asm volatile(
		"	rsch\n"
		"	ipm	%0\n"
		"	srl	%0,28"
		: "=d" (ccode)
		: "d" (reg1)
		: "cc", "memory");
	return ccode;
}

static inline int hsch(struct subchannel_id schid)
{
	register struct subchannel_id reg1 asm("1") = schid;
	int ccode;

	asm volatile(
		"	hsch\n"
		"	ipm	%0\n"
		"	srl	%0,28"
		: "=d" (ccode)
		: "d" (reg1)
		: "cc");
	return ccode;
}

static inline int xsch(struct subchannel_id schid)
{
	register struct subchannel_id reg1 asm("1") = schid;
	int ccode;

	asm volatile(
		"	.insn rre,0xb2760000,%1,0\n"
		"	ipm	%0\n"
		"	srl	%0,28"
		: "=d" (ccode)
		: "d" (reg1)
		: "cc");
	return ccode;
}

#endif

167
drivers/s390/cio/ioasm.h
Normal file
@@ -0,0 +1,167 @@
#ifndef S390_CIO_IOASM_H
#define S390_CIO_IOASM_H

#include <asm/chpid.h>
#include <asm/schid.h>
#include "orb.h"
#include "cio.h"

/*
 * TPI info structure
 */
struct tpi_info {
	struct subchannel_id schid;
	__u32 intparm;		/* interruption parameter */
	__u32 adapter_IO : 1;
	__u32 reserved2	 : 1;
	__u32 isc	 : 3;
	__u32 reserved3	 : 12;
	__u32 int_type	 : 3;
	__u32 reserved4	 : 12;
} __attribute__ ((packed));


/*
 * Some S390 specific IO instructions as inline
 */

static inline int stsch_err(struct subchannel_id schid, struct schib *addr)
{
	register struct subchannel_id reg1 asm ("1") = schid;
	int ccode = -EIO;

	asm volatile(
		"	stsch	0(%3)\n"
		"0:	ipm	%0\n"
		"	srl	%0,28\n"
		"1:\n"
		EX_TABLE(0b,1b)
		: "+d" (ccode), "=m" (*addr)
		: "d" (reg1), "a" (addr)
		: "cc");
	return ccode;
}

static inline int msch(struct subchannel_id schid, struct schib *addr)
{
	register struct subchannel_id reg1 asm ("1") = schid;
	int ccode;

	asm volatile(
		"	msch	0(%2)\n"
		"	ipm	%0\n"
		"	srl	%0,28"
		: "=d" (ccode)
		: "d" (reg1), "a" (addr), "m" (*addr)
		: "cc");
	return ccode;
}

static inline int msch_err(struct subchannel_id schid, struct schib *addr)
{
	register struct subchannel_id reg1 asm ("1") = schid;
	int ccode = -EIO;

	asm volatile(
		"	msch	0(%2)\n"
		"0:	ipm	%0\n"
		"	srl	%0,28\n"
		"1:\n"
		EX_TABLE(0b,1b)
		: "+d" (ccode)
		: "d" (reg1), "a" (addr), "m" (*addr)
		: "cc");
	return ccode;
}

static inline int tsch(struct subchannel_id schid, struct irb *addr)
{
	register struct subchannel_id reg1 asm ("1") = schid;
	int ccode;

	asm volatile(
		"	tsch	0(%3)\n"
		"	ipm	%0\n"
		"	srl	%0,28"
		: "=d" (ccode), "=m" (*addr)
		: "d" (reg1), "a" (addr)
		: "cc");
	return ccode;
}

static inline int ssch(struct subchannel_id schid, union orb *addr)
{
	register struct subchannel_id reg1 asm("1") = schid;
	int ccode = -EIO;

	asm volatile(
		"	ssch	0(%2)\n"
		"0:	ipm	%0\n"
		"	srl	%0,28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: "+d" (ccode)
		: "d" (reg1), "a" (addr), "m" (*addr)
		: "cc", "memory");
	return ccode;
}

static inline int csch(struct subchannel_id schid)
{
	register struct subchannel_id reg1 asm("1") = schid;
	int ccode;

	asm volatile(
		"	csch\n"
		"	ipm	%0\n"
		"	srl	%0,28"
		: "=d" (ccode)
		: "d" (reg1)
		: "cc");
	return ccode;
}

static inline int tpi(struct tpi_info *addr)
{
	int ccode;

	asm volatile(
		"	tpi	0(%2)\n"
		"	ipm	%0\n"
		"	srl	%0,28"
		: "=d" (ccode), "=m" (*addr)
		: "a" (addr)
		: "cc");
	return ccode;
}

static inline int chsc(void *chsc_area)
{
	typedef struct { char _[4096]; } addr_type;
	int cc;

	asm volatile(
		"	.insn	rre,0xb25f0000,%2,0\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc), "=m" (*(addr_type *) chsc_area)
		: "d" (chsc_area), "m" (*(addr_type *) chsc_area)
		: "cc");
	return cc;
}

static inline int rchp(struct chp_id chpid)
{
	register struct chp_id reg1 asm ("1") = chpid;
	int ccode;

	asm volatile(
		"	lr	1,%1\n"
		"	rchp\n"
		"	ipm	%0\n"
		"	srl	%0,28"
		: "=d" (ccode) : "d" (reg1) : "cc");
	return ccode;
}

#endif
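
Editorial note: each wrapper above returns the instruction's condition code (0..3); the *_err variants instead return the preset -EIO when the exception-table path is taken on an invalid subchannel id. A hedged usage sketch, with example_read_schib() being hypothetical:

static int example_read_schib(struct subchannel_id schid, struct schib *schib)
{
	int cc = stsch_err(schid, schib);

	if (cc == 3 || cc == -EIO)
		return -ENODEV;		/* not operational or invalid schid */
	return cc ? -EBUSY : 0;		/* cc 1/2: treat as busy here */
}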

68
drivers/s390/cio/isc.c
Normal file
@@ -0,0 +1,68 @@
/*
 * Functions for registration of I/O interruption subclasses on s390.
 *
 * Copyright IBM Corp. 2008
 * Authors: Sebastian Ott <sebott@linux.vnet.ibm.com>
 */

#include <linux/spinlock.h>
#include <linux/module.h>
#include <asm/isc.h>

static unsigned int isc_refs[MAX_ISC + 1];
static DEFINE_SPINLOCK(isc_ref_lock);


/**
 * isc_register - register an I/O interruption subclass.
 * @isc: I/O interruption subclass to register
 *
 * The number of users for @isc is increased. If this is the first user to
 * register @isc, the corresponding I/O interruption subclass mask is enabled.
 *
 * Context:
 *   This function must not be called in interrupt context.
 */
void isc_register(unsigned int isc)
{
	if (isc > MAX_ISC) {
		WARN_ON(1);
		return;
	}

	spin_lock(&isc_ref_lock);
	if (isc_refs[isc] == 0)
		ctl_set_bit(6, 31 - isc);
	isc_refs[isc]++;
	spin_unlock(&isc_ref_lock);
}
EXPORT_SYMBOL_GPL(isc_register);

/**
 * isc_unregister - unregister an I/O interruption subclass.
 * @isc: I/O interruption subclass to unregister
 *
 * The number of users for @isc is decreased. If this is the last user to
 * unregister @isc, the corresponding I/O interruption subclass mask is
 * disabled.
 * Note: This function must not be called if isc_register() hasn't been called
 * before by the driver for @isc.
 *
 * Context:
 *   This function must not be called in interrupt context.
 */
void isc_unregister(unsigned int isc)
{
	spin_lock(&isc_ref_lock);
	/* check for misuse */
	if (isc > MAX_ISC || isc_refs[isc] == 0) {
		WARN_ON(1);
		goto out_unlock;
	}
	if (isc_refs[isc] == 1)
		ctl_clear_bit(6, 31 - isc);
	isc_refs[isc]--;
out_unlock:
	spin_unlock(&isc_ref_lock);
}
EXPORT_SYMBOL_GPL(isc_unregister);
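
Editorial note: registration is reference counted, so every isc_register() needs a matching isc_unregister() on the same subclass, exactly as the eadm driver above does in its init/exit and cleanup paths. A hedged sketch; MY_ISC and do_driver_setup() are hypothetical, and MY_ISC must not exceed MAX_ISC:

static int do_driver_setup(void)	/* hypothetical setup step */
{
	return 0;
}

static int example_driver_init(void)
{
	int rc;

	isc_register(MY_ISC);		/* take a reference, enable mask */
	rc = do_driver_setup();
	if (rc)
		isc_unregister(MY_ISC);	/* keep references balanced */
	return rc;
}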

369
drivers/s390/cio/itcw.c
Normal file
@@ -0,0 +1,369 @@
/*
|
||||
* Functions for incremental construction of fcx enabled I/O control blocks.
|
||||
*
|
||||
* Copyright IBM Corp. 2008
|
||||
* Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/string.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/module.h>
|
||||
#include <asm/fcx.h>
|
||||
#include <asm/itcw.h>
|
||||
|
||||
/**
|
||||
* struct itcw - incremental tcw helper data type
|
||||
*
|
||||
* This structure serves as a handle for the incremental construction of a
|
||||
* tcw and associated tccb, tsb, data tidaw-list plus an optional interrogate
|
||||
* tcw and associated data. The data structures are contained inside a single
|
||||
* contiguous buffer provided by the user.
|
||||
*
|
||||
* The itcw construction functions take care of overall data integrity:
|
||||
* - reset unused fields to zero
|
||||
* - fill in required pointers
|
||||
* - ensure required alignment for data structures
|
||||
* - prevent data structures to cross 4k-byte boundary where required
|
||||
* - calculate tccb-related length fields
|
||||
 * - optionally provide ready-made interrogate tcw and associated structures
 *
 * Restrictions apply to the itcws created with these construction functions:
 * - tida only supported for data address, not for tccb
 * - only contiguous tidaw-lists (no ttic)
 * - total number of bytes required per itcw may not exceed 4k bytes
 * - either read or write operation (may not work with r=0 and w=0)
 *
 * Example:
 *	struct itcw *itcw;
 *	void *buffer;
 *	size_t size;
 *
 *	size = itcw_calc_size(1, 2, 0);
 *	buffer = kmalloc(size, GFP_KERNEL | GFP_DMA);
 *	if (!buffer)
 *		return -ENOMEM;
 *	itcw = itcw_init(buffer, size, ITCW_OP_READ, 1, 2, 0);
 *	if (IS_ERR(itcw))
 *		return PTR_ERR(itcw);
 *	itcw_add_dcw(itcw, 0x2, 0, NULL, 0, 72);
 *	itcw_add_tidaw(itcw, 0, 0x30000, 20);
 *	itcw_add_tidaw(itcw, 0, 0x40000, 52);
 *	itcw_finalize(itcw);
 *
 */
struct itcw {
	struct tcw *tcw;
	struct tcw *intrg_tcw;
	int num_tidaws;
	int max_tidaws;
	int intrg_num_tidaws;
	int intrg_max_tidaws;
};

/**
 * itcw_get_tcw - return pointer to tcw associated with the itcw
 * @itcw: address of the itcw
 *
 * Return pointer to the tcw associated with the itcw.
 */
struct tcw *itcw_get_tcw(struct itcw *itcw)
{
	return itcw->tcw;
}
EXPORT_SYMBOL(itcw_get_tcw);

/**
 * itcw_calc_size - return the size of an itcw with the given parameters
 * @intrg: if non-zero, add an interrogate tcw
 * @max_tidaws: maximum number of tidaws to be used for data addressing or zero
 * if no tida is to be used.
 * @intrg_max_tidaws: maximum number of tidaws to be used for data addressing
 * by the interrogate tcw, if specified
 *
 * Calculate and return the number of bytes required to hold an itcw with the
 * given parameters and assuming tccbs with maximum size.
 *
 * Note that the resulting size also contains bytes needed for alignment
 * padding as well as padding to ensure that data structures don't cross a
 * 4k-boundary where required.
 */
size_t itcw_calc_size(int intrg, int max_tidaws, int intrg_max_tidaws)
{
	size_t len;
	int cross_count;

	/* Main data. */
	len = sizeof(struct itcw);
	len += /* TCW */ sizeof(struct tcw) + /* TCCB */ TCCB_MAX_SIZE +
	       /* TSB */ sizeof(struct tsb) +
	       /* TIDAL */ max_tidaws * sizeof(struct tidaw);
	/* Interrogate data. */
	if (intrg) {
		len += /* TCW */ sizeof(struct tcw) + /* TCCB */ TCCB_MAX_SIZE +
		       /* TSB */ sizeof(struct tsb) +
		       /* TIDAL */ intrg_max_tidaws * sizeof(struct tidaw);
	}

	/* Maximum required alignment padding. */
	len += /* Initial TCW */ 63 + /* Interrogate TCCB */ 7;

	/*
	 * TIDAW lists may not cross a 4k boundary. To cross a boundary we
	 * need to add a TTIC TIDAW. We need to reserve one additional TIDAW
	 * for a TTIC that we may need to add due to the placement of the
	 * data chunk in memory, and a further TIDAW for each page boundary
	 * that the TIDAW list may cross due to its own size.
	 */
	if (max_tidaws) {
		cross_count = 1 + ((max_tidaws * sizeof(struct tidaw) - 1)
				   >> PAGE_SHIFT);
		len += cross_count * sizeof(struct tidaw);
	}
	if (intrg_max_tidaws) {
		cross_count = 1 + ((intrg_max_tidaws * sizeof(struct tidaw) - 1)
				   >> PAGE_SHIFT);
		len += cross_count * sizeof(struct tidaw);
	}
	return len;
}
EXPORT_SYMBOL(itcw_calc_size);
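
/*
 * Worked example (editor's sketch, not from the original source): for a
 * write-direction itcw without interrogate support, the size/init pair
 * mirrors the read example in the header comment above; the values are
 * illustrative only.
 *
 *	size = itcw_calc_size(0, 2, 0);
 *	buffer = kmalloc(size, GFP_KERNEL | GFP_DMA);
 *	if (!buffer)
 *		return -ENOMEM;
 *	itcw = itcw_init(buffer, size, ITCW_OP_WRITE, 0, 2, 0);
 */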

#define CROSS4K(x, l)	(((x) & ~4095) != ((x + l) & ~4095))

static inline void *fit_chunk(addr_t *start, addr_t end, size_t len,
			      int align, int check_4k)
{
	addr_t addr;

	addr = ALIGN(*start, align);
	if (check_4k && CROSS4K(addr, len)) {
		addr = ALIGN(addr, 4096);
		addr = ALIGN(addr, align);
	}
	if (addr + len > end)
		return ERR_PTR(-ENOSPC);
	*start = addr + len;
	return (void *) addr;
}
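
/*
 * Editor's note: CROSS4K compares the 4k page of the first byte with the
 * page of the byte just past the chunk, so a chunk ending exactly on a 4k
 * boundary (e.g. x = 0xfe0, l = 0x20) is also treated as crossing. The
 * check is therefore conservative, which is harmless here: fit_chunk()
 * merely skips ahead to the next boundary.
 */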

/**
 * itcw_init - initialize incremental tcw data structure
 * @buffer: address of buffer to use for data structures
 * @size: number of bytes in buffer
 * @op: %ITCW_OP_READ for a read operation tcw, %ITCW_OP_WRITE for a write
 * operation tcw
 * @intrg: if non-zero, add and initialize an interrogate tcw
 * @max_tidaws: maximum number of tidaws to be used for data addressing or zero
 * if no tida is to be used.
 * @intrg_max_tidaws: maximum number of tidaws to be used for data addressing
 * by the interrogate tcw, if specified
 *
 * Prepare the specified buffer to be used as an incremental tcw, i.e. a
 * helper data structure that can be used to construct a valid tcw by
 * successive calls to other helper functions. Note: the buffer needs to be
 * located below the 2G address limit. The resulting tcw has the following
 * restrictions:
 * - no tccb tidal
 * - input/output tidal is contiguous (no ttic)
 * - total data should not exceed 4k
 * - tcw specifies either read or write operation
 *
 * On success, return pointer to the resulting incremental tcw data structure,
 * ERR_PTR otherwise.
 */
struct itcw *itcw_init(void *buffer, size_t size, int op, int intrg,
		       int max_tidaws, int intrg_max_tidaws)
{
	struct itcw *itcw;
	void *chunk;
	addr_t start;
	addr_t end;
	int cross_count;

	/* Check for 2G limit. */
	start = (addr_t) buffer;
	end = start + size;
	if (end > (1 << 31))
		return ERR_PTR(-EINVAL);
	memset(buffer, 0, size);
	/* ITCW. */
	chunk = fit_chunk(&start, end, sizeof(struct itcw), 1, 0);
	if (IS_ERR(chunk))
		return chunk;
	itcw = chunk;
	/* allow for TTIC tidaws that may be needed to cross a page boundary */
	cross_count = 0;
	if (max_tidaws)
		cross_count = 1 + ((max_tidaws * sizeof(struct tidaw) - 1)
				   >> PAGE_SHIFT);
	itcw->max_tidaws = max_tidaws + cross_count;
	cross_count = 0;
	if (intrg_max_tidaws)
		cross_count = 1 + ((intrg_max_tidaws * sizeof(struct tidaw) - 1)
				   >> PAGE_SHIFT);
	itcw->intrg_max_tidaws = intrg_max_tidaws + cross_count;
	/* Main TCW. */
	chunk = fit_chunk(&start, end, sizeof(struct tcw), 64, 0);
	if (IS_ERR(chunk))
		return chunk;
	itcw->tcw = chunk;
	tcw_init(itcw->tcw, (op == ITCW_OP_READ) ? 1 : 0,
		 (op == ITCW_OP_WRITE) ? 1 : 0);
	/* Interrogate TCW. */
	if (intrg) {
		chunk = fit_chunk(&start, end, sizeof(struct tcw), 64, 0);
		if (IS_ERR(chunk))
			return chunk;
		itcw->intrg_tcw = chunk;
		tcw_init(itcw->intrg_tcw, 1, 0);
		tcw_set_intrg(itcw->tcw, itcw->intrg_tcw);
	}
	/* Data TIDAL. */
	if (max_tidaws > 0) {
		chunk = fit_chunk(&start, end, sizeof(struct tidaw) *
				  itcw->max_tidaws, 16, 0);
		if (IS_ERR(chunk))
			return chunk;
		tcw_set_data(itcw->tcw, chunk, 1);
	}
	/* Interrogate data TIDAL. */
	if (intrg && (intrg_max_tidaws > 0)) {
		chunk = fit_chunk(&start, end, sizeof(struct tidaw) *
				  itcw->intrg_max_tidaws, 16, 0);
		if (IS_ERR(chunk))
			return chunk;
		tcw_set_data(itcw->intrg_tcw, chunk, 1);
	}
	/* TSB. */
	chunk = fit_chunk(&start, end, sizeof(struct tsb), 8, 0);
	if (IS_ERR(chunk))
		return chunk;
	tsb_init(chunk);
	tcw_set_tsb(itcw->tcw, chunk);
	/* Interrogate TSB. */
	if (intrg) {
		chunk = fit_chunk(&start, end, sizeof(struct tsb), 8, 0);
		if (IS_ERR(chunk))
			return chunk;
		tsb_init(chunk);
		tcw_set_tsb(itcw->intrg_tcw, chunk);
	}
	/* TCCB. */
	chunk = fit_chunk(&start, end, TCCB_MAX_SIZE, 8, 0);
	if (IS_ERR(chunk))
		return chunk;
	tccb_init(chunk, TCCB_MAX_SIZE, TCCB_SAC_DEFAULT);
	tcw_set_tccb(itcw->tcw, chunk);
	/* Interrogate TCCB. */
	if (intrg) {
		chunk = fit_chunk(&start, end, TCCB_MAX_SIZE, 8, 0);
		if (IS_ERR(chunk))
			return chunk;
		tccb_init(chunk, TCCB_MAX_SIZE, TCCB_SAC_INTRG);
		tcw_set_tccb(itcw->intrg_tcw, chunk);
		tccb_add_dcw(chunk, TCCB_MAX_SIZE, DCW_CMD_INTRG, 0, NULL,
			     sizeof(struct dcw_intrg_data), 0);
		tcw_finalize(itcw->intrg_tcw, 0);
	}
	return itcw;
}
EXPORT_SYMBOL(itcw_init);

/**
 * itcw_add_dcw - add a dcw to the itcw
 * @itcw: address of the itcw
 * @cmd: the dcw command
 * @flags: flags for the dcw
 * @cd: address of control data for this dcw or NULL if none is required
 * @cd_count: number of control data bytes for this dcw
 * @count: number of data bytes for this dcw
 *
 * Add a new dcw to the specified itcw by writing the dcw information specified
 * by @cmd, @flags, @cd, @cd_count and @count to the tca of the tccb. Return
 * a pointer to the newly added dcw on success or -%ENOSPC if the new dcw
 * would exceed the available space.
 *
 * Note: the tcal field of the tccb header will be updated to reflect added
 * content.
 */
struct dcw *itcw_add_dcw(struct itcw *itcw, u8 cmd, u8 flags, void *cd,
			 u8 cd_count, u32 count)
{
	return tccb_add_dcw(tcw_get_tccb(itcw->tcw), TCCB_MAX_SIZE, cmd,
			    flags, cd, cd_count, count);
}
EXPORT_SYMBOL(itcw_add_dcw);

/**
 * itcw_add_tidaw - add a tidaw to the itcw
 * @itcw: address of the itcw
 * @flags: flags for the new tidaw
 * @addr: address value for the new tidaw
 * @count: count value for the new tidaw
 *
 * Add a new tidaw to the input/output data tidaw-list of the specified itcw
 * (depending on the value of the r-flag and w-flag). Return a pointer to
 * the new tidaw on success or -%ENOSPC if the new tidaw would exceed the
 * available space.
 *
 * Note: TTIC tidaws are automatically added when needed, so explicitly calling
 * this interface with the TTIC flag is not supported. The last-tidaw flag
 * for the last tidaw in the list will be set by itcw_finalize.
 */
struct tidaw *itcw_add_tidaw(struct itcw *itcw, u8 flags, void *addr, u32 count)
{
	struct tidaw *following;

	if (itcw->num_tidaws >= itcw->max_tidaws)
		return ERR_PTR(-ENOSPC);
	/*
	 * Is the tidaw, which follows the one we are about to fill, on the
	 * next page? Then we have to insert a TTIC tidaw first, that points
	 * to the tidaw on the new page.
	 */
	following = ((struct tidaw *) tcw_get_data(itcw->tcw))
		    + itcw->num_tidaws + 1;
	if (itcw->num_tidaws && !((unsigned long) following & ~PAGE_MASK)) {
		tcw_add_tidaw(itcw->tcw, itcw->num_tidaws++,
			      TIDAW_FLAGS_TTIC, following, 0);
		if (itcw->num_tidaws >= itcw->max_tidaws)
			return ERR_PTR(-ENOSPC);
	}
	return tcw_add_tidaw(itcw->tcw, itcw->num_tidaws++, flags, addr, count);
}
EXPORT_SYMBOL(itcw_add_tidaw);
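
/*
 * Editor's note (sizes are an assumption, check <asm/fcx.h>): with a 16-byte
 * struct tidaw, 256 tidaws fit on one 4k page, so the check above fires
 * whenever the slot after the one being filled would be the first tidaw on
 * a new page; the TTIC tidaw then redirects the channel to that page.
 */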

/**
 * itcw_set_data - set data address and tida flag of the itcw
 * @itcw: address of the itcw
 * @addr: the data address
 * @use_tidal: zero if the data address specifies a contiguous block of data,
 * non-zero if it specifies a list of tidaws.
 *
 * Set the input/output data address of the itcw (depending on the value of
 * the r-flag and w-flag). If @use_tidal is non-zero, the corresponding tida
 * flag is set as well.
 */
void itcw_set_data(struct itcw *itcw, void *addr, int use_tidal)
{
	tcw_set_data(itcw->tcw, addr, use_tidal);
}
EXPORT_SYMBOL(itcw_set_data);

/**
 * itcw_finalize - calculate length and count fields of the itcw
 * @itcw: address of the itcw
 *
 * Calculate the tcw input/output count and tccbl fields and add a tcat to
 * the tccb. In case input- or output-tida is used, the tidaw-list must be
 * stored in contiguous storage (no ttic). The tcal field in the tccb must
 * be up-to-date.
 */
void itcw_finalize(struct itcw *itcw)
{
	tcw_finalize(itcw->tcw, itcw->num_tidaws);
}
EXPORT_SYMBOL(itcw_finalize);
91
drivers/s390/cio/orb.h
Normal file
@@ -0,0 +1,91 @@
/*
 * Orb related data structures.
 *
 * Copyright IBM Corp. 2007, 2011
 *
 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
 *	      Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
 *	      Sebastian Ott <sebott@linux.vnet.ibm.com>
 */

#ifndef S390_ORB_H
#define S390_ORB_H

/*
 * Command-mode operation request block
 */
struct cmd_orb {
	u32 intparm;	/* interruption parameter */
	u32 key:4;	/* flags, like key, suspend control, etc. */
	u32 spnd:1;	/* suspend control */
	u32 res1:1;	/* reserved */
	u32 mod:1;	/* modification control */
	u32 sync:1;	/* synchronize control */
	u32 fmt:1;	/* format control */
	u32 pfch:1;	/* prefetch control */
	u32 isic:1;	/* initial-status-interruption control */
	u32 alcc:1;	/* address-limit-checking control */
	u32 ssic:1;	/* suppress-suspended-interr. control */
	u32 res2:1;	/* reserved */
	u32 c64:1;	/* IDAW/QDIO 64 bit control */
	u32 i2k:1;	/* IDAW 2/4kB block size control */
	u32 lpm:8;	/* logical path mask */
	u32 ils:1;	/* incorrect length */
	u32 zero:6;	/* reserved zeros */
	u32 orbx:1;	/* ORB extension control */
	u32 cpa;	/* channel program address */
} __packed __aligned(4);

/*
 * Transport-mode operation request block
 */
struct tm_orb {
	u32 intparm;
	u32 key:4;
	u32:9;
	u32 b:1;
	u32:2;
	u32 lpm:8;
	u32:7;
	u32 x:1;
	u32 tcw;
	u32 prio:8;
	u32:8;
	u32 rsvpgm:8;
	u32:8;
	u32:32;
	u32:32;
	u32:32;
	u32:32;
} __packed __aligned(4);

/*
 * eadm operation request block
 */
struct eadm_orb {
	u32 intparm;
	u32 key:4;
	u32:4;
	u32 compat1:1;
	u32 compat2:1;
	u32:21;
	u32 x:1;
	u32 aob;
	u32 css_prio:8;
	u32:8;
	u32 scm_prio:8;
	u32:8;
	u32:29;
	u32 fmt:3;
	u32:32;
	u32:32;
	u32:32;
} __packed __aligned(4);

union orb {
	struct cmd_orb cmd;
	struct tm_orb tm;
	struct eadm_orb eadm;
} __packed __aligned(4);
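
/*
 * Editor's sketch (an assumption, not in the original header): the transport-
 * and eadm-mode ORBs above are laid out as eight 32-bit words, so build-time
 * checks along these lines would guard the bit-field packing:
 *
 *	BUILD_BUG_ON(sizeof(struct tm_orb) != 32);
 *	BUILD_BUG_ON(sizeof(struct eadm_orb) != 32);
 *	BUILD_BUG_ON(sizeof(union orb) != 32);
 */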

#endif /* S390_ORB_H */
425
drivers/s390/cio/qdio.h
Normal file
@@ -0,0 +1,425 @@
/*
 * Copyright IBM Corp. 2000, 2009
 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
 *	      Jan Glauber <jang@linux.vnet.ibm.com>
 */
#ifndef _CIO_QDIO_H
#define _CIO_QDIO_H

#include <asm/page.h>
#include <asm/schid.h>
#include <asm/debug.h>
#include "chsc.h"

#define QDIO_BUSY_BIT_PATIENCE		(100 << 12)	/* 100 microseconds */
#define QDIO_BUSY_BIT_RETRY_DELAY	10		/* 10 milliseconds */
#define QDIO_BUSY_BIT_RETRIES		1000		/* = 10s retry time */
#define QDIO_INPUT_THRESHOLD		(500 << 12)	/* 500 microseconds */

enum qdio_irq_states {
	QDIO_IRQ_STATE_INACTIVE,
	QDIO_IRQ_STATE_ESTABLISHED,
	QDIO_IRQ_STATE_ACTIVE,
	QDIO_IRQ_STATE_STOPPED,
	QDIO_IRQ_STATE_CLEANUP,
	QDIO_IRQ_STATE_ERR,
	NR_QDIO_IRQ_STATES,
};

/* used as intparm in do_IO */
#define QDIO_DOING_ESTABLISH	1
#define QDIO_DOING_ACTIVATE	2
#define QDIO_DOING_CLEANUP	3

#define SLSB_STATE_NOT_INIT	0x0
#define SLSB_STATE_EMPTY	0x1
#define SLSB_STATE_PRIMED	0x2
#define SLSB_STATE_PENDING	0x3
#define SLSB_STATE_HALTED	0xe
#define SLSB_STATE_ERROR	0xf
#define SLSB_TYPE_INPUT		0x0
#define SLSB_TYPE_OUTPUT	0x20
#define SLSB_OWNER_PROG		0x80
#define SLSB_OWNER_CU		0x40

#define SLSB_P_INPUT_NOT_INIT	\
	(SLSB_OWNER_PROG | SLSB_TYPE_INPUT | SLSB_STATE_NOT_INIT)  /* 0x80 */
#define SLSB_P_INPUT_ACK	\
	(SLSB_OWNER_PROG | SLSB_TYPE_INPUT | SLSB_STATE_EMPTY)	    /* 0x81 */
#define SLSB_CU_INPUT_EMPTY	\
	(SLSB_OWNER_CU | SLSB_TYPE_INPUT | SLSB_STATE_EMPTY)	    /* 0x41 */
#define SLSB_P_INPUT_PRIMED	\
	(SLSB_OWNER_PROG | SLSB_TYPE_INPUT | SLSB_STATE_PRIMED)    /* 0x82 */
#define SLSB_P_INPUT_HALTED	\
	(SLSB_OWNER_PROG | SLSB_TYPE_INPUT | SLSB_STATE_HALTED)    /* 0x8e */
#define SLSB_P_INPUT_ERROR	\
	(SLSB_OWNER_PROG | SLSB_TYPE_INPUT | SLSB_STATE_ERROR)	    /* 0x8f */
#define SLSB_P_OUTPUT_NOT_INIT	\
	(SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_NOT_INIT) /* 0xa0 */
#define SLSB_P_OUTPUT_EMPTY	\
	(SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_EMPTY)    /* 0xa1 */
#define SLSB_P_OUTPUT_PENDING	\
	(SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_PENDING)  /* 0xa3 */
#define SLSB_CU_OUTPUT_PRIMED	\
	(SLSB_OWNER_CU | SLSB_TYPE_OUTPUT | SLSB_STATE_PRIMED)	    /* 0x62 */
#define SLSB_P_OUTPUT_HALTED	\
	(SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_HALTED)   /* 0xae */
#define SLSB_P_OUTPUT_ERROR	\
	(SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_ERROR)    /* 0xaf */

#define SLSB_ERROR_DURING_LOOKUP  0xff

/* additional CIWs returned by extended Sense-ID */
#define CIW_TYPE_EQUEUE		0x3 /* establish QDIO queues */
#define CIW_TYPE_AQUEUE		0x4 /* activate QDIO queues */

/* flags for st qdio sch data */
#define CHSC_FLAG_QDIO_CAPABILITY	0x80
#define CHSC_FLAG_VALIDITY		0x40

/* SIGA flags */
#define QDIO_SIGA_WRITE		0x00
#define QDIO_SIGA_READ		0x01
#define QDIO_SIGA_SYNC		0x02
#define QDIO_SIGA_WRITEQ	0x04
#define QDIO_SIGA_QEBSM_FLAG	0x80

#ifdef CONFIG_64BIT
static inline int do_sqbs(u64 token, unsigned char state, int queue,
			  int *start, int *count)
{
	register unsigned long _ccq asm ("0") = *count;
	register unsigned long _token asm ("1") = token;
	unsigned long _queuestart = ((unsigned long)queue << 32) | *start;

	asm volatile(
		"	.insn	rsy,0xeb000000008A,%1,0,0(%2)"
		: "+d" (_ccq), "+d" (_queuestart)
		: "d" ((unsigned long)state), "d" (_token)
		: "memory", "cc");
	*count = _ccq & 0xff;
	*start = _queuestart & 0xff;

	return (_ccq >> 32) & 0xff;
}

static inline int do_eqbs(u64 token, unsigned char *state, int queue,
			  int *start, int *count, int ack)
{
	register unsigned long _ccq asm ("0") = *count;
	register unsigned long _token asm ("1") = token;
	unsigned long _queuestart = ((unsigned long)queue << 32) | *start;
	unsigned long _state = (unsigned long)ack << 63;

	asm volatile(
		"	.insn	rrf,0xB99c0000,%1,%2,0,0"
		: "+d" (_ccq), "+d" (_queuestart), "+d" (_state)
		: "d" (_token)
		: "memory", "cc");
	*count = _ccq & 0xff;
	*start = _queuestart & 0xff;
	*state = _state & 0xff;

	return (_ccq >> 32) & 0xff;
}
#else
static inline int do_sqbs(u64 token, unsigned char state, int queue,
			  int *start, int *count) { return 0; }
static inline int do_eqbs(u64 token, unsigned char *state, int queue,
			  int *start, int *count, int ack) { return 0; }
#endif /* CONFIG_64BIT */

struct qdio_irq;

struct siga_flag {
	u8 input:1;
	u8 output:1;
	u8 sync:1;
	u8 sync_after_ai:1;
	u8 sync_out_after_pci:1;
	u8:3;
} __attribute__ ((packed));

struct qdio_dev_perf_stat {
	unsigned int adapter_int;
	unsigned int qdio_int;
	unsigned int pci_request_int;

	unsigned int tasklet_inbound;
	unsigned int tasklet_inbound_resched;
	unsigned int tasklet_inbound_resched2;
	unsigned int tasklet_outbound;

	unsigned int siga_read;
	unsigned int siga_write;
	unsigned int siga_sync;

	unsigned int inbound_call;
	unsigned int inbound_handler;
	unsigned int stop_polling;
	unsigned int inbound_queue_full;
	unsigned int outbound_call;
	unsigned int outbound_handler;
	unsigned int outbound_queue_full;
	unsigned int fast_requeue;
	unsigned int target_full;
	unsigned int eqbs;
	unsigned int eqbs_partial;
	unsigned int sqbs;
	unsigned int sqbs_partial;
	unsigned int int_discarded;
} ____cacheline_aligned;

struct qdio_queue_perf_stat {
	/*
	 * Sorted into order-2 buckets: 1, 2-3, 4-7, ... 64-127, 128.
	 * Since max. 127 SBALs are scanned reuse entry for 128 as queue full
	 * aka 127 SBALs found.
	 */
	unsigned int nr_sbals[8];
	unsigned int nr_sbal_error;
	unsigned int nr_sbal_nop;
	unsigned int nr_sbal_total;
};

enum qdio_queue_irq_states {
	QDIO_QUEUE_IRQS_DISABLED,
};

struct qdio_input_q {
	/* input buffer acknowledgement flag */
	int polling;
	/* first ACK'ed buffer */
	int ack_start;
	/* how many SBALs are acknowledged with qebsm */
	int ack_count;
	/* last time of noticing incoming data */
	u64 timestamp;
	/* upper-layer polling flag */
	unsigned long queue_irq_state;
	/* callback to start upper-layer polling */
	void (*queue_start_poll) (struct ccw_device *, int, unsigned long);
};

struct qdio_output_q {
	/* PCIs are enabled for the queue */
	int pci_out_enabled;
	/* cq: use asynchronous output buffers */
	int use_cq;
	/* cq: aobs used for particular SBALs */
	struct qaob **aobs;
	/* cq: sbal state related to asynchronous operation */
	struct qdio_outbuf_state *sbal_state;
	/* timer to check for more outbound work */
	struct timer_list timer;
	/* used SBALs before tasklet schedule */
	int scan_threshold;
};

/*
 * Note on cache alignment: grouped slsb and write-mostly data go at the
 * beginning; sbal[] is read-only and starts on a new cacheline, followed by
 * the read-mostly fields.
 */
struct qdio_q {
	struct slsb slsb;

	union {
		struct qdio_input_q in;
		struct qdio_output_q out;
	} u;

	/*
	 * inbound: next buffer the program should check for
	 * outbound: next buffer to check if adapter processed it
	 */
	int first_to_check;

	/* first_to_check of the last time */
	int last_move;

	/* beginning position for calling the program */
	int first_to_kick;

	/* number of buffers in use by the adapter */
	atomic_t nr_buf_used;

	/* error condition during a data transfer */
	unsigned int qdio_error;

	/* last scan of the queue */
	u64 timestamp;

	struct tasklet_struct tasklet;
	struct qdio_queue_perf_stat q_stats;

	struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q] ____cacheline_aligned;

	/* queue number */
	int nr;

	/* bitmask of queue number */
	int mask;

	/* input or output queue */
	int is_input_q;

	/* list of thinint input queues */
	struct list_head entry;

	/* upper-layer program handler */
	qdio_handler_t (*handler);

	struct dentry *debugfs_q;
	struct qdio_irq *irq_ptr;
	struct sl *sl;
	/*
	 * A page is allocated under this pointer and used for slib and sl.
	 * slib is 2048 bytes big and sl points to offset PAGE_SIZE / 2.
	 */
	struct slib *slib;
} __attribute__ ((aligned(256)));

struct qdio_irq {
	struct qib qib;
	u32 *dsci;		/* address of device state change indicator */
	struct ccw_device *cdev;
	struct dentry *debugfs_dev;
	struct dentry *debugfs_perf;

	unsigned long int_parm;
	struct subchannel_id schid;
	unsigned long sch_token;	/* QEBSM facility */

	enum qdio_irq_states state;

	struct siga_flag siga_flag;	/* siga sync information from qdioac */

	int nr_input_qs;
	int nr_output_qs;

	struct ccw1 ccw;
	struct ciw equeue;
	struct ciw aqueue;

	struct qdio_ssqd_desc ssqd_desc;
	void (*orig_handler) (struct ccw_device *, unsigned long, struct irb *);

	int perf_stat_enabled;

	struct qdr *qdr;
	unsigned long chsc_page;

	struct qdio_q *input_qs[QDIO_MAX_QUEUES_PER_IRQ];
	struct qdio_q *output_qs[QDIO_MAX_QUEUES_PER_IRQ];

	debug_info_t *debug_area;
	struct mutex setup_mutex;
	struct qdio_dev_perf_stat perf_stat;
};

/* helper functions */
#define queue_type(q)	q->irq_ptr->qib.qfmt
#define SCH_NO(q)	(q->irq_ptr->schid.sch_no)

#define is_thinint_irq(irq) \
	(irq->qib.qfmt == QDIO_IQDIO_QFMT || \
	 css_general_characteristics.aif_osa)

#define qperf(__qdev, __attr)	((__qdev)->perf_stat.__attr)

#define qperf_inc(__q, __attr)						\
({									\
	struct qdio_irq *qdev = (__q)->irq_ptr;				\
	if (qdev->perf_stat_enabled)					\
		(qdev->perf_stat.__attr)++;				\
})
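
/*
 * Editor's note: qperf_inc() compiles to a plain (non-atomic) increment
 * that is skipped entirely while statistics are disabled, e.g.:
 *
 *	qperf_inc(q, siga_write);	/* bumps irq_ptr->perf_stat.siga_write */
 */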

static inline void account_sbals_error(struct qdio_q *q, int count)
{
	q->q_stats.nr_sbal_error += count;
	q->q_stats.nr_sbal_total += count;
}

/* the highest iqdio queue is used for multicast */
static inline int multicast_outbound(struct qdio_q *q)
{
	return (q->irq_ptr->nr_output_qs > 1) &&
	       (q->nr == q->irq_ptr->nr_output_qs - 1);
}

#define pci_out_supported(q) \
	(q->irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED)
#define is_qebsm(q)			(q->irq_ptr->sch_token != 0)

#define need_siga_in(q)			(q->irq_ptr->siga_flag.input)
#define need_siga_out(q)		(q->irq_ptr->siga_flag.output)
#define need_siga_sync(q)		(unlikely(q->irq_ptr->siga_flag.sync))
#define need_siga_sync_after_ai(q)	\
	(unlikely(q->irq_ptr->siga_flag.sync_after_ai))
#define need_siga_sync_out_after_pci(q)	\
	(unlikely(q->irq_ptr->siga_flag.sync_out_after_pci))

#define for_each_input_queue(irq_ptr, q, i)		\
	for (i = 0; i < irq_ptr->nr_input_qs &&		\
		({ q = irq_ptr->input_qs[i]; 1; }); i++)
#define for_each_output_queue(irq_ptr, q, i)		\
	for (i = 0; i < irq_ptr->nr_output_qs &&	\
		({ q = irq_ptr->output_qs[i]; 1; }); i++)
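
/*
 * Editor's note: the iterators above load q as part of the loop condition
 * (the statement-expression always yields 1), so the body can use q
 * directly, as the qdio code itself does:
 *
 *	struct qdio_q *q;
 *	int i;
 *
 *	for_each_input_queue(irq_ptr, q, i)
 *		tasklet_schedule(&q->tasklet);
 */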

#define prev_buf(bufnr)	\
	((bufnr + QDIO_MAX_BUFFERS_MASK) & QDIO_MAX_BUFFERS_MASK)
#define next_buf(bufnr)	\
	((bufnr + 1) & QDIO_MAX_BUFFERS_MASK)
#define add_buf(bufnr, inc) \
	((bufnr + inc) & QDIO_MAX_BUFFERS_MASK)
#define sub_buf(bufnr, dec) \
	((bufnr - dec) & QDIO_MAX_BUFFERS_MASK)
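
/*
 * Editor's note: these helpers implement modulo arithmetic on the buffer
 * ring. Assuming QDIO_MAX_BUFFERS_PER_Q is 128 (mask 127, defined in
 * <asm/qdio.h>), the indices wrap around as in:
 *
 *	next_buf(127)    == 0
 *	prev_buf(0)      == 127
 *	add_buf(120, 16) == 8
 */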

#define queue_irqs_enabled(q)			\
	(test_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state) == 0)
#define queue_irqs_disabled(q)			\
	(test_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state) != 0)

extern u64 last_ai_time;

/* prototypes for thin interrupt */
void qdio_setup_thinint(struct qdio_irq *irq_ptr);
int qdio_establish_thinint(struct qdio_irq *irq_ptr);
void qdio_shutdown_thinint(struct qdio_irq *irq_ptr);
void tiqdio_add_input_queues(struct qdio_irq *irq_ptr);
void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr);
void tiqdio_inbound_processing(unsigned long q);
int tiqdio_allocate_memory(void);
void tiqdio_free_memory(void);
int tiqdio_register_thinints(void);
void tiqdio_unregister_thinints(void);
void clear_nonshared_ind(struct qdio_irq *);
int test_nonshared_ind(struct qdio_irq *);

/* prototypes for setup */
void qdio_inbound_processing(unsigned long data);
void qdio_outbound_processing(unsigned long data);
void qdio_outbound_timer(unsigned long data);
void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
		      struct irb *irb);
int qdio_allocate_qs(struct qdio_irq *irq_ptr, int nr_input_qs,
		     int nr_output_qs);
void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr);
int qdio_setup_get_ssqd(struct qdio_irq *irq_ptr,
			struct subchannel_id *schid,
			struct qdio_ssqd_desc *data);
int qdio_setup_irq(struct qdio_initialize *init_data);
void qdio_print_subchannel_info(struct qdio_irq *irq_ptr,
				struct ccw_device *cdev);
void qdio_release_memory(struct qdio_irq *irq_ptr);
int qdio_setup_create_sysfs(struct ccw_device *cdev);
void qdio_setup_destroy_sysfs(struct ccw_device *cdev);
int qdio_setup_init(void);
void qdio_setup_exit(void);
int qdio_enable_async_operation(struct qdio_output_q *q);
void qdio_disable_async_operation(struct qdio_output_q *q);
struct qaob *qdio_allocate_aob(void);

int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
			unsigned char *state);
#endif /* _CIO_QDIO_H */
373
drivers/s390/cio/qdio_debug.c
Normal file
@@ -0,0 +1,373 @@
/*
 * Copyright IBM Corp. 2008, 2009
 *
 * Author: Jan Glauber (jang@linux.vnet.ibm.com)
 */
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <asm/debug.h>
#include "qdio_debug.h"
#include "qdio.h"

debug_info_t *qdio_dbf_setup;
debug_info_t *qdio_dbf_error;

static struct dentry *debugfs_root;
#define QDIO_DEBUGFS_NAME_LEN	10
#define QDIO_DBF_NAME_LEN	20

struct qdio_dbf_entry {
	char dbf_name[QDIO_DBF_NAME_LEN];
	debug_info_t *dbf_info;
	struct list_head dbf_list;
};

static LIST_HEAD(qdio_dbf_list);
static DEFINE_MUTEX(qdio_dbf_list_mutex);

static debug_info_t *qdio_get_dbf_entry(char *name)
{
	struct qdio_dbf_entry *entry;
	debug_info_t *rc = NULL;

	mutex_lock(&qdio_dbf_list_mutex);
	list_for_each_entry(entry, &qdio_dbf_list, dbf_list) {
		if (strcmp(entry->dbf_name, name) == 0) {
			rc = entry->dbf_info;
			break;
		}
	}
	mutex_unlock(&qdio_dbf_list_mutex);
	return rc;
}

static void qdio_clear_dbf_list(void)
{
	struct qdio_dbf_entry *entry, *tmp;

	mutex_lock(&qdio_dbf_list_mutex);
	list_for_each_entry_safe(entry, tmp, &qdio_dbf_list, dbf_list) {
		list_del(&entry->dbf_list);
		debug_unregister(entry->dbf_info);
		kfree(entry);
	}
	mutex_unlock(&qdio_dbf_list_mutex);
}

int qdio_allocate_dbf(struct qdio_initialize *init_data,
		       struct qdio_irq *irq_ptr)
{
	char text[QDIO_DBF_NAME_LEN];
	struct qdio_dbf_entry *new_entry;

	DBF_EVENT("qfmt:%1d", init_data->q_format);
	DBF_HEX(init_data->adapter_name, 8);
	DBF_EVENT("qpff%4x", init_data->qib_param_field_format);
	DBF_HEX(&init_data->qib_param_field, sizeof(void *));
	DBF_HEX(&init_data->input_slib_elements, sizeof(void *));
	DBF_HEX(&init_data->output_slib_elements, sizeof(void *));
	DBF_EVENT("niq:%1d noq:%1d", init_data->no_input_qs,
		  init_data->no_output_qs);
	DBF_HEX(&init_data->input_handler, sizeof(void *));
	DBF_HEX(&init_data->output_handler, sizeof(void *));
	DBF_HEX(&init_data->int_parm, sizeof(long));
	DBF_HEX(&init_data->input_sbal_addr_array, sizeof(void *));
	DBF_HEX(&init_data->output_sbal_addr_array, sizeof(void *));
	DBF_EVENT("irq:%8lx", (unsigned long)irq_ptr);

	/* allocate trace view for the interface */
	snprintf(text, QDIO_DBF_NAME_LEN, "qdio_%s",
		 dev_name(&init_data->cdev->dev));
	irq_ptr->debug_area = qdio_get_dbf_entry(text);
	if (irq_ptr->debug_area)
		DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf reused");
	else {
		irq_ptr->debug_area = debug_register(text, 2, 1, 16);
		if (!irq_ptr->debug_area)
			return -ENOMEM;
		if (debug_register_view(irq_ptr->debug_area,
					&debug_hex_ascii_view)) {
			debug_unregister(irq_ptr->debug_area);
			return -ENOMEM;
		}
		debug_set_level(irq_ptr->debug_area, DBF_WARN);
		DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf created");
		new_entry = kzalloc(sizeof(struct qdio_dbf_entry), GFP_KERNEL);
		if (!new_entry) {
			debug_unregister(irq_ptr->debug_area);
			return -ENOMEM;
		}
		strlcpy(new_entry->dbf_name, text, QDIO_DBF_NAME_LEN);
		new_entry->dbf_info = irq_ptr->debug_area;
		mutex_lock(&qdio_dbf_list_mutex);
		list_add(&new_entry->dbf_list, &qdio_dbf_list);
		mutex_unlock(&qdio_dbf_list_mutex);
	}
	return 0;
}

static int qstat_show(struct seq_file *m, void *v)
{
	unsigned char state;
	struct qdio_q *q = m->private;
	int i;

	if (!q)
		return 0;

	seq_printf(m, "Timestamp: %Lx Last AI: %Lx\n",
		   q->timestamp, last_ai_time);
	seq_printf(m, "nr_used: %d  ftc: %d  last_move: %d\n",
		   atomic_read(&q->nr_buf_used),
		   q->first_to_check, q->last_move);
	if (q->is_input_q) {
		seq_printf(m, "polling: %d  ack start: %d  ack count: %d\n",
			   q->u.in.polling, q->u.in.ack_start,
			   q->u.in.ack_count);
		seq_printf(m, "DSCI: %d   IRQs disabled: %u\n",
			   *(u32 *)q->irq_ptr->dsci,
			   test_bit(QDIO_QUEUE_IRQS_DISABLED,
			   &q->u.in.queue_irq_state));
	}
	seq_printf(m, "SBAL states:\n");
	seq_printf(m, "|0      |8      |16     |24     |32     |40     |48     |56  63|\n");

	for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) {
		debug_get_buf_state(q, i, &state);
		switch (state) {
		case SLSB_P_INPUT_NOT_INIT:
		case SLSB_P_OUTPUT_NOT_INIT:
			seq_printf(m, "N");
			break;
		case SLSB_P_OUTPUT_PENDING:
			seq_printf(m, "P");
			break;
		case SLSB_P_INPUT_PRIMED:
		case SLSB_CU_OUTPUT_PRIMED:
			seq_printf(m, "+");
			break;
		case SLSB_P_INPUT_ACK:
			seq_printf(m, "A");
			break;
		case SLSB_P_INPUT_ERROR:
		case SLSB_P_OUTPUT_ERROR:
			seq_printf(m, "x");
			break;
		case SLSB_CU_INPUT_EMPTY:
		case SLSB_P_OUTPUT_EMPTY:
			seq_printf(m, "-");
			break;
		case SLSB_P_INPUT_HALTED:
		case SLSB_P_OUTPUT_HALTED:
			seq_printf(m, ".");
			break;
		default:
			seq_printf(m, "?");
		}
		if (i == 63)
			seq_printf(m, "\n");
	}
	seq_printf(m, "\n");
	seq_printf(m, "|64     |72     |80     |88     |96     |104    |112   | 127|\n");

	seq_printf(m, "\nSBAL statistics:");
	if (!q->irq_ptr->perf_stat_enabled) {
		seq_printf(m, " disabled\n");
		return 0;
	}

	seq_printf(m, "\n1          2..        4..        8..        "
		   "16..       32..       64..       127\n");
	for (i = 0; i < ARRAY_SIZE(q->q_stats.nr_sbals); i++)
		seq_printf(m, "%-10u ", q->q_stats.nr_sbals[i]);
	seq_printf(m, "\nError      NOP        Total\n%-10u %-10u %-10u\n\n",
		   q->q_stats.nr_sbal_error, q->q_stats.nr_sbal_nop,
		   q->q_stats.nr_sbal_total);
	return 0;
}

static int qstat_seq_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, qstat_show,
			   file_inode(filp)->i_private);
}

static const struct file_operations debugfs_fops = {
	.owner	 = THIS_MODULE,
	.open	 = qstat_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = single_release,
};

static char *qperf_names[] = {
	"Assumed adapter interrupts",
	"QDIO interrupts",
	"Requested PCIs",
	"Inbound tasklet runs",
	"Inbound tasklet resched",
	"Inbound tasklet resched2",
	"Outbound tasklet runs",
	"SIGA read",
	"SIGA write",
	"SIGA sync",
	"Inbound calls",
	"Inbound handler",
	"Inbound stop_polling",
	"Inbound queue full",
	"Outbound calls",
	"Outbound handler",
	"Outbound queue full",
	"Outbound fast_requeue",
	"Outbound target_full",
	"QEBSM eqbs",
	"QEBSM eqbs partial",
	"QEBSM sqbs",
	"QEBSM sqbs partial",
	"Discarded interrupts"
};

static int qperf_show(struct seq_file *m, void *v)
{
	struct qdio_irq *irq_ptr = m->private;
	unsigned int *stat;
	int i;

	if (!irq_ptr)
		return 0;
	if (!irq_ptr->perf_stat_enabled) {
		seq_printf(m, "disabled\n");
		return 0;
	}
	stat = (unsigned int *)&irq_ptr->perf_stat;

	for (i = 0; i < ARRAY_SIZE(qperf_names); i++)
		seq_printf(m, "%26s:\t%u\n",
			   qperf_names[i], *(stat + i));
	return 0;
}

static ssize_t qperf_seq_write(struct file *file, const char __user *ubuf,
			       size_t count, loff_t *off)
{
	struct seq_file *seq = file->private_data;
	struct qdio_irq *irq_ptr = seq->private;
	struct qdio_q *q;
	unsigned long val;
	int ret, i;

	if (!irq_ptr)
		return 0;

	ret = kstrtoul_from_user(ubuf, count, 10, &val);
	if (ret)
		return ret;

	switch (val) {
	case 0:
		irq_ptr->perf_stat_enabled = 0;
		memset(&irq_ptr->perf_stat, 0, sizeof(irq_ptr->perf_stat));
		for_each_input_queue(irq_ptr, q, i)
			memset(&q->q_stats, 0, sizeof(q->q_stats));
		for_each_output_queue(irq_ptr, q, i)
			memset(&q->q_stats, 0, sizeof(q->q_stats));
		break;
	case 1:
		irq_ptr->perf_stat_enabled = 1;
		break;
	}
	return count;
}
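
/*
 * Editor's note: with debugfs mounted at its usual location, per-device
 * statistics can be reset/disabled or enabled from user space (the device
 * bus-ID below is a made-up example):
 *
 *	echo 0 > /sys/kernel/debug/qdio/0.0.1234/statistics
 *	echo 1 > /sys/kernel/debug/qdio/0.0.1234/statistics
 */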

static int qperf_seq_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, qperf_show,
			   file_inode(filp)->i_private);
}

static const struct file_operations debugfs_perf_fops = {
	.owner	 = THIS_MODULE,
	.open	 = qperf_seq_open,
	.read	 = seq_read,
	.write	 = qperf_seq_write,
	.llseek  = seq_lseek,
	.release = single_release,
};

static void setup_debugfs_entry(struct qdio_q *q)
{
	char name[QDIO_DEBUGFS_NAME_LEN];

	snprintf(name, QDIO_DEBUGFS_NAME_LEN, "%s_%d",
		 q->is_input_q ? "input" : "output",
		 q->nr);
	q->debugfs_q = debugfs_create_file(name, S_IFREG | S_IRUGO | S_IWUSR,
				q->irq_ptr->debugfs_dev, q, &debugfs_fops);
	if (IS_ERR(q->debugfs_q))
		q->debugfs_q = NULL;
}

void qdio_setup_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cdev)
{
	struct qdio_q *q;
	int i;

	irq_ptr->debugfs_dev = debugfs_create_dir(dev_name(&cdev->dev),
						  debugfs_root);
	if (IS_ERR(irq_ptr->debugfs_dev))
		irq_ptr->debugfs_dev = NULL;

	irq_ptr->debugfs_perf = debugfs_create_file("statistics",
				S_IFREG | S_IRUGO | S_IWUSR,
				irq_ptr->debugfs_dev, irq_ptr,
				&debugfs_perf_fops);
	if (IS_ERR(irq_ptr->debugfs_perf))
		irq_ptr->debugfs_perf = NULL;

	for_each_input_queue(irq_ptr, q, i)
		setup_debugfs_entry(q);
	for_each_output_queue(irq_ptr, q, i)
		setup_debugfs_entry(q);
}

void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr)
{
	struct qdio_q *q;
	int i;

	for_each_input_queue(irq_ptr, q, i)
		debugfs_remove(q->debugfs_q);
	for_each_output_queue(irq_ptr, q, i)
		debugfs_remove(q->debugfs_q);
	debugfs_remove(irq_ptr->debugfs_perf);
	debugfs_remove(irq_ptr->debugfs_dev);
}

int __init qdio_debug_init(void)
{
	debugfs_root = debugfs_create_dir("qdio", NULL);

	qdio_dbf_setup = debug_register("qdio_setup", 16, 1, 16);
	debug_register_view(qdio_dbf_setup, &debug_hex_ascii_view);
	debug_set_level(qdio_dbf_setup, DBF_INFO);
	DBF_EVENT("dbf created\n");

	qdio_dbf_error = debug_register("qdio_error", 4, 1, 16);
	debug_register_view(qdio_dbf_error, &debug_hex_ascii_view);
	debug_set_level(qdio_dbf_error, DBF_INFO);
	DBF_ERROR("dbf created\n");
	return 0;
}

void qdio_debug_exit(void)
{
	qdio_clear_dbf_list();
	debugfs_remove(debugfs_root);
	if (qdio_dbf_setup)
		debug_unregister(qdio_dbf_setup);
	if (qdio_dbf_error)
		debug_unregister(qdio_dbf_error);
}
86
drivers/s390/cio/qdio_debug.h
Normal file
@@ -0,0 +1,86 @@
/*
 * Copyright IBM Corp. 2008
 *
 * Author: Jan Glauber (jang@linux.vnet.ibm.com)
 */
#ifndef QDIO_DEBUG_H
#define QDIO_DEBUG_H

#include <asm/debug.h>
#include <asm/qdio.h>
#include "qdio.h"

/* that gives us 15 characters in the text event views */
#define QDIO_DBF_LEN	16

extern debug_info_t *qdio_dbf_setup;
extern debug_info_t *qdio_dbf_error;

#define DBF_ERR		3	/* error conditions	*/
#define DBF_WARN	4	/* warning conditions	*/
#define DBF_INFO	6	/* informational	*/

#undef DBF_EVENT
#undef DBF_ERROR
#undef DBF_DEV_EVENT

#define DBF_EVENT(text...) \
	do { \
		char debug_buffer[QDIO_DBF_LEN]; \
		snprintf(debug_buffer, QDIO_DBF_LEN, text); \
		debug_text_event(qdio_dbf_setup, DBF_ERR, debug_buffer); \
	} while (0)
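
/*
 * Editor's note: DBF_EVENT takes printf-style arguments and truncates the
 * formatted text to QDIO_DBF_LEN - 1 characters, e.g. (taken from
 * qdio_debug.c):
 *
 *	DBF_EVENT("qfmt:%1d", init_data->q_format);
 */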

static inline void DBF_HEX(void *addr, int len)
{
	while (len > 0) {
		debug_event(qdio_dbf_setup, DBF_ERR, addr, len);
		len -= qdio_dbf_setup->buf_size;
		addr += qdio_dbf_setup->buf_size;
	}
}

#define DBF_ERROR(text...) \
	do { \
		char debug_buffer[QDIO_DBF_LEN]; \
		snprintf(debug_buffer, QDIO_DBF_LEN, text); \
		debug_text_event(qdio_dbf_error, DBF_ERR, debug_buffer); \
	} while (0)

static inline void DBF_ERROR_HEX(void *addr, int len)
{
	while (len > 0) {
		debug_event(qdio_dbf_error, DBF_ERR, addr, len);
		len -= qdio_dbf_error->buf_size;
		addr += qdio_dbf_error->buf_size;
	}
}

#define DBF_DEV_EVENT(level, device, text...) \
	do { \
		char debug_buffer[QDIO_DBF_LEN]; \
		if (debug_level_enabled(device->debug_area, level)) { \
			snprintf(debug_buffer, QDIO_DBF_LEN, text); \
			debug_text_event(device->debug_area, level, debug_buffer); \
		} \
	} while (0)

static inline void DBF_DEV_HEX(struct qdio_irq *dev, void *addr,
			       int len, int level)
{
	while (len > 0) {
		debug_event(dev->debug_area, level, addr, len);
		len -= dev->debug_area->buf_size;
		addr += dev->debug_area->buf_size;
	}
}

int qdio_allocate_dbf(struct qdio_initialize *init_data,
		       struct qdio_irq *irq_ptr);
void qdio_setup_debug_entries(struct qdio_irq *irq_ptr,
			      struct ccw_device *cdev);
void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr);
int qdio_debug_init(void);
void qdio_debug_exit(void);

#endif
1880
drivers/s390/cio/qdio_main.c
Normal file
File diff suppressed because it is too large
600
drivers/s390/cio/qdio_setup.c
Normal file
@@ -0,0 +1,600 @@
/*
 * qdio queue initialization
 *
 * Copyright IBM Corp. 2008
 * Author(s): Jan Glauber <jang@linux.vnet.ibm.com>
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <asm/qdio.h>

#include "cio.h"
#include "css.h"
#include "device.h"
#include "ioasm.h"
#include "chsc.h"
#include "qdio.h"
#include "qdio_debug.h"

#define QBUFF_PER_PAGE (PAGE_SIZE / sizeof(struct qdio_buffer))

static struct kmem_cache *qdio_q_cache;
static struct kmem_cache *qdio_aob_cache;

struct qaob *qdio_allocate_aob(void)
{
	return kmem_cache_zalloc(qdio_aob_cache, GFP_ATOMIC);
}
EXPORT_SYMBOL_GPL(qdio_allocate_aob);

void qdio_release_aob(struct qaob *aob)
{
	kmem_cache_free(qdio_aob_cache, aob);
}
EXPORT_SYMBOL_GPL(qdio_release_aob);

/**
 * qdio_free_buffers() - free qdio buffers
 * @buf: array of pointers to qdio buffers
 * @count: number of qdio buffers to free
 */
void qdio_free_buffers(struct qdio_buffer **buf, unsigned int count)
{
	int pos;

	for (pos = 0; pos < count; pos += QBUFF_PER_PAGE)
		free_page((unsigned long) buf[pos]);
}
EXPORT_SYMBOL_GPL(qdio_free_buffers);

/**
 * qdio_alloc_buffers() - allocate qdio buffers
 * @buf: array of pointers to qdio buffers
 * @count: number of qdio buffers to allocate
 */
int qdio_alloc_buffers(struct qdio_buffer **buf, unsigned int count)
{
	int pos;

	for (pos = 0; pos < count; pos += QBUFF_PER_PAGE) {
		buf[pos] = (void *) get_zeroed_page(GFP_KERNEL);
		if (!buf[pos]) {
			qdio_free_buffers(buf, count);
			return -ENOMEM;
		}
	}
	for (pos = 0; pos < count; pos++)
		if (pos % QBUFF_PER_PAGE)
			buf[pos] = buf[pos - 1] + 1;
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_alloc_buffers);
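
/*
 * Editor's usage sketch (illustrative only): callers allocate one pointer
 * array per queue and release it with the matching free call:
 *
 *	struct qdio_buffer *sbals[QDIO_MAX_BUFFERS_PER_Q];
 *
 *	if (qdio_alloc_buffers(sbals, QDIO_MAX_BUFFERS_PER_Q))
 *		return -ENOMEM;
 *	...
 *	qdio_free_buffers(sbals, QDIO_MAX_BUFFERS_PER_Q);
 */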

/**
 * qdio_reset_buffers() - reset qdio buffers
 * @buf: array of pointers to qdio buffers
 * @count: number of qdio buffers that will be zeroed
 */
void qdio_reset_buffers(struct qdio_buffer **buf, unsigned int count)
{
	int pos;

	for (pos = 0; pos < count; pos++)
		memset(buf[pos], 0, sizeof(struct qdio_buffer));
}
EXPORT_SYMBOL_GPL(qdio_reset_buffers);

/*
 * qebsm is only available under 64bit but the adapter sets the feature
 * flag anyway, so we manually override it.
 */
static inline int qebsm_possible(void)
{
#ifdef CONFIG_64BIT
	return css_general_characteristics.qebsm;
#endif
	return 0;
}

/*
 * qib_param_field: pointer to 128 bytes or NULL, if no param field
 * nr_input_qs: pointer to nr_queues*128 words of data or NULL
 */
static void set_impl_params(struct qdio_irq *irq_ptr,
			    unsigned int qib_param_field_format,
			    unsigned char *qib_param_field,
			    unsigned long *input_slib_elements,
			    unsigned long *output_slib_elements)
{
	struct qdio_q *q;
	int i, j;

	if (!irq_ptr)
		return;

	irq_ptr->qib.pfmt = qib_param_field_format;
	if (qib_param_field)
		memcpy(irq_ptr->qib.parm, qib_param_field,
		       QDIO_MAX_BUFFERS_PER_Q);

	if (!input_slib_elements)
		goto output;

	for_each_input_queue(irq_ptr, q, i) {
		for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
			q->slib->slibe[j].parms =
				input_slib_elements[i * QDIO_MAX_BUFFERS_PER_Q + j];
	}
output:
	if (!output_slib_elements)
		return;

	for_each_output_queue(irq_ptr, q, i) {
		for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
			q->slib->slibe[j].parms =
				output_slib_elements[i * QDIO_MAX_BUFFERS_PER_Q + j];
	}
}

static int __qdio_allocate_qs(struct qdio_q **irq_ptr_qs, int nr_queues)
{
	struct qdio_q *q;
	int i;

	for (i = 0; i < nr_queues; i++) {
		q = kmem_cache_alloc(qdio_q_cache, GFP_KERNEL);
		if (!q)
			return -ENOMEM;

		q->slib = (struct slib *) __get_free_page(GFP_KERNEL);
		if (!q->slib) {
			kmem_cache_free(qdio_q_cache, q);
			return -ENOMEM;
		}
		irq_ptr_qs[i] = q;
	}
	return 0;
}

int qdio_allocate_qs(struct qdio_irq *irq_ptr, int nr_input_qs, int nr_output_qs)
{
	int rc;

	rc = __qdio_allocate_qs(irq_ptr->input_qs, nr_input_qs);
	if (rc)
		return rc;
	rc = __qdio_allocate_qs(irq_ptr->output_qs, nr_output_qs);
	return rc;
}

static void setup_queues_misc(struct qdio_q *q, struct qdio_irq *irq_ptr,
			      qdio_handler_t *handler, int i)
{
	struct slib *slib = q->slib;

	/* queue must be cleared for qdio_establish */
	memset(q, 0, sizeof(*q));
	memset(slib, 0, PAGE_SIZE);
	q->slib = slib;
	q->irq_ptr = irq_ptr;
	q->mask = 1 << (31 - i);
	q->nr = i;
	q->handler = handler;
}

static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr,
				void **sbals_array, int i)
{
	struct qdio_q *prev;
	int j;

	DBF_HEX(&q, sizeof(void *));
	q->sl = (struct sl *)((char *)q->slib + PAGE_SIZE / 2);

	/* fill in sbal */
	for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
		q->sbal[j] = *sbals_array++;

	/* fill in slib */
	if (i > 0) {
		prev = (q->is_input_q) ? irq_ptr->input_qs[i - 1]
			: irq_ptr->output_qs[i - 1];
		prev->slib->nsliba = (unsigned long)q->slib;
	}

	q->slib->sla = (unsigned long)q->sl;
	q->slib->slsba = (unsigned long)&q->slsb.val[0];

	/* fill in sl */
	for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
		q->sl->element[j].sbal = (unsigned long)q->sbal[j];
}

static void setup_queues(struct qdio_irq *irq_ptr,
			 struct qdio_initialize *qdio_init)
{
	struct qdio_q *q;
	void **input_sbal_array = qdio_init->input_sbal_addr_array;
	void **output_sbal_array = qdio_init->output_sbal_addr_array;
	struct qdio_outbuf_state *output_sbal_state_array =
				  qdio_init->output_sbal_state_array;
	int i;

	for_each_input_queue(irq_ptr, q, i) {
		DBF_EVENT("inq:%1d", i);
		setup_queues_misc(q, irq_ptr, qdio_init->input_handler, i);

		q->is_input_q = 1;
		q->u.in.queue_start_poll = qdio_init->queue_start_poll_array ?
				qdio_init->queue_start_poll_array[i] : NULL;

		setup_storage_lists(q, irq_ptr, input_sbal_array, i);
		input_sbal_array += QDIO_MAX_BUFFERS_PER_Q;

		if (is_thinint_irq(irq_ptr)) {
			tasklet_init(&q->tasklet, tiqdio_inbound_processing,
				     (unsigned long) q);
		} else {
			tasklet_init(&q->tasklet, qdio_inbound_processing,
				     (unsigned long) q);
		}
	}

	for_each_output_queue(irq_ptr, q, i) {
		DBF_EVENT("outq:%1d", i);
		setup_queues_misc(q, irq_ptr, qdio_init->output_handler, i);

		q->u.out.sbal_state = output_sbal_state_array;
		output_sbal_state_array += QDIO_MAX_BUFFERS_PER_Q;

		q->is_input_q = 0;
		q->u.out.scan_threshold = qdio_init->scan_threshold;
		setup_storage_lists(q, irq_ptr, output_sbal_array, i);
		output_sbal_array += QDIO_MAX_BUFFERS_PER_Q;

		tasklet_init(&q->tasklet, qdio_outbound_processing,
			     (unsigned long) q);
		setup_timer(&q->u.out.timer, (void(*)(unsigned long))
			    &qdio_outbound_timer, (unsigned long)q);
	}
}

static void process_ac_flags(struct qdio_irq *irq_ptr, unsigned char qdioac)
{
	if (qdioac & AC1_SIGA_INPUT_NEEDED)
		irq_ptr->siga_flag.input = 1;
	if (qdioac & AC1_SIGA_OUTPUT_NEEDED)
		irq_ptr->siga_flag.output = 1;
	if (qdioac & AC1_SIGA_SYNC_NEEDED)
		irq_ptr->siga_flag.sync = 1;
	if (!(qdioac & AC1_AUTOMATIC_SYNC_ON_THININT))
		irq_ptr->siga_flag.sync_after_ai = 1;
	if (!(qdioac & AC1_AUTOMATIC_SYNC_ON_OUT_PCI))
		irq_ptr->siga_flag.sync_out_after_pci = 1;
}

static void check_and_setup_qebsm(struct qdio_irq *irq_ptr,
				  unsigned char qdioac, unsigned long token)
{
	if (!(irq_ptr->qib.rflags & QIB_RFLAGS_ENABLE_QEBSM))
		goto no_qebsm;
	if (!(qdioac & AC1_SC_QEBSM_AVAILABLE) ||
	    (!(qdioac & AC1_SC_QEBSM_ENABLED)))
		goto no_qebsm;

	irq_ptr->sch_token = token;

	DBF_EVENT("V=V:1");
	DBF_EVENT("%8lx", irq_ptr->sch_token);
	return;

no_qebsm:
	irq_ptr->sch_token = 0;
	irq_ptr->qib.rflags &= ~QIB_RFLAGS_ENABLE_QEBSM;
	DBF_EVENT("noV=V");
}

/*
 * If there is a qdio_irq we use the chsc_page and store the information
 * in the qdio_irq, otherwise we copy it to the specified structure.
 */
int qdio_setup_get_ssqd(struct qdio_irq *irq_ptr,
			struct subchannel_id *schid,
			struct qdio_ssqd_desc *data)
{
	struct chsc_ssqd_area *ssqd;
	int rc;

	DBF_EVENT("getssqd:%4x", schid->sch_no);
	if (!irq_ptr) {
		ssqd = (struct chsc_ssqd_area *)__get_free_page(GFP_KERNEL);
		if (!ssqd)
			return -ENOMEM;
	} else {
		ssqd = (struct chsc_ssqd_area *)irq_ptr->chsc_page;
	}

	rc = chsc_ssqd(*schid, ssqd);
	if (rc)
		goto out;

	if (!(ssqd->qdio_ssqd.flags & CHSC_FLAG_QDIO_CAPABILITY) ||
	    !(ssqd->qdio_ssqd.flags & CHSC_FLAG_VALIDITY) ||
	    (ssqd->qdio_ssqd.sch != schid->sch_no))
		rc = -EINVAL;

	if (!rc)
		memcpy(data, &ssqd->qdio_ssqd, sizeof(*data));

out:
	if (!irq_ptr)
		free_page((unsigned long)ssqd);

	return rc;
}
|
||||
|
||||
void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr)
|
||||
{
|
||||
unsigned char qdioac;
|
||||
int rc;
|
||||
|
||||
rc = qdio_setup_get_ssqd(irq_ptr, &irq_ptr->schid, &irq_ptr->ssqd_desc);
|
||||
if (rc) {
|
||||
DBF_ERROR("%4x ssqd ERR", irq_ptr->schid.sch_no);
|
||||
DBF_ERROR("rc:%x", rc);
|
||||
/* all flags set, worst case */
|
||||
qdioac = AC1_SIGA_INPUT_NEEDED | AC1_SIGA_OUTPUT_NEEDED |
|
||||
AC1_SIGA_SYNC_NEEDED;
|
||||
} else
|
||||
qdioac = irq_ptr->ssqd_desc.qdioac1;
|
||||
|
||||
check_and_setup_qebsm(irq_ptr, qdioac, irq_ptr->ssqd_desc.sch_token);
|
||||
process_ac_flags(irq_ptr, qdioac);
|
||||
DBF_EVENT("ac 1:%2x 2:%4x", qdioac, irq_ptr->ssqd_desc.qdioac2);
|
||||
DBF_EVENT("3:%4x qib:%4x", irq_ptr->ssqd_desc.qdioac3, irq_ptr->qib.ac);
|
||||
}
|
||||
|
||||
void qdio_release_memory(struct qdio_irq *irq_ptr)
|
||||
{
|
||||
struct qdio_q *q;
|
||||
int i;
|
||||
|
||||
/*
|
||||
* Must check queue array manually since irq_ptr->nr_input_queues /
|
||||
* irq_ptr->nr_input_queues may not yet be set.
|
||||
*/
|
||||
for (i = 0; i < QDIO_MAX_QUEUES_PER_IRQ; i++) {
|
||||
q = irq_ptr->input_qs[i];
|
||||
if (q) {
|
||||
free_page((unsigned long) q->slib);
|
||||
kmem_cache_free(qdio_q_cache, q);
|
||||
}
|
||||
}
|
||||
for (i = 0; i < QDIO_MAX_QUEUES_PER_IRQ; i++) {
|
||||
q = irq_ptr->output_qs[i];
|
||||
if (q) {
|
||||
if (q->u.out.use_cq) {
|
||||
int n;
|
||||
|
||||
for (n = 0; n < QDIO_MAX_BUFFERS_PER_Q; ++n) {
|
||||
struct qaob *aob = q->u.out.aobs[n];
|
||||
if (aob) {
|
||||
qdio_release_aob(aob);
|
||||
q->u.out.aobs[n] = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
qdio_disable_async_operation(&q->u.out);
|
||||
}
|
||||
free_page((unsigned long) q->slib);
|
||||
kmem_cache_free(qdio_q_cache, q);
|
||||
}
|
||||
}
|
||||
free_page((unsigned long) irq_ptr->qdr);
|
||||
free_page(irq_ptr->chsc_page);
|
||||
free_page((unsigned long) irq_ptr);
|
||||
}
|
||||
|
||||
static void __qdio_allocate_fill_qdr(struct qdio_irq *irq_ptr,
|
||||
struct qdio_q **irq_ptr_qs,
|
||||
int i, int nr)
|
||||
{
|
||||
irq_ptr->qdr->qdf0[i + nr].sliba =
|
||||
(unsigned long)irq_ptr_qs[i]->slib;
|
||||
|
||||
irq_ptr->qdr->qdf0[i + nr].sla =
|
||||
(unsigned long)irq_ptr_qs[i]->sl;
|
||||
|
||||
irq_ptr->qdr->qdf0[i + nr].slsba =
|
||||
(unsigned long)&irq_ptr_qs[i]->slsb.val[0];
|
||||
|
||||
irq_ptr->qdr->qdf0[i + nr].akey = PAGE_DEFAULT_KEY >> 4;
|
||||
irq_ptr->qdr->qdf0[i + nr].bkey = PAGE_DEFAULT_KEY >> 4;
|
||||
irq_ptr->qdr->qdf0[i + nr].ckey = PAGE_DEFAULT_KEY >> 4;
|
||||
irq_ptr->qdr->qdf0[i + nr].dkey = PAGE_DEFAULT_KEY >> 4;
|
||||
}
|
||||
|
||||
static void setup_qdr(struct qdio_irq *irq_ptr,
|
||||
struct qdio_initialize *qdio_init)
|
||||
{
|
||||
int i;
|
||||
|
||||
irq_ptr->qdr->qfmt = qdio_init->q_format;
|
||||
irq_ptr->qdr->ac = qdio_init->qdr_ac;
|
||||
irq_ptr->qdr->iqdcnt = qdio_init->no_input_qs;
|
||||
irq_ptr->qdr->oqdcnt = qdio_init->no_output_qs;
|
||||
irq_ptr->qdr->iqdsz = sizeof(struct qdesfmt0) / 4; /* size in words */
|
||||
irq_ptr->qdr->oqdsz = sizeof(struct qdesfmt0) / 4;
|
||||
irq_ptr->qdr->qiba = (unsigned long)&irq_ptr->qib;
|
||||
irq_ptr->qdr->qkey = PAGE_DEFAULT_KEY >> 4;
|
||||
|
||||
for (i = 0; i < qdio_init->no_input_qs; i++)
|
||||
__qdio_allocate_fill_qdr(irq_ptr, irq_ptr->input_qs, i, 0);
|
||||
|
||||
for (i = 0; i < qdio_init->no_output_qs; i++)
|
||||
__qdio_allocate_fill_qdr(irq_ptr, irq_ptr->output_qs, i,
|
||||
qdio_init->no_input_qs);
|
||||
}
|
||||
|
||||
static void setup_qib(struct qdio_irq *irq_ptr,
|
||||
struct qdio_initialize *init_data)
|
||||
{
|
||||
if (qebsm_possible())
|
||||
irq_ptr->qib.rflags |= QIB_RFLAGS_ENABLE_QEBSM;
|
||||
|
||||
irq_ptr->qib.rflags |= init_data->qib_rflags;
|
||||
|
||||
irq_ptr->qib.qfmt = init_data->q_format;
|
||||
if (init_data->no_input_qs)
|
||||
irq_ptr->qib.isliba =
|
||||
(unsigned long)(irq_ptr->input_qs[0]->slib);
|
||||
if (init_data->no_output_qs)
|
||||
irq_ptr->qib.osliba =
|
||||
(unsigned long)(irq_ptr->output_qs[0]->slib);
|
||||
memcpy(irq_ptr->qib.ebcnam, init_data->adapter_name, 8);
|
||||
}
|
||||
|
||||
int qdio_setup_irq(struct qdio_initialize *init_data)
{
	struct ciw *ciw;
	struct qdio_irq *irq_ptr = init_data->cdev->private->qdio_data;
	int rc;

	memset(&irq_ptr->qib, 0, sizeof(irq_ptr->qib));
	memset(&irq_ptr->siga_flag, 0, sizeof(irq_ptr->siga_flag));
	memset(&irq_ptr->ccw, 0, sizeof(irq_ptr->ccw));
	memset(&irq_ptr->ssqd_desc, 0, sizeof(irq_ptr->ssqd_desc));
	memset(&irq_ptr->perf_stat, 0, sizeof(irq_ptr->perf_stat));

	irq_ptr->debugfs_dev = irq_ptr->debugfs_perf = NULL;
	irq_ptr->sch_token = irq_ptr->state = irq_ptr->perf_stat_enabled = 0;

	/* wipes qib.ac, required by ar7063 */
	memset(irq_ptr->qdr, 0, sizeof(struct qdr));

	irq_ptr->int_parm = init_data->int_parm;
	irq_ptr->nr_input_qs = init_data->no_input_qs;
	irq_ptr->nr_output_qs = init_data->no_output_qs;
	irq_ptr->cdev = init_data->cdev;
	ccw_device_get_schid(irq_ptr->cdev, &irq_ptr->schid);
	setup_queues(irq_ptr, init_data);

	setup_qib(irq_ptr, init_data);
	qdio_setup_thinint(irq_ptr);
	set_impl_params(irq_ptr, init_data->qib_param_field_format,
			init_data->qib_param_field,
			init_data->input_slib_elements,
			init_data->output_slib_elements);

	/* fill input and output descriptors */
	setup_qdr(irq_ptr, init_data);

	/* qdr, qib, sls, slsbs, slibs, sbales are filled now */

	/* get qdio commands */
	ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_EQUEUE);
	if (!ciw) {
		DBF_ERROR("%4x NO EQ", irq_ptr->schid.sch_no);
		rc = -EINVAL;
		goto out_err;
	}
	irq_ptr->equeue = *ciw;

	ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_AQUEUE);
	if (!ciw) {
		DBF_ERROR("%4x NO AQ", irq_ptr->schid.sch_no);
		rc = -EINVAL;
		goto out_err;
	}
	irq_ptr->aqueue = *ciw;

	/* set new interrupt handler */
	irq_ptr->orig_handler = init_data->cdev->handler;
	init_data->cdev->handler = qdio_int_handler;
	return 0;
out_err:
	qdio_release_memory(irq_ptr);
	return rc;
}

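/*
 * Illustrative sketch, not part of the original file: the order in
 * which a qdio user such as qeth or zfcp reaches qdio_setup_irq()
 * above. Error unwinding is omitted; the function names are the
 * public qdio entry points from <asm/qdio.h>.
 */
#if 0
static int example_bring_up(struct qdio_initialize *init_data)
{
	int rc;

	rc = qdio_allocate(init_data);		/* qdr, queues, chsc page */
	if (rc)
		return rc;
	rc = qdio_establish(init_data);		/* ends up in qdio_setup_irq() */
	if (rc)
		return rc;
	return qdio_activate(init_data->cdev);	/* start queue processing */
}
#endif
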
void qdio_print_subchannel_info(struct qdio_irq *irq_ptr,
				struct ccw_device *cdev)
{
	char s[80];

	snprintf(s, 80, "qdio: %s %s on SC %x using "
		 "AI:%d QEBSM:%d PRI:%d TDD:%d SIGA:%s%s%s%s%s\n",
		 dev_name(&cdev->dev),
		 (irq_ptr->qib.qfmt == QDIO_QETH_QFMT) ? "OSA" :
			((irq_ptr->qib.qfmt == QDIO_ZFCP_QFMT) ? "ZFCP" : "HS"),
		 irq_ptr->schid.sch_no,
		 is_thinint_irq(irq_ptr),
		 (irq_ptr->sch_token) ? 1 : 0,
		 (irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED) ? 1 : 0,
		 css_general_characteristics.aif_tdd,
		 (irq_ptr->siga_flag.input) ? "R" : " ",
		 (irq_ptr->siga_flag.output) ? "W" : " ",
		 (irq_ptr->siga_flag.sync) ? "S" : " ",
		 (irq_ptr->siga_flag.sync_after_ai) ? "A" : " ",
		 (irq_ptr->siga_flag.sync_out_after_pci) ? "P" : " ");
	printk(KERN_INFO "%s", s);
}

int qdio_enable_async_operation(struct qdio_output_q *outq)
{
	outq->aobs = kzalloc(sizeof(struct qaob *) * QDIO_MAX_BUFFERS_PER_Q,
			     GFP_ATOMIC);
	if (!outq->aobs) {
		outq->use_cq = 0;
		return -ENOMEM;
	}
	outq->use_cq = 1;
	return 0;
}

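/*
 * Illustrative sketch, not part of the original file: a failure of
 * qdio_enable_async_operation() can be treated as a soft fallback --
 * the queue keeps working, just without completion-queue (AOB)
 * support, since use_cq is left at 0 on the error path above.
 */
#if 0
static void example_try_enable_cq(struct qdio_q *q)
{
	if (qdio_enable_async_operation(&q->u.out) < 0)
		pr_warn("qdio: no memory for AOBs, using synchronous completion\n");
}
#endif
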
void qdio_disable_async_operation(struct qdio_output_q *q)
{
	kfree(q->aobs);
	q->aobs = NULL;
	q->use_cq = 0;
}

int __init qdio_setup_init(void)
{
	int rc;

	qdio_q_cache = kmem_cache_create("qdio_q", sizeof(struct qdio_q),
					 256, 0, NULL);
	if (!qdio_q_cache)
		return -ENOMEM;

	qdio_aob_cache = kmem_cache_create("qdio_aob",
					   sizeof(struct qaob),
					   sizeof(struct qaob),
					   0,
					   NULL);
	if (!qdio_aob_cache) {
		rc = -ENOMEM;
		goto free_qdio_q_cache;
	}

	/* Check for OSA/FCP thin interrupts (bit 67). */
	DBF_EVENT("thinint:%1d",
		  (css_general_characteristics.aif_osa) ? 1 : 0);

	/* Check for QEBSM support in general (bit 58). */
	DBF_EVENT("cssQEBSM:%1d", (qebsm_possible()) ? 1 : 0);
	rc = 0;
out:
	return rc;
free_qdio_q_cache:
	kmem_cache_destroy(qdio_q_cache);
	goto out;
}

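/*
 * Illustrative sketch, not part of the original file: the 256-byte
 * alignment requested for the "qdio_q" cache above means the low
 * eight bits of every queue address are zero; the WARN_ON below is a
 * hypothetical way to assert that invariant.
 */
#if 0
static void example_check_queue_alignment(struct qdio_q *q)
{
	WARN_ON((unsigned long)q & 0xff);
}
#endif
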
void qdio_setup_exit(void)
{
	kmem_cache_destroy(qdio_aob_cache);
	kmem_cache_destroy(qdio_q_cache);
}

299
drivers/s390/cio/qdio_thinint.c
Normal file
@@ -0,0 +1,299 @@
/*
 * Copyright IBM Corp. 2000, 2009
 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
 *	      Cornelia Huck <cornelia.huck@de.ibm.com>
 *	      Jan Glauber <jang@linux.vnet.ibm.com>
 */
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/kernel_stat.h>
#include <linux/atomic.h>
#include <asm/debug.h>
#include <asm/qdio.h>
#include <asm/airq.h>
#include <asm/isc.h>

#include "cio.h"
#include "ioasm.h"
#include "qdio.h"
#include "qdio_debug.h"

/*
 * Restriction: only 63 iqdio subchannels can have their own indicator;
 * after that, subsequent subchannels share one indicator.
 */
#define TIQDIO_NR_NONSHARED_IND		63
#define TIQDIO_NR_INDICATORS		(TIQDIO_NR_NONSHARED_IND + 1)
#define TIQDIO_SHARED_IND		63

/* device state change indicators */
struct indicator_t {
	u32 ind;	/* u32 because of compare-and-swap performance */
	atomic_t count; /* use count, 0 or 1 for non-shared indicators */
};

/* list of thin interrupt input queues */
static LIST_HEAD(tiq_list);
static DEFINE_MUTEX(tiq_list_lock);

/* Adapter interrupt definitions */
static void tiqdio_thinint_handler(struct airq_struct *airq);

static struct airq_struct tiqdio_airq = {
	.handler = tiqdio_thinint_handler,
	.isc = QDIO_AIRQ_ISC,
};

static struct indicator_t *q_indicators;

u64 last_ai_time;

/* returns addr for the device state change indicator */
static u32 *get_indicator(void)
{
	int i;

	for (i = 0; i < TIQDIO_NR_NONSHARED_IND; i++)
		if (!atomic_read(&q_indicators[i].count)) {
			atomic_set(&q_indicators[i].count, 1);
			return &q_indicators[i].ind;
		}

	/* use the shared indicator */
	atomic_inc(&q_indicators[TIQDIO_SHARED_IND].count);
	return &q_indicators[TIQDIO_SHARED_IND].ind;
}

static void put_indicator(u32 *addr)
{
	int i;

	if (!addr)
		return;
	i = ((unsigned long)addr - (unsigned long)q_indicators) /
		sizeof(struct indicator_t);
	atomic_dec(&q_indicators[i].count);
}

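/*
 * Illustrative sketch, not part of the original file: the indicator
 * lifecycle as driven by qdio_setup_thinint()/qdio_shutdown_thinint().
 * The first 63 users receive a private DSCI; any further user shares
 * slot TIQDIO_SHARED_IND, whose count acts as a plain reference count.
 */
#if 0
static void example_indicator_round_trip(void)
{
	u32 *dsci = get_indicator();

	/* ... point the subchannel's indicator address at dsci ... */
	put_indicator(dsci);
}
#endif
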
void tiqdio_add_input_queues(struct qdio_irq *irq_ptr)
{
	mutex_lock(&tiq_list_lock);
	list_add_rcu(&irq_ptr->input_qs[0]->entry, &tiq_list);
	mutex_unlock(&tiq_list_lock);
	xchg(irq_ptr->dsci, 1 << 7);
}

void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
{
	struct qdio_q *q;

	q = irq_ptr->input_qs[0];
	/* if establish triggered an error */
	if (!q || !q->entry.prev || !q->entry.next)
		return;

	mutex_lock(&tiq_list_lock);
	list_del_rcu(&q->entry);
	mutex_unlock(&tiq_list_lock);
	synchronize_rcu();
}

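/*
 * Illustrative sketch, not part of the original file: the reader side
 * that the list_del_rcu()/synchronize_rcu() pair above protects is
 * tiqdio_thinint_handler() further below, which walks tiq_list under
 * rcu_read_lock() only.
 */
#if 0
static void example_reader_side(void)
{
	struct qdio_q *q;

	rcu_read_lock();
	list_for_each_entry_rcu(q, &tiq_list, entry) {
		/* q cannot be unlinked and freed while we are in here */
	}
	rcu_read_unlock();
}
#endif
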
static inline int has_multiple_inq_on_dsci(struct qdio_irq *irq_ptr)
{
	return irq_ptr->nr_input_qs > 1;
}

static inline int references_shared_dsci(struct qdio_irq *irq_ptr)
{
	return irq_ptr->dsci == &q_indicators[TIQDIO_SHARED_IND].ind;
}

static inline int shared_ind(struct qdio_irq *irq_ptr)
{
	return references_shared_dsci(irq_ptr) ||
		has_multiple_inq_on_dsci(irq_ptr);
}

void clear_nonshared_ind(struct qdio_irq *irq_ptr)
{
	if (!is_thinint_irq(irq_ptr))
		return;
	if (shared_ind(irq_ptr))
		return;
	xchg(irq_ptr->dsci, 0);
}

int test_nonshared_ind(struct qdio_irq *irq_ptr)
{
	if (!is_thinint_irq(irq_ptr))
		return 0;
	if (shared_ind(irq_ptr))
		return 0;
	if (*irq_ptr->dsci)
		return 1;
	else
		return 0;
}

static inline u32 clear_shared_ind(void)
{
	if (!atomic_read(&q_indicators[TIQDIO_SHARED_IND].count))
		return 0;
	return xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 0);
}

static inline void tiqdio_call_inq_handlers(struct qdio_irq *irq)
{
	struct qdio_q *q;
	int i;

	for_each_input_queue(irq, q, i) {
		if (!references_shared_dsci(irq) &&
		    has_multiple_inq_on_dsci(irq))
			xchg(q->irq_ptr->dsci, 0);

		if (q->u.in.queue_start_poll) {
			/* skip if polling is enabled or already in work */
			if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
					     &q->u.in.queue_irq_state)) {
				qperf_inc(q, int_discarded);
				continue;
			}

			/* avoid dsci clear here, done after processing */
			q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr,
						 q->irq_ptr->int_parm);
		} else {
			if (!shared_ind(q->irq_ptr))
				xchg(q->irq_ptr->dsci, 0);

			/*
			 * Call inbound processing but not directly
			 * since that could starve other thinint queues.
			 */
			tasklet_schedule(&q->tasklet);
		}
	}
}

/**
 * tiqdio_thinint_handler - thin interrupt handler for qdio
 * @airq: pointer to adapter interrupt descriptor
 */
static void tiqdio_thinint_handler(struct airq_struct *airq)
{
	u32 si_used = clear_shared_ind();
	struct qdio_q *q;

	last_ai_time = S390_lowcore.int_clock;
	inc_irq_stat(IRQIO_QAI);

	/* protect tiq_list entries, only changed in activate or shutdown */
	rcu_read_lock();

	/* check for work on all inbound thinint queues */
	list_for_each_entry_rcu(q, &tiq_list, entry) {
		struct qdio_irq *irq;

		/* only process queues from changed sets */
		irq = q->irq_ptr;
		if (unlikely(references_shared_dsci(irq))) {
			if (!si_used)
				continue;
		} else if (!*irq->dsci)
			continue;

		tiqdio_call_inq_handlers(irq);

		qperf_inc(q, adapter_int);
	}
	rcu_read_unlock();
}

static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset)
{
	struct chsc_scssc_area *scssc = (void *)irq_ptr->chsc_page;
	u64 summary_indicator_addr, subchannel_indicator_addr;
	int rc;

	if (reset) {
		summary_indicator_addr = 0;
		subchannel_indicator_addr = 0;
	} else {
		summary_indicator_addr = virt_to_phys(tiqdio_airq.lsi_ptr);
		subchannel_indicator_addr = virt_to_phys(irq_ptr->dsci);
	}

	rc = chsc_sadc(irq_ptr->schid, scssc, summary_indicator_addr,
		       subchannel_indicator_addr);
	if (rc) {
		DBF_ERROR("%4x SSI r:%4x", irq_ptr->schid.sch_no,
			  scssc->response.code);
		goto out;
	}

	DBF_EVENT("setscind");
	DBF_HEX(&summary_indicator_addr, sizeof(summary_indicator_addr));
	DBF_HEX(&subchannel_indicator_addr, sizeof(subchannel_indicator_addr));
out:
	return rc;
}

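/*
 * Illustrative sketch, not part of the original file: the two call
 * sites of set_subchannel_ind() below -- establish programs the
 * summary and subchannel indicators, shutdown resets both to 0.
 */
#if 0
static void example_thinint_lifetime(struct qdio_irq *irq_ptr)
{
	int rc = qdio_establish_thinint(irq_ptr); /* set_subchannel_ind(.., 0) */

	if (!rc) {
		/* ... subchannel is active ... */
		qdio_shutdown_thinint(irq_ptr);	  /* set_subchannel_ind(.., 1) */
	}
}
#endif
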
/* allocate non-shared indicators and shared indicator */
int __init tiqdio_allocate_memory(void)
{
	q_indicators = kzalloc(sizeof(struct indicator_t) * TIQDIO_NR_INDICATORS,
			       GFP_KERNEL);
	if (!q_indicators)
		return -ENOMEM;
	return 0;
}

void tiqdio_free_memory(void)
{
	kfree(q_indicators);
}

int __init tiqdio_register_thinints(void)
{
	int rc;

	rc = register_adapter_interrupt(&tiqdio_airq);
	if (rc) {
		DBF_EVENT("RTI:%x", rc);
		return rc;
	}
	return 0;
}

int qdio_establish_thinint(struct qdio_irq *irq_ptr)
{
	if (!is_thinint_irq(irq_ptr))
		return 0;
	return set_subchannel_ind(irq_ptr, 0);
}

void qdio_setup_thinint(struct qdio_irq *irq_ptr)
{
	if (!is_thinint_irq(irq_ptr))
		return;
	irq_ptr->dsci = get_indicator();
	DBF_HEX(&irq_ptr->dsci, sizeof(void *));
}

void qdio_shutdown_thinint(struct qdio_irq *irq_ptr)
{
	if (!is_thinint_irq(irq_ptr))
		return;

	/* reset adapter interrupt indicators */
	set_subchannel_ind(irq_ptr, 1);
	put_indicator(irq_ptr->dsci);
}

void __exit tiqdio_unregister_thinints(void)
{
	WARN_ON(!list_empty(&tiq_list));
	unregister_adapter_interrupt(&tiqdio_airq);
}

288
drivers/s390/cio/scm.c
Normal file
@@ -0,0 +1,288 @@
/*
 * Recognize and maintain s390 storage class memory.
 *
 * Copyright IBM Corp. 2012
 * Author(s): Sebastian Ott <sebott@linux.vnet.ibm.com>
 */

#include <linux/device.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/err.h>
#include <asm/eadm.h>
#include "chsc.h"

static struct device *scm_root;

#define to_scm_dev(n) container_of(n, struct scm_device, dev)
#define to_scm_drv(d) container_of(d, struct scm_driver, drv)

static int scmdev_probe(struct device *dev)
{
	struct scm_device *scmdev = to_scm_dev(dev);
	struct scm_driver *scmdrv = to_scm_drv(dev->driver);

	return scmdrv->probe ? scmdrv->probe(scmdev) : -ENODEV;
}

static int scmdev_remove(struct device *dev)
{
	struct scm_device *scmdev = to_scm_dev(dev);
	struct scm_driver *scmdrv = to_scm_drv(dev->driver);

	return scmdrv->remove ? scmdrv->remove(scmdev) : -ENODEV;
}

static int scmdev_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	return add_uevent_var(env, "MODALIAS=scm:scmdev");
}

static struct bus_type scm_bus_type = {
	.name  = "scm",
	.probe = scmdev_probe,
	.remove = scmdev_remove,
	.uevent = scmdev_uevent,
};

/**
 * scm_driver_register() - register a scm driver
 * @scmdrv: driver to be registered
 */
int scm_driver_register(struct scm_driver *scmdrv)
{
	struct device_driver *drv = &scmdrv->drv;

	drv->bus = &scm_bus_type;

	return driver_register(drv);
}
EXPORT_SYMBOL_GPL(scm_driver_register);

/**
 * scm_driver_unregister() - deregister a scm driver
 * @scmdrv: driver to be deregistered
 */
void scm_driver_unregister(struct scm_driver *scmdrv)
{
	driver_unregister(&scmdrv->drv);
}
EXPORT_SYMBOL_GPL(scm_driver_unregister);

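/*
 * Illustrative sketch, not part of the original file: a minimal driver
 * binding against the scm bus. "example_scm" and example_probe() are
 * hypothetical names; struct scm_driver comes from <asm/eadm.h>.
 */
#if 0
static int example_probe(struct scm_device *scmdev)
{
	return 0;	/* claim every scm device */
}

static struct scm_driver example_scm_driver = {
	.drv = {
		.name = "example_scm",
		.owner = THIS_MODULE,
	},
	.probe = example_probe,
};

static int __init example_init(void)
{
	return scm_driver_register(&example_scm_driver);
}
#endif
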
void scm_irq_handler(struct aob *aob, int error)
{
	struct aob_rq_header *aobrq = (void *) aob->request.data;
	struct scm_device *scmdev = aobrq->scmdev;
	struct scm_driver *scmdrv = to_scm_drv(scmdev->dev.driver);

	scmdrv->handler(scmdev, aobrq->data, error);
}
EXPORT_SYMBOL_GPL(scm_irq_handler);

#define scm_attr(name)							\
static ssize_t show_##name(struct device *dev,				\
			   struct device_attribute *attr, char *buf)	\
{									\
	struct scm_device *scmdev = to_scm_dev(dev);			\
	int ret;							\
									\
	device_lock(dev);						\
	ret = sprintf(buf, "%u\n", scmdev->attrs.name);			\
	device_unlock(dev);						\
									\
	return ret;							\
}									\
static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL);

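/*
 * Illustrative expansion, not part of the original file: what
 * scm_attr(persistence) below produces, written out by hand.
 */
#if 0
static ssize_t show_persistence(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct scm_device *scmdev = to_scm_dev(dev);
	int ret;

	device_lock(dev);
	ret = sprintf(buf, "%u\n", scmdev->attrs.persistence);
	device_unlock(dev);

	return ret;
}
static DEVICE_ATTR(persistence, S_IRUGO, show_persistence, NULL);
#endif
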
scm_attr(persistence);
scm_attr(oper_state);
scm_attr(data_state);
scm_attr(rank);
scm_attr(release);
scm_attr(res_id);

static struct attribute *scmdev_attrs[] = {
	&dev_attr_persistence.attr,
	&dev_attr_oper_state.attr,
	&dev_attr_data_state.attr,
	&dev_attr_rank.attr,
	&dev_attr_release.attr,
	&dev_attr_res_id.attr,
	NULL,
};

static struct attribute_group scmdev_attr_group = {
	.attrs = scmdev_attrs,
};

static const struct attribute_group *scmdev_attr_groups[] = {
	&scmdev_attr_group,
	NULL,
};

static void scmdev_release(struct device *dev)
{
	struct scm_device *scmdev = to_scm_dev(dev);

	kfree(scmdev);
}

static void scmdev_setup(struct scm_device *scmdev, struct sale *sale,
			 unsigned int size, unsigned int max_blk_count)
{
	dev_set_name(&scmdev->dev, "%016llx", (unsigned long long) sale->sa);
	scmdev->nr_max_block = max_blk_count;
	scmdev->address = sale->sa;
	scmdev->size = 1UL << size;
	scmdev->attrs.rank = sale->rank;
	scmdev->attrs.persistence = sale->p;
	scmdev->attrs.oper_state = sale->op_state;
	scmdev->attrs.data_state = sale->data_state;
	scmdev->attrs.release = sale->r;
	scmdev->attrs.res_id = sale->rid;
	scmdev->dev.parent = scm_root;
	scmdev->dev.bus = &scm_bus_type;
	scmdev->dev.release = scmdev_release;
	scmdev->dev.groups = scmdev_attr_groups;
}

/*
 * Check for state-changes, notify the driver and userspace.
 */
static void scmdev_update(struct scm_device *scmdev, struct sale *sale)
{
	struct scm_driver *scmdrv;
	bool changed;

	device_lock(&scmdev->dev);
	changed = scmdev->attrs.rank != sale->rank ||
		  scmdev->attrs.oper_state != sale->op_state;
	scmdev->attrs.rank = sale->rank;
	scmdev->attrs.oper_state = sale->op_state;
	if (!scmdev->dev.driver)
		goto out;
	scmdrv = to_scm_drv(scmdev->dev.driver);
	if (changed && scmdrv->notify)
		scmdrv->notify(scmdev, SCM_CHANGE);
out:
	device_unlock(&scmdev->dev);
	if (changed)
		kobject_uevent(&scmdev->dev.kobj, KOBJ_CHANGE);
}

static int check_address(struct device *dev, void *data)
{
	struct scm_device *scmdev = to_scm_dev(dev);
	struct sale *sale = data;

	return scmdev->address == sale->sa;
}

static struct scm_device *scmdev_find(struct sale *sale)
{
	struct device *dev;

	dev = bus_find_device(&scm_bus_type, NULL, sale, check_address);

	return dev ? to_scm_dev(dev) : NULL;
}

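/*
 * Illustrative sketch, not part of the original file: bus_find_device()
 * returns the matched device with an elevated reference count, so every
 * successful scmdev_find() must be paired with put_device() -- exactly
 * what scm_add() below does after scmdev_update().
 */
#if 0
static void example_lookup(struct sale *sale)
{
	struct scm_device *scmdev = scmdev_find(sale);

	if (scmdev) {
		/* ... inspect scmdev ... */
		put_device(&scmdev->dev);
	}
}
#endif
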
static int scm_add(struct chsc_scm_info *scm_info, size_t num)
{
	struct sale *sale, *scmal = scm_info->scmal;
	struct scm_device *scmdev;
	int ret;

	for (sale = scmal; sale < scmal + num; sale++) {
		scmdev = scmdev_find(sale);
		if (scmdev) {
			scmdev_update(scmdev, sale);
			/* Release reference from scmdev_find(). */
			put_device(&scmdev->dev);
			continue;
		}
		scmdev = kzalloc(sizeof(*scmdev), GFP_KERNEL);
		if (!scmdev)
			return -ENODEV;
		scmdev_setup(scmdev, sale, scm_info->is, scm_info->mbc);
		ret = device_register(&scmdev->dev);
		if (ret) {
			/* Release reference from device_initialize(). */
			put_device(&scmdev->dev);
			return ret;
		}
	}

	return 0;
}

int scm_update_information(void)
{
	struct chsc_scm_info *scm_info;
	u64 token = 0;
	size_t num;
	int ret;

	scm_info = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
	if (!scm_info)
		return -ENOMEM;

	do {
		ret = chsc_scm_info(scm_info, token);
		if (ret)
			break;

		num = (scm_info->response.length -
		       (offsetof(struct chsc_scm_info, scmal) -
			offsetof(struct chsc_scm_info, response))
		      ) / sizeof(struct sale);

		ret = scm_add(scm_info, num);
		if (ret)
			break;

		token = scm_info->restok;
	} while (token);

	free_page((unsigned long)scm_info);

	return ret;
}

static int scm_dev_avail(struct device *dev, void *unused)
{
	struct scm_driver *scmdrv = to_scm_drv(dev->driver);
	struct scm_device *scmdev = to_scm_dev(dev);

	if (dev->driver && scmdrv->notify)
		scmdrv->notify(scmdev, SCM_AVAIL);

	return 0;
}

int scm_process_availability_information(void)
{
	return bus_for_each_dev(&scm_bus_type, NULL, NULL, scm_dev_avail);
}

static int __init scm_init(void)
{
	int ret;

	ret = bus_register(&scm_bus_type);
	if (ret)
		return ret;

	scm_root = root_device_register("scm");
	if (IS_ERR(scm_root)) {
		bus_unregister(&scm_bus_type);
		return PTR_ERR(scm_root);
	}

	scm_update_information();
	return 0;
}
subsys_initcall_sync(scm_init);