Fixed MTP to work with TWRP

This commit is contained in:
awab228 2018-06-19 23:16:04 +02:00
commit f6dfaef42e
50820 changed files with 20846062 additions and 0 deletions

191
drivers/s390/char/Kconfig Normal file
View file

@ -0,0 +1,191 @@
comment "S/390 character device drivers"
depends on S390
config TN3270
def_tristate y
prompt "Support for locally attached 3270 terminals"
depends on CCW
help
Include support for IBM 3270 terminals.
config TN3270_TTY
def_tristate y
prompt "Support for tty input/output on 3270 terminals"
depends on TN3270 && TTY
help
Include support for using an IBM 3270 terminal as a Linux tty.
config TN3270_FS
def_tristate m
prompt "Support for fullscreen applications on 3270 terminals"
depends on TN3270
help
Include support for fullscreen applications on an IBM 3270 terminal.
config TN3270_CONSOLE
def_bool y
prompt "Support for console on 3270 terminal"
depends on TN3270=y && TN3270_TTY=y
help
Include support for using an IBM 3270 terminal as a Linux system
console. Available only if 3270 support is compiled in statically.
config TN3215
def_bool y
prompt "Support for 3215 line mode terminal"
depends on CCW && TTY
help
Include support for IBM 3215 line-mode terminals.
config TN3215_CONSOLE
def_bool y
prompt "Support for console on 3215 line mode terminal"
depends on TN3215
help
Include support for using an IBM 3215 line-mode terminal as a
Linux system console.
config CCW_CONSOLE
def_bool y if TN3215_CONSOLE || TN3270_CONSOLE
config SCLP_TTY
def_bool y
prompt "Support for SCLP line mode terminal"
depends on S390 && TTY
help
Include support for IBM SCLP line-mode terminals.
config SCLP_CONSOLE
def_bool y
prompt "Support for console on SCLP line mode terminal"
depends on SCLP_TTY
help
Include support for using an IBM HWC line-mode terminal as the Linux
system console.
config SCLP_VT220_TTY
def_bool y
prompt "Support for SCLP VT220-compatible terminal"
depends on S390 && TTY
help
Include support for an IBM SCLP VT220-compatible terminal.
config SCLP_VT220_CONSOLE
def_bool y
prompt "Support for console on SCLP VT220-compatible terminal"
depends on SCLP_VT220_TTY
help
Include support for using an IBM SCLP VT220-compatible terminal as a
Linux system console.
config SCLP_CPI
def_tristate m
prompt "Control-Program Identification"
depends on S390
help
This option enables the hardware console interface for system
identification. This is commonly used for workload management and
gives you a nice name for the system on the service element.
Please select this option as a module since built-in operation is
completely untested.
You should only select this option if you know what you are doing,
need this feature and intend to run your kernel in LPAR.
config SCLP_ASYNC
def_tristate m
prompt "Support for Call Home via Asynchronous SCLP Records"
depends on S390
help
This option enables the call home function, which is able to inform
the service element and connected organisations about a kernel panic.
You should only select this option if you know what you are doing,
want for inform other people about your kernel panics,
need this feature and intend to run your kernel in LPAR.
config HMC_DRV
def_tristate m
prompt "Support for file transfers from HMC drive CD/DVD-ROM"
depends on S390 && 64BIT
select CRC16
help
This option enables support for file transfers from a Hardware
Management Console (HMC) drive CD/DVD-ROM. It is available as a
module, called 'hmcdrv', and also as kernel built-in. There is one
optional parameter for this module: cachesize=N, which modifies the
transfer cache size from it's default value 0.5MB to N bytes. If N
is zero, then no caching is performed.
config S390_TAPE
def_tristate m
prompt "S/390 tape device support"
depends on CCW
help
Select this option if you want to access channel-attached tape
devices on IBM S/390 or zSeries.
If you select this option you will also want to select at
least one of the tape interface options and one of the tape
hardware options in order to access a tape device.
This option is also available as a module. The module will be
called tape390 and include all selected interfaces and
hardware drivers.
comment "S/390 tape hardware support"
depends on S390_TAPE
config S390_TAPE_34XX
def_tristate m
prompt "Support for 3480/3490 tape hardware"
depends on S390_TAPE
help
Select this option if you want to access IBM 3480/3490 magnetic
tape subsystems and 100% compatibles.
It is safe to say "Y" here.
config S390_TAPE_3590
def_tristate m
prompt "Support for 3590 tape hardware"
depends on S390_TAPE
help
Select this option if you want to access IBM 3590 magnetic
tape subsystems and 100% compatibles.
It is safe to say "Y" here.
config VMLOGRDR
def_tristate m
prompt "Support for the z/VM recording system services (VM only)"
depends on IUCV
help
Select this option if you want to be able to receive records collected
by the z/VM recording system services, eg. from *LOGREC, *ACCOUNT or
*SYMPTOM.
This driver depends on the IUCV support driver.
config VMCP
def_bool y
prompt "Support for the z/VM CP interface"
depends on S390
help
Select this option if you want to be able to interact with the control
program on z/VM
config MONREADER
def_tristate m
prompt "API for reading z/VM monitor service records"
depends on IUCV
help
Character device driver for reading z/VM monitor service records
config MONWRITER
def_tristate m
prompt "API for writing z/VM monitor service records"
depends on S390
help
Character device driver for writing z/VM monitor service records
config S390_VMUR
def_tristate m
prompt "z/VM unit record device driver"
depends on S390
help
Character device driver for z/VM reader, puncher and printer.

View file

@ -0,0 +1,38 @@
#
# S/390 character devices
#
obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \
sclp_cmd.o sclp_config.o sclp_cpi_sys.o sclp_ocf.o sclp_ctl.o \
sclp_early.o
obj-$(CONFIG_TN3270) += raw3270.o
obj-$(CONFIG_TN3270_CONSOLE) += con3270.o
obj-$(CONFIG_TN3270_TTY) += tty3270.o
obj-$(CONFIG_TN3270_FS) += fs3270.o
obj-$(CONFIG_TN3215) += con3215.o
obj-$(CONFIG_SCLP_TTY) += sclp_tty.o
obj-$(CONFIG_SCLP_CONSOLE) += sclp_con.o
obj-$(CONFIG_SCLP_VT220_TTY) += sclp_vt220.o
obj-$(CONFIG_SCLP_CPI) += sclp_cpi.o
obj-$(CONFIG_SCLP_ASYNC) += sclp_async.o
obj-$(CONFIG_VMLOGRDR) += vmlogrdr.o
obj-$(CONFIG_VMCP) += vmcp.o
tape-$(CONFIG_PROC_FS) += tape_proc.o
tape-objs := tape_core.o tape_std.o tape_char.o $(tape-y)
obj-$(CONFIG_S390_TAPE) += tape.o tape_class.o
obj-$(CONFIG_S390_TAPE_34XX) += tape_34xx.o
obj-$(CONFIG_S390_TAPE_3590) += tape_3590.o
obj-$(CONFIG_MONREADER) += monreader.o
obj-$(CONFIG_MONWRITER) += monwriter.o
obj-$(CONFIG_S390_VMUR) += vmur.o
zcore_mod-objs := sclp_sdias.o zcore.o
obj-$(CONFIG_CRASH_DUMP) += zcore_mod.o
hmcdrv-objs := hmcdrv_mod.o hmcdrv_dev.o hmcdrv_ftp.o hmcdrv_cache.o diag_ftp.o sclp_ftp.o
obj-$(CONFIG_HMC_DRV) += hmcdrv.o

1226
drivers/s390/char/con3215.c Normal file

File diff suppressed because it is too large Load diff

638
drivers/s390/char/con3270.c Normal file
View file

@ -0,0 +1,638 @@
/*
* IBM/3270 Driver - console view.
*
* Author(s):
* Original 3270 Code for 2.4 written by Richard Hitt (UTS Global)
* Rewritten for 2.5 by Martin Schwidefsky <schwidefsky@de.ibm.com>
* Copyright IBM Corp. 2003, 2009
*/
#include <linux/module.h>
#include <linux/console.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/reboot.h>
#include <asm/ccwdev.h>
#include <asm/cio.h>
#include <asm/cpcmd.h>
#include <asm/ebcdic.h>
#include "raw3270.h"
#include "tty3270.h"
#include "ctrlchar.h"
#define CON3270_OUTPUT_BUFFER_SIZE 1024
#define CON3270_STRING_PAGES 4
static struct raw3270_fn con3270_fn;
static bool auto_update = 1;
module_param(auto_update, bool, 0);
/*
* Main 3270 console view data structure.
*/
struct con3270 {
struct raw3270_view view;
struct list_head freemem; /* list of free memory for strings. */
/* Output stuff. */
struct list_head lines; /* list of lines. */
struct list_head update; /* list of lines to update. */
int line_nr; /* line number for next update. */
int nr_lines; /* # lines in list. */
int nr_up; /* # lines up in history. */
unsigned long update_flags; /* Update indication bits. */
struct string *cline; /* current output line. */
struct string *status; /* last line of display. */
struct raw3270_request *write; /* single write request. */
struct timer_list timer;
/* Input stuff. */
struct string *input; /* input string for read request. */
struct raw3270_request *read; /* single read request. */
struct raw3270_request *kreset; /* single keyboard reset request. */
struct tasklet_struct readlet; /* tasklet to issue read request. */
};
static struct con3270 *condev;
/* con3270->update_flags. See con3270_update for details. */
#define CON_UPDATE_ERASE 1 /* Use EWRITEA instead of WRITE. */
#define CON_UPDATE_LIST 2 /* Update lines in tty3270->update. */
#define CON_UPDATE_STATUS 4 /* Update status line. */
#define CON_UPDATE_ALL 8 /* Recreate screen. */
static void con3270_update(struct con3270 *);
/*
* Setup timeout for a device. On timeout trigger an update.
*/
static void con3270_set_timer(struct con3270 *cp, int expires)
{
if (expires == 0)
del_timer(&cp->timer);
else
mod_timer(&cp->timer, jiffies + expires);
}
/*
* The status line is the last line of the screen. It shows the string
* "console view" in the lower left corner and "Running"/"More..."/"Holding"
* in the lower right corner of the screen.
*/
static void
con3270_update_status(struct con3270 *cp)
{
char *str;
str = (cp->nr_up != 0) ? "History" : "Running";
memcpy(cp->status->string + 24, str, 7);
codepage_convert(cp->view.ascebc, cp->status->string + 24, 7);
cp->update_flags |= CON_UPDATE_STATUS;
}
static void
con3270_create_status(struct con3270 *cp)
{
static const unsigned char blueprint[] =
{ TO_SBA, 0, 0, TO_SF,TF_LOG,TO_SA,TAT_COLOR, TAC_GREEN,
'c','o','n','s','o','l','e',' ','v','i','e','w',
TO_RA,0,0,0,'R','u','n','n','i','n','g',TO_SF,TF_LOG };
cp->status = alloc_string(&cp->freemem, sizeof(blueprint));
/* Copy blueprint to status line */
memcpy(cp->status->string, blueprint, sizeof(blueprint));
/* Set TO_RA addresses. */
raw3270_buffer_address(cp->view.dev, cp->status->string + 1,
cp->view.cols * (cp->view.rows - 1));
raw3270_buffer_address(cp->view.dev, cp->status->string + 21,
cp->view.cols * cp->view.rows - 8);
/* Convert strings to ebcdic. */
codepage_convert(cp->view.ascebc, cp->status->string + 8, 12);
codepage_convert(cp->view.ascebc, cp->status->string + 24, 7);
}
/*
* Set output offsets to 3270 datastream fragment of a console string.
*/
static void
con3270_update_string(struct con3270 *cp, struct string *s, int nr)
{
if (s->len >= cp->view.cols - 5)
return;
raw3270_buffer_address(cp->view.dev, s->string + s->len - 3,
cp->view.cols * (nr + 1));
}
/*
* Rebuild update list to print all lines.
*/
static void
con3270_rebuild_update(struct con3270 *cp)
{
struct string *s, *n;
int nr;
/*
* Throw away update list and create a new one,
* containing all lines that will fit on the screen.
*/
list_for_each_entry_safe(s, n, &cp->update, update)
list_del_init(&s->update);
nr = cp->view.rows - 2 + cp->nr_up;
list_for_each_entry_reverse(s, &cp->lines, list) {
if (nr < cp->view.rows - 1)
list_add(&s->update, &cp->update);
if (--nr < 0)
break;
}
cp->line_nr = 0;
cp->update_flags |= CON_UPDATE_LIST;
}
/*
* Alloc string for size bytes. Free strings from history if necessary.
*/
static struct string *
con3270_alloc_string(struct con3270 *cp, size_t size)
{
struct string *s, *n;
s = alloc_string(&cp->freemem, size);
if (s)
return s;
list_for_each_entry_safe(s, n, &cp->lines, list) {
list_del(&s->list);
if (!list_empty(&s->update))
list_del(&s->update);
cp->nr_lines--;
if (free_string(&cp->freemem, s) >= size)
break;
}
s = alloc_string(&cp->freemem, size);
BUG_ON(!s);
if (cp->nr_up != 0 && cp->nr_up + cp->view.rows > cp->nr_lines) {
cp->nr_up = cp->nr_lines - cp->view.rows + 1;
con3270_rebuild_update(cp);
con3270_update_status(cp);
}
return s;
}
/*
* Write completion callback.
*/
static void
con3270_write_callback(struct raw3270_request *rq, void *data)
{
raw3270_request_reset(rq);
xchg(&((struct con3270 *) rq->view)->write, rq);
}
/*
* Update console display.
*/
static void
con3270_update(struct con3270 *cp)
{
struct raw3270_request *wrq;
char wcc, prolog[6];
unsigned long flags;
unsigned long updated;
struct string *s, *n;
int rc;
if (!auto_update && !raw3270_view_active(&cp->view))
return;
if (cp->view.dev)
raw3270_activate_view(&cp->view);
wrq = xchg(&cp->write, 0);
if (!wrq) {
con3270_set_timer(cp, 1);
return;
}
spin_lock_irqsave(&cp->view.lock, flags);
updated = 0;
if (cp->update_flags & CON_UPDATE_ALL) {
con3270_rebuild_update(cp);
con3270_update_status(cp);
cp->update_flags = CON_UPDATE_ERASE | CON_UPDATE_LIST |
CON_UPDATE_STATUS;
}
if (cp->update_flags & CON_UPDATE_ERASE) {
/* Use erase write alternate to initialize display. */
raw3270_request_set_cmd(wrq, TC_EWRITEA);
updated |= CON_UPDATE_ERASE;
} else
raw3270_request_set_cmd(wrq, TC_WRITE);
wcc = TW_NONE;
raw3270_request_add_data(wrq, &wcc, 1);
/*
* Update status line.
*/
if (cp->update_flags & CON_UPDATE_STATUS)
if (raw3270_request_add_data(wrq, cp->status->string,
cp->status->len) == 0)
updated |= CON_UPDATE_STATUS;
if (cp->update_flags & CON_UPDATE_LIST) {
prolog[0] = TO_SBA;
prolog[3] = TO_SA;
prolog[4] = TAT_COLOR;
prolog[5] = TAC_TURQ;
raw3270_buffer_address(cp->view.dev, prolog + 1,
cp->view.cols * cp->line_nr);
raw3270_request_add_data(wrq, prolog, 6);
/* Write strings in the update list to the screen. */
list_for_each_entry_safe(s, n, &cp->update, update) {
if (s != cp->cline)
con3270_update_string(cp, s, cp->line_nr);
if (raw3270_request_add_data(wrq, s->string,
s->len) != 0)
break;
list_del_init(&s->update);
if (s != cp->cline)
cp->line_nr++;
}
if (list_empty(&cp->update))
updated |= CON_UPDATE_LIST;
}
wrq->callback = con3270_write_callback;
rc = raw3270_start(&cp->view, wrq);
if (rc == 0) {
cp->update_flags &= ~updated;
if (cp->update_flags)
con3270_set_timer(cp, 1);
} else {
raw3270_request_reset(wrq);
xchg(&cp->write, wrq);
}
spin_unlock_irqrestore(&cp->view.lock, flags);
}
/*
* Read tasklet.
*/
static void
con3270_read_tasklet(struct raw3270_request *rrq)
{
static char kreset_data = TW_KR;
struct con3270 *cp;
unsigned long flags;
int nr_up, deactivate;
cp = (struct con3270 *) rrq->view;
spin_lock_irqsave(&cp->view.lock, flags);
nr_up = cp->nr_up;
deactivate = 0;
/* Check aid byte. */
switch (cp->input->string[0]) {
case 0x7d: /* enter: jump to bottom. */
nr_up = 0;
break;
case 0xf3: /* PF3: deactivate the console view. */
deactivate = 1;
break;
case 0x6d: /* clear: start from scratch. */
cp->update_flags = CON_UPDATE_ALL;
con3270_set_timer(cp, 1);
break;
case 0xf7: /* PF7: do a page up in the console log. */
nr_up += cp->view.rows - 2;
if (nr_up + cp->view.rows - 1 > cp->nr_lines) {
nr_up = cp->nr_lines - cp->view.rows + 1;
if (nr_up < 0)
nr_up = 0;
}
break;
case 0xf8: /* PF8: do a page down in the console log. */
nr_up -= cp->view.rows - 2;
if (nr_up < 0)
nr_up = 0;
break;
}
if (nr_up != cp->nr_up) {
cp->nr_up = nr_up;
con3270_rebuild_update(cp);
con3270_update_status(cp);
con3270_set_timer(cp, 1);
}
spin_unlock_irqrestore(&cp->view.lock, flags);
/* Start keyboard reset command. */
raw3270_request_reset(cp->kreset);
raw3270_request_set_cmd(cp->kreset, TC_WRITE);
raw3270_request_add_data(cp->kreset, &kreset_data, 1);
raw3270_start(&cp->view, cp->kreset);
if (deactivate)
raw3270_deactivate_view(&cp->view);
raw3270_request_reset(rrq);
xchg(&cp->read, rrq);
raw3270_put_view(&cp->view);
}
/*
* Read request completion callback.
*/
static void
con3270_read_callback(struct raw3270_request *rq, void *data)
{
raw3270_get_view(rq->view);
/* Schedule tasklet to pass input to tty. */
tasklet_schedule(&((struct con3270 *) rq->view)->readlet);
}
/*
* Issue a read request. Called only from interrupt function.
*/
static void
con3270_issue_read(struct con3270 *cp)
{
struct raw3270_request *rrq;
int rc;
rrq = xchg(&cp->read, 0);
if (!rrq)
/* Read already scheduled. */
return;
rrq->callback = con3270_read_callback;
rrq->callback_data = cp;
raw3270_request_set_cmd(rrq, TC_READMOD);
raw3270_request_set_data(rrq, cp->input->string, cp->input->len);
/* Issue the read modified request. */
rc = raw3270_start_irq(&cp->view, rrq);
if (rc)
raw3270_request_reset(rrq);
}
/*
* Switch to the console view.
*/
static int
con3270_activate(struct raw3270_view *view)
{
struct con3270 *cp;
cp = (struct con3270 *) view;
cp->update_flags = CON_UPDATE_ALL;
con3270_set_timer(cp, 1);
return 0;
}
static void
con3270_deactivate(struct raw3270_view *view)
{
struct con3270 *cp;
cp = (struct con3270 *) view;
del_timer(&cp->timer);
}
static int
con3270_irq(struct con3270 *cp, struct raw3270_request *rq, struct irb *irb)
{
/* Handle ATTN. Schedule tasklet to read aid. */
if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION)
con3270_issue_read(cp);
if (rq) {
if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)
rq->rc = -EIO;
else
/* Normal end. Copy residual count. */
rq->rescnt = irb->scsw.cmd.count;
}
return RAW3270_IO_DONE;
}
/* Console view to a 3270 device. */
static struct raw3270_fn con3270_fn = {
.activate = con3270_activate,
.deactivate = con3270_deactivate,
.intv = (void *) con3270_irq
};
static inline void
con3270_cline_add(struct con3270 *cp)
{
if (!list_empty(&cp->cline->list))
/* Already added. */
return;
list_add_tail(&cp->cline->list, &cp->lines);
cp->nr_lines++;
con3270_rebuild_update(cp);
}
static inline void
con3270_cline_insert(struct con3270 *cp, unsigned char c)
{
cp->cline->string[cp->cline->len++] =
cp->view.ascebc[(c < ' ') ? ' ' : c];
if (list_empty(&cp->cline->update)) {
list_add_tail(&cp->cline->update, &cp->update);
cp->update_flags |= CON_UPDATE_LIST;
}
}
static inline void
con3270_cline_end(struct con3270 *cp)
{
struct string *s;
unsigned int size;
/* Copy cline. */
size = (cp->cline->len < cp->view.cols - 5) ?
cp->cline->len + 4 : cp->view.cols;
s = con3270_alloc_string(cp, size);
memcpy(s->string, cp->cline->string, cp->cline->len);
if (s->len < cp->view.cols - 5) {
s->string[s->len - 4] = TO_RA;
s->string[s->len - 1] = 0;
} else {
while (--size > cp->cline->len)
s->string[size] = cp->view.ascebc[' '];
}
/* Replace cline with allocated line s and reset cline. */
list_add(&s->list, &cp->cline->list);
list_del_init(&cp->cline->list);
if (!list_empty(&cp->cline->update)) {
list_add(&s->update, &cp->cline->update);
list_del_init(&cp->cline->update);
}
cp->cline->len = 0;
}
/*
* Write a string to the 3270 console
*/
static void
con3270_write(struct console *co, const char *str, unsigned int count)
{
struct con3270 *cp;
unsigned long flags;
unsigned char c;
cp = condev;
spin_lock_irqsave(&cp->view.lock, flags);
while (count-- > 0) {
c = *str++;
if (cp->cline->len == 0)
con3270_cline_add(cp);
if (c != '\n')
con3270_cline_insert(cp, c);
if (c == '\n' || cp->cline->len >= cp->view.cols)
con3270_cline_end(cp);
}
/* Setup timer to output current console buffer after 1/10 second */
cp->nr_up = 0;
if (cp->view.dev && !timer_pending(&cp->timer))
con3270_set_timer(cp, HZ/10);
spin_unlock_irqrestore(&cp->view.lock,flags);
}
static struct tty_driver *
con3270_device(struct console *c, int *index)
{
*index = c->index;
return tty3270_driver;
}
/*
* Wait for end of write request.
*/
static void
con3270_wait_write(struct con3270 *cp)
{
while (!cp->write) {
raw3270_wait_cons_dev(cp->view.dev);
barrier();
}
}
/*
* panic() calls con3270_flush through a panic_notifier
* before the system enters a disabled, endless loop.
*/
static void
con3270_flush(void)
{
struct con3270 *cp;
unsigned long flags;
cp = condev;
if (!cp->view.dev)
return;
raw3270_pm_unfreeze(&cp->view);
raw3270_activate_view(&cp->view);
spin_lock_irqsave(&cp->view.lock, flags);
con3270_wait_write(cp);
cp->nr_up = 0;
con3270_rebuild_update(cp);
con3270_update_status(cp);
while (cp->update_flags != 0) {
spin_unlock_irqrestore(&cp->view.lock, flags);
con3270_update(cp);
spin_lock_irqsave(&cp->view.lock, flags);
con3270_wait_write(cp);
}
spin_unlock_irqrestore(&cp->view.lock, flags);
}
static int con3270_notify(struct notifier_block *self,
unsigned long event, void *data)
{
con3270_flush();
return NOTIFY_OK;
}
static struct notifier_block on_panic_nb = {
.notifier_call = con3270_notify,
.priority = 0,
};
static struct notifier_block on_reboot_nb = {
.notifier_call = con3270_notify,
.priority = 0,
};
/*
* The console structure for the 3270 console
*/
static struct console con3270 = {
.name = "tty3270",
.write = con3270_write,
.device = con3270_device,
.flags = CON_PRINTBUFFER,
};
/*
* 3270 console initialization code called from console_init().
*/
static int __init
con3270_init(void)
{
struct raw3270 *rp;
void *cbuf;
int i;
/* Check if 3270 is to be the console */
if (!CONSOLE_IS_3270)
return -ENODEV;
/* Set the console mode for VM */
if (MACHINE_IS_VM) {
cpcmd("TERM CONMODE 3270", NULL, 0, NULL);
cpcmd("TERM AUTOCR OFF", NULL, 0, NULL);
}
rp = raw3270_setup_console();
if (IS_ERR(rp))
return PTR_ERR(rp);
condev = kzalloc(sizeof(struct con3270), GFP_KERNEL | GFP_DMA);
condev->view.dev = rp;
condev->read = raw3270_request_alloc(0);
condev->read->callback = con3270_read_callback;
condev->read->callback_data = condev;
condev->write = raw3270_request_alloc(CON3270_OUTPUT_BUFFER_SIZE);
condev->kreset = raw3270_request_alloc(1);
INIT_LIST_HEAD(&condev->lines);
INIT_LIST_HEAD(&condev->update);
setup_timer(&condev->timer, (void (*)(unsigned long)) con3270_update,
(unsigned long) condev);
tasklet_init(&condev->readlet,
(void (*)(unsigned long)) con3270_read_tasklet,
(unsigned long) condev->read);
raw3270_add_view(&condev->view, &con3270_fn, 1);
INIT_LIST_HEAD(&condev->freemem);
for (i = 0; i < CON3270_STRING_PAGES; i++) {
cbuf = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
add_string_memory(&condev->freemem, cbuf, PAGE_SIZE);
}
condev->cline = alloc_string(&condev->freemem, condev->view.cols);
condev->cline->len = 0;
con3270_create_status(condev);
condev->input = alloc_string(&condev->freemem, 80);
atomic_notifier_chain_register(&panic_notifier_list, &on_panic_nb);
register_reboot_notifier(&on_reboot_nb);
register_console(&con3270);
return 0;
}
console_initcall(con3270_init);

View file

@ -0,0 +1,72 @@
/*
* Unified handling of special chars.
*
* Copyright IBM Corp. 2001
* Author(s): Fritz Elfert <felfert@millenux.com> <elfert@de.ibm.com>
*
*/
#include <linux/stddef.h>
#include <asm/errno.h>
#include <linux/sysrq.h>
#include <linux/ctype.h>
#include "ctrlchar.h"
#ifdef CONFIG_MAGIC_SYSRQ
static int ctrlchar_sysrq_key;
static void
ctrlchar_handle_sysrq(struct work_struct *work)
{
handle_sysrq(ctrlchar_sysrq_key);
}
static DECLARE_WORK(ctrlchar_work, ctrlchar_handle_sysrq);
#endif
/**
* Check for special chars at start of input.
*
* @param buf Console input buffer.
* @param len Length of valid data in buffer.
* @param tty The tty struct for this console.
* @return CTRLCHAR_NONE, if nothing matched,
* CTRLCHAR_SYSRQ, if sysrq was encountered
* otherwise char to be inserted logically or'ed
* with CTRLCHAR_CTRL
*/
unsigned int
ctrlchar_handle(const unsigned char *buf, int len, struct tty_struct *tty)
{
if ((len < 2) || (len > 3))
return CTRLCHAR_NONE;
/* hat is 0xb1 in codepage 037 (US etc.) and thus */
/* converted to 0x5e in ascii ('^') */
if ((buf[0] != '^') && (buf[0] != '\252'))
return CTRLCHAR_NONE;
#ifdef CONFIG_MAGIC_SYSRQ
/* racy */
if (len == 3 && buf[1] == '-') {
ctrlchar_sysrq_key = buf[2];
schedule_work(&ctrlchar_work);
return CTRLCHAR_SYSRQ;
}
#endif
if (len != 2)
return CTRLCHAR_NONE;
switch (tolower(buf[1])) {
case 'c':
return INTR_CHAR(tty) | CTRLCHAR_CTRL;
case 'd':
return EOF_CHAR(tty) | CTRLCHAR_CTRL;
case 'z':
return SUSP_CHAR(tty) | CTRLCHAR_CTRL;
}
return CTRLCHAR_NONE;
}

View file

@ -0,0 +1,19 @@
/*
* Unified handling of special chars.
*
* Copyright IBM Corp. 2001
* Author(s): Fritz Elfert <felfert@millenux.com> <elfert@de.ibm.com>
*
*/
#include <linux/tty.h>
extern unsigned int
ctrlchar_handle(const unsigned char *buf, int len, struct tty_struct *tty);
#define CTRLCHAR_NONE (1 << 8)
#define CTRLCHAR_CTRL (2 << 8)
#define CTRLCHAR_SYSRQ (3 << 8)
#define CTRLCHAR_MASK (~0xffu)

View file

@ -0,0 +1,158 @@
/* Do not edit this file! It was automatically generated by */
/* loadkeys --mktable defkeymap.map > defkeymap.c */
#include <linux/types.h>
#include <linux/keyboard.h>
#include <linux/kd.h>
#include <linux/kbd_kern.h>
#include <linux/kbd_diacr.h>
u_short plain_map[NR_KEYS] = {
0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000,
0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000,
0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000,
0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000,
0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000,
0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000,
0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000,
0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000,
0xf020, 0xf000, 0xf0e2, 0xf0e4, 0xf0e0, 0xf0e1, 0xf0e3, 0xf0e5,
0xf0e7, 0xf0f1, 0xf0a2, 0xf02e, 0xf03c, 0xf028, 0xf02b, 0xf07c,
0xf026, 0xf0e9, 0xf0e2, 0xf0eb, 0xf0e8, 0xf0ed, 0xf0ee, 0xf0ef,
0xf0ec, 0xf0df, 0xf021, 0xf024, 0xf02a, 0xf029, 0xf03b, 0xf0ac,
0xf02d, 0xf02f, 0xf0c2, 0xf0c4, 0xf0c0, 0xf0c1, 0xf0c3, 0xf0c5,
0xf0c7, 0xf0d1, 0xf0a6, 0xf02c, 0xf025, 0xf05f, 0xf03e, 0xf03f,
0xf0f8, 0xf0c9, 0xf0ca, 0xf0cb, 0xf0c8, 0xf0cd, 0xf0ce, 0xf0cf,
0xf0cc, 0xf060, 0xf03a, 0xf023, 0xf040, 0xf027, 0xf03d, 0xf022,
};
static u_short shift_map[NR_KEYS] = {
0xf0d8, 0xf061, 0xf062, 0xf063, 0xf064, 0xf065, 0xf066, 0xf067,
0xf068, 0xf069, 0xf0ab, 0xf0bb, 0xf0f0, 0xf0fd, 0xf0fe, 0xf0b1,
0xf0b0, 0xf06a, 0xf06b, 0xf06c, 0xf06d, 0xf06e, 0xf06f, 0xf070,
0xf071, 0xf072, 0xf000, 0xf000, 0xf0e6, 0xf0b8, 0xf0c6, 0xf0a4,
0xf0b5, 0xf07e, 0xf073, 0xf074, 0xf075, 0xf076, 0xf077, 0xf078,
0xf079, 0xf07a, 0xf0a1, 0xf0bf, 0xf0d0, 0xf0dd, 0xf0de, 0xf0ae,
0xf402, 0xf0a3, 0xf0a5, 0xf0b7, 0xf0a9, 0xf0a7, 0xf0b6, 0xf0bc,
0xf0bd, 0xf0be, 0xf05b, 0xf05d, 0xf000, 0xf0a8, 0xf0b4, 0xf0d7,
0xf07b, 0xf041, 0xf042, 0xf043, 0xf044, 0xf045, 0xf046, 0xf047,
0xf048, 0xf049, 0xf000, 0xf0f4, 0xf0f6, 0xf0f2, 0xf0f3, 0xf0f5,
0xf07d, 0xf04a, 0xf04b, 0xf04c, 0xf04d, 0xf04e, 0xf04f, 0xf050,
0xf051, 0xf052, 0xf0b9, 0xf0fb, 0xf0fc, 0xf0f9, 0xf0fa, 0xf0ff,
0xf05c, 0xf0f7, 0xf053, 0xf054, 0xf055, 0xf056, 0xf057, 0xf058,
0xf059, 0xf05a, 0xf0b2, 0xf0d4, 0xf0d6, 0xf0d2, 0xf0d3, 0xf0d5,
0xf030, 0xf031, 0xf032, 0xf033, 0xf034, 0xf035, 0xf036, 0xf037,
0xf038, 0xf039, 0xf0b3, 0xf0db, 0xf0dc, 0xf0d9, 0xf0da, 0xf000,
};
static u_short ctrl_map[NR_KEYS] = {
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf11f, 0xf120, 0xf121, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf01a, 0xf003, 0xf212, 0xf004, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf109, 0xf10a, 0xf206, 0xf00a, 0xf200, 0xf200,
};
static u_short shift_ctrl_map[NR_KEYS] = {
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf10c, 0xf10d, 0xf10e, 0xf10f, 0xf110, 0xf111, 0xf112,
0xf113, 0xf11e, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf100, 0xf101, 0xf211, 0xf103, 0xf104, 0xf105, 0xf20b,
0xf20a, 0xf108, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
};
ushort *key_maps[MAX_NR_KEYMAPS] = {
plain_map, shift_map, NULL, NULL,
ctrl_map, shift_ctrl_map, NULL,
};
unsigned int keymap_count = 4;
/*
* Philosophy: most people do not define more strings, but they who do
* often want quite a lot of string space. So, we statically allocate
* the default and allocate dynamically in chunks of 512 bytes.
*/
char func_buf[] = {
'\033', '[', '[', 'A', 0,
'\033', '[', '[', 'B', 0,
'\033', '[', '[', 'C', 0,
'\033', '[', '[', 'D', 0,
'\033', '[', '[', 'E', 0,
'\033', '[', '1', '7', '~', 0,
'\033', '[', '1', '8', '~', 0,
'\033', '[', '1', '9', '~', 0,
'\033', '[', '2', '0', '~', 0,
'\033', '[', '2', '1', '~', 0,
'\033', '[', '2', '3', '~', 0,
'\033', '[', '2', '4', '~', 0,
'\033', '[', '2', '5', '~', 0,
'\033', '[', '2', '6', '~', 0,
'\033', '[', '2', '8', '~', 0,
'\033', '[', '2', '9', '~', 0,
'\033', '[', '3', '1', '~', 0,
'\033', '[', '3', '2', '~', 0,
'\033', '[', '3', '3', '~', 0,
'\033', '[', '3', '4', '~', 0,
};
char *funcbufptr = func_buf;
int funcbufsize = sizeof(func_buf);
int funcbufleft = 0; /* space left */
char *func_table[MAX_NR_FUNC] = {
func_buf + 0,
func_buf + 5,
func_buf + 10,
func_buf + 15,
func_buf + 20,
func_buf + 25,
func_buf + 31,
func_buf + 37,
func_buf + 43,
func_buf + 49,
func_buf + 55,
func_buf + 61,
func_buf + 67,
func_buf + 73,
func_buf + 79,
func_buf + 85,
func_buf + 91,
func_buf + 97,
func_buf + 103,
func_buf + 109,
NULL,
};
struct kbdiacruc accent_table[MAX_DIACR] = {
{'^', 'c', 0003}, {'^', 'd', 0004},
{'^', 'z', 0032}, {'^', 0012, 0000},
};
unsigned int accent_table_size = 4;

View file

@ -0,0 +1,191 @@
# Default keymap for 3270 (ebcdic codepage 037).
keymaps 0-1,4-5
keycode 0 = nul Oslash
keycode 1 = nul a
keycode 2 = nul b
keycode 3 = nul c
keycode 4 = nul d
keycode 5 = nul e
keycode 6 = nul f
keycode 7 = nul g
keycode 8 = nul h
keycode 9 = nul i
keycode 10 = nul guillemotleft
keycode 11 = nul guillemotright
keycode 12 = nul eth
keycode 13 = nul yacute
keycode 14 = nul thorn
keycode 15 = nul plusminus
keycode 16 = nul degree
keycode 17 = nul j
keycode 18 = nul k
keycode 19 = nul l
keycode 20 = nul m
keycode 21 = nul n
keycode 22 = nul o
keycode 23 = nul p
keycode 24 = nul q
keycode 25 = nul r
keycode 26 = nul nul
keycode 27 = nul nul
keycode 28 = nul ae
keycode 29 = nul cedilla
keycode 30 = nul AE
keycode 31 = nul currency
keycode 32 = nul mu
keycode 33 = nul tilde
keycode 34 = nul s
keycode 35 = nul t
keycode 36 = nul u
keycode 37 = nul v
keycode 38 = nul w
keycode 39 = nul x
keycode 40 = nul y
keycode 41 = nul z
keycode 42 = nul exclamdown
keycode 43 = nul questiondown
keycode 44 = nul ETH
keycode 45 = nul Yacute
keycode 46 = nul THORN
keycode 47 = nul registered
keycode 48 = nul dead_circumflex
keycode 49 = nul sterling
keycode 50 = nul yen
keycode 51 = nul periodcentered
keycode 52 = nul copyright
keycode 53 = nul section
keycode 54 = nul paragraph
keycode 55 = nul onequarter
keycode 56 = nul onehalf
keycode 57 = nul threequarters
keycode 58 = nul bracketleft
keycode 59 = nul bracketright
keycode 60 = nul nul
keycode 61 = nul diaeresis
keycode 62 = nul acute
keycode 63 = nul multiply
keycode 64 = space braceleft
keycode 65 = nul A
keycode 66 = acircumflex B
keycode 67 = adiaeresis C
keycode 68 = agrave D
keycode 69 = aacute E
keycode 70 = atilde F
keycode 71 = aring G
keycode 72 = ccedilla H
keycode 73 = ntilde I
keycode 74 = cent nul
keycode 75 = period ocircumflex
keycode 76 = less odiaeresis
keycode 77 = parenleft ograve
keycode 78 = plus oacute
keycode 79 = bar otilde
keycode 80 = ampersand braceright
keycode 81 = eacute J
keycode 82 = acircumflex K
keycode 83 = ediaeresis L
keycode 84 = egrave M
keycode 85 = iacute N
keycode 86 = icircumflex O
keycode 87 = idiaeresis P
keycode 88 = igrave Q
keycode 89 = ssharp R
keycode 90 = exclam onesuperior
keycode 91 = dollar ucircumflex
keycode 92 = asterisk udiaeresis
keycode 93 = parenright ugrave
keycode 94 = semicolon uacute
keycode 95 = notsign ydiaeresis
keycode 96 = minus backslash
keycode 97 = slash division
keycode 98 = Acircumflex S
keycode 99 = Adiaeresis T
keycode 100 = Agrave U
keycode 101 = Aacute V
keycode 102 = Atilde W
keycode 103 = Aring X
keycode 104 = Ccedilla Y
keycode 105 = Ntilde Z
keycode 106 = brokenbar twosuperior
keycode 107 = comma Ocircumflex
keycode 108 = percent Odiaeresis
keycode 109 = underscore Ograve
keycode 110 = greater Oacute
keycode 111 = question Otilde
keycode 112 = oslash zero
keycode 113 = Eacute one
keycode 114 = Ecircumflex two
keycode 115 = Ediaeresis three
keycode 116 = Egrave four
keycode 117 = Iacute five
keycode 118 = Icircumflex six
keycode 119 = Idiaeresis seven
keycode 120 = Igrave eight
keycode 121 = grave nine
keycode 122 = colon threesuperior
keycode 123 = numbersign Ucircumflex
keycode 124 = at Udiaeresis
keycode 125 = apostrophe Ugrave
keycode 126 = equal Uacute
keycode 127 = quotedbl nul
# AID keys
control keycode 74 = F22
control keycode 75 = F23
control keycode 76 = F24
control keycode 107 = Control_z # PA3
control keycode 108 = Control_c # PA1
control keycode 109 = KeyboardSignal # Clear
control keycode 110 = Control_d # PA2
control keycode 122 = F10
control keycode 123 = F11 # F11
control keycode 124 = Last_Console # F12
control keycode 125 = Linefeed
shift control keycode 65 = F13
shift control keycode 66 = F14
shift control keycode 67 = F15
shift control keycode 68 = F16
shift control keycode 69 = F17
shift control keycode 70 = F18
shift control keycode 71 = F19
shift control keycode 72 = F20
shift control keycode 73 = F21
shift control keycode 113 = F1
shift control keycode 114 = F2
shift control keycode 115 = Incr_Console
shift control keycode 116 = F4
shift control keycode 117 = F5
shift control keycode 118 = F6
shift control keycode 119 = Scroll_Backward
shift control keycode 120 = Scroll_Forward
shift control keycode 121 = F9
string F1 = "\033[[A"
string F2 = "\033[[B"
string F3 = "\033[[C"
string F4 = "\033[[D"
string F5 = "\033[[E"
string F6 = "\033[17~"
string F7 = "\033[18~"
string F8 = "\033[19~"
string F9 = "\033[20~"
string F10 = "\033[21~"
string F11 = "\033[23~"
string F12 = "\033[24~"
string F13 = "\033[25~"
string F14 = "\033[26~"
string F15 = "\033[28~"
string F16 = "\033[29~"
string F17 = "\033[31~"
string F18 = "\033[32~"
string F19 = "\033[33~"
string F20 = "\033[34~"
# string F21 ??
# string F22 ??
# string F23 ??
# string F24 ??
compose '^' 'c' to Control_c
compose '^' 'd' to Control_d
compose '^' 'z' to Control_z
compose '^' '\012' to nul

View file

@ -0,0 +1,237 @@
/*
* DIAGNOSE X'2C4' instruction based HMC FTP services, useable on z/VM
*
* Copyright IBM Corp. 2013
* Author(s): Ralf Hoppe (rhoppe@de.ibm.com)
*
*/
#define KMSG_COMPONENT "hmcdrv"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/irq.h>
#include <linux/wait.h>
#include <linux/string.h>
#include <asm/ctl_reg.h>
#include "hmcdrv_ftp.h"
#include "diag_ftp.h"
/* DIAGNOSE X'2C4' return codes in Ry */
#define DIAG_FTP_RET_OK	0 /* HMC FTP started successfully */
#define DIAG_FTP_RET_EBUSY	4 /* HMC FTP service currently busy */
#define DIAG_FTP_RET_EIO	8 /* HMC FTP service I/O error */
/* and an artificial extension */
#define DIAG_FTP_RET_EPERM	2 /* HMC FTP service privilege error */

/* FTP service status codes (after INTR at guest real location 133) */
#define DIAG_FTP_STAT_OK	0U /* request completed successfully */
#define DIAG_FTP_STAT_PGCC	4U /* program check condition */
#define DIAG_FTP_STAT_PGIOE	8U /* paging I/O error */
#define DIAG_FTP_STAT_TIMEOUT	12U /* timeout */
#define DIAG_FTP_STAT_EBASE	16U /* base of error codes from SCLP */
#define DIAG_FTP_STAT_LDFAIL	(DIAG_FTP_STAT_EBASE + 1U) /* failed */
#define DIAG_FTP_STAT_LDNPERM	(DIAG_FTP_STAT_EBASE + 2U) /* not allowed */
#define DIAG_FTP_STAT_LDRUNS	(DIAG_FTP_STAT_EBASE + 3U) /* runs */
#define DIAG_FTP_STAT_LDNRUNS	(DIAG_FTP_STAT_EBASE + 4U) /* not runs */

/**
 * struct diag_ftp_ldfpl - load file FTP parameter list (LDFPL)
 * @bufaddr: real buffer address (at 4k boundary)
 * @buflen: length of buffer
 * @offset: dir/file offset
 * @intparm: interruption parameter (unused)
 * @transferred: bytes transferred
 * @fsize: file size, filled on GET
 * @failaddr: failing address
 * @spare: padding
 * @fident: file name - ASCII
 *
 * Note: diag_ftp_cmd() places this structure on a zeroed page, which
 * satisfies both the 4k alignment needed for @bufaddr and the double-word
 * alignment diag_ftp_2c4() requires for the list itself.
 */
struct diag_ftp_ldfpl {
	u64 bufaddr;
	u64 buflen;
	u64 offset;
	u64 intparm;
	u64 transferred;
	u64 fsize;
	u64 failaddr;
	u64 spare;
	u8 fident[HMCDRV_FTP_FIDENT_MAX];
} __packed;

/* completed by diag_ftp_handler() when the asynchronous transfer ends */
static DECLARE_COMPLETION(diag_ftp_rx_complete);

/* DIAG_FTP_STAT_* completion status, written by diag_ftp_handler() */
static int diag_ftp_subcode;
/**
 * diag_ftp_handler() - FTP services IRQ handler
 * @extirq: external interrupt (sub-) code
 * @param32: 32-bit interruption parameter from &struct diag_ftp_ldfpl
 * @param64: unused (for 64-bit interrupt parameters)
 *
 * Stores the FTP completion status in diag_ftp_subcode and wakes the
 * waiter in diag_ftp_cmd().
 */
static void diag_ftp_handler(struct ext_code extirq,
			     unsigned int param32,
			     unsigned long param64)
{
	if ((extirq.subcode >> 8) != 8)
		return; /* not a FTP services sub-code */

	inc_irq_stat(IRQEXT_FTP);
	/* low byte of the sub-code carries the DIAG_FTP_STAT_* status */
	diag_ftp_subcode = extirq.subcode & 0xffU;
	complete(&diag_ftp_rx_complete);
}
/**
 * diag_ftp_2c4() - DIAGNOSE X'2C4' service call
 * @fpl: pointer to prepared LDFPL
 * @cmd: FTP command to be executed
 *
 * Performs a DIAGNOSE X'2C4' call with (input/output) FTP parameter list
 * @fpl and FTP function code @cmd. In case of an error the function does
 * nothing and returns an (negative) error code.
 *
 * Notes:
 * 1. This function only initiates a transfer, so the caller must wait
 *    for completion (asynchronous execution).
 * 2. The FTP parameter list @fpl must be aligned to a double-word boundary.
 * 3. fpl->bufaddr must be a real address, 4k aligned
 */
static int diag_ftp_2c4(struct diag_ftp_ldfpl *fpl,
			enum hmcdrv_ftp_cmdid cmd)
{
	int rc;

	/*
	 * If the guest lacks the privilege for DIAG X'2C4' the instruction
	 * faults; the EX_TABLE entry resumes execution at label 1, which
	 * loads the artificial DIAG_FTP_RET_EPERM code into rc.
	 */
	asm volatile(
		" diag %[addr],%[cmd],0x2c4\n"
		"0: j 2f\n"
		"1: la %[rc],%[err]\n"
		"2:\n"
		EX_TABLE(0b, 1b)
		: [rc] "=d" (rc), "+m" (*fpl)
		: [cmd] "0" (cmd), [addr] "d" (virt_to_phys(fpl)),
		  [err] "i" (DIAG_FTP_RET_EPERM)
		: "cc");

	/* translate the diagnose return code (in Ry) into -errno */
	switch (rc) {
	case DIAG_FTP_RET_OK:
		return 0;
	case DIAG_FTP_RET_EBUSY:
		return -EBUSY;
	case DIAG_FTP_RET_EPERM:
		return -EPERM;
	case DIAG_FTP_RET_EIO:
	default:
		return -EIO;
	}
}
/**
 * diag_ftp_cmd() - executes a DIAG X'2C4' FTP command, targeting a HMC
 * @ftp: pointer to FTP command specification
 * @fsize: return of file size (or NULL if undesirable)
 *
 * Attention: Notice that this function is not reentrant - so the caller
 * must ensure locking.
 *
 * Return: number of bytes read/written or a (negative) error code
 */
ssize_t diag_ftp_cmd(const struct hmcdrv_ftp_cmdspec *ftp, size_t *fsize)
{
	struct diag_ftp_ldfpl *ldfpl;
	ssize_t len;
#ifdef DEBUG
	unsigned long start_jiffies;

	pr_debug("starting DIAG X'2C4' on '%s', requesting %zd bytes\n",
		 ftp->fname, ftp->len);
	start_jiffies = jiffies;
#endif
	init_completion(&diag_ftp_rx_complete);

	/* fresh (zeroed) parameter list, allocated GFP_DMA so that the
	 * real address handed to the diagnose is low enough */
	ldfpl = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!ldfpl) {
		len = -ENOMEM;
		goto out;
	}

	len = strlcpy(ldfpl->fident, ftp->fname, sizeof(ldfpl->fident));
	if (len >= HMCDRV_FTP_FIDENT_MAX) {
		len = -EINVAL;	/* file name was truncated */
		goto out_free;
	}

	ldfpl->transferred = 0;
	ldfpl->fsize = 0;
	ldfpl->offset = ftp->ofs;
	ldfpl->buflen = ftp->len;
	ldfpl->bufaddr = virt_to_phys(ftp->buf);

	len = diag_ftp_2c4(ldfpl, ftp->id);
	if (len)
		goto out_free;

	/*
	 * There is no way to cancel the running diag X'2C4', the code
	 * needs to wait unconditionally until the transfer is complete.
	 */
	wait_for_completion(&diag_ftp_rx_complete);

#ifdef DEBUG
	pr_debug("completed DIAG X'2C4' after %lu ms\n",
		 (jiffies - start_jiffies) * 1000 / HZ);
	/* transferred/fsize are u64, so print them unsigned (%llu) */
	pr_debug("status of DIAG X'2C4' is %u, with %llu/%llu bytes\n",
		 diag_ftp_subcode, ldfpl->transferred, ldfpl->fsize);
#endif

	/* map the DIAG_FTP_STAT_* status (set by the IRQ handler) to a
	 * byte count or error code */
	switch (diag_ftp_subcode) {
	case DIAG_FTP_STAT_OK: /* success */
		len = ldfpl->transferred;
		if (fsize)
			*fsize = ldfpl->fsize;
		break;
	case DIAG_FTP_STAT_LDNPERM:
		len = -EPERM;
		break;
	case DIAG_FTP_STAT_LDRUNS:
		len = -EBUSY;
		break;
	case DIAG_FTP_STAT_LDFAIL:
		len = -ENOENT; /* no such file or media */
		break;
	default:
		len = -EIO;
		break;
	}

out_free:
	free_page((unsigned long) ldfpl);
out:
	return len;
}
/**
 * diag_ftp_startup() - enable the DIAG X'2C4' FTP service (z/VM only)
 *
 * Registers the external interrupt handler for CP service signals and
 * opens the matching subclass mask bit in control register 0.
 *
 * Return: 0 on success, else an (negative) error code
 */
int diag_ftp_startup(void)
{
	int rc = register_external_irq(EXT_IRQ_CP_SERVICE, diag_ftp_handler);

	if (rc == 0)
		ctl_set_bit(0, 63 - 22); /* service-signal subclass mask */

	return rc;
}
/**
 * diag_ftp_shutdown() - shutdown of FTP services, when running on z/VM
 *
 * Reverses diag_ftp_startup(): first mask the service-signal subclass in
 * control register 0, then drop the external interrupt handler.
 */
void diag_ftp_shutdown(void)
{
	ctl_clear_bit(0, 63 - 22);
	unregister_external_irq(EXT_IRQ_CP_SERVICE, diag_ftp_handler);
}

View file

@ -0,0 +1,21 @@
/*
* DIAGNOSE X'2C4' instruction based SE/HMC FTP Services, useable on z/VM
*
* Notice that all functions exported here are not reentrant.
* So usage should be exclusive, ensured by the caller (e.g. using a
* mutex).
*
* Copyright IBM Corp. 2013
* Author(s): Ralf Hoppe (rhoppe@de.ibm.com)
*/
#ifndef __DIAG_FTP_H__
#define __DIAG_FTP_H__

#include "hmcdrv_ftp.h"

/* enable/disable the DIAG X'2C4' FTP service (see diag_ftp.c) */
int diag_ftp_startup(void);
void diag_ftp_shutdown(void);
/* perform one FTP command; not reentrant, the caller must serialize */
ssize_t diag_ftp_cmd(const struct hmcdrv_ftp_cmdspec *ftp, size_t *fsize);

#endif	 /* __DIAG_FTP_H__ */

575
drivers/s390/char/fs3270.c Normal file
View file

@ -0,0 +1,575 @@
/*
* IBM/3270 Driver - fullscreen driver.
*
* Author(s):
* Original 3270 Code for 2.4 written by Richard Hitt (UTS Global)
* Rewritten for 2.5/2.6 by Martin Schwidefsky <schwidefsky@de.ibm.com>
* Copyright IBM Corp. 2003, 2009
*/
#include <linux/bootmem.h>
#include <linux/console.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/compat.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <asm/compat.h>
#include <asm/ccwdev.h>
#include <asm/cio.h>
#include <asm/ebcdic.h>
#include <asm/idals.h>
#include "raw3270.h"
#include "ctrlchar.h"
/* forward declaration; the function table is defined after the callbacks */
static struct raw3270_fn fs3270_fn;

/* per-open state of one fullscreen 3270 view */
struct fs3270 {
	struct raw3270_view view;
	struct pid *fs_pid;		/* Pid of controlling program. */
	int read_command;		/* ccw command to use for reads. */
	int write_command;		/* ccw command to use for writes. */
	int attention;			/* Got attention. */
	int active;			/* Fullscreen view is active. */
	struct raw3270_request *init;	/* single init request. */
	wait_queue_head_t wait;		/* Init & attention wait queue. */
	struct idal_buffer *rdbuf;	/* full-screen-deactivate buffer */
	size_t rdbuf_size;		/* size of data returned by RDBUF */
};

/* serializes open/close and the ioctl handler */
static DEFINE_MUTEX(fs3270_mutex);

/* request callback: wake whoever waits on the queue in callback_data */
static void
fs3270_wake_up(struct raw3270_request *rq, void *data)
{
	wake_up((wait_queue_head_t *) data);
}
static inline int
fs3270_working(struct fs3270 *fp)
{
	/*
	 * The fullscreen view is in working order if the view
	 * has been activated AND the initial request is finished.
	 */
	return fp->active && raw3270_request_final(fp->init);
}

/*
 * Start a request on the fullscreen view and wait for its completion.
 * Retries as long as raw3270_start() fails with -EACCES (view not
 * active); may return -ERESTARTSYS if interrupted while waiting.
 */
static int
fs3270_do_io(struct raw3270_view *view, struct raw3270_request *rq)
{
	struct fs3270 *fp;
	int rc;

	fp = (struct fs3270 *) view;
	rq->callback = fs3270_wake_up;
	rq->callback_data = &fp->wait;

	do {
		if (!fs3270_working(fp)) {
			/* Fullscreen view isn't ready yet. */
			rc = wait_event_interruptible(fp->wait,
						      fs3270_working(fp));
			if (rc != 0)
				break;
		}
		rc = raw3270_start(view, rq);
		if (rc == 0) {
			/* Started successfully. Now wait for completion. */
			wait_event(fp->wait, raw3270_request_final(rq));
		}
	} while (rc == -EACCES);
	return rc;
}
/*
 * Switch to the fullscreen view.
 */

/* init-request callback used when the screen was simply cleared */
static void
fs3270_reset_callback(struct raw3270_request *rq, void *data)
{
	struct fs3270 *fp;

	fp = (struct fs3270 *) rq->view;
	raw3270_request_reset(rq);
	wake_up(&fp->wait);
}

/*
 * Init-request callback used when a saved screen buffer was restored.
 * If the restore failed, the controlling program gets a SIGHUP.
 */
static void
fs3270_restore_callback(struct raw3270_request *rq, void *data)
{
	struct fs3270 *fp;

	fp = (struct fs3270 *) rq->view;
	if (rq->rc != 0 || rq->rescnt != 0) {
		if (fp->fs_pid)
			kill_pid(fp->fs_pid, SIGHUP, 1);
	}
	/* saved buffer consumed (or invalid) either way */
	fp->rdbuf_size = 0;
	raw3270_request_reset(rq);
	wake_up(&fp->wait);
}
/* Activate the fullscreen view: clear the screen or restore the buffer
 * previously saved by fs3270_deactivate(). */
static int
fs3270_activate(struct raw3270_view *view)
{
	struct fs3270 *fp;
	char *cp;
	int rc;

	fp = (struct fs3270 *) view;

	/* If an old init command is still running just return. */
	if (!raw3270_request_final(fp->init))
		return 0;

	if (fp->rdbuf_size == 0) {
		/* No saved buffer. Just clear the screen. */
		raw3270_request_set_cmd(fp->init, TC_EWRITEA);
		fp->init->callback = fs3270_reset_callback;
	} else {
		/* Restore fullscreen buffer saved by fs3270_deactivate. */
		raw3270_request_set_cmd(fp->init, TC_EWRITEA);
		raw3270_request_set_idal(fp->init, fp->rdbuf);
		fp->init->ccw.count = fp->rdbuf_size;
		/*
		 * Fill in the 5-byte header that fs3270_deactivate() left
		 * room for: TW_KR, then set-buffer-address to the address
		 * saved in cp[6]/cp[7], insert-cursor, and a second SBA
		 * whose address bytes are overwritten with 0x40
		 * (presumably EBCDIC blanks - see fs3270_deactivate()).
		 */
		cp = fp->rdbuf->data[0];
		cp[0] = TW_KR;
		cp[1] = TO_SBA;
		cp[2] = cp[6];
		cp[3] = cp[7];
		cp[4] = TO_IC;
		cp[5] = TO_SBA;
		cp[6] = 0x40;
		cp[7] = 0x40;
		fp->init->rescnt = 0;
		fp->init->callback = fs3270_restore_callback;
	}
	rc = fp->init->rc = raw3270_start_locked(view, fp->init);
	if (rc)
		/* start failed: run the callback by hand to clean up */
		fp->init->callback(fp->init, NULL);
	else
		fp->active = 1;
	return rc;
}
/*
 * Shutdown fullscreen view.
 */

/* callback for the RDBUF request issued by fs3270_deactivate() */
static void
fs3270_save_callback(struct raw3270_request *rq, void *data)
{
	struct fs3270 *fp;

	fp = (struct fs3270 *) rq->view;

	/* Correct idal buffer element 0 address. */
	fp->rdbuf->data[0] -= 5;
	fp->rdbuf->size += 5;

	/*
	 * If the rdbuf command failed or the idal buffer is
	 * to small for the amount of data returned by the
	 * rdbuf command, then we have no choice but to send
	 * a SIGHUP to the application.
	 */
	if (rq->rc != 0 || rq->rescnt == 0) {
		if (fp->fs_pid)
			kill_pid(fp->fs_pid, SIGHUP, 1);
		fp->rdbuf_size = 0;
	} else
		fp->rdbuf_size = fp->rdbuf->size - rq->rescnt;
	raw3270_request_reset(rq);
	wake_up(&fp->wait);
}

/* save the current screen contents so fs3270_activate() can restore them */
static void
fs3270_deactivate(struct raw3270_view *view)
{
	struct fs3270 *fp;

	fp = (struct fs3270 *) view;
	fp->active = 0;

	/* If an old init command is still running just return. */
	if (!raw3270_request_final(fp->init))
		return;

	/* Prepare read-buffer request. */
	raw3270_request_set_cmd(fp->init, TC_RDBUF);
	/*
	 * Hackish: skip first 5 bytes of the idal buffer to make
	 * room for the TW_KR/TO_SBA/<address>/<address>/TO_IC sequence
	 * in the activation command.
	 */
	fp->rdbuf->data[0] += 5;
	fp->rdbuf->size -= 5;
	raw3270_request_set_idal(fp->init, fp->rdbuf);
	fp->init->rescnt = 0;
	fp->init->callback = fs3270_save_callback;

	/* Start I/O to read in the 3270 buffer. */
	fp->init->rc = raw3270_start_locked(view, fp->init);
	if (fp->init->rc)
		/* start failed: run the callback by hand to clean up */
		fp->init->callback(fp->init, NULL);
}
/* interrupt handler for the fullscreen view (raw3270 .intv callback) */
static int
fs3270_irq(struct fs3270 *fp, struct raw3270_request *rq, struct irb *irb)
{
	/* Handle ATTN. Set indication and wake waiters for attention. */
	if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) {
		fp->attention = 1;
		wake_up(&fp->wait);
	}

	if (rq) {
		if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)
			rq->rc = -EIO;
		else
			/* Normal end. Copy residual count. */
			rq->rescnt = irb->scsw.cmd.count;
	}
	return RAW3270_IO_DONE;
}
/*
 * Process reads from fullscreen 3270.
 * Waits for the device to raise ATTN, then reads up to @count bytes
 * into an idal buffer and copies them to user space.
 */
static ssize_t
fs3270_read(struct file *filp, char __user *data, size_t count, loff_t *off)
{
	struct fs3270 *fp;
	struct raw3270_request *rq;
	struct idal_buffer *ib;
	ssize_t rc;

	/* limit reads to 65535 bytes - presumably the 16-bit CCW count */
	if (count == 0 || count > 65535)
		return -EINVAL;
	fp = filp->private_data;
	if (!fp)
		return -ENODEV;
	ib = idal_buffer_alloc(count, 0);
	if (IS_ERR(ib))
		return -ENOMEM;
	rq = raw3270_request_alloc(0);
	if (!IS_ERR(rq)) {
		/* default read ccw: 6 if a write command was configured,
		 * else 2 (raw channel command codes - TODO confirm which
		 * 3270 read commands these are) */
		if (fp->read_command == 0 && fp->write_command != 0)
			fp->read_command = 6;
		raw3270_request_set_cmd(rq, fp->read_command ? : 2);
		raw3270_request_set_idal(rq, ib);
		/* block (interruptibly) until the device raised ATTN */
		rc = wait_event_interruptible(fp->wait, fp->attention);
		fp->attention = 0;
		if (rc == 0) {
			rc = fs3270_do_io(&fp->view, rq);
			if (rc == 0) {
				/* rescnt = residual; count becomes bytes read */
				count -= rq->rescnt;
				if (idal_buffer_to_user(ib, data, count) != 0)
					rc = -EFAULT;
				else
					rc = count;
			}
		}
		raw3270_request_free(rq);
	} else
		rc = PTR_ERR(rq);
	idal_buffer_free(ib);
	return rc;
}
/*
 * Process writes to fullscreen 3270.
 * Copies @count bytes from user space into an idal buffer and starts
 * the configured write ccw.
 */
static ssize_t
fs3270_write(struct file *filp, const char __user *data, size_t count, loff_t *off)
{
	struct fs3270 *fp;
	struct raw3270_request *rq;
	struct idal_buffer *ib;
	int write_command;
	ssize_t rc;

	fp = filp->private_data;
	if (!fp)
		return -ENODEV;
	ib = idal_buffer_alloc(count, 0);
	if (IS_ERR(ib))
		return -ENOMEM;
	rq = raw3270_request_alloc(0);
	if (!IS_ERR(rq)) {
		if (idal_buffer_from_user(ib, data, count) == 0) {
			/* default write ccw is 1; 5 is remapped to 13
			 * (raw channel command codes - TODO confirm) */
			write_command = fp->write_command ? : 1;
			if (write_command == 5)
				write_command = 13;
			raw3270_request_set_cmd(rq, write_command);
			raw3270_request_set_idal(rq, ib);
			rc = fs3270_do_io(&fp->view, rq);
			if (rc == 0)
				/* return number of bytes actually written */
				rc = count - rq->rescnt;
		} else
			rc = -EFAULT;
		raw3270_request_free(rq);
	} else
		rc = PTR_ERR(rq);
	idal_buffer_free(ib);
	return rc;
}
/*
 * process ioctl commands for the tube driver
 *
 * TUBICMD/TUBOCMD set the ccw command codes used by read()/write();
 * TUBGETI/TUBGETO read them back; TUBGETMOD reports terminal geometry.
 * Serves as both unlocked_ioctl and compat_ioctl (see fs3270_fops).
 */
static long
fs3270_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	char __user *argp;
	struct fs3270 *fp;
	struct raw3270_iocb iocb;
	int rc;

	fp = filp->private_data;
	if (!fp)
		return -ENODEV;
	/* 32-bit callers pass a compat pointer that must be converted */
	if (is_compat_task())
		argp = compat_ptr(arg);
	else
		argp = (char __user *)arg;
	rc = 0;
	mutex_lock(&fs3270_mutex);
	switch (cmd) {
	case TUBICMD:
		fp->read_command = arg;
		break;
	case TUBOCMD:
		fp->write_command = arg;
		break;
	case TUBGETI:
		rc = put_user(fp->read_command, argp);
		break;
	case TUBGETO:
		rc = put_user(fp->write_command, argp);
		break;
	case TUBGETMOD:
		iocb.model = fp->view.model;
		iocb.line_cnt = fp->view.rows;
		iocb.col_cnt = fp->view.cols;
		iocb.pf_cnt = 24;
		iocb.re_cnt = 20;
		iocb.map = 0;
		if (copy_to_user(argp, &iocb, sizeof(struct raw3270_iocb)))
			rc = -EFAULT;
		break;
	}
	mutex_unlock(&fs3270_mutex);
	return rc;
}
/*
 * Allocate fs3270 structure.
 *
 * Returns the new view or ERR_PTR(-ENOMEM) if either the structure or
 * its embedded init request cannot be allocated.
 */
static struct fs3270 *
fs3270_alloc_view(void)
{
	struct fs3270 *fp;

	/* sizeof(*fp) instead of sizeof(struct fs3270): stays correct if
	 * the type of fp ever changes (kernel idiom) */
	fp = kzalloc(sizeof(*fp), GFP_KERNEL);
	if (!fp)
		return ERR_PTR(-ENOMEM);
	fp->init = raw3270_request_alloc(0);
	if (IS_ERR(fp->init)) {
		kfree(fp);
		return ERR_PTR(-ENOMEM);
	}
	return fp;
}
/*
 * Free fs3270 structure.
 *
 * Releases the RDBUF idal buffer (if any), the init request and the
 * structure itself. Counterpart of fs3270_alloc_view().
 */
static void
fs3270_free_view(struct raw3270_view *view)
{
	struct fs3270 *fp;

	fp = (struct fs3270 *) view;
	if (fp->rdbuf)
		idal_buffer_free(fp->rdbuf);
	/* use fp consistently instead of re-casting view for each member */
	raw3270_request_free(fp->init);
	kfree(fp);
}
/*
 * Unlink fs3270 data structure from filp.
 */
static void
fs3270_release(struct raw3270_view *view)
{
	struct fs3270 *fp;

	fp = (struct fs3270 *) view;
	/* notify the controlling program that the view went away */
	if (fp->fs_pid)
		kill_pid(fp->fs_pid, SIGHUP, 1);
}

/* View to a 3270 device. Can be console, tty or fullscreen. */
static struct raw3270_fn fs3270_fn = {
	.activate = fs3270_activate,
	.deactivate = fs3270_deactivate,
	.intv = (void *) fs3270_irq,
	.release = fs3270_release,
	.free = fs3270_free_view
};
/*
 * This routine is called whenever a 3270 fullscreen device is opened.
 * Only one fullscreen view per device is allowed (-EBUSY otherwise).
 */
static int
fs3270_open(struct inode *inode, struct file *filp)
{
	struct fs3270 *fp;
	struct idal_buffer *ib;
	int minor, rc = 0;

	if (imajor(file_inode(filp)) != IBM_FS3270_MAJOR)
		return -ENODEV;
	minor = iminor(file_inode(filp));
	/* Check for minor 0 multiplexer. */
	if (minor == 0) {
		struct tty_struct *tty = get_current_tty();
		/* minor 0 resolves to the 3270 tty of the calling process */
		if (!tty || tty->driver->major != IBM_TTY3270_MAJOR) {
			tty_kref_put(tty);
			return -ENODEV;
		}
		minor = tty->index;
		tty_kref_put(tty);
	}
	mutex_lock(&fs3270_mutex);
	/* Check if some other program is already using fullscreen mode. */
	fp = (struct fs3270 *) raw3270_find_view(&fs3270_fn, minor);
	if (!IS_ERR(fp)) {
		raw3270_put_view(&fp->view);
		rc = -EBUSY;
		goto out;
	}
	/* Allocate fullscreen view structure. */
	fp = fs3270_alloc_view();
	if (IS_ERR(fp)) {
		rc = PTR_ERR(fp);
		goto out;
	}
	init_waitqueue_head(&fp->wait);
	fp->fs_pid = get_pid(task_pid(current));
	rc = raw3270_add_view(&fp->view, &fs3270_fn, minor);
	if (rc) {
		fs3270_free_view(&fp->view);
		goto out;
	}

	/* Allocate idal-buffer. */
	/* 2 bytes per character cell plus the 5 header bytes reserved by
	 * fs3270_deactivate() */
	ib = idal_buffer_alloc(2*fp->view.rows*fp->view.cols + 5, 0);
	if (IS_ERR(ib)) {
		raw3270_put_view(&fp->view);
		raw3270_del_view(&fp->view);
		rc = PTR_ERR(ib);
		goto out;
	}
	fp->rdbuf = ib;

	rc = raw3270_activate_view(&fp->view);
	if (rc) {
		raw3270_put_view(&fp->view);
		raw3270_del_view(&fp->view);
		goto out;
	}
	nonseekable_open(inode, filp);
	filp->private_data = fp;
out:
	mutex_unlock(&fs3270_mutex);
	return rc;
}
/*
 * This routine is called when the 3270 tty is closed. We wait
 * for the remaining request to be completed. Then we clean up.
 */
static int
fs3270_close(struct inode *inode, struct file *filp)
{
	struct fs3270 *fp;

	fp = filp->private_data;
	filp->private_data = NULL;
	if (fp) {
		put_pid(fp->fs_pid);
		fp->fs_pid = NULL;
		raw3270_reset(&fp->view);
		/* drop the open reference and unregister the view */
		raw3270_put_view(&fp->view);
		raw3270_del_view(&fp->view);
	}
	return 0;
}

/* character device operations of the fullscreen driver */
static const struct file_operations fs3270_fops = {
	.owner		 = THIS_MODULE,		/* owner */
	.read		 = fs3270_read,		/* read */
	.write		 = fs3270_write,	/* write */
	.unlocked_ioctl	 = fs3270_ioctl,	/* ioctl */
	.compat_ioctl	 = fs3270_ioctl,	/* ioctl */
	.open		 = fs3270_open,		/* open */
	.release	 = fs3270_close,	/* release */
	.llseek		= no_llseek,
};
/* raw3270 notifier: create the /dev/3270/tub<minor> node for a new device */
static void fs3270_create_cb(int minor)
{
	__register_chrdev(IBM_FS3270_MAJOR, minor, 1, "tub", &fs3270_fops);
	device_create(class3270, NULL, MKDEV(IBM_FS3270_MAJOR, minor),
		      NULL, "3270/tub%d", minor);
}

/* raw3270 notifier: remove the device node again */
static void fs3270_destroy_cb(int minor)
{
	device_destroy(class3270, MKDEV(IBM_FS3270_MAJOR, minor));
	__unregister_chrdev(IBM_FS3270_MAJOR, minor, 1, "tub");
}

static struct raw3270_notifier fs3270_notifier =
{
	.create = fs3270_create_cb,
	.destroy = fs3270_destroy_cb,
};
/*
 * 3270 fullscreen driver initialization.
 */
static int __init
fs3270_init(void)
{
	int rc;

	/* register the minor 0 multiplexer node; per-device minors are
	 * added later through the raw3270 notifier callbacks above */
	rc = __register_chrdev(IBM_FS3270_MAJOR, 0, 1, "fs3270", &fs3270_fops);
	if (rc)
		return rc;
	device_create(class3270, NULL, MKDEV(IBM_FS3270_MAJOR, 0),
		      NULL, "3270/tub");
	raw3270_register_notifier(&fs3270_notifier);
	return 0;
}

static void __exit
fs3270_exit(void)
{
	/* reverse order of fs3270_init() */
	raw3270_unregister_notifier(&fs3270_notifier);
	device_destroy(class3270, MKDEV(IBM_FS3270_MAJOR, 0));
	__unregister_chrdev(IBM_FS3270_MAJOR, 0, 1, "fs3270");
}

MODULE_LICENSE("GPL");
MODULE_ALIAS_CHARDEV_MAJOR(IBM_FS3270_MAJOR);

module_init(fs3270_init);
module_exit(fs3270_exit);

View file

@ -0,0 +1,252 @@
/*
* SE/HMC Drive (Read) Cache Functions
*
* Copyright IBM Corp. 2013
* Author(s): Ralf Hoppe (rhoppe@de.ibm.com)
*
*/
#define KMSG_COMPONENT "hmcdrv"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/jiffies.h>
#include "hmcdrv_ftp.h"
#include "hmcdrv_cache.h"
#define HMCDRV_CACHE_TIMEOUT 30 /* aging timeout in seconds */

/**
 * struct hmcdrv_cache_entry - file cache (only used on read/dir)
 * @id: FTP command ID
 * @content: kernel-space buffer, 4k aligned
 * @len: size of @content cache (0 if caching disabled)
 * @ofs: start of content within file (-1 if no cached content)
 * @fname: file name
 * @fsize: file size
 * @timeout: cache timeout in jiffies
 *
 * Notice that the first three members (id, fname, fsize) are cached on all
 * read/dir requests. But content is cached only under some preconditions.
 * Uncached content is signalled by a negative value of @ofs.
 */
struct hmcdrv_cache_entry {
	enum hmcdrv_ftp_cmdid id;
	char fname[HMCDRV_FTP_FIDENT_MAX];
	size_t fsize;
	loff_t ofs;
	unsigned long timeout;
	void *content;
	size_t len;
};

static int hmcdrv_cache_order; /* cache allocated page order */

/* the single cache slot - one file's metadata (and maybe content) at a
 * time; initial state: nothing cached, content caching disabled */
static struct hmcdrv_cache_entry hmcdrv_cache_file = {
	.fsize = SIZE_MAX,
	.ofs = -1,
	.len = 0,
	.fname = {'\0'}
};
/**
 * hmcdrv_cache_get() - looks for file data/content in read cache
 * @ftp: pointer to FTP command specification
 *
 * Return: number of bytes read from cache or a negative number if nothing
 * in content cache (for the file/cmd specified in @ftp)
 */
static ssize_t hmcdrv_cache_get(const struct hmcdrv_ftp_cmdspec *ftp)
{
	loff_t pos; /* position in cache (signed) */
	ssize_t len;

	/* cache miss if command or file name differ */
	if ((ftp->id != hmcdrv_cache_file.id) ||
	    strcmp(hmcdrv_cache_file.fname, ftp->fname))
		return -1;

	if (ftp->ofs >= hmcdrv_cache_file.fsize) /* EOF ? */
		return 0;

	if ((hmcdrv_cache_file.ofs < 0) || /* has content? */
	    time_after(jiffies, hmcdrv_cache_file.timeout))
		return -1;

	/* there seems to be cached content - calculate the maximum number
	 * of bytes that can be returned (regarding file size and offset)
	 */
	len = hmcdrv_cache_file.fsize - ftp->ofs;

	if (len > ftp->len)
		len = ftp->len;

	/* check if the requested chunk falls into our cache (which starts
	 * at offset 'hmcdrv_cache_file.ofs' in the file of interest)
	 */
	pos = ftp->ofs - hmcdrv_cache_file.ofs;

	if ((pos >= 0) &&
	    ((pos + len) <= hmcdrv_cache_file.len)) {

		memcpy(ftp->buf,
		       hmcdrv_cache_file.content + pos,
		       len);
		pr_debug("using cached content of '%s', returning %zd/%zd bytes\n",
			 hmcdrv_cache_file.fname, len,
			 hmcdrv_cache_file.fsize);

		return len;
	}

	return -1;
}
/**
 * hmcdrv_cache_do() - do a HMC drive CD/DVD transfer with cache update
 * @ftp: pointer to FTP command specification
 * @func: FTP transfer function to be used
 *
 * Return: number of bytes read/written or a (negative) error code
 */
static ssize_t hmcdrv_cache_do(const struct hmcdrv_ftp_cmdspec *ftp,
			       hmcdrv_cache_ftpfunc func)
{
	ssize_t len;

	/* only cache content if the read/dir cache really exists
	 * (hmcdrv_cache_file.len > 0), is large enough to handle the
	 * request (hmcdrv_cache_file.len >= ftp->len) and there is a need
	 * to do so (ftp->len > 0)
	 */
	if ((ftp->len > 0) && (hmcdrv_cache_file.len >= ftp->len)) {

		/* because the cache is not located at ftp->buf, we have to
		 * assemble a new HMC drive FTP cmd specification (pointing
		 * to our cache, and using the increased size)
		 */
		struct hmcdrv_ftp_cmdspec cftp = *ftp; /* make a copy */
		cftp.buf = hmcdrv_cache_file.content;  /* and update */
		cftp.len = hmcdrv_cache_file.len;      /* buffer data */

		len = func(&cftp, &hmcdrv_cache_file.fsize); /* now do */

		if (len > 0) {
			pr_debug("caching %zd bytes content for '%s'\n",
				 len, ftp->fname);

			/* return at most the originally requested length */
			if (len > ftp->len)
				len = ftp->len;

			hmcdrv_cache_file.ofs = ftp->ofs;
			hmcdrv_cache_file.timeout = jiffies +
				HMCDRV_CACHE_TIMEOUT * HZ;
			memcpy(ftp->buf, hmcdrv_cache_file.content, len);
		}
	} else {
		/* transfer directly into the caller's buffer, bypassing
		 * (and invalidating) the content cache */
		len = func(ftp, &hmcdrv_cache_file.fsize);
		hmcdrv_cache_file.ofs = -1; /* invalidate content */
	}

	if (len > 0) {
		/* cache some file info (FTP command, file name and file
		 * size) unconditionally
		 */
		strlcpy(hmcdrv_cache_file.fname, ftp->fname,
			HMCDRV_FTP_FIDENT_MAX);
		hmcdrv_cache_file.id = ftp->id;
		pr_debug("caching cmd %d, file size %zu for '%s'\n",
			 ftp->id, hmcdrv_cache_file.fsize, ftp->fname);
	}

	return len;
}
/**
 * hmcdrv_cache_cmd() - perform a cached HMC drive CD/DVD transfer
 * @ftp: pointer to FTP command specification
 * @func: FTP transfer function to be used
 *
 * Attention: Notice that this function is not reentrant - so the caller
 * must ensure exclusive execution.
 *
 * Return: number of bytes read/written or a (negative) error code
 */
ssize_t hmcdrv_cache_cmd(const struct hmcdrv_ftp_cmdspec *ftp,
			 hmcdrv_cache_ftpfunc func)
{
	ssize_t len;

	/* only read-type commands go through the cache */
	if ((ftp->id == HMCDRV_FTP_DIR) || /* read cache */
	    (ftp->id == HMCDRV_FTP_NLIST) ||
	    (ftp->id == HMCDRV_FTP_GET)) {
		len = hmcdrv_cache_get(ftp);

		if (len >= 0) /* got it from cache ? */
			return len; /* yes */

		len = hmcdrv_cache_do(ftp, func);

		if (len >= 0)
			return len;
	} else {
		len = func(ftp, NULL); /* simply do original command */
	}

	/* invalidate the (read) cache in case there was a write operation
	 * or an error on read/dir
	 */
	hmcdrv_cache_file.id = HMCDRV_FTP_NOOP;
	hmcdrv_cache_file.fsize = LLONG_MAX;
	hmcdrv_cache_file.ofs = -1;

	return len;
}
/**
 * hmcdrv_cache_startup() - startup of HMC drive cache
 * @cachesize: cache size (in bytes; 0 disables content caching)
 *
 * Return: 0 on success, else a (negative) error code
 */
int hmcdrv_cache_startup(size_t cachesize)
{
	if (cachesize > 0) { /* perform caching ? */
		/* allocate whole pages (buddy allocator order) */
		hmcdrv_cache_order = get_order(cachesize);
		hmcdrv_cache_file.content =
			(void *) __get_free_pages(GFP_KERNEL | GFP_DMA,
						  hmcdrv_cache_order);

		if (!hmcdrv_cache_file.content) {
			pr_err("Allocating the requested cache size of %zu bytes failed\n",
			       cachesize);
			return -ENOMEM;
		}

		pr_debug("content cache enabled, size is %zu bytes\n",
			 cachesize);
	}

	hmcdrv_cache_file.len = cachesize;
	return 0;
}
/**
 * hmcdrv_cache_shutdown() - shutdown of HMC drive cache
 *
 * Invalidates all cached file information, disables content caching and
 * releases the cache pages allocated by hmcdrv_cache_startup().
 */
void hmcdrv_cache_shutdown(void)
{
	void *buf = hmcdrv_cache_file.content;

	/* forget everything we cached about the last file */
	hmcdrv_cache_file.id = HMCDRV_FTP_NOOP;
	hmcdrv_cache_file.fsize = LLONG_MAX;
	hmcdrv_cache_file.ofs = -1;
	hmcdrv_cache_file.len = 0; /* no cache */

	if (buf) {
		hmcdrv_cache_file.content = NULL;
		free_pages((unsigned long) buf, hmcdrv_cache_order);
	}
}

View file

@ -0,0 +1,24 @@
/*
* SE/HMC Drive (Read) Cache Functions
*
* Copyright IBM Corp. 2013
* Author(s): Ralf Hoppe (rhoppe@de.ibm.com)
*/
#ifndef __HMCDRV_CACHE_H__
#define __HMCDRV_CACHE_H__

#include <linux/mmzone.h>
#include "hmcdrv_ftp.h"

/* default cache size: half of the largest buddy-allocator chunk */
#define HMCDRV_CACHE_SIZE_DFLT (MAX_ORDER_NR_PAGES * PAGE_SIZE / 2UL)

/* FTP transfer backend invoked on a cache miss; returns bytes or -errno */
typedef ssize_t (*hmcdrv_cache_ftpfunc)(const struct hmcdrv_ftp_cmdspec *ftp,
					size_t *fsize);

ssize_t hmcdrv_cache_cmd(const struct hmcdrv_ftp_cmdspec *ftp,
			 hmcdrv_cache_ftpfunc func);
int hmcdrv_cache_startup(size_t cachesize);
void hmcdrv_cache_shutdown(void);

#endif	 /* __HMCDRV_CACHE_H__ */

View file

@ -0,0 +1,370 @@
/*
* HMC Drive CD/DVD Device
*
* Copyright IBM Corp. 2013
* Author(s): Ralf Hoppe (rhoppe@de.ibm.com)
*
* This file provides a Linux "misc" character device for access to an
* assigned HMC drive CD/DVD-ROM. It works as follows: First create the
* device by calling hmcdrv_dev_init(). After open() a lseek(fd, 0,
* SEEK_END) indicates that a new FTP command follows (not needed on the
* first command after open). Then write() the FTP command ASCII string
* to it, e.g. "dir /" or "nls <directory>" or "get <filename>". At the
* end read() the response.
*/
#define KMSG_COMPONENT "hmcdrv"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/miscdevice.h>
#include <linux/device.h>
#include <linux/capability.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include "hmcdrv_dev.h"
#include "hmcdrv_ftp.h"
/* If the following macro is defined, then the HMC device creates it's own
* separated device class (and dynamically assigns a major number). If not
* defined then the HMC device is assigned to the "misc" class devices.
*
#define HMCDRV_DEV_CLASS "hmcftp"
*/
#define HMCDRV_DEV_NAME  "hmcdrv"

#define HMCDRV_DEV_BUSY_DELAY	 500 /* delay between -EBUSY trials in ms */
#define HMCDRV_DEV_BUSY_RETRIES  3   /* number of retries on -EBUSY */

/* device-node bookkeeping: a real cdev with its own class, or (default)
 * a "misc" device - selected by HMCDRV_DEV_CLASS above */
struct hmcdrv_dev_node {

#ifdef HMCDRV_DEV_CLASS
	struct cdev dev; /* character device structure */
	umode_t mode;	 /* mode of device node (unused, zero) */
#else
	struct miscdevice dev; /* "misc" device structure */
#endif

};

/* file operation handlers, defined further below */
static int hmcdrv_dev_open(struct inode *inode, struct file *fp);
static int hmcdrv_dev_release(struct inode *inode, struct file *fp);
static loff_t hmcdrv_dev_seek(struct file *fp, loff_t pos, int whence);
static ssize_t hmcdrv_dev_read(struct file *fp, char __user *ubuf,
			       size_t len, loff_t *pos);
static ssize_t hmcdrv_dev_write(struct file *fp, const char __user *ubuf,
				size_t len, loff_t *pos);
static ssize_t hmcdrv_dev_transfer(char __kernel *cmd, loff_t offset,
				   char __user *buf, size_t len);

/*
 * device operations
 */
static const struct file_operations hmcdrv_dev_fops = {
	.open = hmcdrv_dev_open,
	.llseek = hmcdrv_dev_seek,
	.release = hmcdrv_dev_release,
	.read = hmcdrv_dev_read,
	.write = hmcdrv_dev_write,
};

static struct hmcdrv_dev_node hmcdrv_dev; /* HMC device struct (static) */

#ifdef HMCDRV_DEV_CLASS

static struct class *hmcdrv_dev_class; /* device class pointer */
static dev_t hmcdrv_dev_no; /* device number (major/minor) */
/**
 * hmcdrv_dev_name() - provides a naming hint for a device node in /dev
 * @dev: device for which the naming/mode hint is requested
 * @mode: file mode for device node created in /dev
 *
 * See: devtmpfs.c, function devtmpfs_create_node()
 *
 * Return: recommended device file name in /dev (kasprintf'ed copy of the
 * kernel device name), or %NULL if the device has no name
 */
static char *hmcdrv_dev_name(struct device *dev, umode_t *mode)
{
	char *nodename = NULL;
	const char *devname = dev_name(dev); /* kernel device name */

	if (devname)
		nodename = kasprintf(GFP_KERNEL, "%s", devname);

	/* on device destroy (rmmod) the mode pointer may be NULL
	 */
	if (mode)
		*mode = hmcdrv_dev.mode;

	return nodename;
}

#endif	 /* HMCDRV_DEV_CLASS */
/*
 * open()
 *
 * Rejects non-blocking and read-only opens, pins the module and starts
 * up the FTP backend.  Returns 0 on success, else a negative error code.
 */
static int hmcdrv_dev_open(struct inode *inode, struct file *fp)
{
	int rc;

	/* check for non-blocking access, which is really unsupported
	 */
	if (fp->f_flags & O_NONBLOCK)
		return -EINVAL;

	/* Because it makes no sense to open this device read-only (then a
	 * FTP command cannot be emitted), we respond with an error.
	 */
	if ((fp->f_flags & O_ACCMODE) == O_RDONLY)
		return -EINVAL;

	/* prevent unloading this module as long as anyone holds the
	 * device file open - so increment the reference count here
	 */
	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	fp->private_data = NULL; /* no command yet */
	rc = hmcdrv_ftp_startup();
	if (rc)
		module_put(THIS_MODULE); /* undo the reference on failure */

	pr_debug("open file '/dev/%s' with return code %d\n",
		 fp->f_dentry->d_name.name, rc);
	return rc;
}
/*
 * release()
 *
 * Frees the cached FTP command string, shuts down the FTP backend and
 * drops the module reference taken in open().
 */
static int hmcdrv_dev_release(struct inode *inode, struct file *fp)
{
	pr_debug("closing file '/dev/%s'\n", fp->f_dentry->d_name.name);
	kfree(fp->private_data);
	fp->private_data = NULL;
	hmcdrv_ftp_shutdown();
	module_put(THIS_MODULE);
	return 0;
}
/*
 * lseek()
 *
 * Standard SEEK_SET/SEEK_CUR semantics; SEEK_END is (ab)used as a marker
 * for "absolute seek plus drop the stored FTP command", so the next
 * write() is interpreted as a new command.  Returns the new position or
 * -EINVAL for negative positions and unsupported whence values.
 */
static loff_t hmcdrv_dev_seek(struct file *fp, loff_t pos, int whence)
{
	switch (whence) {
	case SEEK_CUR: /* relative to current file position */
		pos += fp->f_pos; /* new position stored in 'pos' */
		break;

	case SEEK_SET: /* absolute (relative to beginning of file) */
		break; /* SEEK_SET */

		/* We use SEEK_END as a special indicator for a SEEK_SET
		 * (set absolute position), combined with a FTP command
		 * clear.
		 */
	case SEEK_END:
		kfree(fp->private_data); /* kfree(NULL) is a no-op */
		fp->private_data = NULL;
		break; /* SEEK_END */

	default: /* SEEK_DATA, SEEK_HOLE: unsupported */
		return -EINVAL;
	}

	if (pos < 0)
		return -EINVAL;

	if (fp->f_pos != pos)
		++fp->f_version;

	fp->f_pos = pos;
	return pos;
}
/*
 * transfer (helper function)
 *
 * Wraps hmcdrv_ftp_cmd(), retrying up to HMCDRV_DEV_BUSY_RETRIES times
 * (with HMCDRV_DEV_BUSY_DELAY ms pauses) while the backend reports
 * -EBUSY.  Returns the byte count or a negative error code.
 */
static ssize_t hmcdrv_dev_transfer(char __kernel *cmd, loff_t offset,
				   char __user *buf, size_t len)
{
	ssize_t retlen;
	unsigned trials = HMCDRV_DEV_BUSY_RETRIES;

	do {
		retlen = hmcdrv_ftp_cmd(cmd, offset, buf, len);

		if (retlen != -EBUSY)
			break;

		msleep(HMCDRV_DEV_BUSY_DELAY);

	} while (--trials > 0);

	return retlen;
}
/*
 * read()
 *
 * Executes the FTP command stored by a preceding write() and copies the
 * response to user space; advances *pos by the number of bytes returned.
 * Fails with -EBADF if the file is write-only or no command was written.
 */
static ssize_t hmcdrv_dev_read(struct file *fp, char __user *ubuf,
			       size_t len, loff_t *pos)
{
	ssize_t retlen;

	if (((fp->f_flags & O_ACCMODE) == O_WRONLY) ||
	    (fp->private_data == NULL)) { /* no FTP cmd defined ? */
		return -EBADF;
	}

	retlen = hmcdrv_dev_transfer((char *) fp->private_data,
				     *pos, ubuf, len);

	pr_debug("read from file '/dev/%s' at %lld returns %zd/%zu\n",
		 fp->f_dentry->d_name.name, (long long) *pos, retlen, len);

	if (retlen > 0)
		*pos += retlen;

	return retlen;
}
/*
 * write()
 *
 * The first write() after open()/SEEK_END stores the FTP command string
 * (kept in fp->private_data, NUL-terminated).  Subsequent writes pass
 * their data to the stored command ("put"/"append") via the transfer
 * helper and advance *pos.
 */
static ssize_t hmcdrv_dev_write(struct file *fp, const char __user *ubuf,
				size_t len, loff_t *pos)
{
	ssize_t retlen;

	pr_debug("writing file '/dev/%s' at pos. %lld with length %zd\n",
		 fp->f_dentry->d_name.name, (long long) *pos, len);

	if (!fp->private_data) { /* first expect a cmd write */
		fp->private_data = kmalloc(len + 1, GFP_KERNEL);

		if (!fp->private_data)
			return -ENOMEM;

		if (!copy_from_user(fp->private_data, ubuf, len)) {
			((char *)fp->private_data)[len] = '\0';
			return len; /* command consumed, no transfer yet */
		}

		kfree(fp->private_data);
		fp->private_data = NULL;
		return -EFAULT;
	}

	retlen = hmcdrv_dev_transfer((char *) fp->private_data,
				     *pos, (char __user *) ubuf, len);
	if (retlen > 0)
		*pos += retlen;

	pr_debug("write to file '/dev/%s' returned %zd\n",
		 fp->f_dentry->d_name.name, retlen);

	return retlen;
}
/**
 * hmcdrv_dev_init() - creates a HMC drive CD/DVD device
 *
 * This function creates a HMC drive CD/DVD kernel device and an associated
 * device under /dev, using a dynamically allocated major number.
 *
 * With HMCDRV_DEV_CLASS defined, a dedicated char device plus its own
 * device class is registered (error paths unwind via the goto labels
 * below); otherwise a "misc" device is registered.
 *
 * Return: 0 on success, else an error code.
 */
int hmcdrv_dev_init(void)
{
	int rc;

#ifdef HMCDRV_DEV_CLASS
	struct device *dev;

	rc = alloc_chrdev_region(&hmcdrv_dev_no, 0, 1, HMCDRV_DEV_NAME);

	if (rc)
		goto out_err;

	cdev_init(&hmcdrv_dev.dev, &hmcdrv_dev_fops);
	hmcdrv_dev.dev.owner = THIS_MODULE;
	rc = cdev_add(&hmcdrv_dev.dev, hmcdrv_dev_no, 1);

	if (rc)
		goto out_unreg;

	/* At this point the character device exists in the kernel (see
	 * /proc/devices), but not under /dev nor /sys/devices/virtual. So
	 * we have to create an associated class (see /sys/class).
	 */
	hmcdrv_dev_class = class_create(THIS_MODULE, HMCDRV_DEV_CLASS);

	if (IS_ERR(hmcdrv_dev_class)) {
		rc = PTR_ERR(hmcdrv_dev_class);
		goto out_devdel;
	}

	/* Finally a device node in /dev has to be established (as 'mkdev'
	 * does from the command line). Notice that assignment of a device
	 * node name/mode function is optional (only for mode != 0600).
	 */
	hmcdrv_dev.mode = 0; /* "unset" */
	hmcdrv_dev_class->devnode = hmcdrv_dev_name;

	dev = device_create(hmcdrv_dev_class, NULL, hmcdrv_dev_no, NULL,
			    "%s", HMCDRV_DEV_NAME);
	if (!IS_ERR(dev))
		return 0;

	rc = PTR_ERR(dev);
	class_destroy(hmcdrv_dev_class);
	hmcdrv_dev_class = NULL;

out_devdel:
	cdev_del(&hmcdrv_dev.dev);

out_unreg:
	unregister_chrdev_region(hmcdrv_dev_no, 1);

out_err:

#else  /* !HMCDRV_DEV_CLASS */
	hmcdrv_dev.dev.minor = MISC_DYNAMIC_MINOR;
	hmcdrv_dev.dev.name = HMCDRV_DEV_NAME;
	hmcdrv_dev.dev.fops = &hmcdrv_dev_fops;
	hmcdrv_dev.dev.mode = 0; /* finally produces 0600 */
	rc = misc_register(&hmcdrv_dev.dev);
#endif	/* HMCDRV_DEV_CLASS */

	return rc;
}
/**
 * hmcdrv_dev_exit() - destroys a HMC drive CD/DVD device
 *
 * Tears down whatever hmcdrv_dev_init() created, in reverse order.
 */
void hmcdrv_dev_exit(void)
{
#ifdef HMCDRV_DEV_CLASS
	/* hmcdrv_dev_class may be NULL or an ERR_PTR if init failed */
	if (!IS_ERR_OR_NULL(hmcdrv_dev_class)) {
		device_destroy(hmcdrv_dev_class, hmcdrv_dev_no);
		class_destroy(hmcdrv_dev_class);
	}

	cdev_del(&hmcdrv_dev.dev);
	unregister_chrdev_region(hmcdrv_dev_no, 1);
#else  /* !HMCDRV_DEV_CLASS */
	misc_deregister(&hmcdrv_dev.dev);
#endif	/* HMCDRV_DEV_CLASS */
}

View file

@ -0,0 +1,14 @@
/*
 * SE/HMC Drive FTP Device
 *
 * Copyright IBM Corp. 2013
 * Author(s): Ralf Hoppe (rhoppe@de.ibm.com)
 */
#ifndef __HMCDRV_DEV_H__
#define __HMCDRV_DEV_H__

/* create / destroy the HMC drive CD/DVD character device */
int hmcdrv_dev_init(void);
void hmcdrv_dev_exit(void);

#endif	 /* __HMCDRV_DEV_H__ */

View file

@ -0,0 +1,343 @@
/*
* HMC Drive FTP Services
*
* Copyright IBM Corp. 2013
* Author(s): Ralf Hoppe (rhoppe@de.ibm.com)
*/
#define KMSG_COMPONENT "hmcdrv"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/export.h>
#include <linux/ctype.h>
#include <linux/crc16.h>
#include "hmcdrv_ftp.h"
#include "hmcdrv_cache.h"
#include "sclp_ftp.h"
#include "diag_ftp.h"
/**
 * struct hmcdrv_ftp_ops - HMC drive FTP operations
 * @startup: startup function
 * @shutdown: shutdown function
 * @transfer: FTP transfer function
 */
struct hmcdrv_ftp_ops {
	int (*startup)(void);
	void (*shutdown)(void);
	ssize_t (*transfer)(const struct hmcdrv_ftp_cmdspec *ftp,
			    size_t *fsize);
};

static enum hmcdrv_ftp_cmdid hmcdrv_ftp_cmd_getid(const char *cmd, int len);
static int hmcdrv_ftp_parse(char *cmd, struct hmcdrv_ftp_cmdspec *ftp);

static struct hmcdrv_ftp_ops *hmcdrv_ftp_funcs; /* current operations */
static DEFINE_MUTEX(hmcdrv_ftp_mutex); /* mutex for hmcdrv_ftp_funcs */
static unsigned hmcdrv_ftp_refcnt; /* start/shutdown reference counter */
/**
 * hmcdrv_ftp_cmd_getid() - determine FTP command ID from a command string
 * @cmd: FTP command string (NOT zero-terminated)
 * @len: length of FTP command string in @cmd
 *
 * The lookup is a minimal perfect hash: CRC-16 of the command modulo the
 * (prime) table size yields the table slot directly, then a strncmp()
 * confirms the match.
 *
 * Return: the command ID, or HMCDRV_FTP_NOOP if @cmd is empty or unknown
 */
static enum hmcdrv_ftp_cmdid hmcdrv_ftp_cmd_getid(const char *cmd, int len)
{
	/* HMC FTP command descriptor */
	struct hmcdrv_ftp_cmd_desc {
		const char *str;	   /* command string */
		enum hmcdrv_ftp_cmdid cmd; /* associated command as enum */
	};

	/* Description of all HMC drive FTP commands
	 *
	 * Notes:
	 *  1. Array size should be a prime number.
	 *  2. Do not change the order of commands in table (because the
	 *     index is determined by CRC % ARRAY_SIZE).
	 *  3. Original command 'nlist' was renamed, else the CRC would
	 *     collide with 'append' (see point 2).
	 */
	static const struct hmcdrv_ftp_cmd_desc ftpcmds[7] = {

		{.str = "get", /* [0] get (CRC = 0x68eb) */
		 .cmd = HMCDRV_FTP_GET},
		{.str = "dir", /* [1] dir (CRC = 0x6a9e) */
		 .cmd = HMCDRV_FTP_DIR},
		{.str = "delete", /* [2] delete (CRC = 0x53ae) */
		 .cmd = HMCDRV_FTP_DELETE},
		{.str = "nls", /* [3] nls (CRC = 0xf87c) */
		 .cmd = HMCDRV_FTP_NLIST},
		{.str = "put", /* [4] put (CRC = 0xac56) */
		 .cmd = HMCDRV_FTP_PUT},
		{.str = "append", /* [5] append (CRC = 0xf56e) */
		 .cmd = HMCDRV_FTP_APPEND},
		{.str = NULL} /* [6] unused */
	};

	const struct hmcdrv_ftp_cmd_desc *pdesc;

	u16 crc = 0xffffU;

	if (len == 0)
		return HMCDRV_FTP_NOOP; /* error indicator */

	crc = crc16(crc, cmd, len);
	pdesc = ftpcmds + (crc % ARRAY_SIZE(ftpcmds));

	/* NOTE(review): '%s' prints past @len, up to the NUL of the
	 * caller's overall buffer - debug output may show trailing text
	 */
	pr_debug("FTP command '%s' has CRC 0x%04x, at table pos. %lu\n",
		 cmd, crc, (crc % ARRAY_SIZE(ftpcmds)));

	if (!pdesc->str || strncmp(pdesc->str, cmd, len))
		return HMCDRV_FTP_NOOP;

	pr_debug("FTP command '%s' found, with ID %d\n",
		 pdesc->str, pdesc->cmd);

	return pdesc->cmd;
}
/**
 * hmcdrv_ftp_parse() - HMC drive FTP command parser
 * @cmd: FTP command string "<cmd> <filename>" (modified in place:
 *       tokens are NUL-terminated within the buffer)
 * @ftp: Pointer to FTP command specification buffer (output); @ftp->fname
 *       points into @cmd, so @cmd must stay alive as long as @ftp is used
 *
 * The first whitespace-delimited token selects the command ID; the rest
 * of the line (up to the first control character) becomes the filename.
 *
 * Return: 0 on success, else a (negative) error code
 */
static int hmcdrv_ftp_parse(char *cmd, struct hmcdrv_ftp_cmdspec *ftp)
{
	char *start;
	int argc = 0;

	ftp->id = HMCDRV_FTP_NOOP;
	ftp->fname = NULL;

	while (*cmd != '\0') {

		while (isspace(*cmd))
			++cmd;

		if (*cmd == '\0')
			break;

		start = cmd;

		switch (argc) {
		case 0: /* 1st argument (FTP command) */
			while ((*cmd != '\0') && !isspace(*cmd))
				++cmd;
			ftp->id = hmcdrv_ftp_cmd_getid(start, cmd - start);
			break;
		case 1: /* 2nd / last argument (rest of line) */
			while ((*cmd != '\0') && !iscntrl(*cmd))
				++cmd;
			ftp->fname = start;
			/* fall through */
		default:
			*cmd = '\0'; /* terminate token; ends outer loop */
			break;
		} /* switch */

		++argc;
	} /* while */

	if (!ftp->fname || (ftp->id == HMCDRV_FTP_NOOP))
		return -EINVAL;

	return 0;
}
/**
 * hmcdrv_ftp_do() - perform a HMC drive FTP, with data from kernel-space
 * @ftp: pointer to FTP command specification
 *
 * Dispatches through the cache layer to the active backend (z/VM DIAG or
 * LPAR SCLP); fails with -ENXIO unless hmcdrv_ftp_startup() succeeded
 * and the instance is still open.
 *
 * Return: number of bytes read/written or a negative error code
 */
ssize_t hmcdrv_ftp_do(const struct hmcdrv_ftp_cmdspec *ftp)
{
	ssize_t len;

	mutex_lock(&hmcdrv_ftp_mutex);

	if (hmcdrv_ftp_funcs && hmcdrv_ftp_refcnt) {
		pr_debug("starting transfer, cmd %d for '%s' at %lld with %zd bytes\n",
			 ftp->id, ftp->fname, (long long) ftp->ofs, ftp->len);
		len = hmcdrv_cache_cmd(ftp, hmcdrv_ftp_funcs->transfer);
	} else {
		len = -ENXIO;
	}

	mutex_unlock(&hmcdrv_ftp_mutex);
	return len;
}
EXPORT_SYMBOL(hmcdrv_ftp_do);
/**
 * hmcdrv_ftp_probe() - probe for the HMC drive FTP service
 *
 * Issues a NOOP transfer into a scratch page to find out whether the
 * FTP service is reachable on this machine type.
 *
 * Return: 0 if service is available, else an (negative) error code
 */
int hmcdrv_ftp_probe(void)
{
	int rc;

	struct hmcdrv_ftp_cmdspec ftp = {
		.id = HMCDRV_FTP_NOOP,
		.ofs = 0,
		.fname = "",
		.len = PAGE_SIZE
	};

	ftp.buf = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);

	if (!ftp.buf)
		return -ENOMEM;

	rc = hmcdrv_ftp_startup();

	if (rc)
		goto out_free; /* bug fix: don't leak the scratch page */

	rc = hmcdrv_ftp_do(&ftp);
	hmcdrv_ftp_shutdown();

	switch (rc) {
	case -ENOENT: /* no such file/media or currently busy, */
	case -EBUSY:  /* but service seems to be available */
		rc = 0;
		break;
	default: /* leave 'rc' as it is for [0, -EPERM, -E...] */
		if (rc > 0)
			rc = 0; /* clear length (success) */
		break;
	} /* switch */

out_free:
	free_page((unsigned long) ftp.buf);
	return rc;
}
EXPORT_SYMBOL(hmcdrv_ftp_probe);
/**
 * hmcdrv_ftp_cmd() - Perform a HMC drive FTP, with data from user-space
 *
 * @cmd: FTP command string "<cmd> <filename>" (modified in place by the
 *       parser)
 * @offset: file position to read/write
 * @buf: user-space buffer for read/written directory/file
 * @len: size of @buf (read/dir) or number of bytes to write
 *
 * A kernel bounce buffer of get_order(@len) pages is used for the
 * copy_to/from_user() transfers.  NOTE(review): @len is taken from the
 * caller unchecked - presumably bounded by the device layer; verify
 * against callers.
 *
 * This function must not be called before hmcdrv_ftp_startup() was called.
 *
 * Return: number of bytes read/written or a negative error code
 */
ssize_t hmcdrv_ftp_cmd(char __kernel *cmd, loff_t offset,
		       char __user *buf, size_t len)
{
	int order;

	struct hmcdrv_ftp_cmdspec ftp = {.len = len, .ofs = offset};
	ssize_t retlen = hmcdrv_ftp_parse(cmd, &ftp);

	if (retlen)
		return retlen;

	order = get_order(ftp.len);
	ftp.buf = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, order);

	if (!ftp.buf)
		return -ENOMEM;

	switch (ftp.id) {
	case HMCDRV_FTP_DIR:
	case HMCDRV_FTP_NLIST:
	case HMCDRV_FTP_GET:
		retlen = hmcdrv_ftp_do(&ftp);

		if ((retlen >= 0) &&
		    copy_to_user(buf, ftp.buf, retlen))
			retlen = -EFAULT;
		break;

	case HMCDRV_FTP_PUT:
	case HMCDRV_FTP_APPEND:
		if (!copy_from_user(ftp.buf, buf, ftp.len))
			retlen = hmcdrv_ftp_do(&ftp);
		else
			retlen = -EFAULT;
		break;

	case HMCDRV_FTP_DELETE:
		retlen = hmcdrv_ftp_do(&ftp);
		break;

	default:
		retlen = -EOPNOTSUPP;
		break;
	}

	free_pages((unsigned long) ftp.buf, order);
	return retlen;
}
/**
 * hmcdrv_ftp_startup() - startup of HMC drive FTP functionality for a
 * dedicated (owner) instance
 *
 * The backend (z/VM DIAG vs. LPAR/KVM SCLP) is selected and started on
 * the first call only; subsequent calls just bump the reference count.
 *
 * Return: 0 on success, else an (negative) error code
 */
int hmcdrv_ftp_startup(void)
{
	static struct hmcdrv_ftp_ops hmcdrv_ftp_zvm = {
		.startup = diag_ftp_startup,
		.shutdown = diag_ftp_shutdown,
		.transfer = diag_ftp_cmd
	};

	static struct hmcdrv_ftp_ops hmcdrv_ftp_lpar = {
		.startup = sclp_ftp_startup,
		.shutdown = sclp_ftp_shutdown,
		.transfer = sclp_ftp_cmd
	};

	int rc = 0;

	mutex_lock(&hmcdrv_ftp_mutex); /* block transfers while start-up */

	if (hmcdrv_ftp_refcnt == 0) {
		if (MACHINE_IS_VM)
			hmcdrv_ftp_funcs = &hmcdrv_ftp_zvm;
		else if (MACHINE_IS_LPAR || MACHINE_IS_KVM)
			hmcdrv_ftp_funcs = &hmcdrv_ftp_lpar;
		else
			rc = -EOPNOTSUPP;

		if (hmcdrv_ftp_funcs)
			rc = hmcdrv_ftp_funcs->startup();
	}

	if (!rc)
		++hmcdrv_ftp_refcnt;

	mutex_unlock(&hmcdrv_ftp_mutex);
	return rc;
}
EXPORT_SYMBOL(hmcdrv_ftp_startup);
/**
 * hmcdrv_ftp_shutdown() - shutdown of HMC drive FTP functionality for a
 * dedicated (owner) instance
 *
 * Drops one reference; the backend's shutdown hook runs only when the
 * last owner goes away.  Must pair 1:1 with hmcdrv_ftp_startup().
 */
void hmcdrv_ftp_shutdown(void)
{
	mutex_lock(&hmcdrv_ftp_mutex);
	--hmcdrv_ftp_refcnt;

	if ((hmcdrv_ftp_refcnt == 0) && hmcdrv_ftp_funcs)
		hmcdrv_ftp_funcs->shutdown();

	mutex_unlock(&hmcdrv_ftp_mutex);
}
EXPORT_SYMBOL(hmcdrv_ftp_shutdown);

View file

@ -0,0 +1,63 @@
/*
 * SE/HMC Drive FTP Services
 *
 * Copyright IBM Corp. 2013
 * Author(s): Ralf Hoppe (rhoppe@de.ibm.com)
 */

#ifndef __HMCDRV_FTP_H__
#define __HMCDRV_FTP_H__

#include <linux/types.h> /* size_t, loff_t */

/*
 * HMC drive FTP Service max. length of path (w/ EOS)
 */
#define HMCDRV_FTP_FIDENT_MAX 192

/**
 * enum hmcdrv_ftp_cmdid - HMC drive FTP commands
 * @HMCDRV_FTP_NOOP: do nothing (only for probing)
 * @HMCDRV_FTP_GET: read a file
 * @HMCDRV_FTP_PUT: (over-) write a file
 * @HMCDRV_FTP_APPEND: append to a file
 * @HMCDRV_FTP_DIR: list directory long (ls -l)
 * @HMCDRV_FTP_NLIST: list files, no directories (name list)
 * @HMCDRV_FTP_DELETE: delete a file
 * @HMCDRV_FTP_CANCEL: cancel operation (SCLP/LPAR only)
 */
enum hmcdrv_ftp_cmdid {
	HMCDRV_FTP_NOOP = 0,
	HMCDRV_FTP_GET = 1,
	HMCDRV_FTP_PUT = 2,
	HMCDRV_FTP_APPEND = 3,
	HMCDRV_FTP_DIR = 4,
	HMCDRV_FTP_NLIST = 5,
	HMCDRV_FTP_DELETE = 6,
	HMCDRV_FTP_CANCEL = 7
};

/**
 * struct hmcdrv_ftp_cmdspec - FTP command specification
 * @id: FTP command ID
 * @ofs: offset in file
 * @fname: filename (ASCII), null-terminated
 * @buf: kernel-space transfer data buffer, 4k aligned
 * @len: (max) number of bytes to transfer from/to @buf
 */
struct hmcdrv_ftp_cmdspec {
	enum hmcdrv_ftp_cmdid id;
	loff_t ofs;
	const char *fname;
	void __kernel *buf;
	size_t len;
};

/* start/stop the service (reference counted) */
int hmcdrv_ftp_startup(void);
void hmcdrv_ftp_shutdown(void);
/* check service availability without keeping it started */
int hmcdrv_ftp_probe(void);
ssize_t hmcdrv_ftp_do(const struct hmcdrv_ftp_cmdspec *ftp);
ssize_t hmcdrv_ftp_cmd(char __kernel *cmd, loff_t offset,
		       char __user *buf, size_t len);

#endif	 /* __HMCDRV_FTP_H__ */

View file

@ -0,0 +1,64 @@
/*
* HMC Drive DVD Module
*
* Copyright IBM Corp. 2013
* Author(s): Ralf Hoppe (rhoppe@de.ibm.com)
*/
#define KMSG_COMPONENT "hmcdrv"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/version.h>
#include <linux/stat.h>
#include "hmcdrv_ftp.h"
#include "hmcdrv_dev.h"
#include "hmcdrv_cache.h"
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Copyright 2013 IBM Corporation");
MODULE_DESCRIPTION("HMC drive DVD access");

/*
 * module parameter 'cachesize' (read-only via sysfs; bytes for the
 * read cache, defaulting to HMCDRV_CACHE_SIZE_DFLT)
 */
static size_t hmcdrv_mod_cachesize = HMCDRV_CACHE_SIZE_DFLT;
module_param_named(cachesize, hmcdrv_mod_cachesize, ulong, S_IRUGO);
/**
 * hmcdrv_mod_init() - module init function
 *
 * Probes for the FTP service, starts the read cache and registers the
 * character device; the cache is torn down again if device creation
 * fails.  Returns 0 on success, else a negative error code.
 */
static int __init hmcdrv_mod_init(void)
{
	int rc = hmcdrv_ftp_probe(); /* perform w/o cache */

	if (rc)
		return rc;

	rc = hmcdrv_cache_startup(hmcdrv_mod_cachesize);

	if (rc)
		return rc;

	rc = hmcdrv_dev_init();

	if (rc)
		hmcdrv_cache_shutdown(); /* unwind the cache on failure */

	return rc;
}
/**
 * hmcdrv_mod_exit() - module exit function
 *
 * Destroys the device first, then the cache (reverse init order).
 */
static void __exit hmcdrv_mod_exit(void)
{
	hmcdrv_dev_exit();
	hmcdrv_cache_shutdown();
}

module_init(hmcdrv_mod_init);
module_exit(hmcdrv_mod_exit);

View file

@ -0,0 +1,559 @@
/*
* ebcdic keycode functions for s390 console drivers
*
* S390 version
* Copyright IBM Corp. 2003
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
*/
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sysrq.h>
#include <linux/consolemap.h>
#include <linux/kbd_kern.h>
#include <linux/kbd_diacr.h>
#include <asm/uaccess.h>
#include "keyboard.h"
/*
 * Handler Tables.
 *
 * One handler per key type (KT_*); the K_HANDLERS list doubles as the
 * forward declaration of all handler functions and as the initializer
 * of the dispatch table.
 */
#define K_HANDLERS\
	k_self,		k_fn,		k_spec,		k_ignore,\
	k_dead,		k_ignore,	k_ignore,	k_ignore,\
	k_ignore,	k_ignore,	k_ignore,	k_ignore,\
	k_ignore,	k_ignore,	k_ignore,	k_ignore

typedef void (k_handler_fn)(struct kbd_data *, unsigned char);
static k_handler_fn K_HANDLERS;
static k_handler_fn *k_handler[16] = { K_HANDLERS };

/* maximum values each key_handler can handle */
static const int kbd_max_vals[] = {
	255, ARRAY_SIZE(func_table) - 1, NR_FN_HANDLER - 1, 0,
	NR_DEAD - 1, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
static const int KBD_NR_TYPES = ARRAY_SIZE(kbd_max_vals);

/* ASCII characters produced by the dead (diacritical) keys */
static unsigned char ret_diacr[NR_DEAD] = {
	'`', '\'', '^', '~', '"', ','
};
/*
 * Alloc/free of kbd_data structures.
 *
 * kbd_alloc() deep-copies the global key_maps, func_table and
 * accent_table so each tty gets its own modifiable keyboard state.
 * Returns NULL on any allocation failure (everything allocated so far
 * is released via the goto chain below).
 */
struct kbd_data *
kbd_alloc(void) {
	struct kbd_data *kbd;
	int i;

	kbd = kzalloc(sizeof(struct kbd_data), GFP_KERNEL);
	if (!kbd)
		goto out;
	kbd->key_maps = kzalloc(sizeof(key_maps), GFP_KERNEL);
	if (!kbd->key_maps)
		goto out_kbd;
	for (i = 0; i < ARRAY_SIZE(key_maps); i++) {
		if (key_maps[i]) {
			kbd->key_maps[i] = kmemdup(key_maps[i],
						   sizeof(u_short) * NR_KEYS,
						   GFP_KERNEL);
			if (!kbd->key_maps[i])
				goto out_maps;
		}
	}
	kbd->func_table = kzalloc(sizeof(func_table), GFP_KERNEL);
	if (!kbd->func_table)
		goto out_maps;
	for (i = 0; i < ARRAY_SIZE(func_table); i++) {
		if (func_table[i]) {
			kbd->func_table[i] = kstrdup(func_table[i],
						     GFP_KERNEL);
			if (!kbd->func_table[i])
				goto out_func;
		}
	}
	kbd->fn_handler =
		kzalloc(sizeof(fn_handler_fn *) * NR_FN_HANDLER, GFP_KERNEL);
	if (!kbd->fn_handler)
		goto out_func;
	kbd->accent_table = kmemdup(accent_table,
				    sizeof(struct kbdiacruc) * MAX_DIACR,
				    GFP_KERNEL);
	if (!kbd->accent_table)
		goto out_fn_handler;
	kbd->accent_table_size = accent_table_size;
	return kbd;

	/* error unwinding: free in reverse order of allocation; the
	 * kfree(NULL) calls on never-allocated slots are harmless */
out_fn_handler:
	kfree(kbd->fn_handler);
out_func:
	for (i = 0; i < ARRAY_SIZE(func_table); i++)
		kfree(kbd->func_table[i]);
	kfree(kbd->func_table);
out_maps:
	for (i = 0; i < ARRAY_SIZE(key_maps); i++)
		kfree(kbd->key_maps[i]);
	kfree(kbd->key_maps);
out_kbd:
	kfree(kbd);
out:
	return NULL;
}
/*
 * Release a kbd_data structure and all tables duplicated by kbd_alloc().
 */
void
kbd_free(struct kbd_data *kbd)
{
	int idx;

	kfree(kbd->accent_table);
	kfree(kbd->fn_handler);

	/* per-slot copies first, then the slot arrays themselves */
	for (idx = ARRAY_SIZE(func_table) - 1; idx >= 0; idx--)
		kfree(kbd->func_table[idx]);
	kfree(kbd->func_table);

	for (idx = ARRAY_SIZE(key_maps) - 1; idx >= 0; idx--)
		kfree(kbd->key_maps[idx]);
	kfree(kbd->key_maps);

	kfree(kbd);
}
/*
 * Generate ascii -> ebcdic translation table from kbd_data.
 *
 * Walks all key maps; for each plain/letter (or dead-key) keysym the
 * produced ASCII character is mapped back to its EBCDIC keycode k
 * (shift state encoded in bit 7).  Unmapped entries default to 0x40
 * (EBCDIC blank).
 */
void
kbd_ascebc(struct kbd_data *kbd, unsigned char *ascebc)
{
	unsigned short *keymap, keysym;
	int i, j, k;

	memset(ascebc, 0x40, 256);
	for (i = 0; i < ARRAY_SIZE(key_maps); i++) {
		keymap = kbd->key_maps[i];
		if (!keymap)
			continue;
		for (j = 0; j < NR_KEYS; j++) {
			k = ((i & 1) << 7) + j;
			keysym = keymap[j];
			if (KTYP(keysym) == (KT_LATIN | 0xf0) ||
			    KTYP(keysym) == (KT_LETTER | 0xf0))
				ascebc[KVAL(keysym)] = k;
			else if (KTYP(keysym) == (KT_DEAD | 0xf0))
				ascebc[ret_diacr[KVAL(keysym)]] = k;
		}
	}
}
#if 0
/*
 * Generate ebcdic -> ascii translation table from kbd_data.
 *
 * Inverse of kbd_ascebc(); currently compiled out (no caller).
 */
void
kbd_ebcasc(struct kbd_data *kbd, unsigned char *ebcasc)
{
	unsigned short *keymap, keysym;
	int i, j, k;

	memset(ebcasc, ' ', 256);
	for (i = 0; i < ARRAY_SIZE(key_maps); i++) {
		keymap = kbd->key_maps[i];
		if (!keymap)
			continue;
		for (j = 0; j < NR_KEYS; j++) {
			keysym = keymap[j];
			k = ((i & 1) << 7) + j;
			if (KTYP(keysym) == (KT_LATIN | 0xf0) ||
			    KTYP(keysym) == (KT_LETTER | 0xf0))
				ebcasc[k] = KVAL(keysym);
			else if (KTYP(keysym) == (KT_DEAD | 0xf0))
				ebcasc[k] = ret_diacr[KVAL(keysym)];
		}
	}
}
#endif
/*
 * We have a combining character DIACR here, followed by the character CH.
 * If the combination occurs in the table, return the corresponding value.
 * Otherwise, if CH is a space or equals DIACR, return DIACR.
 * Otherwise, conclude that DIACR was not combining after all,
 * queue it and return CH.
 */
static unsigned int
handle_diacr(struct kbd_data *kbd, unsigned int ch)
{
	int i, d;

	d = kbd->diacr;
	kbd->diacr = 0;	/* pending diacritical is consumed either way */

	for (i = 0; i < kbd->accent_table_size; i++) {
		if (kbd->accent_table[i].diacr == d &&
		    kbd->accent_table[i].base == ch)
			return kbd->accent_table[i].result;
	}

	if (ch == ' ' || ch == d)
		return d;

	kbd_put_queue(kbd->port, d);	/* emit the non-combining diacr */
	return ch;
}
/*
 * Handle dead key: remember the diacritical for combination with the
 * next character (or combine two pending ones via handle_diacr).
 */
static void
k_dead(struct kbd_data *kbd, unsigned char value)
{
	value = ret_diacr[value];
	kbd->diacr = (kbd->diacr ? handle_diacr(kbd, value) : value);
}

/*
 * Normal character handler: apply a pending diacritical (if any) and
 * queue the resulting character to the tty.
 */
static void
k_self(struct kbd_data *kbd, unsigned char value)
{
	if (kbd->diacr)
		value = handle_diacr(kbd, value);
	kbd_put_queue(kbd->port, value);
}

/*
 * Special key handlers
 */
static void
k_ignore(struct kbd_data *kbd, unsigned char value)
{
}

/*
 * Function key handler: queue the (user-settable) string bound to the
 * function key, if one is defined.
 */
static void
k_fn(struct kbd_data *kbd, unsigned char value)
{
	if (kbd->func_table[value])
		kbd_puts_queue(kbd->port, kbd->func_table[value]);
}

/*
 * Special function handler: dispatch to the fn_handler hook registered
 * for this value (bounds-checked against NR_FN_HANDLER).
 */
static void
k_spec(struct kbd_data *kbd, unsigned char value)
{
	if (value >= NR_FN_HANDLER)
		return;
	if (kbd->fn_handler[value])
		kbd->fn_handler[value](kbd);
}
/*
 * Put utf8 character to tty flip buffer.
 * UTF-8 is defined for words of up to 31 bits,
 * but we need only 16 bits here
 */
static void
to_utf8(struct tty_port *port, ushort c)
{
	if (c < 0x80) {
		/* 0******* : plain ASCII, single byte */
		kbd_put_queue(port, c);
		return;
	}

	if (c < 0x800) {
		/* 110***** 10****** : two-byte sequence */
		kbd_put_queue(port, 0xc0 | (c >> 6));
	} else {
		/* 1110**** 10****** 10****** : three-byte sequence */
		kbd_put_queue(port, 0xe0 | (c >> 12));
		kbd_put_queue(port, 0x80 | ((c >> 6) & 0x3f));
	}

	/* final continuation byte, common to both multi-byte cases */
	kbd_put_queue(port, 0x80 | (c & 0x3f));
}
/*
 * Process keycode.
 *
 * Translates the raw keycode through the appropriate key map (maps
 * 0/1/4/5 select the shift state by keycode range), then dispatches to
 * the per-type handler or emits the symbol as UTF-8.  A '^' followed by
 * '-' and a character triggers the SysRq hook (if configured).
 */
void
kbd_keycode(struct kbd_data *kbd, unsigned int keycode)
{
	unsigned short keysym;
	unsigned char type, value;

	if (!kbd)
		return;

	if (keycode >= 384)
		keysym = kbd->key_maps[5][keycode - 384];
	else if (keycode >= 256)
		keysym = kbd->key_maps[4][keycode - 256];
	else if (keycode >= 128)
		keysym = kbd->key_maps[1][keycode - 128];
	else
		keysym = kbd->key_maps[0][keycode];

	type = KTYP(keysym);
	if (type >= 0xf0) {
		type -= 0xf0;
		if (type == KT_LETTER)
			type = KT_LATIN;
		value = KVAL(keysym);
#ifdef CONFIG_MAGIC_SYSRQ	       /* Handle the SysRq Hack */
		if (kbd->sysrq) {
			if (kbd->sysrq == K(KT_LATIN, '-')) {
				kbd->sysrq = 0;
				handle_sysrq(value);
				return;
			}
			if (value == '-') {
				kbd->sysrq = K(KT_LATIN, '-');
				return;
			}
			/* Incomplete sysrq sequence. */
			(*k_handler[KTYP(kbd->sysrq)])(kbd, KVAL(kbd->sysrq));
			kbd->sysrq = 0;
		} else if ((type == KT_LATIN && value == '^') ||
			   (type == KT_DEAD && ret_diacr[value] == '^')) {
			kbd->sysrq = K(type, value);
			return;
		}
#endif
		(*k_handler[type])(kbd, value);
	} else
		to_utf8(kbd->port, keysym);
}
/*
 * Ioctl stuff.
 */

/*
 * KDGKBENT/KDSKBENT: get or set a single keymap entry.  KDSKBENT with
 * kb_index == 0 and kb_value == K_NOSUCHMAP deallocates a whole map;
 * setting a map that does not yet exist allocates it filled with K_HOLE.
 * @perm gates all modifying operations.
 */
static int
do_kdsk_ioctl(struct kbd_data *kbd, struct kbentry __user *user_kbe,
	      int cmd, int perm)
{
	struct kbentry tmp;
	ushort *key_map, val, ov;

	if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
		return -EFAULT;
#if NR_KEYS < 256
	if (tmp.kb_index >= NR_KEYS)
		return -EINVAL;
#endif
#if MAX_NR_KEYMAPS < 256
	if (tmp.kb_table >= MAX_NR_KEYMAPS)
		return -EINVAL;
#endif

	switch (cmd) {
	case KDGKBENT:
		key_map = kbd->key_maps[tmp.kb_table];
		if (key_map) {
		    val = U(key_map[tmp.kb_index]);
		    if (KTYP(val) >= KBD_NR_TYPES)
			val = K_HOLE;
		} else
		    val = (tmp.kb_index ? K_HOLE : K_NOSUCHMAP);
		return put_user(val, &user_kbe->kb_value);
	case KDSKBENT:
		if (!perm)
			return -EPERM;
		if (!tmp.kb_index && tmp.kb_value == K_NOSUCHMAP) {
			/* disallocate map */
			key_map = kbd->key_maps[tmp.kb_table];
			if (key_map) {
			    kbd->key_maps[tmp.kb_table] = NULL;
			    kfree(key_map);
			}
			break;
		}

		if (KTYP(tmp.kb_value) >= KBD_NR_TYPES)
			return -EINVAL;
		if (KVAL(tmp.kb_value) > kbd_max_vals[KTYP(tmp.kb_value)])
			return -EINVAL;

		if (!(key_map = kbd->key_maps[tmp.kb_table])) {
			int j;

			key_map = kmalloc(sizeof(plain_map),
					  GFP_KERNEL);
			if (!key_map)
				return -ENOMEM;
			kbd->key_maps[tmp.kb_table] = key_map;
			for (j = 0; j < NR_KEYS; j++)
				key_map[j] = U(K_HOLE);
		}
		ov = U(key_map[tmp.kb_index]);
		if (tmp.kb_value == ov)
			break;	/* nothing to do */
		/*
		 * Attention Key.
		 */
		if (((ov == K_SAK) || (tmp.kb_value == K_SAK)) &&
		    !capable(CAP_SYS_ADMIN))
			return -EPERM;
		key_map[tmp.kb_index] = U(tmp.kb_value);
		break;
	}
	return 0;
}
/*
 * KDGKBSENT/KDSKBSENT: get or set the string bound to a function key.
 * Reads copy (a truncated view of) the bound string to user space;
 * writes replace the bound string with a freshly allocated copy.
 * NOTE(review): strnlen_user() counts the terminating NUL, so 'len'
 * here includes it - presumably intentional, matches historic behavior.
 */
static int
do_kdgkb_ioctl(struct kbd_data *kbd, struct kbsentry __user *u_kbs,
	       int cmd, int perm)
{
	unsigned char kb_func;
	char *p;
	int len;

	/* Get u_kbs->kb_func. */
	if (get_user(kb_func, &u_kbs->kb_func))
		return -EFAULT;
#if MAX_NR_FUNC < 256
	if (kb_func >= MAX_NR_FUNC)
		return -EINVAL;
#endif

	switch (cmd) {
	case KDGKBSENT:
		p = kbd->func_table[kb_func];
		if (p) {
			len = strlen(p);
			if (len >= sizeof(u_kbs->kb_string))
				len = sizeof(u_kbs->kb_string) - 1;
			if (copy_to_user(u_kbs->kb_string, p, len))
				return -EFAULT;
		} else
			len = 0;
		if (put_user('\0', u_kbs->kb_string + len))
			return -EFAULT;
		break;
	case KDSKBSENT:
		if (!perm)
			return -EPERM;
		len = strnlen_user(u_kbs->kb_string,
				   sizeof(u_kbs->kb_string) - 1);
		if (!len)
			return -EFAULT;
		if (len > sizeof(u_kbs->kb_string) - 1)
			return -EINVAL;
		p = kmalloc(len + 1, GFP_KERNEL);
		if (!p)
			return -ENOMEM;
		if (copy_from_user(p, u_kbs->kb_string, len)) {
			kfree(p);
			return -EFAULT;
		}
		p[len] = 0;
		kfree(kbd->func_table[kb_func]);
		kbd->func_table[kb_func] = p;
		break;
	}
	return 0;
}
/*
 * Keyboard ioctl dispatcher: permission-checks the caller (tty owner or
 * CAP_SYS_TTY_CONFIG) and handles keymap, function-key-string and
 * diacritical-table ioctls.  Returns 0/-E..., or -ENOIOCTLCMD for
 * unknown commands.
 */
int kbd_ioctl(struct kbd_data *kbd, unsigned int cmd, unsigned long arg)
{
	struct tty_struct *tty;
	void __user *argp;
	unsigned int ct;
	int perm;

	argp = (void __user *)arg;

	/*
	 * To have permissions to do most of the vt ioctls, we either have
	 * to be the owner of the tty, or have CAP_SYS_TTY_CONFIG.
	 */
	tty = tty_port_tty_get(kbd->port);
	/* FIXME this test is pretty racy */
	perm = current->signal->tty == tty || capable(CAP_SYS_TTY_CONFIG);
	tty_kref_put(tty);
	switch (cmd) {
	case KDGKBTYPE:
		return put_user(KB_101, (char __user *)argp);
	case KDGKBENT:
	case KDSKBENT:
		return do_kdsk_ioctl(kbd, argp, cmd, perm);
	case KDGKBSENT:
	case KDSKBSENT:
		return do_kdgkb_ioctl(kbd, argp, cmd, perm);
	case KDGKBDIACR:
	{
		/* copy out the accent table, entry by entry (legacy
		 * struct kbdiacr uses unsigned char members) */
		struct kbdiacrs __user *a = argp;
		struct kbdiacr diacr;
		int i;

		if (put_user(kbd->accent_table_size, &a->kb_cnt))
			return -EFAULT;
		for (i = 0; i < kbd->accent_table_size; i++) {
			diacr.diacr = kbd->accent_table[i].diacr;
			diacr.base = kbd->accent_table[i].base;
			diacr.result = kbd->accent_table[i].result;
			if (copy_to_user(a->kbdiacr + i, &diacr, sizeof(struct kbdiacr)))
			return -EFAULT;
		}
		return 0;
	}
	case KDGKBDIACRUC:
	{
		/* unicode variant: table layout matches, bulk copy */
		struct kbdiacrsuc __user *a = argp;

		ct = kbd->accent_table_size;
		if (put_user(ct, &a->kb_cnt))
			return -EFAULT;
		if (copy_to_user(a->kbdiacruc, kbd->accent_table,
				 ct * sizeof(struct kbdiacruc)))
			return -EFAULT;
		return 0;
	}
	case KDSKBDIACR:
	{
		struct kbdiacrs __user *a = argp;
		struct kbdiacr diacr;
		int i;

		if (!perm)
			return -EPERM;
		if (get_user(ct, &a->kb_cnt))
			return -EFAULT;
		if (ct >= MAX_DIACR)
			return -EINVAL;
		kbd->accent_table_size = ct;
		for (i = 0; i < ct; i++) {
			if (copy_from_user(&diacr, a->kbdiacr + i, sizeof(struct kbdiacr)))
				return -EFAULT;
			kbd->accent_table[i].diacr = diacr.diacr;
			kbd->accent_table[i].base = diacr.base;
			kbd->accent_table[i].result = diacr.result;
		}
		return 0;
	}
	case KDSKBDIACRUC:
	{
		struct kbdiacrsuc __user *a = argp;

		if (!perm)
			return -EPERM;
		if (get_user(ct, &a->kb_cnt))
			return -EFAULT;
		if (ct >= MAX_DIACR)
			return -EINVAL;
		kbd->accent_table_size = ct;
		if (copy_from_user(kbd->accent_table, a->kbdiacruc,
				   ct * sizeof(struct kbdiacruc)))
			return -EFAULT;
		return 0;
	}
	default:
		return -ENOIOCTLCMD;
	}
}

EXPORT_SYMBOL(kbd_ioctl);
EXPORT_SYMBOL(kbd_ascebc);
EXPORT_SYMBOL(kbd_free);
EXPORT_SYMBOL(kbd_alloc);
EXPORT_SYMBOL(kbd_keycode);

View file

@ -0,0 +1,56 @@
/*
 * ebcdic keycode functions for s390 console drivers
 *
 * Copyright IBM Corp. 2003
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 */

#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/keyboard.h>

#define NR_FN_HANDLER	20

struct kbd_data;

/* special-function hook invoked by k_spec() */
typedef void (fn_handler_fn)(struct kbd_data *);

/*
 * FIXME: explain key_maps tricks.
 */

/* per-tty keyboard state: private copies of the keymap/function tables
 * (see kbd_alloc) plus dead-key and sysrq bookkeeping */
struct kbd_data {
	struct tty_port *port;
	unsigned short **key_maps;
	char **func_table;
	fn_handler_fn **fn_handler;
	struct kbdiacruc *accent_table;
	unsigned int accent_table_size;
	unsigned int diacr;		/* pending diacritical, 0 if none */
	unsigned short sysrq;		/* sysrq sequence state */
};

struct kbd_data *kbd_alloc(void);
void kbd_free(struct kbd_data *);
void kbd_ascebc(struct kbd_data *, unsigned char *);

void kbd_keycode(struct kbd_data *, unsigned int);
int kbd_ioctl(struct kbd_data *, unsigned int, unsigned long);

/*
 * Helper Functions.
 */
static inline void
kbd_put_queue(struct tty_port *port, int ch)
{
	tty_insert_flip_char(port, ch, 0);
	tty_schedule_flip(port);
}

static inline void
kbd_puts_queue(struct tty_port *port, char *cp)
{
	while (*cp)
		tty_insert_flip_char(port, *cp++, 0);
	tty_schedule_flip(port);
}

View file

@ -0,0 +1,651 @@
/*
* Character device driver for reading z/VM *MONITOR service records.
*
* Copyright IBM Corp. 2004, 2009
*
* Author: Gerald Schaefer <gerald.schaefer@de.ibm.com>
*/
#define KMSG_COMPONENT "monreader"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/ctype.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <net/iucv/iucv.h>
#include <asm/uaccess.h>
#include <asm/ebcdic.h>
#include <asm/extmem.h>
#define MON_COLLECT_SAMPLE 0x80
#define MON_COLLECT_EVENT  0x40
#define MON_SERVICE "*MONITOR"
#define MON_IN_USE 0x01
#define MON_MSGLIM 255

/* DCSS segment holding the *MONITOR records (ASCII, 8 chars max) */
static char mon_dcss_name[9] = "MONDCSS\0";

/* one pending *MONITOR message and its read progress */
struct mon_msg {
	u32 pos;			/* read offset within the message */
	u32 mca_offset;
	struct iucv_message msg;
	char msglim_reached;
	char replied_msglim;
};

/* per-open-file state of the reader device */
struct mon_private {
	struct iucv_path *path;
	struct mon_msg *msg_array[MON_MSGLIM];
	unsigned int write_index;
	unsigned int read_index;
	atomic_t msglim_count;
	atomic_t read_ready;
	atomic_t iucv_connected;
	atomic_t iucv_severed;
};

static unsigned long mon_in_use = 0;	/* MON_IN_USE: single-open guard */

static unsigned long mon_dcss_start;
static unsigned long mon_dcss_end;

static DECLARE_WAIT_QUEUE_HEAD(mon_read_wait_queue);
static DECLARE_WAIT_QUEUE_HEAD(mon_conn_wait_queue);

static u8 user_data_connect[16] = {
	/* Version code, must be 0x01 for shared mode */
	0x01,
	/* what to collect */
	MON_COLLECT_SAMPLE | MON_COLLECT_EVENT,
	/* DCSS name in EBCDIC, 8 bytes padded with blanks */
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
};

static u8 user_data_sever[16] = {
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
};

static struct device *monreader_device;
/******************************************************************************
* helper functions *
*****************************************************************************/
/*
* Create the 8 bytes EBCDIC DCSS segment name from
* an ASCII name, incl. padding
*/
/*
 * Create the 8 byte EBCDIC DCSS segment name from an ASCII name.
 *
 * ascii_name:  NUL-terminated ASCII name (at most 8 significant chars)
 * ebcdic_name: receives exactly 8 EBCDIC bytes, blank padded,
 *              not NUL-terminated
 */
static void dcss_mkname(char *ascii_name, char *ebcdic_name)
{
	int i;

	for (i = 0; i < 8; i++) {
		if (ascii_name[i] == '\0')
			break;
		ebcdic_name[i] = toupper(ascii_name[i]);
	}			/* fixed: dropped stray ';' (empty statement) */
	/* pad the remainder with blanks before translating */
	for (; i < 8; i++)
		ebcdic_name[i] = ' ';
	ASCEBC(ebcdic_name, 8);
}
/* Start address of the MCA, from the first 4 bytes of the IUCV rmmsg
 * area (stored as a u32 — presumably a 31-bit address, TODO confirm). */
static inline unsigned long mon_mca_start(struct mon_msg *monmsg)
{
	return *(u32 *) &monmsg->msg.rmmsg;
}

/* End address (inclusive) of the MCA, bytes 4-7 of the rmmsg area. */
static inline unsigned long mon_mca_end(struct mon_msg *monmsg)
{
	return *(u32 *) &monmsg->msg.rmmsg[4];
}

/* Type byte at 'index' within the current 12-byte MCA entry. */
static inline u8 mon_mca_type(struct mon_msg *monmsg, u8 index)
{
	return *((u8 *) mon_mca_start(monmsg) + monmsg->mca_offset + index);
}

/* Total MCA size in bytes; +1 because the end address is inclusive. */
static inline u32 mon_mca_size(struct mon_msg *monmsg)
{
	return mon_mca_end(monmsg) - mon_mca_start(monmsg) + 1;
}

/* Start address of the monitor records of the current MCA entry
 * (offset 4 within the entry). */
static inline u32 mon_rec_start(struct mon_msg *monmsg)
{
	return *((u32 *) (mon_mca_start(monmsg) + monmsg->mca_offset + 4));
}

/* End address (inclusive) of the monitor records of the current MCA
 * entry (offset 8 within the entry). */
static inline u32 mon_rec_end(struct mon_msg *monmsg)
{
	return *((u32 *) (mon_mca_start(monmsg) + monmsg->mca_offset + 8));
}
/*
 * Sanity check a message's monitor control area: record and MCA
 * addresses must lie inside the loaded DCSS, records must be non-empty,
 * the MCA must consist of whole 12-byte entries, and the type bytes
 * must be plausible (byte 0 set, at least one of bytes 1/2 set).
 * Returns 0 if the MCA looks valid, -EINVAL otherwise.
 */
static int mon_check_mca(struct mon_msg *monmsg)
{
	if ((mon_rec_end(monmsg) <= mon_rec_start(monmsg)) ||
	    (mon_rec_start(monmsg) < mon_dcss_start) ||
	    (mon_rec_end(monmsg) > mon_dcss_end) ||
	    (mon_mca_type(monmsg, 0) == 0) ||
	    (mon_mca_size(monmsg) % 12 != 0) ||
	    (mon_mca_end(monmsg) <= mon_mca_start(monmsg)) ||
	    (mon_mca_end(monmsg) > mon_dcss_end) ||
	    (mon_mca_start(monmsg) < mon_dcss_start) ||
	    ((mon_mca_type(monmsg, 1) == 0) && (mon_mca_type(monmsg, 2) == 0)))
		return -EINVAL;
	return 0;
}
/*
 * Reply to a *MONITOR message so z/VM can reuse the buffer slot.
 * In the normal case the message slot is reset and the read index is
 * advanced; if the message limit had been reached for this slot, only
 * replied_msglim is set and mon_next_message() later reports the
 * overflow to the reader via -EOVERFLOW.
 * Returns 0 on success, -EIO if the IUCV reply itself failed.
 */
static int mon_send_reply(struct mon_msg *monmsg,
			  struct mon_private *monpriv)
{
	int rc;

	rc = iucv_message_reply(monpriv->path, &monmsg->msg,
				IUCV_IPRMDATA, NULL, 0);
	/* slot is free for the interrupt handler again */
	atomic_dec(&monpriv->msglim_count);
	if (likely(!monmsg->msglim_reached)) {
		monmsg->pos = 0;
		monmsg->mca_offset = 0;
		monpriv->read_index = (monpriv->read_index + 1) %
				      MON_MSGLIM;
		atomic_dec(&monpriv->read_ready);
	} else
		monmsg->replied_msglim = 1;
	if (rc) {
		pr_err("Reading monitor data failed with rc=%i\n", rc);
		return -EIO;
	}
	return 0;
}
/*
 * Free the message array and the private data structure itself.
 * kfree(NULL) is a no-op, so a partially populated array (as left
 * behind by a failed mon_alloc_mem()) is handled correctly.
 */
static void mon_free_mem(struct mon_private *monpriv)
{
	int idx;

	for (idx = 0; idx < MON_MSGLIM; idx++)
		kfree(monpriv->msg_array[idx]);
	kfree(monpriv);
}

/*
 * Allocate the per-open private data including all MON_MSGLIM message
 * buffers. Returns NULL if any allocation fails; everything allocated
 * so far is released before returning.
 */
static struct mon_private *mon_alloc_mem(void)
{
	struct mon_private *monpriv;
	int idx;

	monpriv = kzalloc(sizeof(*monpriv), GFP_KERNEL);
	if (!monpriv)
		return NULL;
	for (idx = 0; idx < MON_MSGLIM; idx++) {
		monpriv->msg_array[idx] = kzalloc(sizeof(struct mon_msg),
						  GFP_KERNEL);
		if (!monpriv->msg_array[idx]) {
			mon_free_mem(monpriv);
			return NULL;
		}
	}
	return monpriv;
}
/*
 * Advance to the next 12-byte MCA entry of the current message, unless
 * the current entry is already the last one.
 */
static inline void mon_next_mca(struct mon_msg *monmsg)
{
	if (likely((mon_mca_size(monmsg) - monmsg->mca_offset) == 12))
		return;
	monmsg->mca_offset += 12;
	monmsg->pos = 0;
}

/*
 * Return the next message to be read, NULL if none is ready, or
 * ERR_PTR(-EOVERFLOW) once for a slot whose message-limit reply was
 * deferred (see mon_send_reply()), after resetting that slot.
 */
static struct mon_msg *mon_next_message(struct mon_private *monpriv)
{
	struct mon_msg *monmsg;

	if (!atomic_read(&monpriv->read_ready))
		return NULL;
	monmsg = monpriv->msg_array[monpriv->read_index];
	if (unlikely(monmsg->replied_msglim)) {
		monmsg->replied_msglim = 0;
		monmsg->msglim_reached = 0;
		monmsg->pos = 0;
		monmsg->mca_offset = 0;
		monpriv->read_index = (monpriv->read_index + 1) %
				      MON_MSGLIM;
		atomic_dec(&monpriv->read_ready);
		return ERR_PTR(-EOVERFLOW);
	}
	return monmsg;
}
/******************************************************************************
* IUCV handler *
*****************************************************************************/
/* Path established: flag the connection and wake the waiter in mon_open(). */
static void mon_iucv_path_complete(struct iucv_path *path, u8 ipuser[16])
{
	struct mon_private *monpriv = path->private;

	atomic_set(&monpriv->iucv_connected, 1);
	wake_up(&mon_conn_wait_queue);
}

/* Path severed by z/VM: tear it down and wake both connection and
 * read waiters so they can observe iucv_severed. */
static void mon_iucv_path_severed(struct iucv_path *path, u8 ipuser[16])
{
	struct mon_private *monpriv = path->private;

	pr_err("z/VM *MONITOR system service disconnected with rc=%i\n",
	       ipuser[0]);
	iucv_path_sever(path, NULL);
	atomic_set(&monpriv->iucv_severed, 1);
	wake_up(&mon_conn_wait_queue);
	wake_up_interruptible(&mon_read_wait_queue);
}

/* New *MONITOR message: store it in the ring at write_index, note if
 * the message limit was hit, and wake any blocked reader. */
static void mon_iucv_message_pending(struct iucv_path *path,
				     struct iucv_message *msg)
{
	struct mon_private *monpriv = path->private;

	memcpy(&monpriv->msg_array[monpriv->write_index]->msg,
	       msg, sizeof(*msg));
	if (atomic_inc_return(&monpriv->msglim_count) == MON_MSGLIM) {
		pr_warning("The read queue for monitor data is full\n");
		monpriv->msg_array[monpriv->write_index]->msglim_reached = 1;
	}
	monpriv->write_index = (monpriv->write_index + 1) % MON_MSGLIM;
	atomic_inc(&monpriv->read_ready);
	wake_up_interruptible(&mon_read_wait_queue);
}

static struct iucv_handler monreader_iucv_handler = {
	.path_complete	 = mon_iucv_path_complete,
	.path_severed	 = mon_iucv_path_severed,
	.message_pending = mon_iucv_message_pending,
};
/******************************************************************************
* file operations *
*****************************************************************************/
/*
 * open(): allow a single user, allocate the message ring, connect an
 * IUCV path to the *MONITOR service and wait for the connection to be
 * confirmed (or severed) by the IUCV handler callbacks.
 * Error unwinding runs strictly in reverse order of acquisition.
 */
static int mon_open(struct inode *inode, struct file *filp)
{
	struct mon_private *monpriv;
	int rc;

	/*
	 * only one user allowed
	 */
	rc = -EBUSY;
	if (test_and_set_bit(MON_IN_USE, &mon_in_use))
		goto out;

	rc = -ENOMEM;
	monpriv = mon_alloc_mem();
	if (!monpriv)
		goto out_use;

	/*
	 * Connect to *MONITOR service
	 */
	/* rc is still -ENOMEM here, which also covers path allocation failure */
	monpriv->path = iucv_path_alloc(MON_MSGLIM, IUCV_IPRMDATA, GFP_KERNEL);
	if (!monpriv->path)
		goto out_priv;
	rc = iucv_path_connect(monpriv->path, &monreader_iucv_handler,
			       MON_SERVICE, NULL, user_data_connect, monpriv);
	if (rc) {
		pr_err("Connecting to the z/VM *MONITOR system service "
		       "failed with rc=%i\n", rc);
		rc = -EIO;
		goto out_path;
	}
	/*
	 * Wait for connection confirmation
	 */
	wait_event(mon_conn_wait_queue,
		   atomic_read(&monpriv->iucv_connected) ||
		   atomic_read(&monpriv->iucv_severed));
	if (atomic_read(&monpriv->iucv_severed)) {
		atomic_set(&monpriv->iucv_severed, 0);
		atomic_set(&monpriv->iucv_connected, 0);
		rc = -EIO;
		goto out_path;
	}
	filp->private_data = monpriv;
	dev_set_drvdata(monreader_device, monpriv);
	return nonseekable_open(inode, filp);

out_path:
	iucv_path_free(monpriv->path);
out_priv:
	mon_free_mem(monpriv);
out_use:
	clear_bit(MON_IN_USE, &mon_in_use);
out:
	return rc;
}
/*
 * close(): sever and free the IUCV path, reset all connection state,
 * release the message ring and clear the single-user flag.
 * Always returns 0.
 */
static int mon_close(struct inode *inode, struct file *filp)
{
	int rc;
	struct mon_private *monpriv = filp->private_data;

	/*
	 * Close IUCV connection and unregister
	 */
	if (monpriv->path) {
		rc = iucv_path_sever(monpriv->path, user_data_sever);
		if (rc)
			pr_warning("Disconnecting the z/VM *MONITOR system "
				   "service failed with rc=%i\n", rc);
		iucv_path_free(monpriv->path);
	}

	atomic_set(&monpriv->iucv_severed, 0);
	atomic_set(&monpriv->iucv_connected, 0);
	atomic_set(&monpriv->read_ready, 0);
	atomic_set(&monpriv->msglim_count, 0);
	monpriv->write_index = 0;
	monpriv->read_index = 0;
	dev_set_drvdata(monreader_device, NULL);
	/* reuse mon_free_mem() instead of duplicating its free loop */
	mon_free_mem(monpriv);
	clear_bit(MON_IN_USE, &mon_in_use);
	return 0;
}
/*
 * read(): stream the current message to userspace in two phases —
 * first the 12-byte monitor control element of the current MCA entry,
 * then the monitor records it points to. monmsg->pos tracks the
 * absolute DCSS address of the next byte to copy. When an entry is
 * exhausted, mon_next_mca() advances; when the whole message is done
 * (or its MCA fails validation), the message is replied to z/VM.
 * Blocks unless O_NONBLOCK is set.
 */
static ssize_t mon_read(struct file *filp, char __user *data,
			size_t count, loff_t *ppos)
{
	struct mon_private *monpriv = filp->private_data;
	struct mon_msg *monmsg;
	int ret;
	u32 mce_start;

	monmsg = mon_next_message(monpriv);
	if (IS_ERR(monmsg))
		return PTR_ERR(monmsg);	/* -EOVERFLOW after message-limit hit */

	if (!monmsg) {
		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;
		ret = wait_event_interruptible(mon_read_wait_queue,
					atomic_read(&monpriv->read_ready) ||
					atomic_read(&monpriv->iucv_severed));
		if (ret)
			return ret;
		if (unlikely(atomic_read(&monpriv->iucv_severed)))
			return -EIO;
		monmsg = monpriv->msg_array[monpriv->read_index];
	}

	/* first read of this message: position at the current MCA entry */
	if (!monmsg->pos)
		monmsg->pos = mon_mca_start(monmsg) + monmsg->mca_offset;
	if (mon_check_mca(monmsg))
		goto reply;

	/* read monitor control element (12 bytes) first */
	mce_start = mon_mca_start(monmsg) + monmsg->mca_offset;
	if ((monmsg->pos >= mce_start) && (monmsg->pos < mce_start + 12)) {
		count = min(count, (size_t) mce_start + 12 - monmsg->pos);
		ret = copy_to_user(data, (void *) (unsigned long) monmsg->pos,
				   count);
		if (ret)
			return -EFAULT;
		monmsg->pos += count;
		/* control element done: continue with the records */
		if (monmsg->pos == mce_start + 12)
			monmsg->pos = mon_rec_start(monmsg);
		goto out_copy;
	}

	/* read records */
	if (monmsg->pos <= mon_rec_end(monmsg)) {
		count = min(count, (size_t) mon_rec_end(monmsg) - monmsg->pos
			    + 1);
		ret = copy_to_user(data, (void *) (unsigned long) monmsg->pos,
				   count);
		if (ret)
			return -EFAULT;
		monmsg->pos += count;
		if (monmsg->pos > mon_rec_end(monmsg))
			mon_next_mca(monmsg);
		goto out_copy;
	}
reply:
	ret = mon_send_reply(monmsg, monpriv);
	return ret;

out_copy:
	*ppos += count;
	return count;
}
/*
 * poll(): POLLERR once the IUCV path has been severed, POLLIN/POLLRDNORM
 * as soon as at least one message is ready, 0 otherwise.
 */
static unsigned int mon_poll(struct file *filp, struct poll_table_struct *p)
{
	struct mon_private *monpriv = filp->private_data;
	unsigned int mask = 0;

	poll_wait(filp, &mon_read_wait_queue, p);
	if (unlikely(atomic_read(&monpriv->iucv_severed)))
		mask = POLLERR;
	else if (atomic_read(&monpriv->read_ready))
		mask = POLLIN | POLLRDNORM;
	return mask;
}
static const struct file_operations mon_fops = {
.owner = THIS_MODULE,
.open = &mon_open,
.release = &mon_close,
.read = &mon_read,
.poll = &mon_poll,
.llseek = noop_llseek,
};
static struct miscdevice mon_dev = {
.name = "monreader",
.fops = &mon_fops,
.minor = MISC_DYNAMIC_MINOR,
};
/******************************************************************************
* suspend / resume *
*****************************************************************************/
/*
 * PM freeze: sever and free the IUCV path (if open) and reset all
 * connection state so thaw/restore can rebuild it from scratch.
 */
static int monreader_freeze(struct device *dev)
{
	struct mon_private *monpriv = dev_get_drvdata(dev);
	int rc;

	if (!monpriv)		/* device not open, nothing to do */
		return 0;
	if (monpriv->path) {
		rc = iucv_path_sever(monpriv->path, user_data_sever);
		if (rc)
			pr_warning("Disconnecting the z/VM *MONITOR system "
				   "service failed with rc=%i\n", rc);
		iucv_path_free(monpriv->path);
	}
	atomic_set(&monpriv->iucv_severed, 0);
	atomic_set(&monpriv->iucv_connected, 0);
	atomic_set(&monpriv->read_ready, 0);
	atomic_set(&monpriv->msglim_count, 0);
	monpriv->write_index = 0;
	monpriv->read_index = 0;
	monpriv->path = NULL;
	return 0;
}

/*
 * PM thaw: reconnect the IUCV path that freeze tore down. On any
 * failure the device is marked severed so readers get -EIO instead of
 * blocking forever.
 */
static int monreader_thaw(struct device *dev)
{
	struct mon_private *monpriv = dev_get_drvdata(dev);
	int rc;

	if (!monpriv)
		return 0;
	rc = -ENOMEM;
	monpriv->path = iucv_path_alloc(MON_MSGLIM, IUCV_IPRMDATA, GFP_KERNEL);
	if (!monpriv->path)
		goto out;
	rc = iucv_path_connect(monpriv->path, &monreader_iucv_handler,
			       MON_SERVICE, NULL, user_data_connect, monpriv);
	if (rc) {
		pr_err("Connecting to the z/VM *MONITOR system service "
		       "failed with rc=%i\n", rc);
		goto out_path;
	}
	wait_event(mon_conn_wait_queue,
		   atomic_read(&monpriv->iucv_connected) ||
		   atomic_read(&monpriv->iucv_severed));
	if (atomic_read(&monpriv->iucv_severed))
		goto out_path;
	return 0;
out_path:
	rc = -EIO;
	iucv_path_free(monpriv->path);
	monpriv->path = NULL;
out:
	atomic_set(&monpriv->iucv_severed, 1);	/* wake readers with -EIO */
	return rc;
}

/*
 * PM restore (after hibernation): the DCSS mapping is gone, so reload
 * the segment before reconnecting. A missing monitor DCSS at this
 * point is unrecoverable, hence the panic.
 */
static int monreader_restore(struct device *dev)
{
	int rc;

	segment_unload(mon_dcss_name);
	rc = segment_load(mon_dcss_name, SEGMENT_SHARED,
			  &mon_dcss_start, &mon_dcss_end);
	if (rc < 0) {
		segment_warning(rc, mon_dcss_name);
		panic("fatal monreader resume error: no monitor dcss\n");
	}
	return monreader_thaw(dev);
}
static const struct dev_pm_ops monreader_pm_ops = {
.freeze = monreader_freeze,
.thaw = monreader_thaw,
.restore = monreader_restore,
};
static struct device_driver monreader_driver = {
.name = "monreader",
.bus = &iucv_bus,
.pm = &monreader_pm_ops,
};
/******************************************************************************
* module init/exit *
*****************************************************************************/
/*
 * Module init: register the IUCV handler and driver, create the
 * pseudo device used for PM callbacks, validate and load the monitor
 * DCSS, and finally register the misc device. misc_register() must
 * come last because the char device becomes usable immediately.
 */
static int __init mon_init(void)
{
	int rc;

	if (!MACHINE_IS_VM) {
		pr_err("The z/VM *MONITOR record device driver cannot be "
		       "loaded without z/VM\n");
		return -ENODEV;
	}

	/*
	 * Register with IUCV and connect to *MONITOR service
	 */
	rc = iucv_register(&monreader_iucv_handler, 1);
	if (rc) {
		pr_err("The z/VM *MONITOR record device driver failed to "
		       "register with IUCV\n");
		return rc;
	}

	rc = driver_register(&monreader_driver);
	if (rc)
		goto out_iucv;
	monreader_device = kzalloc(sizeof(struct device), GFP_KERNEL);
	if (!monreader_device) {
		rc = -ENOMEM;
		goto out_driver;
	}

	dev_set_name(monreader_device, "monreader-dev");
	monreader_device->bus = &iucv_bus;
	monreader_device->parent = iucv_root;
	monreader_device->driver = &monreader_driver;
	/* release just frees the kzalloc'd struct device */
	monreader_device->release = (void (*)(struct device *))kfree;
	rc = device_register(monreader_device);
	if (rc) {
		/* device_register failure: drop the reference, don't unregister */
		put_device(monreader_device);
		goto out_driver;
	}

	/* the DCSS must exist and be of type SC before it can be loaded */
	rc = segment_type(mon_dcss_name);
	if (rc < 0) {
		segment_warning(rc, mon_dcss_name);
		goto out_device;
	}
	if (rc != SEG_TYPE_SC) {
		pr_err("The specified *MONITOR DCSS %s does not have the "
		       "required type SC\n", mon_dcss_name);
		rc = -EINVAL;
		goto out_device;
	}

	rc = segment_load(mon_dcss_name, SEGMENT_SHARED,
			  &mon_dcss_start, &mon_dcss_end);
	if (rc < 0) {
		segment_warning(rc, mon_dcss_name);
		rc = -EINVAL;
		goto out_device;
	}
	/* put the EBCDIC DCSS name into the IUCV connect user data */
	dcss_mkname(mon_dcss_name, &user_data_connect[8]);

	/*
	 * misc_register() has to be the last action in module_init(), because
	 * file operations will be available right after this.
	 */
	rc = misc_register(&mon_dev);
	if (rc < 0 )
		goto out;
	return 0;

out:
	segment_unload(mon_dcss_name);
out_device:
	device_unregister(monreader_device);
out_driver:
	driver_unregister(&monreader_driver);
out_iucv:
	iucv_unregister(&monreader_iucv_handler, 1);
	return rc;
}
/*
 * Module teardown: release all resources in reverse order of mon_init().
 */
static void __exit mon_exit(void)
{
	segment_unload(mon_dcss_name);
	misc_deregister(&mon_dev);
	device_unregister(monreader_device);
	driver_unregister(&monreader_driver);
	iucv_unregister(&monreader_iucv_handler, 1);
}
module_init(mon_init);
module_exit(mon_exit);
module_param_string(mondcss, mon_dcss_name, 9, 0444);
MODULE_PARM_DESC(mondcss, "Name of DCSS segment to be used for *MONITOR "
"service, max. 8 chars. Default is MONDCSS");
MODULE_AUTHOR("Gerald Schaefer <geraldsc@de.ibm.com>");
MODULE_DESCRIPTION("Character device driver for reading z/VM "
"monitor service records.");
MODULE_LICENSE("GPL");

View file

@ -0,0 +1,399 @@
/*
* Character device driver for writing z/VM *MONITOR service records.
*
* Copyright IBM Corp. 2006, 2009
*
* Author(s): Melissa Howland <Melissa.Howland@us.ibm.com>
*/
#define KMSG_COMPONENT "monwriter"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/ctype.h>
#include <linux/poll.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <asm/ebcdic.h>
#include <asm/io.h>
#include <asm/appldata.h>
#include <asm/monwriter.h>
#define MONWRITE_MAX_DATALEN 4010
static int mon_max_bufs = 255;
static int mon_buf_count;
struct mon_buf {
struct list_head list;
struct monwrite_hdr hdr;
int diag_done;
char *data;
};
static LIST_HEAD(mon_priv_list);
struct mon_private {
struct list_head priv_list;
struct list_head list;
struct monwrite_hdr hdr;
size_t hdr_to_read;
size_t data_to_read;
struct mon_buf *current_buf;
struct mutex thread_mutex;
};
/*
* helper functions
*/
/*
 * Issue an APPLDATA monitor call for one record.
 * Returns 0 on success, -EPERM if z/VM rejected the call with rc 5
 * (presumably a permission problem — TODO confirm against the
 * APPLDATA documentation), -EINVAL for any other positive rc.
 */
static int monwrite_diag(struct monwrite_hdr *myhdr, char *buffer, int fcn)
{
	struct appldata_product_id id;
	int rc;

	/* prod_nr is a fixed 7-byte field, deliberately not NUL-terminated */
	strncpy(id.prod_nr, "LNXAPPL", 7);
	id.prod_fn = myhdr->applid;
	id.record_nr = myhdr->record_num;
	id.version_nr = myhdr->version;
	id.release_nr = myhdr->release;
	id.mod_lvl = myhdr->mod_level;
	rc = appldata_asm(&id, fcn, (void *) buffer, myhdr->datalen);
	if (rc <= 0)
		return rc;
	pr_err("Writing monitor data failed with rc=%i\n", rc);
	if (rc == 5)
		return -EPERM;
	return -EINVAL;
}
static struct mon_buf *monwrite_find_hdr(struct mon_private *monpriv,
struct monwrite_hdr *monhdr)
{
struct mon_buf *entry, *next;
list_for_each_entry_safe(entry, next, &monpriv->list, list)
if ((entry->hdr.mon_function == monhdr->mon_function ||
monhdr->mon_function == MONWRITE_STOP_INTERVAL) &&
entry->hdr.applid == monhdr->applid &&
entry->hdr.record_num == monhdr->record_num &&
entry->hdr.version == monhdr->version &&
entry->hdr.release == monhdr->release &&
entry->hdr.mod_level == monhdr->mod_level)
return entry;
return NULL;
}
/*
 * Process a freshly read record header: validate it, then either stop
 * and free an existing interval buffer (STOP_INTERVAL), or allocate a
 * new buffer for the record data. monpriv->current_buf points at the
 * buffer the following data bytes belong to (NULL after a stop).
 *
 * NOTE(review): datalen == 0 is not rejected here, so kzalloc(0) can
 * be reached below — confirm callers never send a zero-length record.
 */
static int monwrite_new_hdr(struct mon_private *monpriv)
{
	struct monwrite_hdr *monhdr = &monpriv->hdr;
	struct mon_buf *monbuf;
	int rc = 0;

	if (monhdr->datalen > MONWRITE_MAX_DATALEN ||
	    monhdr->mon_function > MONWRITE_START_CONFIG ||
	    monhdr->hdrlen != sizeof(struct monwrite_hdr))
		return -EINVAL;
	monbuf = NULL;
	/* GEN_EVENT buffers are one-shot and never looked up */
	if (monhdr->mon_function != MONWRITE_GEN_EVENT)
		monbuf = monwrite_find_hdr(monpriv, monhdr);
	if (monbuf) {
		if (monhdr->mon_function == MONWRITE_STOP_INTERVAL) {
			monhdr->datalen = monbuf->hdr.datalen;
			rc = monwrite_diag(monhdr, monbuf->data,
					   APPLDATA_STOP_REC);
			list_del(&monbuf->list);
			mon_buf_count--;
			kfree(monbuf->data);
			kfree(monbuf);
			monbuf = NULL;
		}
	} else if (monhdr->mon_function != MONWRITE_STOP_INTERVAL) {
		/* stop for an unknown buffer is silently ignored */
		if (mon_buf_count >= mon_max_bufs)
			return -ENOSPC;
		monbuf = kzalloc(sizeof(struct mon_buf), GFP_KERNEL);
		if (!monbuf)
			return -ENOMEM;
		monbuf->data = kzalloc(monhdr->datalen,
				       GFP_KERNEL | GFP_DMA);
		if (!monbuf->data) {
			kfree(monbuf);
			return -ENOMEM;
		}
		monbuf->hdr = *monhdr;
		list_add_tail(&monbuf->list, &monpriv->list);
		/* GEN_EVENT buffers don't count against mon_max_bufs */
		if (monhdr->mon_function != MONWRITE_GEN_EVENT)
			mon_buf_count++;
	}
	monpriv->current_buf = monbuf;
	return rc;
}
/*
 * Called once a record's data has been fully copied in: issue the
 * APPLDATA call appropriate for the record's mon_function. Interval
 * and config records are started only once per buffer (diag_done);
 * generated events are one-shot — the buffer is freed immediately.
 */
static int monwrite_new_data(struct mon_private *monpriv)
{
	struct monwrite_hdr *monhdr = &monpriv->hdr;
	struct mon_buf *monbuf = monpriv->current_buf;
	int rc = 0;

	switch (monhdr->mon_function) {
	case MONWRITE_START_INTERVAL:
		if (!monbuf->diag_done) {
			rc = monwrite_diag(monhdr, monbuf->data,
					   APPLDATA_START_INTERVAL_REC);
			monbuf->diag_done = 1;
		}
		break;
	case MONWRITE_START_CONFIG:
		if (!monbuf->diag_done) {
			rc = monwrite_diag(monhdr, monbuf->data,
					   APPLDATA_START_CONFIG_REC);
			monbuf->diag_done = 1;
		}
		break;
	case MONWRITE_GEN_EVENT:
		rc = monwrite_diag(monhdr, monbuf->data,
				   APPLDATA_GEN_EVENT_REC);
		list_del(&monpriv->current_buf->list);
		kfree(monpriv->current_buf->data);
		kfree(monpriv->current_buf);
		monpriv->current_buf = NULL;
		break;
	default:
		/* monhdr->mon_function is checked in monwrite_new_hdr */
		BUG();
	}
	return rc;
}
/*
* file operations
*/
/*
 * open(): allocate per-open state, expecting a record header as the
 * first hdr_to_read bytes written, and track the open instance on
 * mon_priv_list for the PM callbacks.
 *
 * NOTE(review): mon_priv_list is modified here without a lock —
 * presumably open/close are serialized by the VFS or this relies on
 * single-user semantics; verify before relying on it.
 */
static int monwrite_open(struct inode *inode, struct file *filp)
{
	struct mon_private *monpriv;

	monpriv = kzalloc(sizeof(struct mon_private), GFP_KERNEL);
	if (!monpriv)
		return -ENOMEM;
	INIT_LIST_HEAD(&monpriv->list);
	monpriv->hdr_to_read = sizeof(monpriv->hdr);
	mutex_init(&monpriv->thread_mutex);
	filp->private_data = monpriv;
	list_add_tail(&monpriv->priv_list, &mon_priv_list);
	return nonseekable_open(inode, filp);
}

/*
 * close(): stop every still-active non-event record via APPLDATA and
 * free all buffers and the private data. Always returns 0.
 */
static int monwrite_close(struct inode *inode, struct file *filp)
{
	struct mon_private *monpriv = filp->private_data;
	struct mon_buf *entry, *next;

	list_for_each_entry_safe(entry, next, &monpriv->list, list) {
		if (entry->hdr.mon_function != MONWRITE_GEN_EVENT)
			monwrite_diag(&entry->hdr, entry->data,
				      APPLDATA_STOP_REC);
		mon_buf_count--;
		list_del(&entry->list);
		kfree(entry->data);
		kfree(entry);
	}
	list_del(&monpriv->priv_list);
	kfree(monpriv);
	return 0;
}
/*
 * write(): consume a stream of (monwrite_hdr, data) pairs. The state
 * machine alternates between filling the header (hdr_to_read counts
 * the bytes still missing) and filling the current buffer's data
 * (data_to_read). Partial writes resume exactly where they stopped.
 * On error the partial state is discarded and the next write must
 * start with a fresh header.
 */
static ssize_t monwrite_write(struct file *filp, const char __user *data,
			      size_t count, loff_t *ppos)
{
	struct mon_private *monpriv = filp->private_data;
	size_t len, written;
	void *to;
	int rc;

	mutex_lock(&monpriv->thread_mutex);
	for (written = 0; written < count; ) {
		if (monpriv->hdr_to_read) {
			len = min(count - written, monpriv->hdr_to_read);
			/* continue filling the header where we left off */
			to = (char *) &monpriv->hdr +
				sizeof(monpriv->hdr) - monpriv->hdr_to_read;
			if (copy_from_user(to, data + written, len)) {
				rc = -EFAULT;
				goto out_error;
			}
			monpriv->hdr_to_read -= len;
			written += len;
			if (monpriv->hdr_to_read > 0)
				continue;
			rc = monwrite_new_hdr(monpriv);
			if (rc)
				goto out_error;
			/* no current_buf after a STOP record: no data follows */
			monpriv->data_to_read = monpriv->current_buf ?
				monpriv->current_buf->hdr.datalen : 0;
		}

		if (monpriv->data_to_read) {
			len = min(count - written, monpriv->data_to_read);
			to = monpriv->current_buf->data +
				monpriv->hdr.datalen - monpriv->data_to_read;
			if (copy_from_user(to, data + written, len)) {
				rc = -EFAULT;
				goto out_error;
			}
			monpriv->data_to_read -= len;
			written += len;
			if (monpriv->data_to_read > 0)
				continue;
			rc = monwrite_new_data(monpriv);
			if (rc)
				goto out_error;
		}
		monpriv->hdr_to_read = sizeof(monpriv->hdr);
	}
	mutex_unlock(&monpriv->thread_mutex);
	return written;

out_error:
	/* drop partial state: next write starts with a new header */
	monpriv->data_to_read = 0;
	monpriv->hdr_to_read = sizeof(struct monwrite_hdr);
	mutex_unlock(&monpriv->thread_mutex);
	return rc;
}
static const struct file_operations monwrite_fops = {
.owner = THIS_MODULE,
.open = &monwrite_open,
.release = &monwrite_close,
.write = &monwrite_write,
.llseek = noop_llseek,
};
static struct miscdevice mon_dev = {
.name = "monwriter",
.fops = &monwrite_fops,
.minor = MISC_DYNAMIC_MINOR,
};
/*
* suspend/resume
*/
/*
 * PM freeze: stop every active non-event record on all open instances.
 * Buffers are kept so restore/thaw can restart them.
 */
static int monwriter_freeze(struct device *dev)
{
	struct mon_private *monpriv;
	struct mon_buf *monbuf;

	list_for_each_entry(monpriv, &mon_priv_list, priv_list) {
		list_for_each_entry(monbuf, &monpriv->list, list) {
			if (monbuf->hdr.mon_function != MONWRITE_GEN_EVENT)
				monwrite_diag(&monbuf->hdr, monbuf->data,
					      APPLDATA_STOP_REC);
		}
	}
	return 0;
}

/*
 * PM restore: restart every interval/config record that freeze stopped.
 * Errors from monwrite_diag() are intentionally ignored (best effort).
 */
static int monwriter_restore(struct device *dev)
{
	struct mon_private *monpriv;
	struct mon_buf *monbuf;

	list_for_each_entry(monpriv, &mon_priv_list, priv_list) {
		list_for_each_entry(monbuf, &monpriv->list, list) {
			if (monbuf->hdr.mon_function == MONWRITE_START_INTERVAL)
				monwrite_diag(&monbuf->hdr, monbuf->data,
					      APPLDATA_START_INTERVAL_REC);
			if (monbuf->hdr.mon_function == MONWRITE_START_CONFIG)
				monwrite_diag(&monbuf->hdr, monbuf->data,
					      APPLDATA_START_CONFIG_REC);
		}
	}
	return 0;
}

/* PM thaw is identical to restore. */
static int monwriter_thaw(struct device *dev)
{
	return monwriter_restore(dev);
}
static const struct dev_pm_ops monwriter_pm_ops = {
.freeze = monwriter_freeze,
.thaw = monwriter_thaw,
.restore = monwriter_restore,
};
static struct platform_driver monwriter_pdrv = {
.driver = {
.name = "monwriter",
.owner = THIS_MODULE,
.pm = &monwriter_pm_ops,
},
};
static struct platform_device *monwriter_pdev;
/*
* module init/exit
*/
/*
 * Module init: register the platform driver and device used for the
 * PM callbacks, then the misc device. misc_register() must come last
 * because file operations become available immediately.
 */
static int __init mon_init(void)
{
	int rc;

	if (!MACHINE_IS_VM)
		return -ENODEV;

	rc = platform_driver_register(&monwriter_pdrv);
	if (rc)
		return rc;

	monwriter_pdev = platform_device_register_simple("monwriter", -1, NULL,
							0);
	if (IS_ERR(monwriter_pdev)) {
		rc = PTR_ERR(monwriter_pdev);
		goto out_driver;
	}

	/*
	 * misc_register() has to be the last action in module_init(), because
	 * file operations will be available right after this.
	 */
	rc = misc_register(&mon_dev);
	if (rc)
		goto out_device;
	return 0;

out_device:
	platform_device_unregister(monwriter_pdev);
out_driver:
	platform_driver_unregister(&monwriter_pdrv);
	return rc;
}

/* Module teardown: reverse order of mon_init(). */
static void __exit mon_exit(void)
{
	misc_deregister(&mon_dev);
	platform_device_unregister(monwriter_pdev);
	platform_driver_unregister(&monwriter_pdrv);
}
module_init(mon_init);
module_exit(mon_exit);
module_param_named(max_bufs, mon_max_bufs, int, 0644);
MODULE_PARM_DESC(max_bufs, "Maximum number of sample monitor data buffers "
"that can be active at one time");
MODULE_AUTHOR("Melissa Howland <Melissa.Howland@us.ibm.com>");
MODULE_DESCRIPTION("Character device driver for writing z/VM "
"APPLDATA monitor records.");
MODULE_LICENSE("GPL");

1393
drivers/s390/char/raw3270.c Normal file

File diff suppressed because it is too large Load diff

288
drivers/s390/char/raw3270.h Normal file
View file

@ -0,0 +1,288 @@
/*
* IBM/3270 Driver
*
* Author(s):
* Original 3270 Code for 2.4 written by Richard Hitt (UTS Global)
* Rewritten for 2.5 by Martin Schwidefsky <schwidefsky@de.ibm.com>
* Copyright IBM Corp. 2003, 2009
*/
#include <asm/idals.h>
#include <asm/ioctl.h>
/* ioctls for fullscreen 3270 */
#define TUBICMD _IO('3', 3) /* set ccw command for fs reads. */
#define TUBOCMD _IO('3', 4) /* set ccw command for fs writes. */
#define TUBGETI _IO('3', 7) /* get ccw command for fs reads. */
#define TUBGETO _IO('3', 8) /* get ccw command for fs writes. */
#define TUBSETMOD _IO('3',12) /* FIXME: what does it do ?*/
#define TUBGETMOD _IO('3',13) /* FIXME: what does it do ?*/
/* Local Channel Commands */
#define TC_WRITE 0x01 /* Write */
#define TC_RDBUF 0x02 /* Read Buffer */
#define TC_EWRITE 0x05 /* Erase write */
#define TC_READMOD 0x06 /* Read modified */
#define TC_EWRITEA 0x0d /* Erase write alternate */
#define TC_WRITESF 0x11 /* Write structured field */
/* Buffer Control Orders */
#define TO_SF 0x1d /* Start field */
#define TO_SBA 0x11 /* Set buffer address */
#define TO_IC 0x13 /* Insert cursor */
#define TO_PT 0x05 /* Program tab */
#define TO_RA 0x3c /* Repeat to address */
#define TO_SFE 0x29 /* Start field extended */
#define TO_EUA 0x12 /* Erase unprotected to address */
#define TO_MF 0x2c /* Modify field */
#define TO_SA 0x28 /* Set attribute */
/* Field Attribute Bytes */
#define TF_INPUT 0x40 /* Visible input */
#define TF_INPUTN 0x4c /* Invisible input */
#define TF_INMDT 0xc1 /* Visible, Set-MDT */
#define TF_LOG 0x60
/* Character Attribute Bytes */
#define TAT_RESET 0x00
#define TAT_FIELD 0xc0
#define TAT_EXTHI 0x41
#define TAT_COLOR 0x42
#define TAT_CHARS 0x43
#define TAT_TRANS 0x46
/* Extended-Highlighting Bytes */
#define TAX_RESET 0x00
#define TAX_BLINK 0xf1
#define TAX_REVER 0xf2
#define TAX_UNDER 0xf4
/* Reset value */
#define TAR_RESET 0x00
/* Color values */
#define TAC_RESET 0x00
#define TAC_BLUE 0xf1
#define TAC_RED 0xf2
#define TAC_PINK 0xf3
#define TAC_GREEN 0xf4
#define TAC_TURQ 0xf5
#define TAC_YELLOW 0xf6
#define TAC_WHITE 0xf7
#define TAC_DEFAULT 0x00
/* Write Control Characters */
#define TW_NONE 0x40 /* No particular action */
#define TW_KR 0xc2 /* Keyboard restore */
#define TW_PLUSALARM 0x04 /* Add this bit for alarm */
#define RAW3270_FIRSTMINOR 1 /* First minor number */
#define RAW3270_MAXDEVS 255 /* Max number of 3270 devices */
/* For TUBGETMOD and TUBSETMOD. Should include. */
struct raw3270_iocb {
short model;
short line_cnt;
short col_cnt;
short pf_cnt;
short re_cnt;
short map;
};
struct raw3270;
struct raw3270_view;
extern struct class *class3270;
/* 3270 CCW request */
struct raw3270_request {
struct list_head list; /* list head for request queueing. */
struct raw3270_view *view; /* view of this request */
struct ccw1 ccw; /* single ccw. */
void *buffer; /* output buffer. */
size_t size; /* size of output buffer. */
int rescnt; /* residual count from devstat. */
int rc; /* return code for this request. */
/* Callback for delivering final status. */
void (*callback)(struct raw3270_request *, void *);
void *callback_data;
};
struct raw3270_request *raw3270_request_alloc(size_t size);
struct raw3270_request *raw3270_request_alloc_bootmem(size_t size);
void raw3270_request_free(struct raw3270_request *);
void raw3270_request_reset(struct raw3270_request *);
void raw3270_request_set_cmd(struct raw3270_request *, u8 cmd);
int raw3270_request_add_data(struct raw3270_request *, void *, size_t);
void raw3270_request_set_data(struct raw3270_request *, void *, size_t);
void raw3270_request_set_idal(struct raw3270_request *, struct idal_buffer *);
/* True once the request is no longer queued, i.e. processing finished. */
static inline int
raw3270_request_final(struct raw3270_request *rq)
{
	return list_empty(&rq->list);
}
void raw3270_buffer_address(struct raw3270 *, char *, unsigned short);
/* Return value of *intv (see raw3270_fn below) can be one of the following: */
#define RAW3270_IO_DONE 0 /* request finished */
#define RAW3270_IO_BUSY 1 /* request still active */
#define RAW3270_IO_RETRY 2 /* retry current request */
#define RAW3270_IO_STOP 3 /* kill current request */
/*
* Functions of a 3270 view.
*/
struct raw3270_fn {
int (*activate)(struct raw3270_view *);
void (*deactivate)(struct raw3270_view *);
int (*intv)(struct raw3270_view *,
struct raw3270_request *, struct irb *);
void (*release)(struct raw3270_view *);
void (*free)(struct raw3270_view *);
void (*resize)(struct raw3270_view *, int, int, int);
};
/*
* View structure chaining. The raw3270_view structure is meant to
* be embedded at the start of the real view data structure, e.g.:
* struct example {
* struct raw3270_view view;
* ...
* };
*/
struct raw3270_view {
struct list_head list;
spinlock_t lock;
atomic_t ref_count;
struct raw3270 *dev;
struct raw3270_fn *fn;
unsigned int model;
unsigned int rows, cols; /* # of rows & colums of the view */
unsigned char *ascebc; /* ascii -> ebcdic table */
};
int raw3270_add_view(struct raw3270_view *, struct raw3270_fn *, int);
int raw3270_activate_view(struct raw3270_view *);
void raw3270_del_view(struct raw3270_view *);
void raw3270_deactivate_view(struct raw3270_view *);
struct raw3270_view *raw3270_find_view(struct raw3270_fn *, int);
int raw3270_start(struct raw3270_view *, struct raw3270_request *);
int raw3270_start_locked(struct raw3270_view *, struct raw3270_request *);
int raw3270_start_irq(struct raw3270_view *, struct raw3270_request *);
int raw3270_reset(struct raw3270_view *);
struct raw3270_view *raw3270_view(struct raw3270_view *);
int raw3270_view_active(struct raw3270_view *);
/* Reference count inliner for view structures. */
/* Take a reference on a view. */
static inline void
raw3270_get_view(struct raw3270_view *view)
{
	atomic_inc(&view->ref_count);
}

extern wait_queue_head_t raw3270_wait_queue;

/* Drop a reference; wake waiters (e.g. raw3270_del_view — TODO confirm)
 * when the last reference goes away. */
static inline void
raw3270_put_view(struct raw3270_view *view)
{
	if (atomic_dec_return(&view->ref_count) == 0)
		wake_up(&raw3270_wait_queue);
}
struct raw3270 *raw3270_setup_console(void);
void raw3270_wait_cons_dev(struct raw3270 *);
/* Notifier for device addition/removal */
struct raw3270_notifier {
struct list_head list;
void (*create)(int minor);
void (*destroy)(int minor);
};
int raw3270_register_notifier(struct raw3270_notifier *);
void raw3270_unregister_notifier(struct raw3270_notifier *);
void raw3270_pm_unfreeze(struct raw3270_view *);
/*
* Little memory allocator for string objects.
*/
/* A string object handed out by the free-list allocator below. The
 * character data directly follows the header in the same allocation. */
struct string
{
	struct list_head list;		/* link in a free or allocated list */
	struct list_head update;	/* link in an update queue */
	unsigned long size;		/* usable bytes following the header */
	unsigned long len;		/* current string length */
	char string[];			/* C99 flexible array member (was GNU-only [0]) */
} __attribute__ ((aligned(8)));
static inline struct string *
alloc_string(struct list_head *free_list, unsigned long len)
{
struct string *cs, *tmp;
unsigned long size;
size = (len + 7L) & -8L;
list_for_each_entry(cs, free_list, list) {
if (cs->size < size)
continue;
if (cs->size > size + sizeof(struct string)) {
char *endaddr = (char *) (cs + 1) + cs->size;
tmp = (struct string *) (endaddr - size) - 1;
tmp->size = size;
cs->size -= size + sizeof(struct string);
cs = tmp;
} else
list_del(&cs->list);
cs->len = len;
INIT_LIST_HEAD(&cs->list);
INIT_LIST_HEAD(&cs->update);
return cs;
}
return NULL;
}
static inline unsigned long
free_string(struct list_head *free_list, struct string *cs)
{
struct string *tmp;
struct list_head *p, *left;
/* Find out the left neighbour in free memory list. */
left = free_list;
list_for_each(p, free_list) {
if (list_entry(p, struct string, list) > cs)
break;
left = p;
}
/* Try to merge with right neighbour = next element from left. */
if (left->next != free_list) {
tmp = list_entry(left->next, struct string, list);
if ((char *) (cs + 1) + cs->size == (char *) tmp) {
list_del(&tmp->list);
cs->size += tmp->size + sizeof(struct string);
}
}
/* Try to merge with left neighbour. */
if (left != free_list) {
tmp = list_entry(left, struct string, list);
if ((char *) (tmp + 1) + tmp->size == (char *) cs) {
tmp->size += cs->size + sizeof(struct string);
return tmp->size;
}
}
__list_add(&cs->list, left, left->next);
return cs->size;
}
/*
 * Donate the memory region @mem of @size bytes to the string allocator:
 * initialize it as one big chunk and release it onto @free_list.
 */
static inline void
add_string_memory(struct list_head *free_list, void *mem, unsigned long size)
{
	struct string *chunk = mem;

	chunk->size = size - sizeof(struct string);
	free_string(free_list, chunk);
}

1273
drivers/s390/char/sclp.c Normal file

File diff suppressed because it is too large Load diff

244
drivers/s390/char/sclp.h Normal file
View file

@ -0,0 +1,244 @@
/*
* Copyright IBM Corp. 1999,2012
*
* Author(s): Martin Peschke <mpeschke@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
#ifndef __SCLP_H__
#define __SCLP_H__
#include <linux/types.h>
#include <linux/list.h>
#include <asm/sclp.h>
#include <asm/ebcdic.h>
/* maximum number of pages concerning our own memory management */
#define MAX_KMEM_PAGES (sizeof(unsigned long) << 3)
#define SCLP_CONSOLE_PAGES 6
#define EVTYP_OPCMD 0x01
#define EVTYP_MSG 0x02
#define EVTYP_DIAG_TEST 0x07
#define EVTYP_STATECHANGE 0x08
#define EVTYP_PMSGCMD 0x09
#define EVTYP_CNTLPROGOPCMD 0x20
#define EVTYP_CNTLPROGIDENT 0x0B
#define EVTYP_SIGQUIESCE 0x1D
#define EVTYP_VT220MSG 0x1A
#define EVTYP_CONFMGMDATA 0x04
#define EVTYP_SDIAS 0x1C
#define EVTYP_ASYNC 0x0A
#define EVTYP_OCF 0x1E
#define EVTYP_OPCMD_MASK 0x80000000
#define EVTYP_MSG_MASK 0x40000000
#define EVTYP_DIAG_TEST_MASK 0x02000000
#define EVTYP_STATECHANGE_MASK 0x01000000
#define EVTYP_PMSGCMD_MASK 0x00800000
#define EVTYP_CTLPROGOPCMD_MASK 0x00000001
#define EVTYP_CTLPROGIDENT_MASK 0x00200000
#define EVTYP_SIGQUIESCE_MASK 0x00000008
#define EVTYP_VT220MSG_MASK 0x00000040
#define EVTYP_CONFMGMDATA_MASK 0x10000000
#define EVTYP_SDIAS_MASK 0x00000010
#define EVTYP_ASYNC_MASK 0x00400000
#define EVTYP_OCF_MASK 0x00000004
#define GNRLMSGFLGS_DOM 0x8000
#define GNRLMSGFLGS_SNDALRM 0x4000
#define GNRLMSGFLGS_HOLDMSG 0x2000
#define LNTPFLGS_CNTLTEXT 0x8000
#define LNTPFLGS_LABELTEXT 0x4000
#define LNTPFLGS_DATATEXT 0x2000
#define LNTPFLGS_ENDTEXT 0x1000
#define LNTPFLGS_PROMPTTEXT 0x0800
typedef unsigned int sclp_cmdw_t;
#define SCLP_CMDW_READ_EVENT_DATA 0x00770005
#define SCLP_CMDW_WRITE_EVENT_DATA 0x00760005
#define SCLP_CMDW_WRITE_EVENT_MASK 0x00780005
#define GDS_ID_MDSMU 0x1310
#define GDS_ID_MDSROUTEINFO 0x1311
#define GDS_ID_AGUNWRKCORR 0x1549
#define GDS_ID_SNACONDREPORT 0x1532
#define GDS_ID_CPMSU 0x1212
#define GDS_ID_ROUTTARGINSTR 0x154D
#define GDS_ID_OPREQ 0x8070
#define GDS_ID_TEXTCMD 0x1320
#define GDS_KEY_SELFDEFTEXTMSG 0x31
enum sclp_pm_event {
SCLP_PM_EVENT_FREEZE,
SCLP_PM_EVENT_THAW,
SCLP_PM_EVENT_RESTORE,
};
#define SCLP_PANIC_PRIO 1
#define SCLP_PANIC_PRIO_CLIENT 0
typedef u32 sccb_mask_t; /* ATTENTION: assumes 32bit mask !!! */
struct sccb_header {
u16 length;
u8 function_code;
u8 control_mask[3];
u16 response_code;
} __attribute__((packed));
struct init_sccb {
struct sccb_header header;
u16 _reserved;
u16 mask_length;
sccb_mask_t receive_mask;
sccb_mask_t send_mask;
sccb_mask_t sclp_receive_mask;
sccb_mask_t sclp_send_mask;
} __attribute__((packed));
extern u64 sclp_facilities;
#define SCLP_HAS_CHP_INFO (sclp_facilities & 0x8000000000000000ULL)
#define SCLP_HAS_CHP_RECONFIG (sclp_facilities & 0x2000000000000000ULL)
#define SCLP_HAS_CPU_INFO (sclp_facilities & 0x0800000000000000ULL)
#define SCLP_HAS_CPU_RECONFIG (sclp_facilities & 0x0400000000000000ULL)
#define SCLP_HAS_PCI_RECONFIG (sclp_facilities & 0x0000000040000000ULL)
struct gds_subvector {
u8 length;
u8 key;
} __attribute__((packed));
struct gds_vector {
u16 length;
u16 gds_id;
} __attribute__((packed));
struct evbuf_header {
u16 length;
u8 type;
u8 flags;
u16 _reserved;
} __attribute__((packed));
struct sclp_req {
struct list_head list; /* list_head for request queueing. */
sclp_cmdw_t command; /* sclp command to execute */
void *sccb; /* pointer to the sccb to execute */
char status; /* status of this request */
int start_count; /* number of SVCs done for this req */
/* Callback that is called after reaching final status. */
void (*callback)(struct sclp_req *, void *data);
void *callback_data;
int queue_timeout; /* request queue timeout (sec), set by
caller of sclp_add_request(), if
needed */
/* Internal fields */
unsigned long queue_expires; /* request queue timeout (jiffies) */
};
#define SCLP_REQ_FILLED 0x00 /* request is ready to be processed */
#define SCLP_REQ_QUEUED 0x01 /* request is queued to be processed */
#define SCLP_REQ_RUNNING 0x02 /* request is currently running */
#define SCLP_REQ_DONE 0x03 /* request is completed successfully */
#define SCLP_REQ_FAILED 0x05 /* request is finally failed */
#define SCLP_REQ_QUEUED_TIMEOUT 0x06 /* request on queue timed out */
#define SCLP_QUEUE_INTERVAL 5 /* timeout interval for request queue */
/* function pointers that a high level driver has to use for registration */
/* of some routines it wants to be called from the low level driver */
struct sclp_register {
struct list_head list;
/* User wants to receive: */
sccb_mask_t receive_mask;
/* User wants to send: */
sccb_mask_t send_mask;
/* H/W can receive: */
sccb_mask_t sclp_receive_mask;
/* H/W can send: */
sccb_mask_t sclp_send_mask;
/* called if event type availability changes */
void (*state_change_fn)(struct sclp_register *);
/* called for events in cp_receive_mask/sclp_receive_mask */
void (*receiver_fn)(struct evbuf_header *);
/* called for power management events */
void (*pm_event_fn)(struct sclp_register *, enum sclp_pm_event);
/* pm event posted flag */
int pm_event_posted;
};
/* externals from sclp.c */
int sclp_add_request(struct sclp_req *req);
void sclp_sync_wait(void);
int sclp_register(struct sclp_register *reg);
void sclp_unregister(struct sclp_register *reg);
int sclp_remove_processed(struct sccb_header *sccb);
int sclp_deactivate(void);
int sclp_reactivate(void);
int sclp_service_call(sclp_cmdw_t command, void *sccb);
int sclp_sync_request(sclp_cmdw_t command, void *sccb);
int sclp_sync_request_timeout(sclp_cmdw_t command, void *sccb, int timeout);
int sclp_sdias_init(void);
void sclp_sdias_exit(void);
extern int sclp_console_pages;
extern int sclp_console_drop;
extern unsigned long sclp_console_full;
extern u8 sclp_fac84;
extern unsigned long long sclp_rzm;
extern unsigned long long sclp_rnmax;
/* useful inlines */
/* VM uses EBCDIC 037, LPAR+native(SE+HMC) use EBCDIC 500 */
/* translate single character from ASCII to EBCDIC */
static inline unsigned char
sclp_ascebc(unsigned char ch)
{
	/* Pick the conversion table by hypervisor: VM uses 037, else 500. */
	return (MACHINE_IS_VM) ? _ascebc[ch] : _ascebc_500[ch];
}
/* translate string from EBCDIC to ASCII */
static inline void
sclp_ebcasc_str(unsigned char *str, int nr)
{
	/* In-place conversion of @nr bytes; code page depends on hypervisor. */
	(MACHINE_IS_VM) ? EBCASC(str, nr) : EBCASC_500(str, nr);
}
/* translate string from ASCII to EBCDIC */
static inline void
sclp_ascebc_str(unsigned char *str, int nr)
{
	/* In-place conversion of @nr bytes; code page depends on hypervisor. */
	(MACHINE_IS_VM) ? ASCEBC(str, nr) : ASCEBC_500(str, nr);
}
/*
 * Walk the self-describing GDS vectors in [start, end) and return the
 * first one whose gds_id matches @id, or NULL if there is none.
 */
static inline struct gds_vector *
sclp_find_gds_vector(void *start, void *end, u16 id)
{
	void *pos = start;

	while (pos < end) {
		struct gds_vector *vec = pos;

		if (vec->gds_id == id)
			return vec;
		pos += vec->length;
	}
	return NULL;
}
/*
 * Walk the GDS subvectors in [start, end) and return the first one
 * whose key matches @key, or NULL if there is none.
 */
static inline struct gds_subvector *
sclp_find_gds_subvector(void *start, void *end, u8 key)
{
	void *pos = start;

	while (pos < end) {
		struct gds_subvector *sub = pos;

		if (sub->key == key)
			return sub;
		pos += sub->length;
	}
	return NULL;
}
#endif /* __SCLP_H__ */

View file

@ -0,0 +1,211 @@
/*
* Enable Asynchronous Notification via SCLP.
*
* Copyright IBM Corp. 2009
* Author(s): Hans-Joachim Picht <hans@linux.vnet.ibm.com>
*
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/kmod.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/proc_fs.h>
#include <linux/sysctl.h>
#include <linux/utsname.h>
#include "sclp.h"
static int callhome_enabled;
static struct sclp_req *request;
static struct sclp_async_sccb *sccb;
static int sclp_async_send_wait(char *message);
static struct ctl_table_header *callhome_sysctl_header;
static DEFINE_SPINLOCK(sclp_async_lock);
#define SCLP_NORMAL_WRITE 0x00
/* Event buffer layout for an SCLP asynchronous notification record. */
struct async_evbuf {
	struct evbuf_header header;
	u64 reserved;
	u8 rflags;
	u8 empty;
	u8 rtype;	/* record type; set to 0xA5 by sclp_async_send_wait() */
	u8 otype;
	char comp_id[12];
	char data[3000]; /* there is still some space left */
} __attribute__((packed));
struct sclp_async_sccb {
struct sccb_header header;
struct async_evbuf evbuf;
} __attribute__((packed));
static struct sclp_register sclp_async_register = {
.send_mask = EVTYP_ASYNC_MASK,
};
/*
 * Panic notifier callback: append the node name to the panic message
 * buffer passed in @data and transmit it via SCLP async notification.
 *
 * NOTE(review): the strncat() bound is the size of the *source*
 * (nodename), not the remaining space in the destination buffer, so it
 * does not protect @data against overflow — confirm the panic buffer
 * always has room for the appended node name.
 */
static int call_home_on_panic(struct notifier_block *self,
			      unsigned long event, void *data)
{
	strncat(data, init_utsname()->nodename,
		sizeof(init_utsname()->nodename));
	sclp_async_send_wait(data);
	return NOTIFY_DONE;
}
static struct notifier_block call_home_panic_nb = {
.notifier_call = call_home_on_panic,
.priority = INT_MAX,
};
/*
 * sysctl handler for kernel.callhome: reading reports callhome_enabled
 * as "0\n" or "1\n", writing accepts exactly 0 or 1.
 */
static int proc_handler_callhome(struct ctl_table *ctl, int write,
				 void __user *buffer, size_t *count,
				 loff_t *ppos)
{
	unsigned long val;
	int len, rc;
	char buf[3];

	/* Nothing to do for an empty request or a re-read after EOF. */
	if (!*count || (*ppos && !write)) {
		*count = 0;
		return 0;
	}
	if (!write) {
		len = snprintf(buf, sizeof(buf), "%d\n", callhome_enabled);
		/*
		 * NOTE(review): copies sizeof(buf) bytes (incl. the NUL),
		 * not len, and does not bound the copy by *count — verify
		 * callers always provide at least 3 bytes.
		 */
		rc = copy_to_user(buffer, buf, sizeof(buf));
		if (rc != 0)
			return -EFAULT;
	} else {
		len = *count;
		rc = kstrtoul_from_user(buffer, len, 0, &val);
		if (rc)
			return rc;
		if (val != 0 && val != 1)
			return -EINVAL;
		callhome_enabled = val;
	}
	*count = len;
	*ppos += len;
	return 0;
}
static struct ctl_table callhome_table[] = {
{
.procname = "callhome",
.mode = 0644,
.proc_handler = proc_handler_callhome,
},
{}
};
static struct ctl_table kern_dir_table[] = {
{
.procname = "kernel",
.maxlen = 0,
.mode = 0555,
.child = callhome_table,
},
{}
};
/*
* Function used to transfer asynchronous notification
* records which waits for send completion
*/
static int sclp_async_send_wait(char *message)
{
	struct async_evbuf *evb;
	int rc;
	unsigned long flags;

	if (!callhome_enabled)
		return 0;
	/* Fill in the event buffer (record type 0xA5). */
	sccb->evbuf.header.type = EVTYP_ASYNC;
	sccb->evbuf.rtype = 0xA5;
	sccb->evbuf.otype = 0x00;
	evb = &sccb->evbuf;
	request->command = SCLP_CMDW_WRITE_EVENT_DATA;
	request->sccb = sccb;
	request->status = SCLP_REQ_FILLED;
	/* NOTE(review): strncpy() leaves data unterminated when message
	 * fills the whole field; the full field is sent either way. */
	strncpy(sccb->evbuf.data, message, sizeof(sccb->evbuf.data));
	/*
	 * Retain Queue
	 * e.g. 5639CC140 500 Red Hat RHEL5 Linux for zSeries (RHEL AS)
	 */
	strncpy(sccb->evbuf.comp_id, "000000000", sizeof(sccb->evbuf.comp_id));
	sccb->evbuf.header.length = sizeof(sccb->evbuf);
	sccb->header.length = sizeof(sccb->evbuf) + sizeof(sccb->header);
	sccb->header.function_code = SCLP_NORMAL_WRITE;
	rc = sclp_add_request(request);
	if (rc)
		return rc;
	/* Busy-wait until the request reaches a final state. */
	spin_lock_irqsave(&sclp_async_lock, flags);
	while (request->status != SCLP_REQ_DONE &&
		request->status != SCLP_REQ_FAILED) {
		 sclp_sync_wait();
	}
	spin_unlock_irqrestore(&sclp_async_lock, flags);
	if (request->status != SCLP_REQ_DONE)
		return -EIO;
	rc = ((struct sclp_async_sccb *)
	       request->sccb)->header.response_code;
	if (rc != 0x0020)
		return -EIO;
	/* NOTE(review): on success this returns the SCLP response code
	 * 0x0020, not 0 — callers must treat any non-negative value as ok. */
	if (evb->header.flags != 0x80)
		return -EIO;
	return rc;
}
/* Register the async event type, the sysctl and the panic notifier. */
static int __init sclp_async_init(void)
{
	int rc;

	rc = sclp_register(&sclp_async_register);
	if (rc)
		return rc;
	rc = -EOPNOTSUPP;
	/* The SCLP must accept async events, otherwise bail out. */
	if (!(sclp_async_register.sclp_receive_mask & EVTYP_ASYNC_MASK))
		goto out_sclp;
	rc = -ENOMEM;
	callhome_sysctl_header = register_sysctl_table(kern_dir_table);
	if (!callhome_sysctl_header)
		goto out_sclp;
	request = kzalloc(sizeof(struct sclp_req), GFP_KERNEL);
	sccb = (struct sclp_async_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!request || !sccb)
		goto out_mem;
	rc = atomic_notifier_chain_register(&panic_notifier_list,
					&call_home_panic_nb);
	if (!rc)
		goto out;	/* success */
	/* Error: unwind in reverse order of setup. */
out_mem:
	kfree(request);
	free_page((unsigned long) sccb);
	unregister_sysctl_table(callhome_sysctl_header);
out_sclp:
	sclp_unregister(&sclp_async_register);
out:
	return rc;
}
module_init(sclp_async_init);
/* Tear down in reverse order of sclp_async_init(). */
static void __exit sclp_async_exit(void)
{
	atomic_notifier_chain_unregister(&panic_notifier_list,
					 &call_home_panic_nb);
	unregister_sysctl_table(callhome_sysctl_header);
	sclp_unregister(&sclp_async_register);
	free_page((unsigned long) sccb);
	kfree(request);
}
module_exit(sclp_async_exit);
MODULE_AUTHOR("Copyright IBM Corp. 2009");
MODULE_AUTHOR("Hans-Joachim Picht <hans@linux.vnet.ibm.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SCLP Asynchronous Notification Records");

View file

@ -0,0 +1,716 @@
/*
* Copyright IBM Corp. 2007,2012
*
* Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
* Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
*/
#define KMSG_COMPONENT "sclp_cmd"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/completion.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/memory.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <asm/ctl_reg.h>
#include <asm/chpid.h>
#include <asm/setup.h>
#include <asm/page.h>
#include <asm/sclp.h>
#include "sclp.h"
/* Request callback: wake the waiter sleeping in sclp_sync_request_timeout(). */
static void sclp_sync_callback(struct sclp_req *req, void *data)
{
	complete((struct completion *) data);
}
/* Execute an sclp request synchronously without a queue timeout. */
int sclp_sync_request(sclp_cmdw_t cmd, void *sccb)
{
	return sclp_sync_request_timeout(cmd, sccb, 0);
}
/*
 * Execute sclp command @cmd with the given @sccb synchronously: queue
 * the request and sleep until it reaches a final state.  A non-zero
 * @timeout (seconds) bounds the time the request may sit on the
 * request queue.  Returns 0 on success or a negative errno.
 */
int sclp_sync_request_timeout(sclp_cmdw_t cmd, void *sccb, int timeout)
{
	struct completion completion;
	struct sclp_req *request;
	int rc;

	request = kzalloc(sizeof(*request), GFP_KERNEL);
	if (!request)
		return -ENOMEM;
	if (timeout)
		request->queue_timeout = timeout;
	request->command = cmd;
	request->sccb = sccb;
	request->status = SCLP_REQ_FILLED;
	request->callback = sclp_sync_callback;
	request->callback_data = &completion;
	init_completion(&completion);

	/* Perform sclp request. */
	rc = sclp_add_request(request);
	if (rc)
		goto out;
	wait_for_completion(&completion);

	/* Check response. */
	if (request->status != SCLP_REQ_DONE) {
		pr_warning("sync request failed (cmd=0x%08x, "
			   "status=0x%02x)\n", cmd, request->status);
		rc = -EIO;
	}
out:
	kfree(request);
	return rc;
}
/*
* CPU configuration related functions.
*/
#define SCLP_CMDW_READ_CPU_INFO 0x00010001
#define SCLP_CMDW_CONFIGURE_CPU 0x00110001
#define SCLP_CMDW_DECONFIGURE_CPU 0x00100001
struct read_cpu_info_sccb {
struct sccb_header header;
u16 nr_configured;
u16 offset_configured;
u16 nr_standby;
u16 offset_standby;
u8 reserved[4096 - 16];
} __attribute__((packed, aligned(PAGE_SIZE)));
/* Copy CPU counts and entries from the read-cpu-info @sccb into @info. */
static void sclp_fill_cpu_info(struct sclp_cpu_info *info,
			       struct read_cpu_info_sccb *sccb)
{
	char *page = (char *) sccb;

	memset(info, 0, sizeof(*info));
	info->configured = sccb->nr_configured;
	info->standby = sccb->nr_standby;
	info->combined = sccb->nr_configured + sccb->nr_standby;
	/* Facility bit: CPU type information in the entries is valid. */
	info->has_cpu_type = sclp_fac84 & 0x1;
	/*
	 * Copies all (configured + standby) entries starting at
	 * offset_configured — assumes both groups are contiguous.
	 */
	memcpy(&info->cpu, page + sccb->offset_configured,
	       info->combined * sizeof(struct sclp_cpu_entry));
}
/*
 * Retrieve CPU configuration information from the SCLP into @info.
 * Returns 0 on success, -EOPNOTSUPP if the facility is missing,
 * -ENOMEM or -EIO on failure.
 */
int sclp_get_cpu_info(struct sclp_cpu_info *info)
{
	int rc;
	struct read_cpu_info_sccb *sccb;

	if (!SCLP_HAS_CPU_INFO)
		return -EOPNOTSUPP;
	/* SCCB must be below 2G, hence GFP_DMA. */
	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = sizeof(*sccb);
	rc = sclp_sync_request_timeout(SCLP_CMDW_READ_CPU_INFO, sccb,
				       SCLP_QUEUE_INTERVAL);
	if (rc)
		goto out;
	if (sccb->header.response_code != 0x0010) {
		pr_warning("readcpuinfo failed (response=0x%04x)\n",
			   sccb->header.response_code);
		rc = -EIO;
		goto out;
	}
	sclp_fill_cpu_info(info, sccb);
out:
	free_page((unsigned long) sccb);
	return rc;
}
struct cpu_configure_sccb {
struct sccb_header header;
} __attribute__((packed, aligned(8)));
/*
 * Issue a (de)configure-cpu sclp command and map the response code to
 * an errno.  0x0020/0x0120 count as success.
 */
static int do_cpu_configure(sclp_cmdw_t cmd)
{
	struct cpu_configure_sccb *sccb;
	int rc;

	if (!SCLP_HAS_CPU_RECONFIG)
		return -EOPNOTSUPP;
	/*
	 * This is not going to cross a page boundary since we force
	 * kmalloc to have a minimum alignment of 8 bytes on s390.
	 */
	sccb = kzalloc(sizeof(*sccb), GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = sizeof(*sccb);
	rc = sclp_sync_request_timeout(cmd, sccb, SCLP_QUEUE_INTERVAL);
	if (rc)
		goto out;
	switch (sccb->header.response_code) {
	case 0x0020:
	case 0x0120:
		break;
	default:
		pr_warning("configure cpu failed (cmd=0x%08x, "
			   "response=0x%04x)\n", cmd,
			   sccb->header.response_code);
		rc = -EIO;
		break;
	}
out:
	kfree(sccb);
	return rc;
}
/* Request configuration of the cpu identified by @cpu. */
int sclp_cpu_configure(u8 cpu)
{
	return do_cpu_configure(SCLP_CMDW_CONFIGURE_CPU | cpu << 8);
}
/* Request deconfiguration of the cpu identified by @cpu. */
int sclp_cpu_deconfigure(u8 cpu)
{
	return do_cpu_configure(SCLP_CMDW_DECONFIGURE_CPU | cpu << 8);
}
#ifdef CONFIG_MEMORY_HOTPLUG
static DEFINE_MUTEX(sclp_mem_mutex);
static LIST_HEAD(sclp_mem_list);
static u8 sclp_max_storage_id;
static unsigned long sclp_storage_ids[256 / BITS_PER_LONG];
static int sclp_mem_state_changed;
struct memory_increment {
struct list_head list;
u16 rn;
int standby;
};
struct assign_storage_sccb {
struct sccb_header header;
u16 rn;
} __packed;
/* Map a start pfn to its storage increment number (0 if rzm is unknown). */
int arch_get_memory_phys_device(unsigned long start_pfn)
{
	if (!sclp_rzm)
		return 0;
	return PFN_PHYS(start_pfn) >> ilog2(sclp_rzm);
}
/* Convert a 1-based storage increment number to its start address. */
static unsigned long long rn2addr(u16 rn)
{
	unsigned long long index = rn - 1;

	return index * sclp_rzm;
}
/*
 * Issue an (un)assign-storage sclp command for increment @rn and map
 * the response code to an errno.  0x0020/0x0120 count as success.
 */
static int do_assign_storage(sclp_cmdw_t cmd, u16 rn)
{
	struct assign_storage_sccb *sccb;
	int rc;

	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = PAGE_SIZE;
	sccb->rn = rn;
	rc = sclp_sync_request_timeout(cmd, sccb, SCLP_QUEUE_INTERVAL);
	if (rc)
		goto out;
	switch (sccb->header.response_code) {
	case 0x0020:
	case 0x0120:
		break;
	default:
		pr_warning("assign storage failed (cmd=0x%08x, "
			   "response=0x%04x, rn=0x%04x)\n", cmd,
			   sccb->header.response_code, rn);
		rc = -EIO;
		break;
	}
out:
	free_page((unsigned long) sccb);
	return rc;
}
/*
 * Assign storage increment @rn and initialize the storage keys of the
 * newly available memory range.
 */
static int sclp_assign_storage(u16 rn)
{
	unsigned long long start;
	int rc;

	rc = do_assign_storage(0x000d0001, rn);
	if (rc)
		return rc;
	start = rn2addr(rn);
	storage_key_init_range(start, start + sclp_rzm);
	return 0;
}
/* Unassign storage increment @rn. */
static int sclp_unassign_storage(u16 rn)
{
	return do_assign_storage(0x000c0001, rn);
}
struct attach_storage_sccb {
struct sccb_header header;
u16 :16;
u16 assigned;
u32 :32;
u32 entries[0];
} __packed;
/*
 * Attach storage element @id.  On success the element's id is recorded
 * in sclp_storage_ids and any increments the attach implicitly assigned
 * are unassigned again (standby state is managed via memory hotplug).
 */
static int sclp_attach_storage(u8 id)
{
	struct attach_storage_sccb *sccb;
	int rc;
	int i;

	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = PAGE_SIZE;
	rc = sclp_sync_request_timeout(0x00080001 | id << 8, sccb,
				       SCLP_QUEUE_INTERVAL);
	if (rc)
		goto out;
	switch (sccb->header.response_code) {
	case 0x0020:
		set_bit(id, sclp_storage_ids);
		for (i = 0; i < sccb->assigned; i++) {
			if (sccb->entries[i])
				/* rn is in the upper halfword of the entry */
				sclp_unassign_storage(sccb->entries[i] >> 16);
		}
		break;
	default:
		rc = -EIO;
		break;
	}
out:
	free_page((unsigned long) sccb);
	return rc;
}
/*
 * Assign (@online != 0) or unassign the storage increments overlapping
 * the range [start, start + size).  Returns 0 or -EIO if any assign
 * failed; unassign errors are ignored.
 */
static int sclp_mem_change_state(unsigned long start, unsigned long size,
				 int online)
{
	struct memory_increment *incr;
	unsigned long long istart;
	int rc = 0;

	/* sclp_mem_list is sorted by rn, i.e. by address. */
	list_for_each_entry(incr, &sclp_mem_list, list) {
		istart = rn2addr(incr->rn);
		if (start + size - 1 < istart)
			break;
		if (start > istart + sclp_rzm - 1)
			continue;
		if (online)
			rc |= sclp_assign_storage(incr->rn);
		else
			sclp_unassign_storage(incr->rn);
	}
	return rc ? -EIO : 0;
}
/*
 * Memory hotplug notifier: assign/unassign the standby storage
 * increments backing the affected range as memory goes on-/offline.
 */
static int sclp_mem_notifier(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	unsigned long start, size;
	struct memory_notify *arg;
	unsigned char id;
	int rc = 0;

	arg = data;
	start = arg->start_pfn << PAGE_SHIFT;
	size = arg->nr_pages << PAGE_SHIFT;
	mutex_lock(&sclp_mem_mutex);
	/* Attach any storage elements not seen so far. */
	for_each_clear_bit(id, sclp_storage_ids, sclp_max_storage_id + 1)
		sclp_attach_storage(id);
	switch (action) {
	case MEM_ONLINE:
	case MEM_GOING_OFFLINE:
	case MEM_CANCEL_OFFLINE:
		break;
	case MEM_GOING_ONLINE:
		rc = sclp_mem_change_state(start, size, 1);
		break;
	case MEM_CANCEL_ONLINE:
		sclp_mem_change_state(start, size, 0);
		break;
	case MEM_OFFLINE:
		sclp_mem_change_state(start, size, 0);
		break;
	default:
		rc = -EINVAL;
		break;
	}
	if (!rc)
		/* Remembered so sclp_mem_freeze() can refuse suspend. */
		sclp_mem_state_changed = 1;
	mutex_unlock(&sclp_mem_mutex);
	return rc ? NOTIFY_BAD : NOTIFY_OK;
}
static struct notifier_block sclp_mem_nb = {
.notifier_call = sclp_mem_notifier,
};
/*
 * Collect consecutive standby increments and add them to the system as
 * one merged memory range.  Expects ascending rns; a call with rn == 0
 * flushes the pending run.
 */
static void __init add_memory_merged(u16 rn)
{
	static u16 first_rn, num;
	unsigned long long start, size;

	/* Extend the current run if @rn is its direct successor. */
	if (rn && first_rn && (first_rn + num == rn)) {
		num++;
		return;
	}
	if (!first_rn)
		goto skip_add;
	start = rn2addr(first_rn);
	size = (unsigned long long) num * sclp_rzm;
	/* Clip the range against the vmem limit and a mem= restriction. */
	if (start >= VMEM_MAX_PHYS)
		goto skip_add;
	if (start + size > VMEM_MAX_PHYS)
		size = VMEM_MAX_PHYS - start;
	if (memory_end_set && (start >= memory_end))
		goto skip_add;
	if (memory_end_set && (start + size > memory_end))
		size = memory_end - start;
	add_memory(0, start, size);
skip_add:
	/* Start a new run with @rn. */
	first_rn = rn;
	num = 1;
}
/* Add all standby increments from sclp_mem_list; trailing 0 flushes. */
static void __init sclp_add_standby_memory(void)
{
	struct memory_increment *incr;

	list_for_each_entry(incr, &sclp_mem_list, list)
		if (incr->standby)
			add_memory_merged(incr->rn);
	add_memory_merged(0);
}
/*
 * Insert a memory_increment for @rn into the rn-sorted sclp_mem_list.
 * For !@assigned entries the rn is synthesized as the first gap in the
 * existing sequence; @rn is ignored in that case.  Entries beyond
 * sclp_rnmax are dropped; allocation failure is silently ignored.
 */
static void __init insert_increment(u16 rn, int standby, int assigned)
{
	struct memory_increment *incr, *new_incr;
	struct list_head *prev;
	u16 last_rn;

	new_incr = kzalloc(sizeof(*new_incr), GFP_KERNEL);
	if (!new_incr)
		return;
	new_incr->rn = rn;
	new_incr->standby = standby;
	last_rn = 0;
	prev = &sclp_mem_list;
	/* Find the insertion point (or the first gap when !assigned). */
	list_for_each_entry(incr, &sclp_mem_list, list) {
		if (assigned && incr->rn > rn)
			break;
		if (!assigned && incr->rn - last_rn > 1)
			break;
		last_rn = incr->rn;
		prev = &incr->list;
	}
	if (!assigned)
		new_incr->rn = last_rn + 1;
	if (new_incr->rn > sclp_rnmax) {
		kfree(new_incr);
		return;
	}
	list_add(&new_incr->list, prev);
}
/* PM freeze hook: refuse suspend once memory hotplug changed state. */
static int sclp_mem_freeze(struct device *dev)
{
	if (!sclp_mem_state_changed)
		return 0;
	pr_err("Memory hotplug state changed, suspend refused.\n");
	return -EPERM;
}
struct read_storage_sccb {
struct sccb_header header;
u16 max_id;
u16 assigned;
u16 standby;
u16 :16;
u32 entries[0];
} __packed;
static const struct dev_pm_ops sclp_mem_pm_ops = {
.freeze = sclp_mem_freeze,
};
static struct platform_driver sclp_mem_pdrv = {
.driver = {
.name = "sclp_mem",
.pm = &sclp_mem_pm_ops,
},
};
/*
 * Detect standby memory: read the storage information for every
 * storage element, build sclp_mem_list from assigned and standby
 * increments, register the hotplug notifier/platform driver and add
 * the standby ranges to the system.
 */
static int __init sclp_detect_standby_memory(void)
{
	struct platform_device *sclp_pdev;
	struct read_storage_sccb *sccb;
	int i, id, assigned, rc;

	if (OLDMEM_BASE) /* No standby memory in kdump mode */
		return 0;
	/* All three storage-related facility bits must be set. */
	if ((sclp_facilities & 0xe00000000000ULL) != 0xe00000000000ULL)
		return 0;
	rc = -ENOMEM;
	sccb = (void *) __get_free_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		goto out;
	assigned = 0;
	/* sclp_max_storage_id grows as responses report higher max_id. */
	for (id = 0; id <= sclp_max_storage_id; id++) {
		memset(sccb, 0, PAGE_SIZE);
		sccb->header.length = PAGE_SIZE;
		rc = sclp_sync_request(0x00040001 | id << 8, sccb);
		if (rc)
			goto out;
		switch (sccb->header.response_code) {
		case 0x0010:
			/* Element attached; entries are assigned rns. */
			set_bit(id, sclp_storage_ids);
			for (i = 0; i < sccb->assigned; i++) {
				if (!sccb->entries[i])
					continue;
				assigned++;
				insert_increment(sccb->entries[i] >> 16, 0, 1);
			}
			break;
		case 0x0310:
			/* No storage on this element. */
			break;
		case 0x0410:
			/* Standby storage; entries are standby rns. */
			for (i = 0; i < sccb->assigned; i++) {
				if (!sccb->entries[i])
					continue;
				assigned++;
				insert_increment(sccb->entries[i] >> 16, 1, 1);
			}
			break;
		default:
			rc = -EIO;
			break;
		}
		if (!rc)
			sclp_max_storage_id = sccb->max_id;
	}
	if (rc || list_empty(&sclp_mem_list))
		goto out;
	/* Fill the remaining rns with synthesized standby increments. */
	for (i = 1; i <= sclp_rnmax - assigned; i++)
		insert_increment(0, 1, 0);
	rc = register_memory_notifier(&sclp_mem_nb);
	if (rc)
		goto out;
	rc = platform_driver_register(&sclp_mem_pdrv);
	if (rc)
		goto out;
	sclp_pdev = platform_device_register_simple("sclp_mem", -1, NULL, 0);
	rc = PTR_ERR_OR_ZERO(sclp_pdev);
	if (rc)
		goto out_driver;
	sclp_add_standby_memory();
	goto out;
out_driver:
	platform_driver_unregister(&sclp_mem_pdrv);
out:
	free_page((unsigned long) sccb);
	return rc;
}
__initcall(sclp_detect_standby_memory);
#endif /* CONFIG_MEMORY_HOTPLUG */
/*
* PCI I/O adapter configuration related functions.
*/
#define SCLP_CMDW_CONFIGURE_PCI 0x001a0001
#define SCLP_CMDW_DECONFIGURE_PCI 0x001b0001
#define SCLP_RECONFIG_PCI_ATPYE 2
struct pci_cfg_sccb {
struct sccb_header header;
u8 atype; /* adapter type */
u8 reserved1;
u16 reserved2;
u32 aid; /* adapter identifier */
} __packed;
/*
 * Issue a (de)configure-PCI sclp command for function id @fid and map
 * the response code to an errno.  0x0020/0x0120 count as success.
 */
static int do_pci_configure(sclp_cmdw_t cmd, u32 fid)
{
	struct pci_cfg_sccb *sccb;
	int rc;

	if (!SCLP_HAS_PCI_RECONFIG)
		return -EOPNOTSUPP;
	sccb = (struct pci_cfg_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = PAGE_SIZE;
	sccb->atype = SCLP_RECONFIG_PCI_ATPYE;
	sccb->aid = fid;
	rc = sclp_sync_request(cmd, sccb);
	if (rc)
		goto out;
	switch (sccb->header.response_code) {
	case 0x0020:
	case 0x0120:
		break;
	default:
		pr_warn("configure PCI I/O adapter failed: cmd=0x%08x response=0x%04x\n",
			cmd, sccb->header.response_code);
		rc = -EIO;
		break;
	}
out:
	free_page((unsigned long) sccb);
	return rc;
}
int sclp_pci_configure(u32 fid)
{
return do_pci_configure(SCLP_CMDW_CONFIGURE_PCI, fid);
}
EXPORT_SYMBOL(sclp_pci_configure);
int sclp_pci_deconfigure(u32 fid)
{
return do_pci_configure(SCLP_CMDW_DECONFIGURE_PCI, fid);
}
EXPORT_SYMBOL(sclp_pci_deconfigure);
/*
* Channel path configuration related functions.
*/
#define SCLP_CMDW_CONFIGURE_CHPATH 0x000f0001
#define SCLP_CMDW_DECONFIGURE_CHPATH 0x000e0001
#define SCLP_CMDW_READ_CHPATH_INFORMATION 0x00030001
struct chp_cfg_sccb {
struct sccb_header header;
u8 ccm;
u8 reserved[6];
u8 cssid;
} __attribute__((packed));
/*
 * Issue a (de)configure-channel-path sclp command and map the response
 * code to an errno.  0x0020/0x0120/0x0440/0x0450 count as success.
 */
static int do_chp_configure(sclp_cmdw_t cmd)
{
	struct chp_cfg_sccb *sccb;
	int rc;

	if (!SCLP_HAS_CHP_RECONFIG)
		return -EOPNOTSUPP;
	/* Prepare sccb. */
	sccb = (struct chp_cfg_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = sizeof(*sccb);
	rc = sclp_sync_request(cmd, sccb);
	if (rc)
		goto out;
	switch (sccb->header.response_code) {
	case 0x0020:
	case 0x0120:
	case 0x0440:
	case 0x0450:
		break;
	default:
		pr_warning("configure channel-path failed "
			   "(cmd=0x%08x, response=0x%04x)\n", cmd,
			   sccb->header.response_code);
		rc = -EIO;
		break;
	}
out:
	free_page((unsigned long) sccb);
	return rc;
}
/**
* sclp_chp_configure - perform configure channel-path sclp command
* @chpid: channel-path ID
*
* Perform configure channel-path command sclp command for specified chpid.
* Return 0 after command successfully finished, non-zero otherwise.
*/
int sclp_chp_configure(struct chp_id chpid)
{
return do_chp_configure(SCLP_CMDW_CONFIGURE_CHPATH | chpid.id << 8);
}
/**
* sclp_chp_deconfigure - perform deconfigure channel-path sclp command
* @chpid: channel-path ID
*
* Perform deconfigure channel-path command sclp command for specified chpid
* and wait for completion. On success return 0. Return non-zero otherwise.
*/
int sclp_chp_deconfigure(struct chp_id chpid)
{
return do_chp_configure(SCLP_CMDW_DECONFIGURE_CHPATH | chpid.id << 8);
}
struct chp_info_sccb {
struct sccb_header header;
u8 recognized[SCLP_CHP_INFO_MASK_SIZE];
u8 standby[SCLP_CHP_INFO_MASK_SIZE];
u8 configured[SCLP_CHP_INFO_MASK_SIZE];
u8 ccm;
u8 reserved[6];
u8 cssid;
} __attribute__((packed));
/**
* sclp_chp_read_info - perform read channel-path information sclp command
* @info: resulting channel-path information data
*
* Perform read channel-path information sclp command and wait for completion.
* On success, store channel-path information in @info and return 0. Return
* non-zero otherwise.
*/
int sclp_chp_read_info(struct sclp_chp_info *info)
{
struct chp_info_sccb *sccb;
int rc;
if (!SCLP_HAS_CHP_INFO)
return -EOPNOTSUPP;
/* Prepare sccb. */
sccb = (struct chp_info_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!sccb)
return -ENOMEM;
sccb->header.length = sizeof(*sccb);
rc = sclp_sync_request(SCLP_CMDW_READ_CHPATH_INFORMATION, sccb);
if (rc)
goto out;
if (sccb->header.response_code != 0x0010) {
pr_warning("read channel-path info failed "
"(response=0x%04x)\n", sccb->header.response_code);
rc = -EIO;
goto out;
}
memcpy(info->recognized, sccb->recognized, SCLP_CHP_INFO_MASK_SIZE);
memcpy(info->standby, sccb->standby, SCLP_CHP_INFO_MASK_SIZE);
memcpy(info->configured, sccb->configured, SCLP_CHP_INFO_MASK_SIZE);
out:
free_page((unsigned long) sccb);
return rc;
}
/* Test facility bit 84.1 (SPRP support). */
bool sclp_has_sprp(void)
{
	return (sclp_fac84 & 0x2) != 0;
}

View file

@ -0,0 +1,356 @@
/*
* SCLP line mode console driver
*
* Copyright IBM Corp. 1999, 2009
* Author(s): Martin Peschke <mpeschke@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
#include <linux/kmod.h>
#include <linux/console.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/termios.h>
#include <linux/err.h>
#include <linux/reboot.h>
#include <linux/gfp.h>
#include "sclp.h"
#include "sclp_rw.h"
#include "sclp_tty.h"
#define sclp_console_major 4 /* TTYAUX_MAJOR */
#define sclp_console_minor 64
#define sclp_console_name "ttyS"
/* Lock to guard over changes to global variables */
static spinlock_t sclp_con_lock;
/* List of free pages that can be used for console output buffering */
static struct list_head sclp_con_pages;
/* List of full struct sclp_buffer structures ready for output */
static struct list_head sclp_con_outqueue;
/* Pointer to current console buffer */
static struct sclp_buffer *sclp_conbuf;
/* Timer for delayed output of console messages */
static struct timer_list sclp_con_timer;
/* Suspend mode flag */
static int sclp_con_suspended;
/* Flag that output queue is currently running */
static int sclp_con_queue_running;
/* Output format for console messages */
static unsigned short sclp_con_columns;
static unsigned short sclp_con_width_htab;
/*
 * Completion callback for an emitted console buffer: recycle its page
 * and keep draining the out queue until it is empty, output is
 * suspended, or an emit succeeds asynchronously.
 */
static void
sclp_conbuf_callback(struct sclp_buffer *buffer, int rc)
{
	unsigned long flags;
	void *page;

	do {
		page = sclp_unmake_buffer(buffer);
		spin_lock_irqsave(&sclp_con_lock, flags);

		/* Remove buffer from outqueue */
		list_del(&buffer->list);
		list_add_tail((struct list_head *) page, &sclp_con_pages);

		/* Check if there is a pending buffer on the out queue. */
		buffer = NULL;
		if (!list_empty(&sclp_con_outqueue))
			buffer = list_first_entry(&sclp_con_outqueue,
						  struct sclp_buffer, list);
		if (!buffer || sclp_con_suspended) {
			/* Queue drained (or suspended): stop running. */
			sclp_con_queue_running = 0;
			spin_unlock_irqrestore(&sclp_con_lock, flags);
			break;
		}
		spin_unlock_irqrestore(&sclp_con_lock, flags);
	} while (sclp_emit_buffer(buffer, sclp_conbuf_callback));
}
/*
* Finalize and emit first pending buffer.
*/
static void sclp_conbuf_emit(void)
{
	struct sclp_buffer* buffer;
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_con_lock, flags);
	/* Move the current write buffer (if any) to the out queue. */
	if (sclp_conbuf)
		list_add_tail(&sclp_conbuf->list, &sclp_con_outqueue);
	sclp_conbuf = NULL;
	/* Only one emitter at a time; callback keeps the queue draining. */
	if (sclp_con_queue_running || sclp_con_suspended)
		goto out_unlock;
	if (list_empty(&sclp_con_outqueue))
		goto out_unlock;
	buffer = list_first_entry(&sclp_con_outqueue, struct sclp_buffer,
				  list);
	sclp_con_queue_running = 1;
	spin_unlock_irqrestore(&sclp_con_lock, flags);

	rc = sclp_emit_buffer(buffer, sclp_conbuf_callback);
	if (rc)
		/* Synchronous failure: let the callback recycle the page. */
		sclp_conbuf_callback(buffer, rc);
	return;
out_unlock:
	spin_unlock_irqrestore(&sclp_con_lock, flags);
}
/*
* Wait until out queue is empty
*/
static void sclp_console_sync_queue(void)
{
	unsigned long flags;

	spin_lock_irqsave(&sclp_con_lock, flags);
	/* Cancel the delayed-flush timer, we are flushing now. */
	if (timer_pending(&sclp_con_timer))
		del_timer(&sclp_con_timer);
	/* Busy-wait (lock dropped) until the out queue stops running. */
	while (sclp_con_queue_running) {
		spin_unlock_irqrestore(&sclp_con_lock, flags);
		sclp_sync_wait();
		spin_lock_irqsave(&sclp_con_lock, flags);
	}
	spin_unlock_irqrestore(&sclp_con_lock, flags);
}
/*
* When this routine is called from the timer then we flush the
* temporary write buffer without further waiting on a final new line.
*/
static void
sclp_console_timeout(unsigned long data)
{
	/* Timer expired: emit whatever is buffered, @data is unused. */
	sclp_conbuf_emit();
}
/*
* Drop oldest console buffer if sclp_con_drop is set
*/
static int
sclp_console_drop_buffer(void)
{
	struct list_head *list;
	struct sclp_buffer *buffer;
	void *page;

	if (!sclp_console_drop)
		return 0;
	list = sclp_con_outqueue.next;
	if (sclp_con_queue_running)
		/* The first element is in I/O */
		list = list->next;
	if (list == &sclp_con_outqueue)
		/* Nothing droppable on the queue. */
		return 0;
	list_del(list);
	/* Recycle the dropped buffer's page. */
	buffer = list_entry(list, struct sclp_buffer, list);
	page = sclp_unmake_buffer(buffer);
	list_add_tail((struct list_head *) page, &sclp_con_pages);
	return 1;
}
/*
* Writes the given message to S390 system console
*/
/*
 * Writes the given message to S390 system console.  Blocks (via
 * sclp_sync_wait) when no output page is available, unless suspended
 * or a queued buffer could be dropped instead.
 */
static void
sclp_console_write(struct console *console, const char *message,
		   unsigned int count)
{
	unsigned long flags;
	void *page;
	int written;

	if (count == 0)
		return;
	spin_lock_irqsave(&sclp_con_lock, flags);
	/*
	 * process escape characters, write message into buffer,
	 * send buffer to SCLP
	 */
	do {
		/* make sure we have a console output buffer */
		if (sclp_conbuf == NULL) {
			if (list_empty(&sclp_con_pages))
				sclp_console_full++;
			/* Wait (or drop) until a free page shows up. */
			while (list_empty(&sclp_con_pages)) {
				if (sclp_con_suspended)
					goto out;
				if (sclp_console_drop_buffer())
					break;
				spin_unlock_irqrestore(&sclp_con_lock, flags);
				sclp_sync_wait();
				spin_lock_irqsave(&sclp_con_lock, flags);
			}
			page = sclp_con_pages.next;
			list_del((struct list_head *) page);
			sclp_conbuf = sclp_make_buffer(page, sclp_con_columns,
						       sclp_con_width_htab);
		}
		/* try to write the string to the current output buffer */
		written = sclp_write(sclp_conbuf, (const unsigned char *)
				     message, count);
		if (written == count)
			break;
		/*
		 * Not all characters could be written to the current
		 * output buffer. Emit the buffer, create a new buffer
		 * and then output the rest of the string.
		 */
		spin_unlock_irqrestore(&sclp_con_lock, flags);
		sclp_conbuf_emit();
		spin_lock_irqsave(&sclp_con_lock, flags);
		message += written;
		count -= written;
	} while (count > 0);
	/* Setup timer to output current console buffer after 1/10 second */
	if (sclp_conbuf != NULL && sclp_chars_in_buffer(sclp_conbuf) != 0 &&
	    !timer_pending(&sclp_con_timer)) {
		init_timer(&sclp_con_timer);
		sclp_con_timer.function = sclp_console_timeout;
		sclp_con_timer.data = 0UL;
		sclp_con_timer.expires = jiffies + HZ/10;
		add_timer(&sclp_con_timer);
	}
out:
	spin_unlock_irqrestore(&sclp_con_lock, flags);
}
/* console->device hook: report the tty driver serving this console. */
static struct tty_driver *sclp_console_device(struct console *cons, int *idx)
{
	*idx = cons->index;
	return sclp_tty_driver;
}
/* Push all buffered console output to the SCLP and wait until done. */
static void sclp_console_flush(void)
{
	sclp_conbuf_emit();
	sclp_console_sync_queue();
}
/*
 * Resume console: If there are cached messages, emit them.
 */
static void sclp_console_resume(void)
{
	unsigned long flags;

	/* Clear the suspend flag under the lock, then restart output. */
	spin_lock_irqsave(&sclp_con_lock, flags);
	sclp_con_suspended = 0;
	spin_unlock_irqrestore(&sclp_con_lock, flags);
	sclp_conbuf_emit();
}
/*
 * Suspend console: Set suspend flag and flush console
 */
static void sclp_console_suspend(void)
{
	unsigned long flags;

	/* Set the flag first so new writes are queued, then drain. */
	spin_lock_irqsave(&sclp_con_lock, flags);
	sclp_con_suspended = 1;
	spin_unlock_irqrestore(&sclp_con_lock, flags);
	sclp_console_flush();
}
/* Panic/reboot notifier: make sure pending console output gets out. */
static int sclp_console_notify(struct notifier_block *nb,
			       unsigned long event, void *ptr)
{
	sclp_console_flush();
	return NOTIFY_OK;
}
/* Flush console output on panic, before other client notifiers run. */
static struct notifier_block on_panic_nb = {
	.notifier_call = sclp_console_notify,
	.priority = SCLP_PANIC_PRIO_CLIENT,
};

/* Flush console output on reboot. */
static struct notifier_block on_reboot_nb = {
	.notifier_call = sclp_console_notify,
	.priority = 1,
};
/*
 * used to register the SCLP console to the kernel and to
 * give printk necessary information
 */
static struct console sclp_console =
{
	.name = sclp_console_name,
	.write = sclp_console_write,
	.device = sclp_console_device,
	.flags = CON_PRINTBUFFER,	/* replay the log buffer on register */
	.index = 0 /* ttyS0 */
};
/*
 * This function is called for SCLP suspend and resume events:
 * freeze suspends the console, restore/thaw resume it.
 */
void sclp_console_pm_event(enum sclp_pm_event sclp_pm_event)
{
	if (sclp_pm_event == SCLP_PM_EVENT_FREEZE)
		sclp_console_suspend();
	else if (sclp_pm_event == SCLP_PM_EVENT_RESTORE ||
		 sclp_pm_event == SCLP_PM_EVENT_THAW)
		sclp_console_resume();
}
/*
 * called by console_init() in drivers/char/tty_io.c at boot-time.
 * Returns 0 on success (or if this is not the SCLP console), a negative
 * error code otherwise.
 *
 * Fix: the original never checked get_zeroed_page() for failure, so a
 * NULL "page" would have been linked into sclp_con_pages and later
 * dereferenced.  On allocation failure we now release the pages already
 * allocated and return -ENOMEM.
 */
static int __init
sclp_console_init(void)
{
	void *page;
	int i;
	int rc;

	if (!CONSOLE_IS_SCLP)
		return 0;
	rc = sclp_rw_init();
	if (rc)
		return rc;
	/* Allocate pages for output buffering */
	INIT_LIST_HEAD(&sclp_con_pages);
	for (i = 0; i < sclp_console_pages; i++) {
		page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
		if (!page)
			goto out_pages;
		list_add_tail(page, &sclp_con_pages);
	}
	INIT_LIST_HEAD(&sclp_con_outqueue);
	spin_lock_init(&sclp_con_lock);
	sclp_conbuf = NULL;
	init_timer(&sclp_con_timer);
	/* Set output format */
	if (MACHINE_IS_VM)
		/*
		 * save 4 characters for the CPU number
		 * written at start of each line by VM/CP
		 */
		sclp_con_columns = 76;
	else
		sclp_con_columns = 80;
	sclp_con_width_htab = 8;
	/* enable printk-access to this driver */
	atomic_notifier_chain_register(&panic_notifier_list, &on_panic_nb);
	register_reboot_notifier(&on_reboot_nb);
	register_console(&sclp_console);
	return 0;
out_pages:
	/* free the buffer pages allocated so far */
	while (!list_empty(&sclp_con_pages)) {
		page = sclp_con_pages.next;
		list_del((struct list_head *) page);
		free_page((unsigned long) page);
	}
	return -ENOMEM;
}
console_initcall(sclp_console_init);

View file

@ -0,0 +1,77 @@
/*
* Copyright IBM Corp. 2007
* Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
*/
#define KMSG_COMPONENT "sclp_config"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/workqueue.h>
#include <asm/smp.h>
#include "sclp.h"
/* Payload of a configuration-management event (follows the event header). */
struct conf_mgm_data {
	u8 reserved;
	u8 ev_qualifier;	/* selects the kind of change, see below */
} __attribute__((packed));

#define EV_QUAL_CPU_CHANGE	1	/* CPU configuration changed */
#define EV_QUAL_CAP_CHANGE	3	/* CPU capability changed */

/* Work items: the receiver runs in a context where handlers may not sleep. */
static struct work_struct sclp_cpu_capability_work;
static struct work_struct sclp_cpu_change_work;
/*
 * CPU capability changed: re-adjust jiffies scaling and send a uevent
 * for every online CPU so userspace can react.
 */
static void sclp_cpu_capability_notify(struct work_struct *work)
{
	int cpu;
	struct device *dev;

	s390_adjust_jiffies();
	pr_info("CPU capability may have changed\n");
	/* Hold the hotplug lock while iterating online CPUs. */
	get_online_cpus();
	for_each_online_cpu(cpu) {
		dev = get_cpu_device(cpu);
		kobject_uevent(&dev->kobj, KOBJ_CHANGE);
	}
	put_online_cpus();
}
/* CPU configuration changed: rescan. __ref: may reference __init code. */
static void __ref sclp_cpu_change_notify(struct work_struct *work)
{
	smp_rescan_cpus();
}
/*
 * Receiver for configuration-management events: the payload follows the
 * event header; dispatch the matching work item by qualifier.
 */
static void sclp_conf_receiver_fn(struct evbuf_header *evbuf)
{
	struct conf_mgm_data *cdata = (struct conf_mgm_data *)(evbuf + 1);

	if (cdata->ev_qualifier == EV_QUAL_CPU_CHANGE)
		schedule_work(&sclp_cpu_change_work);
	else if (cdata->ev_qualifier == EV_QUAL_CAP_CHANGE)
		schedule_work(&sclp_cpu_capability_work);
}
/* Listen for configuration-management data events. */
static struct sclp_register sclp_conf_register =
{
	.receive_mask = EVTYP_CONFMGMDATA_MASK,
	.receiver_fn  = sclp_conf_receiver_fn,
};
/* Initialize the work items and register for config-management events. */
static int __init sclp_conf_init(void)
{
	INIT_WORK(&sclp_cpu_capability_work, sclp_cpu_capability_notify);
	INIT_WORK(&sclp_cpu_change_work, sclp_cpu_change_notify);
	return sclp_register(&sclp_conf_register);
}
__initcall(sclp_conf_init);

View file

@ -0,0 +1,40 @@
/*
* SCLP control programm identification
*
* Copyright IBM Corp. 2001, 2007
* Author(s): Martin Peschke <mpeschke@de.ibm.com>
* Michael Ernst <mernst@de.ibm.com>
*/
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/version.h>
#include "sclp_cpi_sys.h"
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Identify this operating system instance "
		   "to the System z hardware");
MODULE_AUTHOR("Martin Peschke <mpeschke@de.ibm.com>, "
	      "Michael Ernst <mernst@de.ibm.com>");

/* Identification strings passed on at module load (8 chars max each). */
static char *system_name = "";
static char *sysplex_name = "";

module_param(system_name, charp, 0);
MODULE_PARM_DESC(system_name, "e.g. hostname - max. 8 characters");
module_param(sysplex_name, charp, 0);
MODULE_PARM_DESC(sysplex_name, "if applicable - max. 8 characters");
/* On load, report our identity (name, sysplex, "LINUX", version) via CPI. */
static int __init cpi_module_init(void)
{
	return sclp_cpi_set_data(system_name, sysplex_name, "LINUX",
				 LINUX_VERSION_CODE);
}
/* Nothing to undo on unload; the CPI data transmitted remains as set. */
static void __exit cpi_module_exit(void)
{
}

module_init(cpi_module_init);
module_exit(cpi_module_exit);

View file

@ -0,0 +1,429 @@
/*
* SCLP control program identification sysfs interface
*
* Copyright IBM Corp. 2001, 2007
* Author(s): Martin Peschke <mpeschke@de.ibm.com>
* Michael Ernst <mernst@de.ibm.com>
*/
#define KMSG_COMPONENT "sclp_cpi"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/stat.h>
#include <linux/device.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/kmod.h>
#include <linux/timer.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/export.h>
#include <asm/ebcdic.h>
#include <asm/sclp.h>
#include "sclp.h"
#include "sclp_rw.h"
#include "sclp_cpi_sys.h"
#define CPI_LENGTH_NAME 8	/* fixed width of the name fields below */
#define CPI_LENGTH_LEVEL 16

/* Serializes the CPI state below and the whole request path. */
static DEFINE_MUTEX(sclp_cpi_mutex);

/* CPI event buffer layout (hardware-defined, EBCDIC name fields). */
struct cpi_evbuf {
	struct evbuf_header header;
	u8 id_format;
	u8 reserved0;
	u8 system_type[CPI_LENGTH_NAME];
	u64 reserved1;
	u8 system_name[CPI_LENGTH_NAME];
	u64 reserved2;
	u64 system_level;
	u64 reserved3;
	u8 sysplex_name[CPI_LENGTH_NAME];
	u8 reserved4[16];
} __attribute__((packed));

struct cpi_sccb {
	struct sccb_header header;
	struct cpi_evbuf cpi_evbuf;
} __attribute__((packed));

/* Registered only for the duration of a request (see cpi_req()). */
static struct sclp_register sclp_cpi_event = {
	.send_mask = EVTYP_CTLPROGIDENT_MASK,
};

/* Current CPI state; all guarded by sclp_cpi_mutex. */
static char system_name[CPI_LENGTH_NAME + 1];
static char sysplex_name[CPI_LENGTH_NAME + 1];
static char system_type[CPI_LENGTH_NAME + 1];
static u64 system_level;
/*
 * Copy an ID string into a fixed-width, blank-padded field and convert
 * it to EBCDIC.  Caller must guarantee strlen(data) <= CPI_LENGTH_NAME
 * (enforced elsewhere via check_string()); longer input would overrun
 * the field.
 */
static void set_data(char *field, char *data)
{
	memset(field, ' ', CPI_LENGTH_NAME);
	memcpy(field, data, strlen(data));
	sclp_ascebc_str(field, CPI_LENGTH_NAME);
}
/* SCLP request callback: wake up the task waiting in cpi_req(). */
static void cpi_callback(struct sclp_req *req, void *data)
{
	complete((struct completion *) data);
}
/*
 * Allocate and fill an SCLP write-event request carrying the CPI event
 * buffer built from the current module state.  Returns the request or
 * ERR_PTR(-ENOMEM); release with cpi_free_req().  Called with
 * sclp_cpi_mutex held (reads the global name/level variables).
 */
static struct sclp_req *cpi_prepare_req(void)
{
	struct sclp_req *req;
	struct cpi_sccb *sccb;
	struct cpi_evbuf *evb;

	req = kzalloc(sizeof(struct sclp_req), GFP_KERNEL);
	if (!req)
		return ERR_PTR(-ENOMEM);
	/* SCCBs must live in DMA-capable storage. */
	sccb = (struct cpi_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb) {
		kfree(req);
		return ERR_PTR(-ENOMEM);
	}

	/* setup SCCB for Control-Program Identification */
	sccb->header.length = sizeof(struct cpi_sccb);
	sccb->cpi_evbuf.header.length = sizeof(struct cpi_evbuf);
	sccb->cpi_evbuf.header.type = 0x0b;	/* CPI event type */
	evb = &sccb->cpi_evbuf;

	/* set system type */
	set_data(evb->system_type, system_type);
	/* set system name */
	set_data(evb->system_name, system_name);
	/* set system level */
	evb->system_level = system_level;
	/* set sysplex name */
	set_data(evb->sysplex_name, sysplex_name);

	/* prepare request data structure presented to SCLP driver */
	req->command = SCLP_CMDW_WRITE_EVENT_DATA;
	req->sccb = sccb;
	req->status = SCLP_REQ_FILLED;
	req->callback = cpi_callback;
	return req;
}
/* Release the SCCB page and the request allocated by cpi_prepare_req(). */
static void cpi_free_req(struct sclp_req *req)
{
	free_page((unsigned long) req->sccb);
	kfree(req);
}
/*
 * Synchronously transmit the current CPI data to the service element.
 * Returns 0 on success or a negative error code.  Called with
 * sclp_cpi_mutex held.
 */
static int cpi_req(void)
{
	struct completion completion;
	struct sclp_req *req;
	int rc;
	int response;

	rc = sclp_register(&sclp_cpi_event);
	if (rc)
		goto out;
	/* Verify the service element actually accepts CPI events. */
	if (!(sclp_cpi_event.sclp_receive_mask & EVTYP_CTLPROGIDENT_MASK)) {
		rc = -EOPNOTSUPP;
		goto out_unregister;
	}

	req = cpi_prepare_req();
	if (IS_ERR(req)) {
		rc = PTR_ERR(req);
		goto out_unregister;
	}

	init_completion(&completion);
	req->callback_data = &completion;

	/* Add request to sclp queue */
	rc = sclp_add_request(req);
	if (rc)
		goto out_free_req;

	/* cpi_callback() completes this when the request finishes. */
	wait_for_completion(&completion);

	if (req->status != SCLP_REQ_DONE) {
		pr_warning("request failed (status=0x%02x)\n",
			   req->status);
		rc = -EIO;
		goto out_free_req;
	}

	/* 0x0020 is the SCLP "command complete" response code. */
	response = ((struct cpi_sccb *) req->sccb)->header.response_code;
	if (response != 0x0020) {
		pr_warning("request failed with response code 0x%x\n",
			   response);
		rc = -EIO;
	}

out_free_req:
	cpi_free_req(req);
out_unregister:
	sclp_unregister(&sclp_cpi_event);
out:
	return rc;
}
/*
 * Validate a CPI name: at most CPI_LENGTH_NAME characters (an optional
 * trailing newline is ignored) drawn from [A-Za-z0-9$@# ].
 * Returns 0 if acceptable, -EINVAL otherwise.
 */
static int check_string(const char *attr, const char *str)
{
	size_t len = strlen(str);
	size_t i;

	if (len && str[len - 1] == '\n')
		len--;
	if (len > CPI_LENGTH_NAME)
		return -EINVAL;
	for (i = 0; i < len; i++) {
		char c = str[i];

		if (!isalpha(c) && !isdigit(c) && !strchr("$@# ", c))
			return -EINVAL;
	}
	return 0;
}
/*
 * Store a CPI name: upper-case the value (ignoring a single trailing
 * newline) and blank-pad the fixed-width destination field.
 */
static void set_string(char *attr, const char *value)
{
	size_t len = strlen(value);
	size_t i;

	if (len && value[len - 1] == '\n')
		len--;
	for (i = 0; i < CPI_LENGTH_NAME; i++)
		attr[i] = (i < len) ? toupper(value[i]) : ' ';
}
/* sysfs: show the currently stored system name. */
static ssize_t system_name_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *page)
{
	int rc;

	mutex_lock(&sclp_cpi_mutex);
	rc = snprintf(page, PAGE_SIZE, "%s\n", system_name);
	mutex_unlock(&sclp_cpi_mutex);
	return rc;
}
/* sysfs: validate and store a new system name (transmitted via "set"). */
static ssize_t system_name_store(struct kobject *kobj,
				 struct kobj_attribute *attr,
				 const char *buf,
				 size_t len)
{
	int rc;

	rc = check_string("system_name", buf);
	if (rc)
		return rc;
	mutex_lock(&sclp_cpi_mutex);
	set_string(system_name, buf);
	mutex_unlock(&sclp_cpi_mutex);
	return len;
}

static struct kobj_attribute system_name_attr =
	__ATTR(system_name, 0644, system_name_show, system_name_store);
/* sysfs: show the currently stored sysplex name. */
static ssize_t sysplex_name_show(struct kobject *kobj,
				 struct kobj_attribute *attr, char *page)
{
	int rc;

	mutex_lock(&sclp_cpi_mutex);
	rc = snprintf(page, PAGE_SIZE, "%s\n", sysplex_name);
	mutex_unlock(&sclp_cpi_mutex);
	return rc;
}
/* sysfs: validate and store a new sysplex name (transmitted via "set"). */
static ssize_t sysplex_name_store(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  const char *buf,
				  size_t len)
{
	int rc;

	rc = check_string("sysplex_name", buf);
	if (rc)
		return rc;
	mutex_lock(&sclp_cpi_mutex);
	set_string(sysplex_name, buf);
	mutex_unlock(&sclp_cpi_mutex);
	return len;
}

static struct kobj_attribute sysplex_name_attr =
	__ATTR(sysplex_name, 0644, sysplex_name_show, sysplex_name_store);
/* sysfs: show the currently stored system type. */
static ssize_t system_type_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *page)
{
	int rc;

	mutex_lock(&sclp_cpi_mutex);
	rc = snprintf(page, PAGE_SIZE, "%s\n", system_type);
	mutex_unlock(&sclp_cpi_mutex);
	return rc;
}
/* sysfs: validate and store a new system type (transmitted via "set"). */
static ssize_t system_type_store(struct kobject *kobj,
				 struct kobj_attribute *attr,
				 const char *buf,
				 size_t len)
{
	int rc;

	rc = check_string("system_type", buf);
	if (rc)
		return rc;
	mutex_lock(&sclp_cpi_mutex);
	set_string(system_type, buf);
	mutex_unlock(&sclp_cpi_mutex);
	return len;
}

static struct kobj_attribute system_type_attr =
	__ATTR(system_type, 0644, system_type_show, system_type_store);
/* sysfs: show the 64-bit system level as 0x-prefixed, zero-padded hex. */
static ssize_t system_level_show(struct kobject *kobj,
				 struct kobj_attribute *attr, char *page)
{
	unsigned long long level;

	mutex_lock(&sclp_cpi_mutex);
	level = system_level;
	mutex_unlock(&sclp_cpi_mutex);
	return snprintf(page, PAGE_SIZE, "%#018llx\n", level);
}
/* sysfs: parse a hexadecimal system level; reject trailing garbage. */
static ssize_t system_level_store(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  const char *buf,
				  size_t len)
{
	unsigned long long level;
	char *endp;

	level = simple_strtoull(buf, &endp, 16);
	if (endp == buf)
		return -EINVAL;
	/* Allow a single trailing newline, nothing else. */
	if (*endp == '\n')
		endp++;
	if (*endp)
		return -EINVAL;
	mutex_lock(&sclp_cpi_mutex);
	system_level = level;
	mutex_unlock(&sclp_cpi_mutex);
	return len;
}

static struct kobj_attribute system_level_attr =
	__ATTR(system_level, 0644, system_level_show, system_level_store);
/* sysfs: any write to "set" transmits the stored CPI data to the SCLP. */
static ssize_t set_store(struct kobject *kobj,
			 struct kobj_attribute *attr,
			 const char *buf, size_t len)
{
	int rc;

	mutex_lock(&sclp_cpi_mutex);
	rc = cpi_req();
	mutex_unlock(&sclp_cpi_mutex);
	if (rc)
		return rc;
	return len;
}

static struct kobj_attribute set_attr = __ATTR(set, 0200, NULL, set_store);
/* Attribute files exposed below /sys/firmware/cpi/. */
static struct attribute *cpi_attrs[] = {
	&system_name_attr.attr,
	&sysplex_name_attr.attr,
	&system_type_attr.attr,
	&system_level_attr.attr,
	&set_attr.attr,
	NULL,
};

static struct attribute_group cpi_attr_group = {
	.attrs = cpi_attrs,
};

/* kset backing /sys/firmware/cpi */
static struct kset *cpi_kset;
/*
 * Kernel-internal API: validate, store and immediately transmit the
 * given CPI data.  Returns 0 on success or a negative error code.
 */
int sclp_cpi_set_data(const char *system, const char *sysplex, const char *type,
		      const u64 level)
{
	int rc;

	/* Validate everything before touching the stored state. */
	rc = check_string("system_name", system);
	if (rc)
		return rc;
	rc = check_string("sysplex_name", sysplex);
	if (rc)
		return rc;
	rc = check_string("system_type", type);
	if (rc)
		return rc;

	mutex_lock(&sclp_cpi_mutex);
	set_string(system_name, system);
	set_string(sysplex_name, sysplex);
	set_string(system_type, type);
	system_level = level;
	rc = cpi_req();
	mutex_unlock(&sclp_cpi_mutex);
	return rc;
}
EXPORT_SYMBOL(sclp_cpi_set_data);
/* Create /sys/firmware/cpi and its attribute files. */
static int __init cpi_init(void)
{
	int rc;

	cpi_kset = kset_create_and_add("cpi", NULL, firmware_kobj);
	if (!cpi_kset)
		return -ENOMEM;
	rc = sysfs_create_group(&cpi_kset->kobj, &cpi_attr_group);
	if (rc)
		kset_unregister(cpi_kset);
	return rc;
}
__initcall(cpi_init);

View file

@ -0,0 +1,14 @@
/*
* SCLP control program identification sysfs interface
*
* Copyright IBM Corp. 2007
* Author(s): Michael Ernst <mernst@de.ibm.com>
*/
#ifndef __SCLP_CPI_SYS_H__
#define __SCLP_CPI_SYS_H__

/* Validate, store and transmit CPI data; see sclp_cpi_sys.c. */
int sclp_cpi_set_data(const char *system, const char *sysplex,
		      const char *type, u64 level);

#endif	 /* __SCLP_CPI_SYS_H__ */

View file

@ -0,0 +1,144 @@
/*
* IOCTL interface for SCLP
*
* Copyright IBM Corp. 2012
*
* Author: Michael Holzheu <holzheu@linux.vnet.ibm.com>
*/
#include <linux/compat.h>
#include <linux/uaccess.h>
#include <linux/miscdevice.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/ioctl.h>
#include <linux/fs.h>
#include <asm/compat.h>
#include <asm/sclp_ctl.h>
#include <asm/sclp.h>
#include "sclp.h"
/*
 * Supported command words
 * NOTE(review): magic command words - presumably read-only query
 * commands safe to expose to userspace; confirm against the SCLP spec.
 */
static unsigned int sclp_ctl_sccb_wlist[] = {
	0x00400002,
	0x00410002,
};
/*
* Check if command word is supported
*/
static int sclp_ctl_cmdw_supported(unsigned int cmdw)
{
int i;
for (i = 0; i < ARRAY_SIZE(sclp_ctl_sccb_wlist); i++) {
if (cmdw == sclp_ctl_sccb_wlist[i])
return 1;
}
return 0;
}
/* Convert a user-supplied 64-bit value to a user pointer, honoring compat. */
static void __user *u64_to_uptr(u64 value)
{
	return is_compat_task() ? compat_ptr(value)
				: (void __user *)(unsigned long) value;
}
/*
 * Start SCLP request: copy the user's SCCB into a DMA page, run it
 * synchronously and copy the result back.  Returns 0 or a negative
 * error code.
 *
 * Fixes: the length-validation failure path returned without freeing
 * the SCCB page (memory leak), and "sccb->length" was fetched twice
 * from user memory without re-validation, allowing userspace to grow
 * it between the two copies and make the final copy_to_user() read
 * beyond the allocated page.
 */
static int sclp_ctl_ioctl_sccb(void __user *user_area)
{
	struct sclp_ctl_sccb ctl_sccb;
	struct sccb_header *sccb;
	int rc;

	if (copy_from_user(&ctl_sccb, user_area, sizeof(ctl_sccb)))
		return -EFAULT;
	if (!sclp_ctl_cmdw_supported(ctl_sccb.cmdw))
		return -EOPNOTSUPP;
	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	/* First fetch just the header to learn the SCCB length. */
	if (copy_from_user(sccb, u64_to_uptr(ctl_sccb.sccb), sizeof(*sccb))) {
		rc = -EFAULT;
		goto out_free;
	}
	if (sccb->length > PAGE_SIZE || sccb->length < 8) {
		rc = -EINVAL;
		goto out_free;
	}
	if (copy_from_user(sccb, u64_to_uptr(ctl_sccb.sccb), sccb->length)) {
		rc = -EFAULT;
		goto out_free;
	}
	/* Re-check: userspace may have changed the length between copies. */
	if (sccb->length > PAGE_SIZE || sccb->length < 8) {
		rc = -EINVAL;
		goto out_free;
	}
	rc = sclp_sync_request(ctl_sccb.cmdw, sccb);
	if (rc)
		goto out_free;
	if (copy_to_user(u64_to_uptr(ctl_sccb.sccb), sccb, sccb->length))
		rc = -EFAULT;
out_free:
	free_page((unsigned long) sccb);
	return rc;
}
/*
 * SCLP SCCB ioctl function: resolve the (possibly compat) user pointer
 * and dispatch on the ioctl number.
 */
static long sclp_ctl_ioctl(struct file *filp, unsigned int cmd,
			   unsigned long arg)
{
	void __user *argp = is_compat_task() ? compat_ptr(arg)
					     : (void __user *) arg;

	if (cmd == SCLP_CTL_SCCB)
		return sclp_ctl_ioctl_sccb(argp);
	return -ENOTTY;	/* unknown ioctl number */
}
/*
 * File operations
 */
static const struct file_operations sclp_ctl_fops = {
	.owner = THIS_MODULE,
	.open = nonseekable_open,
	.unlocked_ioctl = sclp_ctl_ioctl,
	.compat_ioctl = sclp_ctl_ioctl,	/* the handler is compat-aware */
	.llseek = no_llseek,
};

/*
 * Misc device definition (registers as /dev/sclp)
 */
static struct miscdevice sclp_ctl_device = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "sclp",
	.fops = &sclp_ctl_fops,
};
/*
 * Register sclp_ctl misc device
 */
static int __init sclp_ctl_init(void)
{
	return misc_register(&sclp_ctl_device);
}
module_init(sclp_ctl_init);
/*
 * Deregister sclp_ctl misc device
 */
static void __exit sclp_ctl_exit(void)
{
	misc_deregister(&sclp_ctl_device);
}
module_exit(sclp_ctl_exit);

View file

@ -0,0 +1,89 @@
/*
* Copyright IBM Corp. 2013
* Author(s): Ralf Hoppe (rhoppe@de.ibm.com)
*/
#ifndef _SCLP_DIAG_H
#define _SCLP_DIAG_H

#include <linux/types.h>

/* return codes for Diagnostic Test FTP Service, as indicated in member
 * sclp_diag_ftp::ldflg
 */
#define SCLP_DIAG_FTP_OK	0x80U /* success */
#define SCLP_DIAG_FTP_LDFAIL	0x01U /* load failed */
#define SCLP_DIAG_FTP_LDNPERM	0x02U /* not allowed */
#define SCLP_DIAG_FTP_LDRUNS	0x03U /* LD runs */
#define SCLP_DIAG_FTP_LDNRUNS	0x04U /* LD does not run */

#define SCLP_DIAG_FTP_XPCX	0x80 /* PCX communication code */
#define SCLP_DIAG_FTP_ROUTE	4 /* routing code for new FTP service */

/*
 * length of Diagnostic Test FTP Service event buffer
 * (header fields up to and including the FTP member)
 */
#define SCLP_DIAG_FTP_EVBUF_LEN				\
	(offsetof(struct sclp_diag_evbuf, mdd) +	\
	 sizeof(struct sclp_diag_ftp))
/**
 * struct sclp_diag_ftp - Diagnostic Test FTP Service model-dependent data
 * @pcx: code for PCX communication (should be 0x80)
 * @ldflg: load flag (see defines above)
 * @cmd: FTP command
 * @pgsize: page size (0 = 4kB, 1 = large page size)
 * @srcflg: source flag
 * @spare: reserved (zeroes)
 * @offset: file offset
 * @fsize: file size
 * @length: buffer size resp. bytes transferred
 * @failaddr: failing address
 * @bufaddr: buffer address, virtual
 * @asce: region or segment table designation
 * @fident: file name (ASCII, zero-terminated)
 *
 * Layout is defined by the firmware interface; do not reorder fields.
 */
struct sclp_diag_ftp {
	u8 pcx;
	u8 ldflg;
	u8 cmd;
	u8 pgsize;
	u8 srcflg;
	u8 spare;
	u64 offset;
	u64 fsize;
	u64 length;
	u64 failaddr;
	u64 bufaddr;
	u64 asce;

	u8 fident[256];
} __packed;
/**
 * struct sclp_diag_evbuf - Diagnostic Test (ET7) Event Buffer
 * @hdr: event buffer header
 * @route: diagnostic route (SCLP_DIAG_FTP_ROUTE for the FTP service)
 * @mdd: model-dependent data (@route dependent)
 */
struct sclp_diag_evbuf {
	struct evbuf_header hdr;
	u16 route;

	union {
		struct sclp_diag_ftp ftp;
	} mdd;
} __packed;
/**
 * struct sclp_diag_sccb - Diagnostic Test (ET7) SCCB
 * @hdr: SCCB header
 * @evbuf: event buffer
 */
struct sclp_diag_sccb {
	struct sccb_header hdr;
	struct sclp_diag_evbuf evbuf;
} __packed;

#endif /* _SCLP_DIAG_H */

View file

@ -0,0 +1,315 @@
/*
* SCLP early driver
*
* Copyright IBM Corp. 2013
*/
#define KMSG_COMPONENT "sclp_early"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <asm/ctl_reg.h>
#include <asm/sclp.h>
#include <asm/ipl.h>
#include "sclp_sdias.h"
#include "sclp.h"
#define SCLP_CMDW_READ_SCP_INFO		0x00020001
#define SCLP_CMDW_READ_SCP_INFO_FORCED	0x00120001

/* Response layout of Read SCP Info; byte offsets in the comments. */
struct read_info_sccb {
	struct sccb_header header;	/* 0-7 */
	u16 rnmax;			/* 8-9 */
	u8 rnsize;			/* 10 */
	u8 _reserved0[16 - 11];		/* 11-15 */
	u16 ncpurl;			/* 16-17 */
	u16 cpuoff;			/* 18-19 */
	u8 _reserved7[24 - 20];		/* 20-23 */
	u8 loadparm[8];			/* 24-31 */
	u8 _reserved1[48 - 32];		/* 32-47 */
	u64 facilities;			/* 48-55 */
	u8 _reserved2a[76 - 56];	/* 56-75 */
	u32 ibc;			/* 76-79 */
	u8 _reserved2b[84 - 80];	/* 80-83 */
	u8 fac84;			/* 84 */
	u8 fac85;			/* 85 */
	u8 _reserved3[91 - 86];		/* 86-90 */
	u8 flags;			/* 91 */
	u8 _reserved4[100 - 92];	/* 92-99 */
	u32 rnsize2;			/* 100-103 */
	u64 rnmax2;			/* 104-111 */
	u8 _reserved5[120 - 112];	/* 112-119 */
	u16 hcpua;			/* 120-121 */
	u8 _reserved6[4096 - 122];	/* 122-4095 */
} __packed __aligned(PAGE_SIZE);

/* Scratch SCCB for early boot; discarded after init (__initdata). */
static char sccb_early[PAGE_SIZE] __aligned(PAGE_SIZE) __initdata;
static unsigned int sclp_con_has_vt220 __initdata;
static unsigned int sclp_con_has_linemode __initdata;
static unsigned long sclp_hsa_size;
static unsigned int sclp_max_cpu;
static struct sclp_ipl_info sclp_ipl_info;
static unsigned char sclp_siif;
static u32 sclp_ibc;

/* Facility data consumed elsewhere in the kernel (non-static). */
u64 sclp_facilities;
u8 sclp_fac84;
unsigned long long sclp_rzm;
unsigned long long sclp_rnmax;
/*
 * Issue one SCLP service call during early boot and wait for its
 * completion interrupt via an enabled wait PSW.  Returns the result of
 * sclp_service_call().
 */
static int __init sclp_cmd_sync_early(sclp_cmdw_t cmd, void *sccb)
{
	int rc;

	/* Enable control register 0 bit 9 for the SCLP interrupt. */
	__ctl_set_bit(0, 9);
	rc = sclp_service_call(cmd, sccb);
	if (rc)
		goto out;
	/* Wait enabled for external interrupts until the call completes. */
	__load_psw_mask(PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_EA |
			PSW_MASK_BA | PSW_MASK_EXT | PSW_MASK_WAIT);
	local_irq_disable();
out:
	/* Contents of the sccb might have changed. */
	barrier();
	__ctl_clear_bit(0, 9);
	return rc;
}
/*
 * Read SCP info, first with the "forced" command word, then with the
 * plain one.  Returns 0 on success, -EIO if neither variant worked.
 */
static int __init sclp_read_info_early(struct read_info_sccb *sccb)
{
	int rc, i;
	sclp_cmdw_t commands[] = {SCLP_CMDW_READ_SCP_INFO_FORCED,
				  SCLP_CMDW_READ_SCP_INFO};

	for (i = 0; i < ARRAY_SIZE(commands); i++) {
		/* Retry the same command while the interface is busy. */
		do {
			memset(sccb, 0, sizeof(*sccb));
			sccb->header.length = sizeof(*sccb);
			sccb->header.function_code = 0x80;
			sccb->header.control_mask[2] = 0x80;
			rc = sclp_cmd_sync_early(commands[i], sccb);
		} while (rc == -EBUSY);

		if (rc)
			break;
		/* 0x10 == info returned; on 0x1f0 try the next command. */
		if (sccb->header.response_code == 0x10)
			return 0;
		if (sccb->header.response_code != 0x1f0)
			break;
	}
	return -EIO;
}
/*
 * Cache facilities, memory geometry, CPU count and IPL information from
 * the Read SCP Info response into the module-level variables above.
 */
static void __init sclp_facilities_detect(struct read_info_sccb *sccb)
{
	struct sclp_cpu_entry *cpue;
	u16 boot_cpu_address, cpu;

	if (sclp_read_info_early(sccb))
		return;

	sclp_facilities = sccb->facilities;
	sclp_fac84 = sccb->fac84;
	if (sccb->fac85 & 0x02)
		S390_lowcore.machine_flags |= MACHINE_FLAG_ESOP;
	/* Prefer the classic fields, fall back to the extended ones. */
	sclp_rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2;
	sclp_rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2;
	sclp_rzm <<= 20;	/* increment size is reported in MB */
	sclp_ibc = sccb->ibc;

	if (!sccb->hcpua) {
		if (MACHINE_IS_VM)
			sclp_max_cpu = 64;
		else
			sclp_max_cpu = sccb->ncpurl;
	} else {
		sclp_max_cpu = sccb->hcpua + 1;
	}

	/* Locate the boot CPU's entry to pick up its SIIF indicator. */
	boot_cpu_address = stap();
	cpue = (void *)sccb + sccb->cpuoff;
	for (cpu = 0; cpu < sccb->ncpurl; cpue++, cpu++) {
		if (boot_cpu_address != cpue->address)
			continue;
		sclp_siif = cpue->siif;
		break;
	}

	/* Save IPL information */
	sclp_ipl_info.is_valid = 1;
	if (sccb->flags & 0x2)
		sclp_ipl_info.has_dump = 1;
	memcpy(&sclp_ipl_info.loadparm, &sccb->loadparm, LOADPARM_LEN);
}
/* True if a line-mode SCLP console was detected at early boot. */
bool __init sclp_has_linemode(void)
{
	return sclp_con_has_linemode != 0;
}

/* True if a VT220-capable SCLP console was detected at early boot. */
bool __init sclp_has_vt220(void)
{
	return sclp_con_has_vt220 != 0;
}
/* Accessors for the values cached by sclp_facilities_detect(). */

unsigned long long sclp_get_rnmax(void)
{
	return sclp_rnmax;
}

unsigned long long sclp_get_rzm(void)
{
	return sclp_rzm;
}

unsigned int sclp_get_max_cpu(void)
{
	return sclp_max_cpu;
}

int sclp_has_siif(void)
{
	return sclp_siif;
}
EXPORT_SYMBOL(sclp_has_siif);

unsigned int sclp_get_ibc(void)
{
	return sclp_ibc;
}
EXPORT_SYMBOL(sclp_get_ibc);
/*
 * This function will be called after sclp_facilities_detect(), which gets
 * called from early.c code. The sclp_facilities_detect() function retrieves
 * and saves the IPL information.
 */
void __init sclp_get_ipl_info(struct sclp_ipl_info *info)
{
	*info = sclp_ipl_info;
}
/*
 * Issue an early SCLP command, retrying while the interface is busy.
 * Returns 0 on a 0x0020 (command complete) response, -EIO otherwise.
 */
static int __init sclp_cmd_early(sclp_cmdw_t cmd, void *sccb)
{
	int rc;

	while ((rc = sclp_cmd_sync_early(cmd, sccb)) == -EBUSY)
		;
	if (rc || ((struct sccb_header *) sccb)->response_code != 0x0020)
		return -EIO;
	return 0;
}
/* Prepare an SDIAS event buffer requesting the HSA (dump area) size. */
static void __init sccb_init_eq_size(struct sdias_sccb *sccb)
{
	memset(sccb, 0, sizeof(*sccb));

	sccb->hdr.length = sizeof(*sccb);
	sccb->evbuf.hdr.length = sizeof(struct sdias_evbuf);
	sccb->evbuf.hdr.type = EVTYP_SDIAS;
	sccb->evbuf.event_qual = SDIAS_EQ_SIZE;
	sccb->evbuf.data_id = SDIAS_DI_FCP_DUMP;
	/* NOTE(review): magic event id, presumably a correlation tag. */
	sccb->evbuf.event_id = 4712;
	sccb->evbuf.dbs = 1;
}
/* Program the SCLP receive/send event masks (Write Event Mask command). */
static int __init sclp_set_event_mask(struct init_sccb *sccb,
				      unsigned long receive_mask,
				      unsigned long send_mask)
{
	memset(sccb, 0, sizeof(*sccb));
	sccb->header.length = sizeof(*sccb);
	sccb->mask_length = sizeof(sccb_mask_t);
	sccb->receive_mask = receive_mask;
	sccb->send_mask = send_mask;
	return sclp_cmd_early(SCLP_CMDW_WRITE_EVENT_MASK, sccb);
}
/*
 * Query the HSA size via SDIAS.  Returns the size in bytes, 0 if the
 * block count was not filled in, or -EIO on command failure.
 */
static long __init sclp_hsa_size_init(struct sdias_sccb *sccb)
{
	sccb_init_eq_size(sccb);
	if (sclp_cmd_early(SCLP_CMDW_WRITE_EVENT_DATA, sccb))
		return -EIO;
	/* (blk_cnt - 1) pages: one block appears not to count - confirm. */
	if (sccb->evbuf.blk_cnt == 0)
		return 0;
	return (sccb->evbuf.blk_cnt - 1) * PAGE_SIZE;
}
/*
 * Fetch the asynchronous SDIAS answer (z/VM path) via Read Event Data
 * and convert its block count to bytes; 0 if absent, -EIO on failure.
 */
static long __init sclp_hsa_copy_wait(struct sccb_header *sccb)
{
	memset(sccb, 0, PAGE_SIZE);
	sccb->length = PAGE_SIZE;
	if (sclp_cmd_early(SCLP_CMDW_READ_EVENT_DATA, sccb))
		return -EIO;
	if (((struct sdias_sccb *) sccb)->evbuf.blk_cnt == 0)
		return 0;
	return (((struct sdias_sccb *) sccb)->evbuf.blk_cnt - 1) * PAGE_SIZE;
}
/* HSA size in bytes as detected at early boot (0 if unknown). */
unsigned long sclp_get_hsa_size(void)
{
	return sclp_hsa_size;
}
/*
 * Determine the HSA size, trying the synchronous (LPAR) and then the
 * asynchronous (z/VM) SDIAS interface.  On any failure sclp_hsa_size
 * keeps its previous value (0).
 */
static void __init sclp_hsa_size_detect(void *sccb)
{
	long size;

	/* First try synchronous interface (LPAR) */
	if (sclp_set_event_mask(sccb, 0, 0x40000010))
		return;
	size = sclp_hsa_size_init(sccb);
	if (size < 0)
		return;
	if (size != 0)
		goto out;
	/* Then try asynchronous interface (z/VM) */
	if (sclp_set_event_mask(sccb, 0x00000010, 0x40000010))
		return;
	size = sclp_hsa_size_init(sccb);
	if (size < 0)
		return;
	size = sclp_hsa_copy_wait(sccb);
	if (size < 0)
		return;
out:
	sclp_hsa_size = size;
}
/*
 * A line-mode console needs the operator-command send mask plus at
 * least one of the MSG / PMSGCMD receive masks.
 */
static unsigned int __init sclp_con_check_linemode(struct init_sccb *sccb)
{
	if ((sccb->sclp_send_mask & EVTYP_OPCMD_MASK) &&
	    (sccb->sclp_receive_mask & (EVTYP_MSG_MASK | EVTYP_PMSGCMD_MASK)))
		return 1;
	return 0;
}
/* Record which console types the firmware offers, from the saved masks. */
static void __init sclp_console_detect(struct init_sccb *sccb)
{
	if (sccb->header.response_code != 0x20)
		return;

	if (sccb->sclp_send_mask & EVTYP_VT220MSG_MASK)
		sclp_con_has_vt220 = 1;

	if (sclp_con_check_linemode(sccb))
		sclp_con_has_linemode = 1;
}
/* Early boot entry point: gather SCP info, HSA size and console types. */
void __init sclp_early_detect(void)
{
	void *sccb = &sccb_early;

	sclp_facilities_detect(sccb);
	sclp_hsa_size_detect(sccb);

	/* Turn off SCLP event notifications. Also save remote masks in the
	 * sccb. These are sufficient to detect sclp console capabilities.
	 */
	sclp_set_event_mask(sccb, 0, 0);
	sclp_console_detect(sccb);
}

View file

@ -0,0 +1,275 @@
/*
* SCLP Event Type (ET) 7 - Diagnostic Test FTP Services, useable on LPAR
*
* Copyright IBM Corp. 2013
* Author(s): Ralf Hoppe (rhoppe@de.ibm.com)
*
*/
#define KMSG_COMPONENT "hmcdrv"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/wait.h>
#include <linux/string.h>
#include <linux/jiffies.h>
#include <asm/sysinfo.h>
#include <asm/ebcdic.h>
#include "sclp.h"
#include "sclp_diag.h"
#include "sclp_ftp.h"
/* Completed by sclp_ftp_rxcb() when the ET7 data transfer finishes. */
static DECLARE_COMPLETION(sclp_ftp_rx_complete);

/* Results copied out of the last received ET7 event (see sclp_ftp_rxcb()). */
static u8 sclp_ftp_ldflg;
static u64 sclp_ftp_fsize;
static u64 sclp_ftp_length;
/**
 * sclp_ftp_txcb() - Diagnostic Test FTP services SCLP command callback
 *
 * Completes the waiter in sclp_ftp_et7() when the write request ends.
 */
static void sclp_ftp_txcb(struct sclp_req *req, void *data)
{
	struct completion *completion = data;

#ifdef DEBUG
	pr_debug("SCLP (ET7) TX-IRQ, SCCB @ 0x%p: %*phN\n",
		 req->sccb, 24, req->sccb);
#endif
	complete(completion);
}
/**
 * sclp_ftp_rxcb() - Diagnostic Test FTP services receiver event callback
 *
 * Filters for FTP-route ET7 events, copies the result fields into the
 * module-level variables and signals sclp_ftp_cmd().
 */
static void sclp_ftp_rxcb(struct evbuf_header *evbuf)
{
	struct sclp_diag_evbuf *diag = (struct sclp_diag_evbuf *) evbuf;

	/*
	 * Check for Diagnostic Test FTP Service
	 */
	if (evbuf->type != EVTYP_DIAG_TEST ||
	    diag->route != SCLP_DIAG_FTP_ROUTE ||
	    diag->mdd.ftp.pcx != SCLP_DIAG_FTP_XPCX ||
	    evbuf->length < SCLP_DIAG_FTP_EVBUF_LEN)
		return;

#ifdef DEBUG
	pr_debug("SCLP (ET7) RX-IRQ, Event @ 0x%p: %*phN\n",
		 evbuf, 24, evbuf);
#endif

	/*
	 * Because the event buffer is located in a page which is owned
	 * by the SCLP core, all data of interest must be copied. The
	 * error indication is in 'sclp_ftp_ldflg'
	 */
	sclp_ftp_ldflg = diag->mdd.ftp.ldflg;
	sclp_ftp_fsize = diag->mdd.ftp.fsize;
	sclp_ftp_length = diag->mdd.ftp.length;

	complete(&sclp_ftp_rx_complete);
}
/**
 * sclp_ftp_et7() - start a Diagnostic Test FTP Service SCLP request
 * @ftp: pointer to FTP descriptor
 *
 * Builds and submits the ET7 write-event request and waits for the
 * request itself (not the data transfer) to complete.
 *
 * Return: 0 on success, else a (negative) error code
 */
static int sclp_ftp_et7(const struct hmcdrv_ftp_cmdspec *ftp)
{
	struct completion completion;
	struct sclp_diag_sccb *sccb;
	struct sclp_req *req;
	size_t len;
	int rc;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!req || !sccb) {
		rc = -ENOMEM;
		goto out_free;	/* kfree(NULL)/free_page(0) are no-ops */
	}

	sccb->hdr.length = SCLP_DIAG_FTP_EVBUF_LEN +
		sizeof(struct sccb_header);
	sccb->evbuf.hdr.type = EVTYP_DIAG_TEST;
	sccb->evbuf.hdr.length = SCLP_DIAG_FTP_EVBUF_LEN;
	sccb->evbuf.hdr.flags = 0; /* clear processed-buffer */
	sccb->evbuf.route = SCLP_DIAG_FTP_ROUTE;
	sccb->evbuf.mdd.ftp.pcx = SCLP_DIAG_FTP_XPCX;
	sccb->evbuf.mdd.ftp.srcflg = 0;
	sccb->evbuf.mdd.ftp.pgsize = 0;
	sccb->evbuf.mdd.ftp.asce = _ASCE_REAL_SPACE;
	sccb->evbuf.mdd.ftp.ldflg = SCLP_DIAG_FTP_LDFAIL;
	sccb->evbuf.mdd.ftp.fsize = 0;
	sccb->evbuf.mdd.ftp.cmd = ftp->id;
	sccb->evbuf.mdd.ftp.offset = ftp->ofs;
	sccb->evbuf.mdd.ftp.length = ftp->len;
	sccb->evbuf.mdd.ftp.bufaddr = virt_to_phys(ftp->buf);

	/* Reject file names that do not fit including the terminator. */
	len = strlcpy(sccb->evbuf.mdd.ftp.fident, ftp->fname,
		      HMCDRV_FTP_FIDENT_MAX);
	if (len >= HMCDRV_FTP_FIDENT_MAX) {
		rc = -EINVAL;
		goto out_free;
	}

	req->command = SCLP_CMDW_WRITE_EVENT_DATA;
	req->sccb = sccb;
	req->status = SCLP_REQ_FILLED;
	req->callback = sclp_ftp_txcb;
	req->callback_data = &completion;

	init_completion(&completion);

	rc = sclp_add_request(req);
	if (rc)
		goto out_free;

	/* Wait for end of ftp sclp command. */
	wait_for_completion(&completion);

#ifdef DEBUG
	pr_debug("status of SCLP (ET7) request is 0x%04x (0x%02x)\n",
		 sccb->hdr.response_code, sccb->evbuf.hdr.flags);
#endif

	/*
	 * Check if sclp accepted the request. The data transfer runs
	 * asynchronously and the completion is indicated with an
	 * sclp ET7 event.
	 */
	if (req->status != SCLP_REQ_DONE ||
	    (sccb->evbuf.hdr.flags & 0x80) == 0 || /* processed-buffer */
	    (sccb->hdr.response_code & 0xffU) != 0x20U) {
		rc = -EIO;
	}

out_free:
	free_page((unsigned long) sccb);
	kfree(req);
	return rc;
}
/**
 * sclp_ftp_cmd() - executes a HMC related SCLP Diagnose (ET7) FTP command
 * @ftp: pointer to FTP command specification
 * @fsize: return of file size (or NULL if undesirable)
 *
 * Attention: Notice that this function is not reentrant - so the caller
 * must ensure locking.
 *
 * Return: number of bytes read/written or a (negative) error code
 */
ssize_t sclp_ftp_cmd(const struct hmcdrv_ftp_cmdspec *ftp, size_t *fsize)
{
	ssize_t len;
#ifdef DEBUG
	unsigned long start_jiffies;

	pr_debug("starting SCLP (ET7), cmd %d for '%s' at %lld with %zd bytes\n",
		 ftp->id, ftp->fname, (long long) ftp->ofs, ftp->len);
	start_jiffies = jiffies;
#endif

	/* Re-arm before submitting; sclp_ftp_rxcb() completes it. */
	init_completion(&sclp_ftp_rx_complete);

	/* Start ftp sclp command. */
	len = sclp_ftp_et7(ftp);
	if (len)
		goto out_unlock;

	/*
	 * There is no way to cancel the sclp ET7 request, the code
	 * needs to wait unconditionally until the transfer is complete.
	 */
	wait_for_completion(&sclp_ftp_rx_complete);

#ifdef DEBUG
	pr_debug("completed SCLP (ET7) request after %lu ms (all)\n",
		 (jiffies - start_jiffies) * 1000 / HZ);
	pr_debug("return code of SCLP (ET7) FTP Service is 0x%02x, with %lld/%lld bytes\n",
		 sclp_ftp_ldflg, sclp_ftp_length, sclp_ftp_fsize);
#endif

	/* Map the firmware's load flag to a byte count or error code. */
	switch (sclp_ftp_ldflg) {
	case SCLP_DIAG_FTP_OK:
		len = sclp_ftp_length;
		if (fsize)
			*fsize = sclp_ftp_fsize;
		break;
	case SCLP_DIAG_FTP_LDNPERM:
		len = -EPERM;
		break;
	case SCLP_DIAG_FTP_LDRUNS:
		len = -EBUSY;
		break;
	case SCLP_DIAG_FTP_LDFAIL:
		len = -ENOENT;
		break;
	default:
		len = -EIO;
		break;
	}

out_unlock:
	return len;
}
/*
 * ET7 event listener: registers interest in Diagnostic Test (ET7) events
 * in both directions with the SCLP core.
 */
static struct sclp_register sclp_ftp_event = {
	.send_mask = EVTYP_DIAG_TEST_MASK,    /* want tx events */
	.receive_mask = EVTYP_DIAG_TEST_MASK, /* want rx events */
	.receiver_fn = sclp_ftp_rxcb,	      /* async callback (rx) */
	.state_change_fn = NULL,
	.pm_event_fn = NULL,
};
/**
 * sclp_ftp_startup() - startup of FTP services, when running on LPAR
 *
 * Registers the ET7 event listener with the SCLP core.
 *
 * Return: 0 on success, else the error from sclp_register()
 */
int sclp_ftp_startup(void)
{
#ifdef DEBUG
	unsigned long info;
#endif
	int rc;
	rc = sclp_register(&sclp_ftp_event);
	if (rc)
		return rc;
#ifdef DEBUG
	/* Debug only: log the LPAR number/name this service runs on,
	 * obtained from SYSIB 2.2.2; failures here are ignored. */
	info = get_zeroed_page(GFP_KERNEL);
	if (info != 0) {
		struct sysinfo_2_2_2 *info222 = (struct sysinfo_2_2_2 *)info;
		if (!stsi(info222, 2, 2, 2)) { /* get SYSIB 2.2.2 */
			/* LPAR name is EBCDIC-500 and not NUL-terminated */
			info222->name[sizeof(info222->name) - 1] = '\0';
			EBCASC_500(info222->name, sizeof(info222->name) - 1);
			pr_debug("SCLP (ET7) FTP Service working on LPAR %u (%s)\n",
				 info222->lpar_number, info222->name);
		}
		free_page(info);
	}
#endif /* DEBUG */
	return 0;
}
/**
 * sclp_ftp_shutdown() - shutdown of FTP services, when running on LPAR
 *
 * Unregisters the ET7 event listener; counterpart of sclp_ftp_startup().
 */
void sclp_ftp_shutdown(void)
{
	sclp_unregister(&sclp_ftp_event);
}

View file

@ -0,0 +1,21 @@
/*
* SCLP Event Type (ET) 7 - Diagnostic Test FTP Services, useable on LPAR
*
* Notice that all functions exported here are not reentrant.
* So usage should be exclusive, ensured by the caller (e.g. using a
* mutex).
*
* Copyright IBM Corp. 2013
* Author(s): Ralf Hoppe (rhoppe@de.ibm.com)
*/
#ifndef __SCLP_FTP_H__
#define __SCLP_FTP_H__
#include "hmcdrv_ftp.h"
int sclp_ftp_startup(void);
void sclp_ftp_shutdown(void);
ssize_t sclp_ftp_cmd(const struct hmcdrv_ftp_cmdspec *ftp, size_t *fsize);
#endif /* __SCLP_FTP_H__ */

View file

@ -0,0 +1,144 @@
/*
* SCLP OCF communication parameters sysfs interface
*
* Copyright IBM Corp. 2011
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
#define KMSG_COMPONENT "sclp_ocf"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/stat.h>
#include <linux/device.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/kmod.h>
#include <linux/timer.h>
#include <linux/err.h>
#include <asm/ebcdic.h>
#include <asm/sclp.h>
#include "sclp.h"
#define OCF_LENGTH_HMC_NETWORK 8UL
#define OCF_LENGTH_CPC_NAME 8UL
static char hmc_network[OCF_LENGTH_HMC_NETWORK + 1];
static char cpc_name[OCF_LENGTH_CPC_NAME + 1];
static DEFINE_SPINLOCK(sclp_ocf_lock);
static struct work_struct sclp_ocf_change_work;
static struct kset *ocf_kset;
/*
 * Deferred work: emit a KOBJ_CHANGE uevent on the "ocf" kset so userspace
 * learns that cpc_name/hmc_network may have changed.  Runs from the
 * workqueue because the OCF event handler itself is not a good context
 * for uevent emission.
 */
static void sclp_ocf_change_notify(struct work_struct *work)
{
	kobject_uevent(&ocf_kset->kobj, KOBJ_CHANGE);
}
/* Handler for OCF event. Look for the CPC image name.
 *
 * Walks the nested GDS (generalized data stream) structure inside the
 * event buffer: 0x9f00 vector -> 0x9f22 vector -> 0x81 subvector, which
 * holds subvector 1 (HMC network id) and subvector 2 (CPC name).  Any
 * missing level simply aborts the update.  The extracted names are
 * converted from EBCDIC, truncated to the fixed buffer sizes and
 * published to sysfs readers under sclp_ocf_lock.
 */
static void sclp_ocf_handler(struct evbuf_header *evbuf)
{
	struct gds_vector *v;
	struct gds_subvector *sv, *netid, *cpc;
	size_t size;
	/* Find the 0x9f00 block. */
	v = sclp_find_gds_vector(evbuf + 1, (void *) evbuf + evbuf->length,
				 0x9f00);
	if (!v)
		return;
	/* Find the 0x9f22 block inside the 0x9f00 block. */
	v = sclp_find_gds_vector(v + 1, (void *) v + v->length, 0x9f22);
	if (!v)
		return;
	/* Find the 0x81 block inside the 0x9f22 block. */
	sv = sclp_find_gds_subvector(v + 1, (void *) v + v->length, 0x81);
	if (!sv)
		return;
	/* Find the 0x01 block inside the 0x81 block. */
	netid = sclp_find_gds_subvector(sv + 1, (void *) sv + sv->length, 1);
	/* Find the 0x02 block inside the 0x81 block. */
	cpc = sclp_find_gds_subvector(sv + 1, (void *) sv + sv->length, 2);
	/* Copy network name and cpc name. */
	spin_lock(&sclp_ocf_lock);
	if (netid) {
		/* clamp to buffer size; data follows the subvector header */
		size = min(OCF_LENGTH_HMC_NETWORK, (size_t) netid->length);
		memcpy(hmc_network, netid + 1, size);
		EBCASC(hmc_network, size);
		hmc_network[size] = 0;
	}
	if (cpc) {
		size = min(OCF_LENGTH_CPC_NAME, (size_t) cpc->length);
		memcpy(cpc_name, cpc + 1, size);
		EBCASC(cpc_name, size);
		cpc_name[size] = 0;
	}
	spin_unlock(&sclp_ocf_lock);
	/* notify userspace asynchronously, outside this handler */
	schedule_work(&sclp_ocf_change_work);
}
/* Listener registration: deliver incoming OCF events to sclp_ocf_handler. */
static struct sclp_register sclp_ocf_event = {
	.receive_mask = EVTYP_OCF_MASK,
	.receiver_fn = sclp_ocf_handler,
};
/*
 * sysfs show: print the current CPC name (already ASCII, NUL-terminated).
 * The lock keeps a concurrent OCF event from rewriting the buffer while
 * it is being formatted.
 */
static ssize_t cpc_name_show(struct kobject *kobj,
			     struct kobj_attribute *attr, char *page)
{
	int len;

	spin_lock_irq(&sclp_ocf_lock);
	len = snprintf(page, PAGE_SIZE, "%s\n", cpc_name);
	spin_unlock_irq(&sclp_ocf_lock);

	return len;
}
/* /sys/firmware/ocf/cpc_name, world-readable */
static struct kobj_attribute cpc_name_attr =
	__ATTR(cpc_name, 0444, cpc_name_show, NULL);
/*
 * sysfs show: print the current HMC network name.  Locked against the
 * OCF event handler, which updates the buffer asynchronously.
 */
static ssize_t hmc_network_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *page)
{
	int len;

	spin_lock_irq(&sclp_ocf_lock);
	len = snprintf(page, PAGE_SIZE, "%s\n", hmc_network);
	spin_unlock_irq(&sclp_ocf_lock);

	return len;
}
/* /sys/firmware/ocf/hmc_network, world-readable */
static struct kobj_attribute hmc_network_attr =
	__ATTR(hmc_network, 0444, hmc_network_show, NULL);
/* Both attributes are created as one group on the ocf kset. */
static struct attribute *ocf_attrs[] = {
	&cpc_name_attr.attr,
	&hmc_network_attr.attr,
	NULL,
};
static struct attribute_group ocf_attr_group = {
	.attrs = ocf_attrs,
};
/*
 * Driver init: create /sys/firmware/ocf with its attribute group and
 * register the OCF event listener.
 *
 * Return: 0 on success, -ENOMEM if the kset cannot be created, or the
 * error from sysfs_create_group()/sclp_register().  All partially
 * created objects are torn down on failure.
 */
static int __init ocf_init(void)
{
	int rc;

	INIT_WORK(&sclp_ocf_change_work, sclp_ocf_change_notify);
	ocf_kset = kset_create_and_add("ocf", NULL, firmware_kobj);
	if (!ocf_kset)
		return -ENOMEM;
	rc = sysfs_create_group(&ocf_kset->kobj, &ocf_attr_group);
	if (rc) {
		kset_unregister(ocf_kset);
		return rc;
	}
	rc = sclp_register(&sclp_ocf_event);
	if (rc) {
		/* Fix: do not leak the kset when listener registration
		 * fails; releasing the kset's kobject also removes the
		 * attribute files created on it. */
		kset_unregister(ocf_kset);
	}
	return rc;
}
device_initcall(ocf_init);

View file

@ -0,0 +1,84 @@
/*
* signal quiesce handler
*
* Copyright IBM Corp. 1999, 2004
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
* Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/reboot.h>
#include <linux/atomic.h>
#include <asm/ptrace.h>
#include <asm/smp.h>
#include "sclp.h"
static void (*old_machine_restart)(char *);
static void (*old_machine_halt)(void);
static void (*old_machine_power_off)(void);
/* Shutdown handler. Signal completion of shutdown by loading special PSW.
 *
 * Stops all other CPUs, then loads a disabled-wait PSW with the magic
 * address 0xfff, which tells the service element that the quiesce
 * request has been handled.  Never returns.
 */
static void do_machine_quiesce(void)
{
	psw_t quiesce_psw;
	smp_send_stop();
	quiesce_psw.mask =
		PSW_MASK_BASE | PSW_MASK_EA | PSW_MASK_BA | PSW_MASK_WAIT;
	quiesce_psw.addr = 0xfff;
	__load_psw(quiesce_psw);
}
/* Handler for quiesce event. Start shutdown procedure.
 *
 * Redirects restart/halt/power-off to do_machine_quiesce (saving the
 * previous handlers so a PM resume can undo this) and then triggers an
 * orderly shutdown via ctrl-alt-del.  The pointer comparison guards
 * against saving our own handler on a repeated quiesce event.
 */
static void sclp_quiesce_handler(struct evbuf_header *evbuf)
{
	if (_machine_restart != (void *) do_machine_quiesce) {
		old_machine_restart = _machine_restart;
		old_machine_halt = _machine_halt;
		old_machine_power_off = _machine_power_off;
		_machine_restart = (void *) do_machine_quiesce;
		_machine_halt = do_machine_quiesce;
		_machine_power_off = do_machine_quiesce;
	}
	ctrl_alt_del();
}
/* Undo machine restart/halt/power_off modification on resume.
 * Only SCLP_PM_EVENT_RESTORE requires action; freeze/thaw are no-ops.
 */
static void sclp_quiesce_pm_event(struct sclp_register *reg,
				  enum sclp_pm_event sclp_pm_event)
{
	if (sclp_pm_event != SCLP_PM_EVENT_RESTORE)
		return;
	if (!old_machine_restart)
		return;		/* handlers were never overridden */
	_machine_restart = old_machine_restart;
	_machine_halt = old_machine_halt;
	_machine_power_off = old_machine_power_off;
	old_machine_restart = NULL;
	old_machine_halt = NULL;
	old_machine_power_off = NULL;
}
/* Listener registration: receive signal-quiesce events, handle PM resume. */
static struct sclp_register sclp_quiesce_event = {
	.receive_mask = EVTYP_SIGQUIESCE_MASK,
	.receiver_fn = sclp_quiesce_handler,
	.pm_event_fn = sclp_quiesce_pm_event
};
/* Initialize quiesce driver: just register the event listener. */
static int __init sclp_quiesce_init(void)
{
	return sclp_register(&sclp_quiesce_event);
}
module_init(sclp_quiesce_init);

469
drivers/s390/char/sclp_rw.c Normal file
View file

@ -0,0 +1,469 @@
/*
* driver: reading from and writing to system console on S/390 via SCLP
*
* Copyright IBM Corp. 1999, 2009
*
* Author(s): Martin Peschke <mpeschke@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
#include <linux/kmod.h>
#include <linux/types.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/ctype.h>
#include <asm/uaccess.h>
#include "sclp.h"
#include "sclp_rw.h"
/*
* The room for the SCCB (only for writing) is not equal to a pages size
* (as it is specified as the maximum size in the SCLP documentation)
* because of the additional data structure described above.
*/
#define MAX_SCCB_ROOM (PAGE_SIZE - sizeof(struct sclp_buffer))
/* Forward PM events to the console driver (no-op stub if the console
 * is not configured, see sclp_rw.h). */
static void sclp_rw_pm_event(struct sclp_register *reg,
			     enum sclp_pm_event sclp_pm_event)
{
	sclp_console_pm_event(sclp_pm_event);
}
/* Event type structure for write message and write priority message */
static struct sclp_register sclp_rw_event = {
	.send_mask = EVTYP_MSG_MASK,
	.pm_event_fn = sclp_rw_pm_event,
};
/*
 * Setup a sclp write buffer. Gets a page as input (4K) and returns
 * a pointer to a struct sclp_buffer structure that is located at the
 * end of the input page. This reduces the buffer space by a few
 * bytes but simplifies things.
 *
 * @page:    a full page used both for the SCCB (at the start) and the
 *           bookkeeping struct sclp_buffer (at the end)
 * @columns: line width used for formatting (see sclp_write())
 * @htab:    tab stop distance
 */
struct sclp_buffer *
sclp_make_buffer(void *page, unsigned short columns, unsigned short htab)
{
	struct sclp_buffer *buffer;
	struct write_sccb *sccb;
	sccb = (struct write_sccb *) page;
	/*
	 * We keep the struct sclp_buffer structure at the end
	 * of the sccb page.
	 */
	buffer = ((struct sclp_buffer *) ((addr_t) sccb + PAGE_SIZE)) - 1;
	buffer->sccb = sccb;
	buffer->retry_count = 0;
	buffer->mto_number = 0;
	buffer->mto_char_sum = 0;
	buffer->current_line = NULL;
	buffer->current_length = 0;
	buffer->columns = columns;
	buffer->htab = htab;
	/* initialize sccb: fixed headers for a write-message event with an
	 * empty message data block; mtos are appended by sclp_write() */
	memset(sccb, 0, sizeof(struct write_sccb));
	sccb->header.length = sizeof(struct write_sccb);
	sccb->msg_buf.header.length = sizeof(struct msg_buf);
	sccb->msg_buf.header.type = EVTYP_MSG;
	sccb->msg_buf.mdb.header.length = sizeof(struct mdb);
	sccb->msg_buf.mdb.header.type = 1;
	sccb->msg_buf.mdb.header.tag = 0xD4C4C240;	/* ebcdic "MDB " */
	sccb->msg_buf.mdb.header.revision_code = 1;
	sccb->msg_buf.mdb.go.length = sizeof(struct go);
	sccb->msg_buf.mdb.go.type = 1;
	return buffer;
}
/*
 * Return a pointer to the original page that has been used to create
 * the buffer.  (The sccb sits at the start of that page, see
 * sclp_make_buffer().)
 */
void *
sclp_unmake_buffer(struct sclp_buffer *buffer)
{
	return buffer->sccb;
}
/*
 * Initialize a new Message Text Object (MTO) at the end of the provided buffer
 * with enough room for max_len characters. Return 0 on success, -ENOMEM if
 * the mto does not fit into the remaining sccb room (caller then has to
 * emit the buffer first).
 */
static int
sclp_initialize_mto(struct sclp_buffer *buffer, int max_len)
{
	struct write_sccb *sccb;
	struct mto *mto;
	int mto_size;
	/* max size of new Message Text Object including message text  */
	mto_size = sizeof(struct mto) + max_len;
	/* check if current buffer sccb can contain the mto */
	sccb = buffer->sccb;
	if ((MAX_SCCB_ROOM - sccb->header.length) < mto_size)
		return -ENOMEM;
	/* find address of new message text object */
	mto = (struct mto *)(((addr_t) sccb) + sccb->header.length);
	/*
	 * fill the new Message-Text Object,
	 * starting behind the former last byte of the SCCB
	 */
	memset(mto, 0, sizeof(struct mto));
	mto->length = sizeof(struct mto);
	mto->type = 4;	/* message text object */
	mto->line_type_flags = LNTPFLGS_ENDTEXT; /* end text */
	/* set pointer to first byte after struct mto. Text characters are
	 * appended there by sclp_write(); sizes are fixed up later by
	 * sclp_finalize_mto(). */
	buffer->current_line = (char *) (mto + 1);
	buffer->current_length = 0;
	return 0;
}
/*
 * Finalize MTO initialized by sclp_initialize_mto(), updating the sizes of
 * MTO, enclosing MDB, event buffer and SCCB.  Closes the current line:
 * buffer->current_line is reset to NULL so the next character starts a
 * new mto.
 */
static void
sclp_finalize_mto(struct sclp_buffer *buffer)
{
	struct write_sccb *sccb;
	struct mto *mto;
	int str_len, mto_size;
	str_len = buffer->current_length;
	buffer->current_line = NULL;
	buffer->current_length = 0;
	/* real size of new Message Text Object including message text */
	mto_size = sizeof(struct mto) + str_len;
	/* find address of new message text object */
	sccb = buffer->sccb;
	mto = (struct mto *)(((addr_t) sccb) + sccb->header.length);
	/* set size of message text object */
	mto->length = mto_size;
	/*
	 * update values of sizes
	 * (SCCB, Event(Message) Buffer, Message Data Block)
	 */
	sccb->header.length += mto_size;
	sccb->msg_buf.header.length += mto_size;
	sccb->msg_buf.mdb.header.length += mto_size;
	/*
	 * count number of buffered messages (= number of Message Text
	 * Objects) and number of buffered characters
	 * for the SCCB currently used for buffering and at all
	 */
	buffer->mto_number++;
	buffer->mto_char_sum += str_len;
}
/*
 * processing of a message including escape characters,
 * returns number of characters written to the output sccb
 * ("processed" means that is not guaranteed that the character have already
 * been sent to the SCLP but that it will be done at least next time the SCLP
 * is not busy)
 *
 * Returns early (with the count of characters consumed so far) as soon as
 * the sccb runs out of room for another mto, so callers can emit the
 * buffer and retry with the remaining text.
 */
int
sclp_write(struct sclp_buffer *buffer, const unsigned char *msg, int count)
{
	int spaces, i_msg;
	int rc;
	/*
	 * parse msg for escape sequences (\t,\v ...) and put formatted
	 * msg into an mto (created by sclp_initialize_mto).
	 *
	 * We have to do this work ourselves because there is no support for
	 * these characters on the native machine and only partial support
	 * under VM (Why does VM interpret \n but the native machine doesn't ?)
	 *
	 * Depending on i/o-control setting the message is always written
	 * immediately or we wait for a final new line maybe coming with the
	 * next message. Besides we avoid a buffer overrun by writing its
	 * content.
	 *
	 * RESTRICTIONS:
	 *
	 * \r and \b work within one line because we are not able to modify
	 * previous output that have already been accepted by the SCLP.
	 *
	 * \t combined with following \r is not correctly represented because
	 *    \t is expanded to some spaces but \r does not know about a
	 *    previous \t and decreases the current position by one column.
	 *    This is in order to a slim and quick implementation.
	 */
	for (i_msg = 0; i_msg < count; i_msg++) {
		switch (msg[i_msg]) {
		case '\n':	/* new line, line feed (ASCII)	*/
			/* check if new mto needs to be created */
			if (buffer->current_line == NULL) {
				rc = sclp_initialize_mto(buffer, 0);
				if (rc)
					return i_msg;
			}
			sclp_finalize_mto(buffer);
			break;
		case '\a':	/* bell, one for several times	*/
			/* set SCLP sound alarm bit in General Object */
			buffer->sccb->msg_buf.mdb.go.general_msg_flags |=
				GNRLMSGFLGS_SNDALRM;
			break;
		case '\t':	/* horizontal tabulator	 */
			/* check if new mto needs to be created */
			if (buffer->current_line == NULL) {
				rc = sclp_initialize_mto(buffer,
							 buffer->columns);
				if (rc)
					return i_msg;
			}
			/* "go to (next htab-boundary + 1, same line)" */
			do {
				if (buffer->current_length >= buffer->columns)
					break;
				/* ok, add a blank (0x40 is EBCDIC space) */
				*buffer->current_line++ = 0x40;
				buffer->current_length++;
			} while (buffer->current_length % buffer->htab);
			break;
		case '\f':	/* form feed  */
		case '\v':	/* vertical tabulator  */
			/* "go to (actual column, actual line + 1)" */
			/* = new line, leading spaces */
			if (buffer->current_line != NULL) {
				/* replicate the current indentation on the
				 * freshly started line */
				spaces = buffer->current_length;
				sclp_finalize_mto(buffer);
				rc = sclp_initialize_mto(buffer,
							 buffer->columns);
				if (rc)
					return i_msg;
				memset(buffer->current_line, 0x40, spaces);
				buffer->current_line += spaces;
				buffer->current_length = spaces;
			} else {
				/* on an empty line this is the same as \n */
				rc = sclp_initialize_mto(buffer,
							 buffer->columns);
				if (rc)
					return i_msg;
				sclp_finalize_mto(buffer);
			}
			break;
		case '\b':	/* backspace  */
			/* "go to (actual column - 1, actual line)" */
			/* decrement counter indicating position, */
			/* do not remove last character */
			if (buffer->current_line != NULL &&
			    buffer->current_length > 0) {
				buffer->current_length--;
				buffer->current_line--;
			}
			break;
		case 0x00:	/* end of string  */
			/* transfer current line to SCCB */
			if (buffer->current_line != NULL)
				sclp_finalize_mto(buffer);
			/* skip the rest of the message including the 0 byte */
			i_msg = count - 1;
			break;
		default:	/* no escape character	*/
			/* do not output unprintable characters */
			if (!isprint(msg[i_msg]))
				break;
			/* check if new mto needs to be created */
			if (buffer->current_line == NULL) {
				rc = sclp_initialize_mto(buffer,
							 buffer->columns);
				if (rc)
					return i_msg;
			}
			/* store converted to EBCDIC */
			*buffer->current_line++ = sclp_ascebc(msg[i_msg]);
			buffer->current_length++;
			break;
		}
		/* check if current mto is full */
		if (buffer->current_line != NULL &&
		    buffer->current_length >= buffer->columns)
			sclp_finalize_mto(buffer);
	}
	/* return number of processed characters */
	return i_msg;
}
/*
 * Return the number of free bytes in the sccb.  A line that is still
 * open (not yet finalized) already consumes room for its mto header
 * plus the characters collected so far.
 */
int
sclp_buffer_space(struct sclp_buffer *buffer)
{
	int free_bytes;

	free_bytes = MAX_SCCB_ROOM - buffer->sccb->header.length;
	if (buffer->current_line != NULL)
		free_bytes -= sizeof(struct mto) + buffer->current_length;

	return free_bytes;
}
/*
 * Return number of characters in buffer: all characters already stored
 * in finalized mtos plus those of a still-open line, if any.
 */
int
sclp_chars_in_buffer(struct sclp_buffer *buffer)
{
	int total;

	total = buffer->mto_char_sum;
	if (buffer->current_line != NULL)
		total += buffer->current_length;

	return total;
}
/*
 * sets or provides some values that influence the drivers behaviour
 */
/* Change the line width; a current line that is already longer than the
 * new width is closed immediately. */
void
sclp_set_columns(struct sclp_buffer *buffer, unsigned short columns)
{
	buffer->columns = columns;
	if (buffer->current_line != NULL &&
	    buffer->current_length > buffer->columns)
		sclp_finalize_mto(buffer);
}
/* Change the tab stop distance used by sclp_write() for '\t'. */
void
sclp_set_htab(struct sclp_buffer *buffer, unsigned short htab)
{
	buffer->htab = htab;
}
/*
 * called by sclp_console_init and/or sclp_tty_init
 *
 * Registers the write-message event type with the SCLP core exactly
 * once; subsequent calls are no-ops returning success.
 */
int
sclp_rw_init(void)
{
	static int init_done;
	int rc;

	if (init_done)
		return 0;

	rc = sclp_register(&sclp_rw_event);
	if (!rc)
		init_done = 1;
	return rc;
}
#define SCLP_BUFFER_MAX_RETRY 1
/*
 * second half of Write Event Data-function that has to be done after
 * interruption indicating completion of Service Call.
 *
 * Inspects the SCLP response code and either reports the final status
 * through buffer->callback or retries/requeues the request (in which
 * case the function returns without invoking the callback - it will be
 * called again when the retried request completes).
 */
static void
sclp_writedata_callback(struct sclp_req *request, void *data)
{
	int rc;
	struct sclp_buffer *buffer;
	struct write_sccb *sccb;
	buffer = (struct sclp_buffer *) data;
	sccb = buffer->sccb;
	if (request->status == SCLP_REQ_FAILED) {
		if (buffer->callback != NULL)
			buffer->callback(buffer, -EIO);
		return;
	}
	/* check SCLP response code and choose suitable action	*/
	switch (sccb->header.response_code) {
	case 0x0020 :
		/* Normal completion, buffer processed, message(s) sent */
		rc = 0;
		break;
	case 0x0340: /* Contained SCLP equipment check */
		if (++buffer->retry_count > SCLP_BUFFER_MAX_RETRY) {
			rc = -EIO;
			break;
		}
		/* remove processed buffers and requeue rest */
		if (sclp_remove_processed((struct sccb_header *) sccb) > 0) {
			/* not all buffers were processed */
			sccb->header.response_code = 0x0000;
			buffer->request.status = SCLP_REQ_FILLED;
			rc = sclp_add_request(request);
			if (rc == 0)
				return;	/* retry in flight, callback later */
		} else
			rc = 0;
		break;
	case 0x0040: /* SCLP equipment check */
	case 0x05f0: /* Target resource in improper state */
		if (++buffer->retry_count > SCLP_BUFFER_MAX_RETRY) {
			rc = -EIO;
			break;
		}
		/* retry request */
		sccb->header.response_code = 0x0000;
		buffer->request.status = SCLP_REQ_FILLED;
		rc = sclp_add_request(request);
		if (rc == 0)
			return;	/* retry in flight, callback later */
		break;
	default:
		if (sccb->header.response_code == 0x71f0)
			rc = -ENOMEM;	/* insufficient resources */
		else
			rc = -EINVAL;
		break;
	}
	if (buffer->callback != NULL)
		buffer->callback(buffer, rc);
}
/*
 * Setup the request structure in the struct sclp_buffer to do SCLP Write
 * Event Data and pass the request to the core SCLP loop. Return zero on
 * success, non-zero otherwise.  @callback is invoked (possibly after
 * retries, see sclp_writedata_callback) with the final status; -EIO is
 * returned directly if the buffer holds no messages.
 */
int
sclp_emit_buffer(struct sclp_buffer *buffer,
		 void (*callback)(struct sclp_buffer *, int))
{
	struct write_sccb *sccb;
	/* add current line if there is one */
	if (buffer->current_line != NULL)
		sclp_finalize_mto(buffer);
	/* Are there messages in the output buffer ? */
	if (buffer->mto_number == 0)
		return -EIO;
	sccb = buffer->sccb;
	/* Use normal write message */
	sccb->msg_buf.header.type = EVTYP_MSG;
	buffer->request.command = SCLP_CMDW_WRITE_EVENT_DATA;
	buffer->request.status = SCLP_REQ_FILLED;
	buffer->request.callback = sclp_writedata_callback;
	buffer->request.callback_data = buffer;
	buffer->request.sccb = sccb;
	buffer->callback = callback;
	return sclp_add_request(&buffer->request);
}

101
drivers/s390/char/sclp_rw.h Normal file
View file

@ -0,0 +1,101 @@
/*
* interface to the SCLP-read/write driver
*
* Copyright IBM Corporation 1999, 2009
*
* Author(s): Martin Peschke <mpeschke@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
#ifndef __SCLP_RW_H__
#define __SCLP_RW_H__
#include <linux/list.h>
/* Message Text Object header; the text bytes follow directly after. */
struct mto {
	u16 length;
	u16 type;
	u16 line_type_flags;
	u8  alarm_control;
	u8  _reserved[3];
} __attribute__((packed));
/* General Object: per-MDB flags, timestamps and origin names. */
struct go {
	u16 length;
	u16 type;
	u32 domid;
	u8  hhmmss_time[8];
	u8  th_time[3];
	u8  reserved_0;
	u8  dddyyyy_date[7];
	u8  _reserved_1;
	u16 general_msg_flags;
	u8  _reserved_2[10];
	u8  originating_system_name[8];
	u8  job_guest_name[8];
} __attribute__((packed));
/* Message Data Block header (tag is ebcdic "MDB "). */
struct mdb_header {
	u16 length;
	u16 type;
	u32 tag;
	u32 revision_code;
} __attribute__((packed));
/* Message Data Block: header + general object, mtos are appended. */
struct mdb {
	struct mdb_header header;
	struct go go;
} __attribute__((packed));
/* Event buffer carrying one mdb. */
struct msg_buf {
	struct evbuf_header header;
	struct mdb mdb;
} __attribute__((packed));
/* Complete write-message SCCB as sent to the SCLP. */
struct write_sccb {
	struct sccb_header header;
	struct msg_buf msg_buf;
} __attribute__((packed));
/* The number of empty mto buffers that can be contained in a single sccb. */
#define NR_EMPTY_MTO_PER_SCCB ((PAGE_SIZE - sizeof(struct sclp_buffer) - \
sizeof(struct write_sccb)) / sizeof(struct mto))
/*
 * data structure for information about list of SCCBs (only for writing),
 * will be located at the end of a SCCBs page
 */
struct sclp_buffer {
	struct list_head list;		/* list_head for sccb_info chain */
	struct sclp_req request;	/* embedded SCLP request */
	struct write_sccb *sccb;	/* start of the page (see sclp_make_buffer) */
	char *current_line;		/* open mto text, NULL if no line open */
	int current_length;		/* chars collected in the open line */
	int retry_count;		/* retries done by writedata callback */
	/* output format settings */
	unsigned short columns;
	unsigned short htab;
	/* statistics about this buffer */
	unsigned int mto_char_sum;	/* # chars in sccb */
	unsigned int mto_number;	/* # mtos in sccb */
	/* Callback that is called after reaching final status. */
	void (*callback)(struct sclp_buffer *, int);
};
int sclp_rw_init(void);
struct sclp_buffer *sclp_make_buffer(void *, unsigned short, unsigned short);
void *sclp_unmake_buffer(struct sclp_buffer *);
int sclp_buffer_space(struct sclp_buffer *);
int sclp_write(struct sclp_buffer *buffer, const unsigned char *, int);
int sclp_emit_buffer(struct sclp_buffer *,void (*)(struct sclp_buffer *,int));
void sclp_set_columns(struct sclp_buffer *, unsigned short);
void sclp_set_htab(struct sclp_buffer *, unsigned short);
int sclp_chars_in_buffer(struct sclp_buffer *);
#ifdef CONFIG_SCLP_CONSOLE
void sclp_console_pm_event(enum sclp_pm_event sclp_pm_event);
#else
static inline void sclp_console_pm_event(enum sclp_pm_event sclp_pm_event) { }
#endif
#endif /* __SCLP_RW_H__ */

View file

@ -0,0 +1,281 @@
/*
* SCLP "store data in absolute storage"
*
* Copyright IBM Corp. 2003, 2013
* Author(s): Michael Holzheu
*/
#define KMSG_COMPONENT "sclp_sdias"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/completion.h>
#include <linux/sched.h>
#include <asm/sclp.h>
#include <asm/debug.h>
#include <asm/ipl.h>
#include "sclp_sdias.h"
#include "sclp.h"
#include "sclp_rw.h"
#define TRACE(x...) debug_sprintf_event(sdias_dbf, 1, x)
#define SDIAS_RETRIES 300
#define SDIAS_SLEEP_TICKS 50
static struct debug_info *sdias_dbf;
static struct sclp_register sclp_sdias_register = {
.send_mask = EVTYP_SDIAS_MASK,
};
static struct sdias_sccb sccb __attribute__((aligned(4096)));
static struct sdias_evbuf sdias_evbuf;
static DECLARE_COMPLETION(evbuf_accepted);
static DECLARE_COMPLETION(evbuf_done);
static DEFINE_MUTEX(sdias_mutex);
/*
 * Called by SCLP base when read event data has been completed (async mode only)
 *
 * Copies the event buffer into the static sdias_evbuf (clamped to its
 * size) and wakes the waiter in sdias_sclp_send().
 */
static void sclp_sdias_receiver_fn(struct evbuf_header *evbuf)
{
	memcpy(&sdias_evbuf, evbuf,
	       min_t(unsigned long, sizeof(sdias_evbuf), evbuf->length));
	complete(&evbuf_done);
	TRACE("sclp_sdias_receiver_fn done\n");
}
/*
 * Called by SCLP base when sdias event has been accepted; wakes the
 * waiter in sdias_sclp_send().
 */
static void sdias_callback(struct sclp_req *request, void *data)
{
	complete(&evbuf_accepted);
	TRACE("callback done\n");
}
/*
 * Submit an sdias request and wait for its result, retrying up to
 * SDIAS_RETRIES times.  In sync mode the answer is copied from the
 * initial sccb; in async mode (receiver_fn set) the answer arrives via
 * sclp_sdias_receiver_fn.  Returns 0 on success, -EIO when all retries
 * are exhausted.  Not reentrant: uses the static sccb/sdias_evbuf and
 * the global completions (callers hold sdias_mutex).
 */
static int sdias_sclp_send(struct sclp_req *req)
{
	int retries;
	int rc;
	for (retries = SDIAS_RETRIES; retries; retries--) {
		TRACE("add request\n");
		rc = sclp_add_request(req);
		if (rc) {
			/* not initiated, wait some time and retry */
			set_current_state(TASK_INTERRUPTIBLE);
			TRACE("add request failed: rc = %i\n",rc);
			schedule_timeout(SDIAS_SLEEP_TICKS);
			continue;
		}
		/* initiated, wait for completion of service call */
		wait_for_completion(&evbuf_accepted);
		if (req->status == SCLP_REQ_FAILED) {
			TRACE("sclp request failed\n");
			continue;
		}
		/* if not accepted, retry (bit 0x80 = processed-buffer flag) */
		if (!(sccb.evbuf.hdr.flags & 0x80)) {
			TRACE("sclp request failed: flags=%x\n",
			      sccb.evbuf.hdr.flags);
			continue;
		}
		/*
		 * for the sync interface the response is in the initial sccb
		 */
		if (!sclp_sdias_register.receiver_fn) {
			memcpy(&sdias_evbuf, &sccb.evbuf, sizeof(sdias_evbuf));
			TRACE("sync request done\n");
			return 0;
		}
		/* otherwise we wait for completion */
		wait_for_completion(&evbuf_done);
		TRACE("request done\n");
		return 0;
	}
	return -EIO;
}
/*
 * Get number of blocks (4K) available in the HSA
 *
 * Return: block count (>= 0) or a negative error code.  Serialized via
 * sdias_mutex because the static sccb is shared.
 */
int sclp_sdias_blk_count(void)
{
	struct sclp_req request;
	int rc;
	mutex_lock(&sdias_mutex);
	/* build an SDIAS "size" query event */
	memset(&sccb, 0, sizeof(sccb));
	memset(&request, 0, sizeof(request));
	sccb.hdr.length = sizeof(sccb);
	sccb.evbuf.hdr.length = sizeof(struct sdias_evbuf);
	sccb.evbuf.hdr.type = EVTYP_SDIAS;
	sccb.evbuf.event_qual = SDIAS_EQ_SIZE;
	sccb.evbuf.data_id = SDIAS_DI_FCP_DUMP;
	sccb.evbuf.event_id = 4712;
	sccb.evbuf.dbs = 1;
	request.sccb = &sccb;
	request.command = SCLP_CMDW_WRITE_EVENT_DATA;
	request.status = SCLP_REQ_FILLED;
	request.callback = sdias_callback;
	rc = sdias_sclp_send(&request);
	if (rc) {
		pr_err("sclp_send failed for get_nr_blocks\n");
		goto out;
	}
	if (sccb.hdr.response_code != 0x0020) {
		TRACE("send failed: %x\n", sccb.hdr.response_code);
		rc = -EIO;
		goto out;
	}
	/* event_status 0 means success; blk_cnt holds the HSA size */
	switch (sdias_evbuf.event_status) {
	case 0:
		rc = sdias_evbuf.blk_cnt;
		break;
	default:
		pr_err("SCLP error: %x\n", sdias_evbuf.event_status);
		rc = -EIO;
		goto out;
	}
	TRACE("%i blocks\n", rc);
out:
	mutex_unlock(&sdias_mutex);
	return rc;
}
/*
 * Copy from HSA to absolute storage (not reentrant):
 *
 * @dest     : Address of buffer where data should be copied
 * @start_blk: Start Block (beginning with 1)
 * @nr_blks  : Number of 4K blocks to copy
 *
 * Return Value: 0 : Requested 'number' of blocks of data copied
 *		 <0: ERROR - negative event status
 */
int sclp_sdias_copy(void *dest, int start_blk, int nr_blks)
{
	struct sclp_req request;
	int rc;
	mutex_lock(&sdias_mutex);
	/* build an SDIAS "store data" event describing the copy */
	memset(&sccb, 0, sizeof(sccb));
	memset(&request, 0, sizeof(request));
	sccb.hdr.length = sizeof(sccb);
	sccb.evbuf.hdr.length = sizeof(struct sdias_evbuf);
	sccb.evbuf.hdr.type = EVTYP_SDIAS;
	sccb.evbuf.hdr.flags = 0;
	sccb.evbuf.event_qual = SDIAS_EQ_STORE_DATA;
	sccb.evbuf.data_id = SDIAS_DI_FCP_DUMP;
	sccb.evbuf.event_id = 4712;
#ifdef CONFIG_64BIT
	sccb.evbuf.asa_size = SDIAS_ASA_SIZE_64;
#else
	sccb.evbuf.asa_size = SDIAS_ASA_SIZE_32;
#endif
	sccb.evbuf.event_status = 0;
	sccb.evbuf.blk_cnt = nr_blks;
	sccb.evbuf.asa = (unsigned long)dest;	/* absolute storage address */
	sccb.evbuf.fbn = start_blk;		/* first block number */
	sccb.evbuf.lbn = 0;
	sccb.evbuf.dbs = 1;
	request.sccb = &sccb;
	request.command = SCLP_CMDW_WRITE_EVENT_DATA;
	request.status = SCLP_REQ_FILLED;
	request.callback = sdias_callback;
	rc = sdias_sclp_send(&request);
	if (rc) {
		pr_err("sclp_send failed: %x\n", rc);
		goto out;
	}
	if (sccb.hdr.response_code != 0x0020) {
		TRACE("copy failed: %x\n", sccb.hdr.response_code);
		rc = -EIO;
		goto out;
	}
	/* NOTE(review): a partial store (PART_STORED) is traced but still
	 * reported as success (rc stays 0) - presumably callers rely on
	 * blk_cnt limits; verify against callers. */
	switch (sdias_evbuf.event_status) {
	case SDIAS_EVSTATE_ALL_STORED:
		TRACE("all stored\n");
		break;
	case SDIAS_EVSTATE_PART_STORED:
		TRACE("part stored: %i\n", sdias_evbuf.blk_cnt);
		break;
	case SDIAS_EVSTATE_NO_DATA:
		TRACE("no data\n");
		/* fall through */
	default:
		pr_err("Error from SCLP while copying hsa. Event status = %x\n",
		       sdias_evbuf.event_status);
		rc = -EIO;
	}
out:
	mutex_unlock(&sdias_mutex);
	return rc;
}
/*
 * Register the sdias event type and probe whether the HSA is usable in
 * the currently configured mode (sync or async, see callers).
 *
 * Return: 0 when registration succeeded and the HSA reports at least one
 * block, the sclp_register() error, or -ENODEV otherwise.
 */
static int __init sclp_sdias_register_check(void)
{
	int rc;
	rc = sclp_register(&sclp_sdias_register);
	if (rc)
		return rc;
	/* Fix: sclp_sdias_blk_count() returns a negative error code on
	 * failure; treat that like "no blocks" instead of success, so a
	 * broken sync-mode probe lets the caller fall back to async mode. */
	if (sclp_sdias_blk_count() <= 0) {
		sclp_unregister(&sclp_sdias_register);
		return -ENODEV;
	}
	return 0;
}
/* Probe sdias in synchronous mode (answer delivered in the initial sccb,
 * no receive events). */
static int __init sclp_sdias_init_sync(void)
{
	TRACE("Try synchronous mode\n");
	sclp_sdias_register.receive_mask = 0;
	sclp_sdias_register.receiver_fn = NULL;
	return sclp_sdias_register_check();
}
/* Probe sdias in asynchronous mode (answer delivered via a separate
 * receive event to sclp_sdias_receiver_fn). */
static int __init sclp_sdias_init_async(void)
{
	TRACE("Try asynchronous mode\n");
	sclp_sdias_register.receive_mask = EVTYP_SDIAS_MASK;
	sclp_sdias_register.receiver_fn = sclp_sdias_receiver_fn;
	return sclp_sdias_register_check();
}
/*
 * Initialize the sdias service.  Only relevant when booted for an FCP
 * dump; tries synchronous mode first, then asynchronous mode.
 *
 * Return: 0 on success (or when not an FCP dump), -ENODEV when neither
 * mode works.
 */
int __init sclp_sdias_init(void)
{
	if (ipl_info.type != IPL_TYPE_FCP_DUMP)
		return 0;
	/* NOTE(review): debug_register() may return NULL on failure and is
	 * not checked here - TRACE would then be a no-op; confirm the s390
	 * debug feature tolerates a NULL handle. */
	sdias_dbf = debug_register("dump_sdias", 4, 1, 4 * sizeof(long));
	debug_register_view(sdias_dbf, &debug_sprintf_view);
	debug_set_level(sdias_dbf, 6);
	if (sclp_sdias_init_sync() == 0)
		goto out;
	if (sclp_sdias_init_async() == 0)
		goto out;
	TRACE("init failed\n");
	return -ENODEV;
out:
	TRACE("init done\n");
	return 0;
}
/* Tear down the sdias service: drop the debug area and unregister the
 * event type. */
void __exit sclp_sdias_exit(void)
{
	debug_unregister(sdias_dbf);
	sclp_unregister(&sclp_sdias_register);
}

View file

@ -0,0 +1,46 @@
/*
* SCLP "store data in absolute storage"
*
* Copyright IBM Corp. 2003, 2013
*/
#ifndef SCLP_SDIAS_H
#define SCLP_SDIAS_H
#include "sclp.h"
#define SDIAS_EQ_STORE_DATA 0x0
#define SDIAS_EQ_SIZE 0x1
#define SDIAS_DI_FCP_DUMP 0x0
#define SDIAS_ASA_SIZE_32 0x0
#define SDIAS_ASA_SIZE_64 0x1
#define SDIAS_EVSTATE_ALL_STORED 0x0
#define SDIAS_EVSTATE_NO_DATA 0x3
#define SDIAS_EVSTATE_PART_STORED 0x10
struct sdias_evbuf {
struct evbuf_header hdr;
u8 event_qual;
u8 data_id;
u64 reserved2;
u32 event_id;
u16 reserved3;
u8 asa_size;
u8 event_status;
u32 reserved4;
u32 blk_cnt;
u64 asa;
u32 reserved5;
u32 fbn;
u32 reserved6;
u32 lbn;
u16 reserved7;
u16 dbs;
} __packed;
struct sdias_sccb {
struct sccb_header hdr;
struct sdias_evbuf evbuf;
} __packed;
#endif /* SCLP_SDIAS_H */

View file

@ -0,0 +1,576 @@
/*
* SCLP line mode terminal driver.
*
* S390 version
* Copyright IBM Corp. 1999
* Author(s): Martin Peschke <mpeschke@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
#include <linux/module.h>
#include <linux/kmod.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/gfp.h>
#include <asm/uaccess.h>
#include "ctrlchar.h"
#include "sclp.h"
#include "sclp_rw.h"
#include "sclp_tty.h"
/*
* size of a buffer that collects single characters coming in
* via sclp_tty_put_char()
*/
#define SCLP_TTY_BUF_SIZE 512
/*
* There is exactly one SCLP terminal, so we can keep things simple
* and allocate all variables statically.
*/
/* Lock to guard over changes to global variables. */
static spinlock_t sclp_tty_lock;
/* List of free pages that can be used for console output buffering. */
static struct list_head sclp_tty_pages;
/* List of full struct sclp_buffer structures ready for output. */
static struct list_head sclp_tty_outqueue;
/* Counter how many buffers are emitted. */
static int sclp_tty_buffer_count;
/* Pointer to current console buffer. */
static struct sclp_buffer *sclp_ttybuf;
/* Timer for delayed output of console messages. */
static struct timer_list sclp_tty_timer;
static struct tty_port sclp_port;
static unsigned char sclp_tty_chars[SCLP_TTY_BUF_SIZE];
static unsigned short int sclp_tty_chars_count;
struct tty_driver *sclp_tty_driver;
static int sclp_tty_tolower;
static int sclp_tty_columns = 80;
#define SPACES_PER_TAB 8
#define CASE_DELIMITER 0x6c /* to separate upper and lower case (% in EBCDIC) */
/* This routine is called whenever we try to open a SCLP terminal.
 * There is exactly one SCLP tty, so only the tty_port association is
 * established here; no per-open state is needed. */
static int
sclp_tty_open(struct tty_struct *tty, struct file *filp)
{
	tty_port_tty_set(&sclp_port, tty);
	tty->driver_data = NULL;
	sclp_port.low_latency = 0;
	return 0;
}
/* This routine is called when the SCLP terminal is closed.  Only the
 * last close (count drops to 1) detaches the tty from the port. */
static void
sclp_tty_close(struct tty_struct *tty, struct file *filp)
{
	if (tty->count > 1)
		return;
	tty_port_tty_set(&sclp_port, NULL);
}
/*
* This routine returns the numbers of characters the tty driver
* will accept for queuing to be written. This number is subject
* to change as output buffers get emptied, or if the output flow
* control is acted. This is not an exact number because not every
* character needs the same space in the sccb. The worst case is
* a string of newlines. Every newlines creates a new mto which
* needs 8 bytes.
*/
static int
sclp_tty_write_room (struct tty_struct *tty)
{
unsigned long flags;
struct list_head *l;
int count;
spin_lock_irqsave(&sclp_tty_lock, flags);
count = 0;
if (sclp_ttybuf != NULL)
count = sclp_buffer_space(sclp_ttybuf) / sizeof(struct mto);
list_for_each(l, &sclp_tty_pages)
count += NR_EMPTY_MTO_PER_SCCB;
spin_unlock_irqrestore(&sclp_tty_lock, flags);
return count;
}
/*
 * Write-request completion callback.  Recycles the finished buffer's
 * page into the free-page pool and starts transmission of the next
 * queued buffer.  Loops as long as sclp_emit_buffer() fails for the
 * next buffer (so failed buffers are recycled immediately), then wakes
 * up writers that may be waiting for room.
 */
static void
sclp_ttybuf_callback(struct sclp_buffer *buffer, int rc)
{
	unsigned long flags;
	void *page;

	do {
		page = sclp_unmake_buffer(buffer);
		spin_lock_irqsave(&sclp_tty_lock, flags);
		/* Remove buffer from outqueue */
		list_del(&buffer->list);
		sclp_tty_buffer_count--;
		list_add_tail((struct list_head *) page, &sclp_tty_pages);
		/* Check if there is a pending buffer on the out queue. */
		buffer = NULL;
		if (!list_empty(&sclp_tty_outqueue))
			buffer = list_entry(sclp_tty_outqueue.next,
					    struct sclp_buffer, list);
		spin_unlock_irqrestore(&sclp_tty_lock, flags);
	} while (buffer && sclp_emit_buffer(buffer, sclp_ttybuf_callback));
	/* Writers blocked in write_room/write may proceed now. */
	tty_port_tty_wakeup(&sclp_port);
}
/*
 * Queue BUFFER on the out queue and, if the queue was idle, start the
 * transmission.  An immediately failing emit is fed straight into the
 * completion callback so the buffer gets recycled.
 */
static inline void
__sclp_ttybuf_emit(struct sclp_buffer *buffer)
{
	unsigned long flags;
	int was_busy;
	int rc;

	spin_lock_irqsave(&sclp_tty_lock, flags);
	list_add_tail(&buffer->list, &sclp_tty_outqueue);
	was_busy = sclp_tty_buffer_count++;
	spin_unlock_irqrestore(&sclp_tty_lock, flags);
	if (was_busy)
		return;		/* callback chain will pick this buffer up */
	rc = sclp_emit_buffer(buffer, sclp_ttybuf_callback);
	if (rc)
		sclp_ttybuf_callback(buffer, rc);
}
/*
 * Timer expiry handler: detach the partially filled output buffer and
 * emit it.
 */
static void
sclp_tty_timeout(unsigned long data)
{
	struct sclp_buffer *pending;
	unsigned long flags;

	spin_lock_irqsave(&sclp_tty_lock, flags);
	pending = sclp_ttybuf;
	sclp_ttybuf = NULL;
	spin_unlock_irqrestore(&sclp_tty_lock, flags);

	if (pending)
		__sclp_ttybuf_emit(pending);
}
/*
 * Write a string to the sclp tty.
 *
 * Copies COUNT bytes from STR into SCLP output buffers, emitting each
 * buffer as it fills up.  With MAY_FAIL set the function returns early
 * (possibly with a short count) when no free page is available;
 * otherwise it busy-waits via sclp_sync_wait() until one is recycled.
 * Returns the number of characters actually accepted.
 */
static int sclp_tty_write_string(const unsigned char *str, int count, int may_fail)
{
	unsigned long flags;
	void *page;
	int written;
	int overall_written;
	struct sclp_buffer *buf;

	if (count <= 0)
		return 0;
	overall_written = 0;
	spin_lock_irqsave(&sclp_tty_lock, flags);
	do {
		/* Create a sclp output buffer if none exists yet */
		if (sclp_ttybuf == NULL) {
			while (list_empty(&sclp_tty_pages)) {
				/* drop the lock while failing or waiting */
				spin_unlock_irqrestore(&sclp_tty_lock, flags);
				if (may_fail)
					goto out;
				else
					sclp_sync_wait();
				spin_lock_irqsave(&sclp_tty_lock, flags);
			}
			page = sclp_tty_pages.next;
			list_del((struct list_head *) page);
			sclp_ttybuf = sclp_make_buffer(page, sclp_tty_columns,
						       SPACES_PER_TAB);
		}
		/* try to write the string to the current output buffer */
		written = sclp_write(sclp_ttybuf, str, count);
		overall_written += written;
		if (written == count)
			break;
		/*
		 * Not all characters could be written to the current
		 * output buffer. Emit the buffer, create a new buffer
		 * and then output the rest of the string.
		 */
		buf = sclp_ttybuf;
		sclp_ttybuf = NULL;
		/* __sclp_ttybuf_emit takes the lock itself */
		spin_unlock_irqrestore(&sclp_tty_lock, flags);
		__sclp_ttybuf_emit(buf);
		spin_lock_irqsave(&sclp_tty_lock, flags);
		str += written;
		count -= written;
	} while (count > 0);
	/* Setup timer to output current console buffer after 1/10 second */
	if (sclp_ttybuf && sclp_chars_in_buffer(sclp_ttybuf) &&
	    !timer_pending(&sclp_tty_timer)) {
		init_timer(&sclp_tty_timer);
		sclp_tty_timer.function = sclp_tty_timeout;
		sclp_tty_timer.data = 0UL;
		sclp_tty_timer.expires = jiffies + HZ/10;
		add_timer(&sclp_tty_timer);
	}
	spin_unlock_irqrestore(&sclp_tty_lock, flags);
out:
	return overall_written;
}
/*
 * tty write operation.  Any characters still staged by put_char() are
 * flushed first so output ordering is preserved, then the new data is
 * written (allowed to return a short count).
 */
static int
sclp_tty_write(struct tty_struct *tty, const unsigned char *buf, int count)
{
	if (sclp_tty_chars_count) {
		sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count, 0);
		sclp_tty_chars_count = 0;
	}
	return sclp_tty_write_string(buf, count, 1);
}
/*
 * tty put_char operation.  Characters are staged in sclp_tty_chars[]
 * and only written out when a newline arrives or the staging buffer is
 * full; the tty layer is expected to call flush_chars() afterwards.
 * Always reports the single character as accepted.
 */
static int
sclp_tty_put_char(struct tty_struct *tty, unsigned char ch)
{
	sclp_tty_chars[sclp_tty_chars_count++] = ch;
	if (sclp_tty_chars_count >= SCLP_TTY_BUF_SIZE || ch == '\n') {
		sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count, 0);
		sclp_tty_chars_count = 0;
	}
	return 1;
}
/*
 * tty flush_chars operation: write out whatever put_char() staged.
 */
static void
sclp_tty_flush_chars(struct tty_struct *tty)
{
	if (sclp_tty_chars_count == 0)
		return;
	sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count, 0);
	sclp_tty_chars_count = 0;
}
/*
 * Report the number of characters buffered by the driver: everything
 * in the current (unfinished) buffer plus everything queued for
 * output but not yet acknowledged by the SCLP.
 */
static int
sclp_tty_chars_in_buffer(struct tty_struct *tty)
{
	unsigned long flags;
	struct sclp_buffer *sb;
	struct list_head *pos;
	int total = 0;

	spin_lock_irqsave(&sclp_tty_lock, flags);
	if (sclp_ttybuf)
		total = sclp_chars_in_buffer(sclp_ttybuf);
	list_for_each(pos, &sclp_tty_outqueue) {
		sb = list_entry(pos, struct sclp_buffer, list);
		total += sclp_chars_in_buffer(sb);
	}
	spin_unlock_irqrestore(&sclp_tty_lock, flags);
	return total;
}
/*
 * tty flush_buffer operation: push characters collected through
 * put_char() into the SCLP output buffers.
 */
static void
sclp_tty_flush_buffer(struct tty_struct *tty)
{
	if (sclp_tty_chars_count == 0)
		return;
	sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count, 0);
	sclp_tty_chars_count = 0;
}
/*
 * Push received input to the tty layer.  Control-character sequences
 * are filtered through ctrlchar_handle(); normal input gets a newline
 * appended automatically unless the line ends in "^n" (or its EBCDIC
 * leftover "\252n"), which suppresses it.
 */
static void
sclp_tty_input(unsigned char* buf, unsigned int count)
{
	struct tty_struct *tty = tty_port_tty_get(&sclp_port);
	unsigned int cchar;

	/*
	 * If this tty driver is currently closed
	 * then throw the received input away.
	 */
	if (tty == NULL)
		return;
	cchar = ctrlchar_handle(buf, count, tty);
	switch (cchar & CTRLCHAR_MASK) {
	case CTRLCHAR_SYSRQ:
		/* nothing more to do here */
		break;
	case CTRLCHAR_CTRL:
		tty_insert_flip_char(&sclp_port, cchar, TTY_NORMAL);
		tty_flip_buffer_push(&sclp_port);
		break;
	case CTRLCHAR_NONE:
		/* send (normal) input to line discipline */
		if (count < 2 ||
		    (strncmp((const char *) buf + count - 2, "^n", 2) &&
		     strncmp((const char *) buf + count - 2, "\252n", 2))) {
			/* add the auto \n */
			tty_insert_flip_string(&sclp_port, buf, count);
			tty_insert_flip_char(&sclp_port, '\n', TTY_NORMAL);
		} else
			/* strip the "^n" suffix, no newline appended */
			tty_insert_flip_string(&sclp_port, buf, count - 2);
		tty_flip_buffer_push(&sclp_port);
		break;
	}
	tty_kref_put(tty);
}
/*
 * In-place case conversion of an EBCDIC string.  A single CASE_DELIMITER
 * toggles conversion on/off, a doubled delimiter stands for one literal
 * delimiter character.  Whether conversion means upper- or lowercasing
 * depends on sclp_tty_tolower.  Returns the length of the (possibly
 * shorter) result.
 */
static int sclp_switch_cases(unsigned char *buf, int count)
{
	unsigned char *in = buf;
	unsigned char *out = buf;
	int switching = 0;	/* case conversion currently active? */

	while (count-- > 0) {
		if (*in == CASE_DELIMITER) {
			if (count && in[1] == CASE_DELIMITER) {
				/* doubled delimiter: keep one literal copy */
				*out++ = *in++;
				count--;
			} else {
				/* single delimiter: flip conversion state */
				switching = ~switching;
			}
			in++;		/* consume the delimiter itself */
			continue;
		}
		if (!switching)
			*out++ = *in++;
		else if (sclp_tty_tolower)
			/* input is lowercased, so delimited parts go up */
			*out++ = _ebc_toupper[(int) *in++];
		else
			*out++ = _ebc_tolower[(int) *in++];
	}
	/* return length of reformatted string. */
	return out - buf;
}
/* Extract the text of one input subvector and hand it to the tty. */
static void sclp_get_input(struct gds_subvector *sv)
{
	unsigned char *str = (unsigned char *) (sv + 1);
	int len = sv->length - sizeof(*sv);

	if (sclp_tty_tolower)
		EBC_TOLOWER(str, len);
	len = sclp_switch_cases(str, len);
	/* in-place EBCDIC -> ASCII conversion of the SCCB data */
	sclp_ebcasc_str(str, len);
	/* transfer input to high level driver */
	sclp_tty_input(str, len);
}
/* Walk a self-defining text message and process its 0x30 subvectors. */
static inline void sclp_eval_selfdeftextmsg(struct gds_subvector *sv)
{
	void *limit = (void *) sv + sv->length;
	struct gds_subvector *cur;

	for (cur = sv + 1; (void *) cur < limit;
	     cur = (void *) cur + cur->length) {
		if (cur->key == 0x30)
			sclp_get_input(cur);
	}
}
/* Walk a text command vector and process its self-defining messages. */
static inline void sclp_eval_textcmd(struct gds_vector *v)
{
	void *limit = (void *) v + v->length;
	struct gds_subvector *sv = (struct gds_subvector *) (v + 1);

	while ((void *) sv < limit) {
		if (sv->key == GDS_KEY_SELFDEFTEXTMSG)
			sclp_eval_selfdeftextmsg(sv);
		sv = (void *) sv + sv->length;
	}
}
/* Walk a CPMSU vector and process each contained text command. */
static inline void sclp_eval_cpmsu(struct gds_vector *v)
{
	void *limit = (void *) v + v->length;
	struct gds_vector *cur;

	for (cur = v + 1; (void *) cur < limit;
	     cur = (void *) cur + cur->length) {
		if (cur->gds_id == GDS_ID_TEXTCMD)
			sclp_eval_textcmd(cur);
	}
}
/* Locate the CPMSU vector inside an MDSMU and process it. */
static inline void sclp_eval_mdsmu(struct gds_vector *v)
{
	struct gds_vector *cpmsu;

	cpmsu = sclp_find_gds_vector(v + 1, (void *) v + v->length,
				     GDS_ID_CPMSU);
	if (cpmsu)
		sclp_eval_cpmsu(cpmsu);
}
/* Entry point for incoming event buffers: look for an MDSMU vector. */
static void sclp_tty_receiver(struct evbuf_header *evbuf)
{
	struct gds_vector *mdsmu;

	mdsmu = sclp_find_gds_vector(evbuf + 1,
				     (void *) evbuf + evbuf->length,
				     GDS_ID_MDSMU);
	if (mdsmu)
		sclp_eval_mdsmu(mdsmu);
}
/* State change notifications are of no interest to this driver. */
static void
sclp_tty_state_change(struct sclp_register *reg)
{
}

/* Event registration: receive operator commands and priority message
 * commands and feed them into sclp_tty_receiver(). */
static struct sclp_register sclp_input_event =
{
	.receive_mask = EVTYP_OPCMD_MASK | EVTYP_PMSGCMD_MASK,
	.state_change_fn = sclp_tty_state_change,
	.receiver_fn = sclp_tty_receiver
};
/* tty operations implemented by the SCLP line-mode terminal driver. */
static const struct tty_operations sclp_ops = {
	.open = sclp_tty_open,
	.close = sclp_tty_close,
	.write = sclp_tty_write,
	.put_char = sclp_tty_put_char,
	.flush_chars = sclp_tty_flush_chars,
	.write_room = sclp_tty_write_room,
	.chars_in_buffer = sclp_tty_chars_in_buffer,
	.flush_buffer = sclp_tty_flush_buffer,
};
/*
 * Driver initialization: allocate output buffer pages, register for
 * SCLP input events and register the tty driver.  Does nothing unless
 * the SCLP terminal is the console device.
 *
 * Fixes over the previous version: the buffer pages are released again
 * on every failure path (they used to leak on allocation or
 * registration failure), and a failing tty_register_driver() now also
 * unregisters the SCLP input event listener.
 */
static int __init
sclp_tty_init(void)
{
	struct tty_driver *driver;
	struct list_head *l, *tmp;
	void *page;
	int i;
	int rc;

	if (!CONSOLE_IS_SCLP)
		return 0;
	driver = alloc_tty_driver(1);
	if (!driver)
		return -ENOMEM;
	rc = sclp_rw_init();
	if (rc) {
		put_tty_driver(driver);
		return rc;
	}
	/* Allocate pages for output buffering */
	INIT_LIST_HEAD(&sclp_tty_pages);
	for (i = 0; i < MAX_KMEM_PAGES; i++) {
		page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
		if (page == NULL) {
			rc = -ENOMEM;
			goto out_pages;
		}
		list_add_tail((struct list_head *) page, &sclp_tty_pages);
	}
	INIT_LIST_HEAD(&sclp_tty_outqueue);
	spin_lock_init(&sclp_tty_lock);
	init_timer(&sclp_tty_timer);
	sclp_ttybuf = NULL;
	sclp_tty_buffer_count = 0;
	if (MACHINE_IS_VM) {
		/*
		 * save 4 characters for the CPU number
		 * written at start of each line by VM/CP
		 */
		sclp_tty_columns = 76;
		/* case input lines to lowercase */
		sclp_tty_tolower = 1;
	}
	sclp_tty_chars_count = 0;
	rc = sclp_register(&sclp_input_event);
	if (rc)
		goto out_pages;
	tty_port_init(&sclp_port);

	driver->driver_name = "sclp_line";
	driver->name = "sclp_line";
	driver->major = TTY_MAJOR;
	driver->minor_start = 64;
	driver->type = TTY_DRIVER_TYPE_SYSTEM;
	driver->subtype = SYSTEM_TYPE_TTY;
	driver->init_termios = tty_std_termios;
	driver->init_termios.c_iflag = IGNBRK | IGNPAR;
	driver->init_termios.c_oflag = ONLCR;
	driver->init_termios.c_lflag = ISIG | ECHO;
	driver->flags = TTY_DRIVER_REAL_RAW;
	tty_set_operations(driver, &sclp_ops);
	tty_port_link_device(&sclp_port, driver, 0);
	rc = tty_register_driver(driver);
	if (rc) {
		tty_port_destroy(&sclp_port);
		sclp_unregister(&sclp_input_event);
		goto out_pages;
	}
	sclp_tty_driver = driver;
	return 0;

out_pages:
	/* Release all buffer pages allocated so far. */
	list_for_each_safe(l, tmp, &sclp_tty_pages) {
		list_del(l);
		free_page((unsigned long) l);
	}
	put_tty_driver(driver);
	return rc;
}
module_init(sclp_tty_init);

View file

@ -0,0 +1,17 @@
/*
* interface to the SCLP-read/write driver
*
* S390 version
* Copyright IBM Corp. 1999
* Author(s): Martin Peschke <mpeschke@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
#ifndef __SCLP_TTY_H__
#define __SCLP_TTY_H__

#include <linux/tty_driver.h>

/* tty driver instance registered by sclp_tty.c (SCLP line-mode terminal). */
extern struct tty_driver *sclp_tty_driver;

#endif /* __SCLP_TTY_H__ */

View file

@ -0,0 +1,849 @@
/*
* SCLP VT220 terminal driver.
*
* Copyright IBM Corp. 2003, 2009
*
* Author(s): Peter Oberparleiter <Peter.Oberparleiter@de.ibm.com>
*/
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/timer.h>
#include <linux/kernel.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/major.h>
#include <linux/console.h>
#include <linux/kdev_t.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include "sclp.h"
#define SCLP_VT220_MAJOR		TTY_MAJOR
#define SCLP_VT220_MINOR		65
#define SCLP_VT220_DRIVER_NAME		"sclp_vt220"
#define SCLP_VT220_DEVICE_NAME		"ttysclp"
#define SCLP_VT220_CONSOLE_NAME		"ttyS"
#define SCLP_VT220_CONSOLE_INDEX	1	/* console=ttyS1 */

/* Representation of a single write request */
struct sclp_vt220_request {
	struct list_head list;		/* link in empty/out queue */
	struct sclp_req sclp_req;	/* underlying SCLP core request */
	int retry_count;		/* retries after equipment checks */
};

/* VT220 SCCB */
struct sclp_vt220_sccb {
	struct sccb_header header;
	struct evbuf_header evbuf;
};

/* Payload capacity of one request page: the SCCB sits at the start of
 * the page, the request structure at its very end. */
#define SCLP_VT220_MAX_CHARS_PER_BUFFER	(PAGE_SIZE - \
					 sizeof(struct sclp_vt220_request) - \
					 sizeof(struct sclp_vt220_sccb))

/* Structures and data needed to register tty driver */
static struct tty_driver *sclp_vt220_driver;

static struct tty_port sclp_vt220_port;

/* Lock to protect internal data from concurrent access */
static spinlock_t sclp_vt220_lock;

/* List of empty pages to be used as write request buffers */
static struct list_head sclp_vt220_empty;

/* List of pending requests */
static struct list_head sclp_vt220_outqueue;

/* Suspend mode flag */
static int sclp_vt220_suspended;

/* Flag that output queue is currently running */
static int sclp_vt220_queue_running;

/* Timer used for delaying write requests to merge subsequent messages into
 * a single buffer */
static struct timer_list sclp_vt220_timer;

/* Pointer to current request buffer which has been partially filled but not
 * yet sent */
static struct sclp_vt220_request *sclp_vt220_current_request;

/* Number of characters in current request buffer */
static int sclp_vt220_buffered_chars;

/* Counter controlling core driver initialization. */
static int __initdata sclp_vt220_init_count;

/* Flag indicating that sclp_vt220_current_request should really
 * have been already queued but wasn't because the SCLP was processing
 * another buffer */
static int sclp_vt220_flush_later;

static void sclp_vt220_receiver_fn(struct evbuf_header *evbuf);
static void sclp_vt220_pm_event_fn(struct sclp_register *reg,
				   enum sclp_pm_event sclp_pm_event);
static int __sclp_vt220_emit(struct sclp_vt220_request *request);
static void sclp_vt220_emit_current(void);

/* Registration structure for SCLP output event buffers */
static struct sclp_register sclp_vt220_register = {
	.send_mask		= EVTYP_VT220MSG_MASK,
	.pm_event_fn		= sclp_vt220_pm_event_fn,
};

/* Registration structure for SCLP input event buffers */
static struct sclp_register sclp_vt220_register_input = {
	.receive_mask		= EVTYP_VT220MSG_MASK,
	.receiver_fn		= sclp_vt220_receiver_fn,
};
/*
 * Put provided request buffer back into queue and check emit pending
 * buffers if necessary.  Loops while __sclp_vt220_emit() fails for the
 * next queued request so failed requests get recycled immediately.
 * Stops the queue when it drains or while suspended, honours a
 * postponed flush, and finally wakes up waiting writers.
 */
static void
sclp_vt220_process_queue(struct sclp_vt220_request *request)
{
	unsigned long flags;
	void *page;

	do {
		/* Put buffer back to list of empty buffers */
		page = request->sclp_req.sccb;
		spin_lock_irqsave(&sclp_vt220_lock, flags);
		/* Move request from outqueue to empty queue */
		list_del(&request->list);
		list_add_tail((struct list_head *) page, &sclp_vt220_empty);
		/* Check if there is a pending buffer on the out queue. */
		request = NULL;
		if (!list_empty(&sclp_vt220_outqueue))
			request = list_entry(sclp_vt220_outqueue.next,
					     struct sclp_vt220_request, list);
		if (!request || sclp_vt220_suspended) {
			/* queue drained or suspended - mark it idle */
			sclp_vt220_queue_running = 0;
			spin_unlock_irqrestore(&sclp_vt220_lock, flags);
			break;
		}
		spin_unlock_irqrestore(&sclp_vt220_lock, flags);
	} while (__sclp_vt220_emit(request));
	if (request == NULL && sclp_vt220_flush_later)
		sclp_vt220_emit_current();
	tty_port_tty_wakeup(&sclp_vt220_port);
}
/* Maximum number of retries after an SCLP equipment check. */
#define SCLP_BUFFER_MAX_RETRY		1

/*
 * Callback through which the result of a write request is reported by the
 * SCLP.  Depending on the response code the request is either retired
 * (its page recycled via sclp_vt220_process_queue) or re-queued for a
 * limited number of retries.
 */
static void
sclp_vt220_callback(struct sclp_req *request, void *data)
{
	struct sclp_vt220_request *vt220_request;
	struct sclp_vt220_sccb *sccb;

	vt220_request = (struct sclp_vt220_request *) data;
	if (request->status == SCLP_REQ_FAILED) {
		sclp_vt220_process_queue(vt220_request);
		return;
	}
	sccb = (struct sclp_vt220_sccb *) vt220_request->sclp_req.sccb;

	/* Check SCLP response code and choose suitable action */
	switch (sccb->header.response_code) {
	case 0x0020 :
		/* normal completion */
		break;

	case 0x05f0: /* Target resource in improper state */
		break;

	case 0x0340: /* Contained SCLP equipment check */
		if (++vt220_request->retry_count > SCLP_BUFFER_MAX_RETRY)
			break;
		/* Remove processed buffers and requeue rest */
		if (sclp_remove_processed((struct sccb_header *) sccb) > 0) {
			/* Not all buffers were processed */
			sccb->header.response_code = 0x0000;
			vt220_request->sclp_req.status = SCLP_REQ_FILLED;
			if (sclp_add_request(request) == 0)
				return;
		}
		break;

	case 0x0040: /* SCLP equipment check */
		if (++vt220_request->retry_count > SCLP_BUFFER_MAX_RETRY)
			break;
		sccb->header.response_code = 0x0000;
		vt220_request->sclp_req.status = SCLP_REQ_FILLED;
		if (sclp_add_request(request) == 0)
			return;
		break;

	default:
		break;
	}
	sclp_vt220_process_queue(vt220_request);
}
/*
* Emit vt220 request buffer to SCLP. Return zero on success, non-zero
* otherwise.
*/
static int
__sclp_vt220_emit(struct sclp_vt220_request *request)
{
request->sclp_req.command = SCLP_CMDW_WRITE_EVENT_DATA;
request->sclp_req.status = SCLP_REQ_FILLED;
request->sclp_req.callback = sclp_vt220_callback;
request->sclp_req.callback_data = (void *) request;
return sclp_add_request(&request->sclp_req);
}
/*
 * Queue and emit current request.  Moves the partially filled current
 * buffer (if it has any content) onto the out queue, cancels the delay
 * timer, and starts transmission unless the queue is already running
 * or the driver is suspended.
 */
static void
sclp_vt220_emit_current(void)
{
	unsigned long flags;
	struct sclp_vt220_request *request;
	struct sclp_vt220_sccb *sccb;

	spin_lock_irqsave(&sclp_vt220_lock, flags);
	if (sclp_vt220_current_request) {
		sccb = (struct sclp_vt220_sccb *)
			sclp_vt220_current_request->sclp_req.sccb;
		/* Only emit buffers with content */
		if (sccb->header.length != sizeof(struct sclp_vt220_sccb)) {
			list_add_tail(&sclp_vt220_current_request->list,
				      &sclp_vt220_outqueue);
			sclp_vt220_current_request = NULL;
			if (timer_pending(&sclp_vt220_timer))
				del_timer(&sclp_vt220_timer);
		}
		sclp_vt220_flush_later = 0;
	}
	if (sclp_vt220_queue_running || sclp_vt220_suspended)
		goto out_unlock;
	if (list_empty(&sclp_vt220_outqueue))
		goto out_unlock;
	request = list_first_entry(&sclp_vt220_outqueue,
				   struct sclp_vt220_request, list);
	sclp_vt220_queue_running = 1;
	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
	/* On immediate failure recycle the request via the queue logic. */
	if (__sclp_vt220_emit(request))
		sclp_vt220_process_queue(request);
	return;
out_unlock:
	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
}
#define SCLP_NORMAL_WRITE	0x00

/*
 * Prepare a free page for use as a write request: the SCCB is placed
 * at the start of the page, the request bookkeeping at its very end.
 */
static struct sclp_vt220_request *
sclp_vt220_initialize_page(void *page)
{
	struct sclp_vt220_sccb *sccb = (struct sclp_vt220_sccb *) page;
	struct sclp_vt220_request *request;

	/* Place request structure at end of page */
	request = ((struct sclp_vt220_request *)
		   ((addr_t) page + PAGE_SIZE)) - 1;
	request->retry_count = 0;
	request->sclp_req.sccb = page;
	/* SCCB goes at start of page */
	memset(sccb, 0, sizeof(*sccb));
	sccb->header.length = sizeof(*sccb);
	sccb->header.function_code = SCLP_NORMAL_WRITE;
	sccb->header.response_code = 0x0000;
	sccb->evbuf.type = EVTYP_VT220MSG;
	sccb->evbuf.length = sizeof(struct evbuf_header);
	return request;
}
/* Number of payload bytes still free in the request's page. */
static inline unsigned int
sclp_vt220_space_left(struct sclp_vt220_request *request)
{
	struct sclp_vt220_sccb *sccb =
		(struct sclp_vt220_sccb *) request->sclp_req.sccb;

	return PAGE_SIZE - sizeof(struct sclp_vt220_request) -
	       sccb->header.length;
}
/* Number of payload bytes already stored in the request's event buffer. */
static inline unsigned int
sclp_vt220_chars_stored(struct sclp_vt220_request *request)
{
	struct sclp_vt220_sccb *sccb =
		(struct sclp_vt220_sccb *) request->sclp_req.sccb;

	return sccb->evbuf.length - sizeof(struct evbuf_header);
}
/*
 * Add msg to buffer associated with request. Return the number of characters
 * added.  With CONVERTLF set every 0x0a in the input is expanded to
 * 0x0a 0x0d, which may cause a short count even though raw space would
 * have sufficed.
 */
static int
sclp_vt220_add_msg(struct sclp_vt220_request *request,
		   const unsigned char *msg, int count, int convertlf)
{
	struct sclp_vt220_sccb *sccb;
	void *buffer;
	unsigned char c;
	int from;
	int to;

	if (count > sclp_vt220_space_left(request))
		count = sclp_vt220_space_left(request);
	if (count <= 0)
		return 0;

	sccb = (struct sclp_vt220_sccb *) request->sclp_req.sccb;
	buffer = (void *) ((addr_t) sccb + sccb->header.length);

	if (convertlf) {
		/* Perform Linefeed conversion (0x0a -> 0x0a 0x0d)*/
		for (from=0, to=0;
		     (from < count) && (to < sclp_vt220_space_left(request));
		     from++) {
			/* Retrieve character */
			c = msg[from];
			/* Perform conversion */
			if (c == 0x0a) {
				/* need room for both bytes of the pair */
				if (to + 1 < sclp_vt220_space_left(request)) {
					((unsigned char *) buffer)[to++] = c;
					((unsigned char *) buffer)[to++] = 0x0d;
				} else
					break;

			} else
				((unsigned char *) buffer)[to++] = c;
		}
		sccb->header.length += to;
		sccb->evbuf.length += to;
		/* number of *input* characters consumed */
		return from;
	} else {
		memcpy(buffer, (const void *) msg, count);
		sccb->header.length += count;
		sccb->evbuf.length += count;
		return count;
	}
}
/*
 * Emit buffer after having waited long enough for more data to arrive.
 * (Timer expiry handler for sclp_vt220_timer.)
 */
static void
sclp_vt220_timeout(unsigned long data)
{
	sclp_vt220_emit_current();
}
#define BUFFER_MAX_DELAY	HZ/20

/*
 * If sclp_console_drop is set, sacrifice the oldest queued buffer that
 * is not currently being transmitted so output can make progress.
 * Returns 1 if a buffer was recycled, 0 otherwise.
 */
static int
sclp_vt220_drop_buffer(void)
{
	struct sclp_vt220_request *victim;
	struct list_head *entry;

	if (!sclp_console_drop)
		return 0;
	entry = sclp_vt220_outqueue.next;
	if (sclp_vt220_queue_running)
		/* The head of the queue is in I/O - spare it */
		entry = entry->next;
	if (entry == &sclp_vt220_outqueue)
		return 0;
	list_del(entry);
	victim = list_entry(entry, struct sclp_vt220_request, list);
	list_add_tail((struct list_head *) victim->sclp_req.sccb,
		      &sclp_vt220_empty);
	return 1;
}
/*
 * Internal implementation of the write function. Write COUNT bytes of data
 * from memory at BUF
 * to the SCLP interface. In case that the data does not fit into the current
 * write buffer, emit the current one and allocate a new one. If there are no
 * more empty buffers available, wait until one gets emptied. If DO_SCHEDULE
 * is non-zero, the buffer will be scheduled for emitting after a timeout -
 * otherwise the user has to explicitly call the flush function.
 * A non-zero CONVERTLF parameter indicates that 0x0a characters in the message
 * buffer should be converted to 0x0a 0x0d. After completion, return the number
 * of bytes written.
 */
static int
__sclp_vt220_write(const unsigned char *buf, int count, int do_schedule,
		   int convertlf, int may_fail)
{
	unsigned long flags;
	void *page;
	int written;
	int overall_written;

	if (count <= 0)
		return 0;
	overall_written = 0;
	spin_lock_irqsave(&sclp_vt220_lock, flags);
	do {
		/* Create an sclp output buffer if none exists yet */
		if (sclp_vt220_current_request == NULL) {
			if (list_empty(&sclp_vt220_empty))
				sclp_console_full++;
			while (list_empty(&sclp_vt220_empty)) {
				if (may_fail || sclp_vt220_suspended)
					goto out;
				if (sclp_vt220_drop_buffer())
					break;
				/* drop the lock while busy-waiting */
				spin_unlock_irqrestore(&sclp_vt220_lock, flags);

				sclp_sync_wait();
				spin_lock_irqsave(&sclp_vt220_lock, flags);
			}
			page = (void *) sclp_vt220_empty.next;
			list_del((struct list_head *) page);
			sclp_vt220_current_request =
				sclp_vt220_initialize_page(page);
		}
		/* Try to write the string to the current request buffer */
		written = sclp_vt220_add_msg(sclp_vt220_current_request,
					     buf, count, convertlf);
		overall_written += written;
		if (written == count)
			break;
		/*
		 * Not all characters could be written to the current
		 * output buffer. Emit the buffer, create a new buffer
		 * and then output the rest of the string.
		 */
		spin_unlock_irqrestore(&sclp_vt220_lock, flags);
		sclp_vt220_emit_current();
		spin_lock_irqsave(&sclp_vt220_lock, flags);
		buf += written;
		count -= written;
	} while (count > 0);
	/* Setup timer to output current console buffer after some time */
	if (sclp_vt220_current_request != NULL &&
	    !timer_pending(&sclp_vt220_timer) && do_schedule) {
		sclp_vt220_timer.function = sclp_vt220_timeout;
		sclp_vt220_timer.data = 0UL;
		sclp_vt220_timer.expires = jiffies + BUFFER_MAX_DELAY;
		add_timer(&sclp_vt220_timer);
	}
out:
	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
	return overall_written;
}
/*
 * This routine is called by the kernel to write a series of
 * characters to the tty device. The characters may come from
 * user space or kernel space. This routine will return the
 * number of characters actually accepted for writing.
 */
static int
sclp_vt220_write(struct tty_struct *tty, const unsigned char *buf, int count)
{
	/* delayed emit, no LF conversion, may return a short count */
	return __sclp_vt220_write(buf, count, 1, 0, 1);
}
/* First data byte of an incoming event buffer selects its meaning: */
#define SCLP_VT220_SESSION_ENDED	0x01
#define SCLP_VT220_SESSION_STARTED	0x80
#define SCLP_VT220_SESSION_DATA		0x00

/*
 * Called by the SCLP to report incoming event buffers.  Only the
 * payload of SESSION_DATA buffers is forwarded to the tty flip buffer;
 * session start/end markers carry no terminal data and are dropped.
 */
static void
sclp_vt220_receiver_fn(struct evbuf_header *evbuf)
{
	char *buffer;
	unsigned int count;

	buffer = (char *) ((addr_t) evbuf + sizeof(struct evbuf_header));
	count = evbuf->length - sizeof(struct evbuf_header);

	switch (*buffer) {
	case SCLP_VT220_SESSION_ENDED:
	case SCLP_VT220_SESSION_STARTED:
		break;
	case SCLP_VT220_SESSION_DATA:
		/* Send input to line discipline */
		buffer++;
		count--;
		tty_insert_flip_string(&sclp_vt220_port, buffer, count);
		tty_flip_buffer_push(&sclp_vt220_port);
		break;
	}
}
/*
 * Open handler.  Only the first open attaches the tty to the port and
 * supplies a default 80x24 window size if none is set yet.
 */
static int
sclp_vt220_open(struct tty_struct *tty, struct file *filp)
{
	if (tty->count != 1)
		return 0;
	tty_port_tty_set(&sclp_vt220_port, tty);
	sclp_vt220_port.low_latency = 0;
	if (!tty->winsize.ws_row && !tty->winsize.ws_col) {
		tty->winsize.ws_row = 24;
		tty->winsize.ws_col = 80;
	}
	return 0;
}
/* Close handler: detach the tty from the port on the last close only. */
static void
sclp_vt220_close(struct tty_struct *tty, struct file *filp)
{
	if (tty->count != 1)
		return;
	tty_port_tty_set(&sclp_vt220_port, NULL);
}
/*
 * This routine is called by the kernel to write a single
 * character to the tty device. If the kernel uses this routine,
 * it must call the flush_chars() routine (if defined) when it is
 * done stuffing characters into the driver.
 */
static int
sclp_vt220_put_char(struct tty_struct *tty, unsigned char ch)
{
	/* no scheduling, no LF conversion, may fail (returns 0 then) */
	return __sclp_vt220_write(&ch, 1, 0, 0, 1);
}
/*
 * flush_chars tty operation.  If the out queue is busy the emit is
 * postponed; sclp_vt220_process_queue() will issue it once the queue
 * drains.
 */
static void
sclp_vt220_flush_chars(struct tty_struct *tty)
{
	if (sclp_vt220_queue_running)
		sclp_vt220_flush_later = 1;
	else
		sclp_vt220_emit_current();
}
/*
* This routine returns the numbers of characters the tty driver
* will accept for queuing to be written. This number is subject
* to change as output buffers get emptied, or if the output flow
* control is acted.
*/
static int
sclp_vt220_write_room(struct tty_struct *tty)
{
unsigned long flags;
struct list_head *l;
int count;
spin_lock_irqsave(&sclp_vt220_lock, flags);
count = 0;
if (sclp_vt220_current_request != NULL)
count = sclp_vt220_space_left(sclp_vt220_current_request);
list_for_each(l, &sclp_vt220_empty)
count += SCLP_VT220_MAX_CHARS_PER_BUFFER;
spin_unlock_irqrestore(&sclp_vt220_lock, flags);
return count;
}
/*
 * Report the number of characters buffered by the driver: the current
 * (unfinished) buffer plus everything queued but not yet completed.
 */
static int
sclp_vt220_chars_in_buffer(struct tty_struct *tty)
{
	unsigned long flags;
	struct sclp_vt220_request *req;
	struct list_head *pos;
	int total = 0;

	spin_lock_irqsave(&sclp_vt220_lock, flags);
	if (sclp_vt220_current_request)
		total = sclp_vt220_chars_stored(sclp_vt220_current_request);
	list_for_each(pos, &sclp_vt220_outqueue) {
		req = list_entry(pos, struct sclp_vt220_request, list);
		total += sclp_vt220_chars_stored(req);
	}
	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
	return total;
}
/*
 * flush_buffer tty operation: queue the partially filled current
 * buffer for output.  Note: this does not wait for completion; the
 * blocking variant is __sclp_vt220_flush_buffer() further below.
 */
static void
sclp_vt220_flush_buffer(struct tty_struct *tty)
{
	sclp_vt220_emit_current();
}
/* Return every page on the empty list to the page allocator. */
static void __init __sclp_vt220_free_pages(void)
{
	struct list_head *cur, *next;

	list_for_each_safe(cur, next, &sclp_vt220_empty) {
		list_del(cur);
		free_page((unsigned long) cur);
	}
}
/* Release memory and unregister from sclp core.  Controlled by init
 * counting - only the last invoker actually tears everything down. */
static void __init __sclp_vt220_cleanup(void)
{
	if (--sclp_vt220_init_count != 0)
		return;
	sclp_unregister(&sclp_vt220_register);
	__sclp_vt220_free_pages();
	tty_port_destroy(&sclp_vt220_port);
}
/* Allocate buffer pages and register with sclp core. Controlled by init
 * counting - only the first invoker will actually perform these actions;
 * later invocations just bump the counter.  On failure the counter is
 * rolled back and all partial allocations are undone. */
static int __init __sclp_vt220_init(int num_pages)
{
	void *page;
	int i;
	int rc;

	sclp_vt220_init_count++;
	if (sclp_vt220_init_count != 1)
		return 0;
	spin_lock_init(&sclp_vt220_lock);
	INIT_LIST_HEAD(&sclp_vt220_empty);
	INIT_LIST_HEAD(&sclp_vt220_outqueue);
	init_timer(&sclp_vt220_timer);
	tty_port_init(&sclp_vt220_port);
	sclp_vt220_current_request = NULL;
	sclp_vt220_buffered_chars = 0;
	sclp_vt220_flush_later = 0;

	/* Allocate pages for output buffering */
	rc = -ENOMEM;
	for (i = 0; i < num_pages; i++) {
		page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
		if (!page)
			goto out;
		list_add_tail(page, &sclp_vt220_empty);
	}
	rc = sclp_register(&sclp_vt220_register);
out:
	if (rc) {
		/* undo everything done above */
		__sclp_vt220_free_pages();
		sclp_vt220_init_count--;
		tty_port_destroy(&sclp_vt220_port);
	}
	return rc;
}
/* tty operations implemented by the SCLP VT220 terminal driver. */
static const struct tty_operations sclp_vt220_ops = {
	.open = sclp_vt220_open,
	.close = sclp_vt220_close,
	.write = sclp_vt220_write,
	.put_char = sclp_vt220_put_char,
	.flush_chars = sclp_vt220_flush_chars,
	.write_room = sclp_vt220_write_room,
	.chars_in_buffer = sclp_vt220_chars_in_buffer,
	.flush_buffer = sclp_vt220_flush_buffer,
};
/*
 * Register driver with SCLP and Linux and initialize internal tty structures.
 * Error paths unwind in reverse order: input registration, tty driver,
 * core init, driver allocation.
 */
static int __init sclp_vt220_tty_init(void)
{
	struct tty_driver *driver;
	int rc;

	/* Note: we're not testing for CONSOLE_IS_SCLP here to preserve
	 * symmetry between VM and LPAR systems regarding ttyS1. */
	driver = alloc_tty_driver(1);
	if (!driver)
		return -ENOMEM;
	rc = __sclp_vt220_init(MAX_KMEM_PAGES);
	if (rc)
		goto out_driver;

	driver->driver_name = SCLP_VT220_DRIVER_NAME;
	driver->name = SCLP_VT220_DEVICE_NAME;
	driver->major = SCLP_VT220_MAJOR;
	driver->minor_start = SCLP_VT220_MINOR;
	driver->type = TTY_DRIVER_TYPE_SYSTEM;
	driver->subtype = SYSTEM_TYPE_TTY;
	driver->init_termios = tty_std_termios;
	driver->flags = TTY_DRIVER_REAL_RAW;
	tty_set_operations(driver, &sclp_vt220_ops);
	tty_port_link_device(&sclp_vt220_port, driver, 0);

	rc = tty_register_driver(driver);
	if (rc)
		goto out_init;
	rc = sclp_register(&sclp_vt220_register_input);
	if (rc)
		goto out_reg;
	sclp_vt220_driver = driver;
	return 0;

out_reg:
	tty_unregister_driver(driver);
out_init:
	__sclp_vt220_cleanup();
out_driver:
	put_tty_driver(driver);
	return rc;
}
__initcall(sclp_vt220_tty_init);
/*
 * Blocking flush: queue the current buffer, cancel the delay timer and
 * busy-wait (sclp_sync_wait) until the out queue has stopped running.
 * Used on suspend and from the panic/reboot notifier.
 */
static void __sclp_vt220_flush_buffer(void)
{
	unsigned long flags;

	sclp_vt220_emit_current();
	spin_lock_irqsave(&sclp_vt220_lock, flags);
	if (timer_pending(&sclp_vt220_timer))
		del_timer(&sclp_vt220_timer);
	while (sclp_vt220_queue_running) {
		/* drop the lock while waiting for I/O to complete */
		spin_unlock_irqrestore(&sclp_vt220_lock, flags);
		sclp_sync_wait();
		spin_lock_irqsave(&sclp_vt220_lock, flags);
	}
	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
}
/*
* Resume console: If there are cached messages, emit them.
*/
/* Clear the suspend flag under the lock, then push out anything that
 * accumulated in the current buffer while the console was suspended. */
static void sclp_vt220_resume(void)
{
	unsigned long iflags;

	spin_lock_irqsave(&sclp_vt220_lock, iflags);
	sclp_vt220_suspended = 0;
	spin_unlock_irqrestore(&sclp_vt220_lock, iflags);
	sclp_vt220_emit_current();
}
/*
* Suspend console: Set suspend flag and flush console
*/
static void sclp_vt220_suspend(void)
{
	unsigned long flags;

	/* Mark the console suspended under the lock, then synchronously
	 * drain all pending output before power management proceeds. */
	spin_lock_irqsave(&sclp_vt220_lock, flags);
	sclp_vt220_suspended = 1;
	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
	__sclp_vt220_flush_buffer();
}
/* Power management event dispatcher: freeze suspends and flushes the
 * console, thaw/restore re-enable it. */
static void sclp_vt220_pm_event_fn(struct sclp_register *reg,
				   enum sclp_pm_event sclp_pm_event)
{
	if (sclp_pm_event == SCLP_PM_EVENT_FREEZE)
		sclp_vt220_suspend();
	else if (sclp_pm_event == SCLP_PM_EVENT_RESTORE ||
		 sclp_pm_event == SCLP_PM_EVENT_THAW)
		sclp_vt220_resume();
}
#ifdef CONFIG_SCLP_VT220_CONSOLE
/* printk console write callback: forward to the common VT220 write path.
 * NOTE(review): the three numeric arguments (1, 1, 0) are flags defined
 * by __sclp_vt220_write(), which is not visible here -- confirm their
 * semantics (buffering/scheduling behavior) against that function. */
static void
sclp_vt220_con_write(struct console *con, const char *buf, unsigned int count)
{
	__sclp_vt220_write((const unsigned char *) buf, count, 1, 1, 0);
}
/* console->device callback: report the tty driver (and index 0) that
 * corresponds to this console, for /dev/console redirection. */
static struct tty_driver *
sclp_vt220_con_device(struct console *c, int *index)
{
	*index = 0;
	return sclp_vt220_driver;
}
/*
 * Panic/reboot notifier callback: synchronously flush buffered console
 * output so the final messages reach the SCLP console before the
 * system goes down.
 */
static int
sclp_vt220_notify(struct notifier_block *self,
		  unsigned long event, void *data)
{
	__sclp_vt220_flush_buffer();
	return NOTIFY_OK;
}
/* Flush console output on panic ... */
static struct notifier_block on_panic_nb = {
	.notifier_call = sclp_vt220_notify,
	.priority = 1,
};

/* ... and on reboot. */
static struct notifier_block on_reboot_nb = {
	.notifier_call = sclp_vt220_notify,
	.priority = 1,
};
/* Structure needed to register with printk */
static struct console sclp_vt220_console =
{
	.name = SCLP_VT220_CONSOLE_NAME,
	.write = sclp_vt220_con_write,
	.device = sclp_vt220_con_device,
	.flags = CON_PRINTBUFFER,	/* replay the log buffer on registration */
	.index = SCLP_VT220_CONSOLE_INDEX
};
/* Early console setup: allocate output buffers and register the console
 * plus panic/reboot flush hooks. Runs from console_initcall. */
static int __init
sclp_vt220_con_init(void)
{
	int rc;

	rc = __sclp_vt220_init(sclp_console_pages);
	if (rc)
		return rc;
	/* Attach linux console */
	atomic_notifier_chain_register(&panic_notifier_list, &on_panic_nb);
	register_reboot_notifier(&on_reboot_nb);
	register_console(&sclp_vt220_console);
	return 0;
}
console_initcall(sclp_vt220_con_init);
#endif /* CONFIG_SCLP_VT220_CONSOLE */

369
drivers/s390/char/tape.h Normal file
View file

@ -0,0 +1,369 @@
/*
* tape device driver for 3480/3490E/3590 tapes.
*
* S390 and zSeries version
* Copyright IBM Corp. 2001, 2009
* Author(s): Carsten Otte <cotte@de.ibm.com>
* Tuan Ngo-Anh <ngoanh@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
* Stefan Bader <shbader@de.ibm.com>
*/
#ifndef _TAPE_H
#define _TAPE_H
#include <asm/ccwdev.h>
#include <asm/debug.h>
#include <asm/idals.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mtio.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
struct gendisk;
/*
* Define DBF_LIKE_HELL for lots of messages in the debug feature.
*/
#define DBF_LIKE_HELL
#ifdef DBF_LIKE_HELL
#define DBF_LH(level, str, ...) \
do { \
debug_sprintf_event(TAPE_DBF_AREA, level, str, ## __VA_ARGS__); \
} while (0)
#else
#define DBF_LH(level, str, ...) do {} while(0)
#endif
/*
* macros s390 debug feature (dbf)
*/
#define DBF_EVENT(d_level, d_str...) \
do { \
debug_sprintf_event(TAPE_DBF_AREA, d_level, d_str); \
} while (0)
#define DBF_EXCEPTION(d_level, d_str...) \
do { \
debug_sprintf_exception(TAPE_DBF_AREA, d_level, d_str); \
} while (0)
#define TAPE_VERSION_MAJOR 2
#define TAPE_VERSION_MINOR 0
#define TAPE_MAGIC "tape"
#define TAPE_MINORS_PER_DEV 2 /* two minors per device */
#define TAPEBLOCK_HSEC_SIZE 2048
#define TAPEBLOCK_HSEC_S2B 2
#define TAPEBLOCK_RETRIES 5
enum tape_medium_state {
MS_UNKNOWN,
MS_LOADED,
MS_UNLOADED,
MS_SIZE
};
enum tape_state {
TS_UNUSED=0,
TS_IN_USE,
TS_BLKUSE,
TS_INIT,
TS_NOT_OPER,
TS_SIZE
};
enum tape_op {
TO_BLOCK, /* Block read */
TO_BSB, /* Backward space block */
TO_BSF, /* Backward space filemark */
TO_DSE, /* Data security erase */
TO_FSB, /* Forward space block */
TO_FSF, /* Forward space filemark */
TO_LBL, /* Locate block label */
TO_NOP, /* No operation */
TO_RBA, /* Read backward */
TO_RBI, /* Read block information */
TO_RFO, /* Read forward */
TO_REW, /* Rewind tape */
TO_RUN, /* Rewind and unload tape */
TO_WRI, /* Write block */
TO_WTM, /* Write tape mark */
TO_MSEN, /* Medium sense */
TO_LOAD, /* Load tape */
TO_READ_CONFIG, /* Read configuration data */
TO_READ_ATTMSG, /* Read attention message */
TO_DIS, /* Tape display */
TO_ASSIGN, /* Assign tape to channel path */
TO_UNASSIGN, /* Unassign tape from channel path */
TO_CRYPT_ON, /* Enable encrpytion */
TO_CRYPT_OFF, /* Disable encrpytion */
TO_KEKL_SET, /* Set KEK label */
TO_KEKL_QUERY, /* Query KEK label */
TO_RDC, /* Read device characteristics */
TO_SIZE, /* #entries in tape_op_t */
};
/* Forward declaration */
struct tape_device;
/* tape_request->status can be: */
enum tape_request_status {
TAPE_REQUEST_INIT, /* request is ready to be processed */
TAPE_REQUEST_QUEUED, /* request is queued to be processed */
TAPE_REQUEST_IN_IO, /* request is currently in IO */
TAPE_REQUEST_DONE, /* request is completed. */
TAPE_REQUEST_CANCEL, /* request should be canceled. */
TAPE_REQUEST_LONG_BUSY, /* request has to be restarted after long busy */
};
/* Tape CCW request */
struct tape_request {
struct list_head list; /* list head for request queueing. */
struct tape_device *device; /* tape device of this request */
struct ccw1 *cpaddr; /* address of the channel program. */
void *cpdata; /* pointer to ccw data. */
enum tape_request_status status;/* status of this request */
int options; /* options for execution. */
int retries; /* retry counter for error recovery. */
int rescnt; /* residual count from devstat. */
/* Callback for delivering final status. */
void (*callback)(struct tape_request *, void *);
void *callback_data;
enum tape_op op;
int rc;
};
/* Function type for magnetic tape commands */
typedef int (*tape_mtop_fn)(struct tape_device *, int);
/* Size of the array containing the mtops for a discipline */
#define TAPE_NR_MTOPS (MTMKPART+1)
/* Tape Discipline */
struct tape_discipline {
struct module *owner;
int (*setup_device)(struct tape_device *);
void (*cleanup_device)(struct tape_device *);
int (*irq)(struct tape_device *, struct tape_request *, struct irb *);
struct tape_request *(*read_block)(struct tape_device *, size_t);
struct tape_request *(*write_block)(struct tape_device *, size_t);
void (*process_eov)(struct tape_device*);
/* ioctl function for additional ioctls. */
int (*ioctl_fn)(struct tape_device *, unsigned int, unsigned long);
/* Array of tape commands with TAPE_NR_MTOPS entries */
tape_mtop_fn *mtop_array;
};
/*
* The discipline irq function either returns an error code (<0) which
* means that the request has failed with an error or one of the following:
*/
#define TAPE_IO_SUCCESS 0 /* request successful */
#define TAPE_IO_PENDING 1 /* request still running */
#define TAPE_IO_RETRY 2 /* retry to current request */
#define TAPE_IO_STOP 3 /* stop the running request */
#define TAPE_IO_LONG_BUSY 4 /* delay the running request */
/* Char Frontend Data */
struct tape_char_data {
struct idal_buffer *idal_buf; /* idal buffer for user char data */
int block_size; /* of size block_size. */
};
/* Tape Info */
struct tape_device {
/* entry in tape_device_list */
struct list_head node;
int cdev_id;
struct ccw_device * cdev;
struct tape_class_device * nt;
struct tape_class_device * rt;
/* Device mutex to serialize tape commands. */
struct mutex mutex;
/* Device discipline information. */
struct tape_discipline * discipline;
void * discdata;
/* Generic status flags */
long tape_generic_status;
/* Device state information. */
wait_queue_head_t state_change_wq;
enum tape_state tape_state;
enum tape_medium_state medium_state;
unsigned char * modeset_byte;
/* Reference count. */
atomic_t ref_count;
/* Request queue. */
struct list_head req_queue;
/* Request wait queue. */
wait_queue_head_t wait_queue;
/* Each tape device has (currently) two minor numbers. */
int first_minor;
/* Number of tapemarks required for correct termination. */
int required_tapemarks;
/* Block ID of the BOF */
unsigned int bof;
/* Character device frontend data */
struct tape_char_data char_data;
/* Function to start or stop the next request later. */
struct delayed_work tape_dnr;
/* Timer for long busy */
struct timer_list lb_timeout;
};
/* Externals from tape_core.c */
extern struct tape_request *tape_alloc_request(int cplength, int datasize);
extern void tape_free_request(struct tape_request *);
extern int tape_do_io(struct tape_device *, struct tape_request *);
extern int tape_do_io_async(struct tape_device *, struct tape_request *);
extern int tape_do_io_interruptible(struct tape_device *, struct tape_request *);
extern int tape_cancel_io(struct tape_device *, struct tape_request *);
void tape_hotplug_event(struct tape_device *, int major, int action);
/* Run a request synchronously and release it regardless of outcome;
 * returns the I/O result code. */
static inline int
tape_do_io_free(struct tape_device *device, struct tape_request *request)
{
	int result = tape_do_io(device, request);

	tape_free_request(request);
	return result;
}
/* Fire-and-forget I/O: start the request asynchronously and have the
 * completion callback free it. */
static inline void
tape_do_io_async_free(struct tape_device *device, struct tape_request *request)
{
	/* NOTE(review): tape_free_request(struct tape_request *) is cast
	 * to the two-argument callback type void (*)(struct tape_request *,
	 * void *). Calling through a mismatched function-pointer type is
	 * undefined behavior in standard C (works on the s390 ABI because
	 * the extra argument is ignored) -- a tiny wrapper would be safer. */
	request->callback = (void *) tape_free_request;
	request->callback_data = NULL;
	tape_do_io_async(device, request);
}
extern int tape_oper_handler(int irq, int status);
extern void tape_noper_handler(int irq, int status);
extern int tape_open(struct tape_device *);
extern int tape_release(struct tape_device *);
extern int tape_mtop(struct tape_device *, int, int);
extern void tape_state_set(struct tape_device *, enum tape_state);
extern int tape_generic_online(struct tape_device *, struct tape_discipline *);
extern int tape_generic_offline(struct ccw_device *);
extern int tape_generic_pm_suspend(struct ccw_device *);
/* Externals from tape_devmap.c */
extern int tape_generic_probe(struct ccw_device *);
extern void tape_generic_remove(struct ccw_device *);
extern struct tape_device *tape_find_device(int devindex);
extern struct tape_device *tape_get_device(struct tape_device *);
extern void tape_put_device(struct tape_device *);
/* Externals from tape_char.c */
extern int tapechar_init(void);
extern void tapechar_exit(void);
extern int tapechar_setup_device(struct tape_device *);
extern void tapechar_cleanup_device(struct tape_device *);
/* tape initialisation functions */
#ifdef CONFIG_PROC_FS
extern void tape_proc_init (void);
extern void tape_proc_cleanup (void);
#else
static inline void tape_proc_init (void) {;}
static inline void tape_proc_cleanup (void) {;}
#endif
/* a function for dumping device sense info */
extern void tape_dump_sense_dbf(struct tape_device *, struct tape_request *,
struct irb *);
/* functions for handling the status of a device */
extern void tape_med_state_set(struct tape_device *, enum tape_medium_state);
/* The debug area */
extern debug_info_t *TAPE_DBF_AREA;
/* functions for building ccws */
/* Fill one CCW with command chaining (CC) set and return a pointer to
 * the next free CCW slot. */
static inline struct ccw1 *
tape_ccw_cc(struct ccw1 *ccw, __u8 cmd_code, __u16 memsize, void *cda)
{
	ccw->cmd_code = cmd_code;
	ccw->count = memsize;
	ccw->cda = (__u32)(addr_t) cda;
	ccw->flags = CCW_FLAG_CC;
	return ccw + 1;
}
/* Fill the terminating CCW of a chain (no chaining flags) and return a
 * pointer to the next slot. */
static inline struct ccw1 *
tape_ccw_end(struct ccw1 *ccw, __u8 cmd_code, __u16 memsize, void *cda)
{
	ccw->cmd_code = cmd_code;
	ccw->count = memsize;
	ccw->cda = (__u32)(addr_t) cda;
	ccw->flags = 0;
	return ccw + 1;
}
/* Build a terminating, data-less CCW. The channel still requires a valid
 * data address, so cda is pointed at the cmd_code byte with count 0. */
static inline struct ccw1 *
tape_ccw_cmd(struct ccw1 *ccw, __u8 cmd_code)
{
	ccw->cmd_code = cmd_code;
	ccw->flags = 0;
	ccw->count = 0;
	ccw->cda = (__u32)(addr_t) &ccw->cmd_code;
	return ccw + 1;
}
/* Emit 'count' identical chained data-less CCWs and return a pointer to
 * the slot after the last one written (for count <= 0, ccw unchanged). */
static inline struct ccw1 *
tape_ccw_repeat(struct ccw1 *ccw, __u8 cmd_code, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		ccw->cmd_code = cmd_code;
		ccw->flags = CCW_FLAG_CC;
		ccw->count = 0;
		ccw->cda = (__u32)(addr_t) &ccw->cmd_code;
		ccw++;
	}
	return ccw;
}
/* Chained CCW whose data address is taken from an idal buffer. */
static inline struct ccw1 *
tape_ccw_cc_idal(struct ccw1 *ccw, __u8 cmd_code, struct idal_buffer *idal)
{
	ccw->cmd_code = cmd_code;
	ccw->flags = CCW_FLAG_CC;
	idal_buffer_set_cda(idal, ccw);
	/* NOTE(review): 'return ccw++' returns the CURRENT ccw (post-
	 * increment value is discarded), unlike tape_ccw_cc() which
	 * returns ccw + 1. Safe only if callers never append another CCW
	 * via this return value -- verify before relying on it. */
	return ccw++;
}
/* Terminating CCW whose data address is taken from an idal buffer. */
static inline struct ccw1 *
tape_ccw_end_idal(struct ccw1 *ccw, __u8 cmd_code, struct idal_buffer *idal)
{
	ccw->cmd_code = cmd_code;
	ccw->flags = 0;
	idal_buffer_set_cda(idal, ccw);
	/* NOTE(review): returns the current ccw, not ccw + 1 -- same
	 * post-increment quirk as tape_ccw_cc_idal(); see note there. */
	return ccw++;
}
/* Global vars */
extern const char *tape_state_verbose[];
extern const char *tape_op_verbose[];
#endif /* for ifdef tape.h */

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,174 @@
/*
* tape device discipline for 3590 tapes.
*
* Copyright IBM Corp. 2001, 2006
* Author(s): Stefan Bader <shbader@de.ibm.com>
* Michael Holzheu <holzheu@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
#ifndef _TAPE_3590_H
#define _TAPE_3590_H
#define MEDIUM_SENSE 0xc2
#define READ_PREVIOUS 0x0a
#define MODE_SENSE 0xcf
#define PERFORM_SS_FUNC 0x77
#define READ_SS_DATA 0x3e
#define PREP_RD_SS_DATA 0x18
#define RD_ATTMSG 0x3
#define SENSE_BRA_PER 0
#define SENSE_BRA_CONT 1
#define SENSE_BRA_RE 2
#define SENSE_BRA_DRE 3
#define SENSE_FMT_LIBRARY 0x23
#define SENSE_FMT_UNSOLICITED 0x40
#define SENSE_FMT_COMMAND_REJ 0x41
#define SENSE_FMT_COMMAND_EXEC0 0x50
#define SENSE_FMT_COMMAND_EXEC1 0x51
#define SENSE_FMT_EVENT0 0x60
#define SENSE_FMT_EVENT1 0x61
#define SENSE_FMT_MIM 0x70
#define SENSE_FMT_SIM 0x71
#define MSENSE_UNASSOCIATED 0x00
#define MSENSE_ASSOCIATED_MOUNT 0x01
#define MSENSE_ASSOCIATED_UMOUNT 0x02
#define MSENSE_CRYPT_MASK 0x00000010
#define TAPE_3590_MAX_MSG 0xb0
/* Datatypes */
/* Discipline-private data, hung off tape_device->discdata (see the
 * accessor macros below). */
struct tape_3590_disc_data {
	struct tape390_crypt_info crypt_info;
	int read_back_op;
};

/* Accessors for the 3590 discipline data attached to a tape_device. */
#define TAPE_3590_CRYPT_INFO(device) \
	((struct tape_3590_disc_data*)(device->discdata))->crypt_info
#define TAPE_3590_READ_BACK_OP(device) \
	((struct tape_3590_disc_data*)(device->discdata))->read_back_op
struct tape_3590_sense {
unsigned int command_rej:1;
unsigned int interv_req:1;
unsigned int bus_out_check:1;
unsigned int eq_check:1;
unsigned int data_check:1;
unsigned int overrun:1;
unsigned int def_unit_check:1;
unsigned int assgnd_elsew:1;
unsigned int locate_fail:1;
unsigned int inst_online:1;
unsigned int reserved:1;
unsigned int blk_seq_err:1;
unsigned int begin_part:1;
unsigned int wr_mode:1;
unsigned int wr_prot:1;
unsigned int not_cap:1;
unsigned int bra:2;
unsigned int lc:3;
unsigned int vlf_active:1;
unsigned int stm:1;
unsigned int med_pos:1;
unsigned int rac:8;
unsigned int rc_rqc:16;
unsigned int mc:8;
unsigned int sense_fmt:8;
union {
struct {
unsigned int emc:4;
unsigned int smc:4;
unsigned int sev:2;
unsigned int reserved:6;
unsigned int md:8;
unsigned int refcode:8;
unsigned int mid:16;
unsigned int mp:16;
unsigned char volid[6];
unsigned int fid:8;
} f70;
struct {
unsigned int emc:4;
unsigned int smc:4;
unsigned int sev:2;
unsigned int reserved1:5;
unsigned int mdf:1;
unsigned char md[3];
unsigned int simid:8;
unsigned int uid:16;
unsigned int refcode1:16;
unsigned int refcode2:16;
unsigned int refcode3:16;
unsigned int reserved2:8;
} f71;
unsigned char data[14];
} fmt;
unsigned char pad[10];
} __attribute__ ((packed));
struct tape_3590_med_sense {
unsigned int macst:4;
unsigned int masst:4;
char pad1[7];
unsigned int flags;
char pad2[116];
} __attribute__ ((packed));
struct tape_3590_rdc_data {
char data[64];
} __attribute__ ((packed));
/* Datastructures for 3592 encryption support */
struct tape3592_kekl {
__u8 flags;
char label[64];
} __attribute__ ((packed));
struct tape3592_kekl_pair {
__u8 count;
struct tape3592_kekl kekl[2];
} __attribute__ ((packed));
struct tape3592_kekl_query_data {
__u16 len;
__u8 fmt;
__u8 mc;
__u32 id;
__u8 flags;
struct tape3592_kekl_pair kekls;
char reserved[116];
} __attribute__ ((packed));
struct tape3592_kekl_query_order {
__u8 code;
__u8 flags;
char reserved1[2];
__u8 max_count;
char reserved2[35];
} __attribute__ ((packed));
struct tape3592_kekl_set_order {
__u8 code;
__u8 flags;
char reserved1[2];
__u8 op;
struct tape3592_kekl_pair kekls;
char reserved2[120];
} __attribute__ ((packed));
#endif /* _TAPE_3590_H */

View file

@ -0,0 +1,500 @@
/*
* character device frontend for tape device driver
*
* S390 and zSeries version
* Copyright IBM Corp. 2001, 2006
* Author(s): Carsten Otte <cotte@de.ibm.com>
* Michael Holzheu <holzheu@de.ibm.com>
* Tuan Ngo-Anh <ngoanh@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
#define KMSG_COMPONENT "tape"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/types.h>
#include <linux/proc_fs.h>
#include <linux/mtio.h>
#include <linux/compat.h>
#include <asm/uaccess.h>
#define TAPE_DBF_AREA tape_core_dbf
#include "tape.h"
#include "tape_std.h"
#include "tape_class.h"
#define TAPECHAR_MAJOR 0 /* get dynamic major */
/*
* file operation structure for tape character frontend
*/
static ssize_t tapechar_read(struct file *, char __user *, size_t, loff_t *);
static ssize_t tapechar_write(struct file *, const char __user *, size_t, loff_t *);
static int tapechar_open(struct inode *,struct file *);
static int tapechar_release(struct inode *,struct file *);
static long tapechar_ioctl(struct file *, unsigned int, unsigned long);
#ifdef CONFIG_COMPAT
static long tapechar_compat_ioctl(struct file *, unsigned int, unsigned long);
#endif
/* File operations for the tape character device nodes. */
static const struct file_operations tape_fops =
{
	.owner = THIS_MODULE,
	.read = tapechar_read,
	.write = tapechar_write,
	.unlocked_ioctl = tapechar_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = tapechar_compat_ioctl,
#endif
	.open = tapechar_open,
	.release = tapechar_release,
	.llseek = no_llseek,	/* tapes are not seekable via llseek */
};
static int tapechar_major = TAPECHAR_MAJOR;
/*
* This function is called for every new tapedevice
*/
int
tapechar_setup_device(struct tape_device * device)
{
char device_name[20];
sprintf(device_name, "ntibm%i", device->first_minor / 2);
device->nt = register_tape_dev(
&device->cdev->dev,
MKDEV(tapechar_major, device->first_minor),
&tape_fops,
device_name,
"non-rewinding"
);
device_name[0] = 'r';
device->rt = register_tape_dev(
&device->cdev->dev,
MKDEV(tapechar_major, device->first_minor + 1),
&tape_fops,
device_name,
"rewinding"
);
return 0;
}
/* Tear down both character device nodes created by tapechar_setup_device()
 * (rewinding first, then non-rewinding); unregister_tape_dev tolerates
 * NULL/ERR_PTR values. */
void
tapechar_cleanup_device(struct tape_device *device)
{
	unregister_tape_dev(&device->cdev->dev, device->rt);
	device->rt = NULL;
	unregister_tape_dev(&device->cdev->dev, device->nt);
	device->nt = NULL;
}
/* Make sure the device's idal buffer exists and matches block_size,
 * (re)allocating it if necessary. Returns 0 or a negative error code. */
static int
tapechar_check_idalbuffer(struct tape_device *device, size_t block_size)
{
	struct idal_buffer *old = device->char_data.idal_buf;
	struct idal_buffer *fresh;

	/* Nothing to do if the current buffer already fits. */
	if (old != NULL && old->size == block_size)
		return 0;
	if (block_size > MAX_BLOCKSIZE) {
		DBF_EVENT(3, "Invalid blocksize (%zd > %d)\n",
			  block_size, MAX_BLOCKSIZE);
		return -EINVAL;
	}
	/* The current idal buffer is not correct. Allocate a new one. */
	fresh = idal_buffer_alloc(block_size, 0);
	if (IS_ERR(fresh))
		return -ENOMEM;
	if (old != NULL)
		idal_buffer_free(old);
	device->char_data.idal_buf = fresh;
	return 0;
}
/*
 * Tape device read function: read one tape block of up to 'count' bytes
 * into the user buffer. Returns the number of bytes read or a negative
 * error code.
 */
static ssize_t
tapechar_read(struct file *filp, char __user *data, size_t count, loff_t *ppos)
{
	struct tape_device *device;
	struct tape_request *request;
	size_t block_size;
	int rc;

	DBF_EVENT(6, "TCHAR:read\n");
	device = (struct tape_device *) filp->private_data;
	/*
	 * If the tape isn't terminated yet, do it now. And since we then
	 * are at the end of the tape there wouldn't be anything to read
	 * anyways. So we return immediately.
	 */
	if(device->required_tapemarks) {
		return tape_std_terminate_write(device);
	}
	/* Find out block size to use */
	if (device->char_data.block_size != 0) {
		/* Fixed block mode: caller's buffer must hold a full block. */
		if (count < device->char_data.block_size) {
			DBF_EVENT(3, "TCHAR:read smaller than block "
				  "size was requested\n");
			return -EINVAL;
		}
		block_size = device->char_data.block_size;
	} else {
		/* Variable block mode: use the requested byte count. */
		block_size = count;
	}
	rc = tapechar_check_idalbuffer(device, block_size);
	if (rc)
		return rc;
	DBF_EVENT(6, "TCHAR:nbytes: %lx\n", block_size);
	/* Let the discipline build the ccw chain. */
	request = device->discipline->read_block(device, block_size);
	if (IS_ERR(request))
		return PTR_ERR(request);
	/* Execute it. */
	rc = tape_do_io(device, request);
	if (rc == 0) {
		/* rescnt is the residual count: bytes NOT transferred. */
		rc = block_size - request->rescnt;
		DBF_EVENT(6, "TCHAR:rbytes: %x\n", rc);
		/* Copy data from idal buffer to user space. */
		if (idal_buffer_to_user(device->char_data.idal_buf,
					data, rc) != 0)
			rc = -EFAULT;
	}
	tape_free_request(request);
	return rc;
}
/*
 * Tape device write function: write 'count' bytes as a sequence of
 * fixed-size blocks (or one variable-size block). Returns the number of
 * bytes written or a negative error code.
 */
static ssize_t
tapechar_write(struct file *filp, const char __user *data, size_t count, loff_t *ppos)
{
	struct tape_device *device;
	struct tape_request *request;
	size_t block_size;
	size_t written;
	int nblocks;
	int i, rc;

	DBF_EVENT(6, "TCHAR:write\n");
	device = (struct tape_device *) filp->private_data;
	/* Find out block size and number of blocks */
	if (device->char_data.block_size != 0) {
		if (count < device->char_data.block_size) {
			DBF_EVENT(3, "TCHAR:write smaller than block "
				  "size was requested\n");
			return -EINVAL;
		}
		block_size = device->char_data.block_size;
		/* Note: a trailing partial block is silently ignored. */
		nblocks = count / block_size;
	} else {
		block_size = count;
		nblocks = 1;
	}
	rc = tapechar_check_idalbuffer(device, block_size);
	if (rc)
		return rc;
	DBF_EVENT(6,"TCHAR:nbytes: %lx\n", block_size);
	DBF_EVENT(6, "TCHAR:nblocks: %x\n", nblocks);
	/* Let the discipline build the ccw chain. */
	request = device->discipline->write_block(device, block_size);
	if (IS_ERR(request))
		return PTR_ERR(request);
	rc = 0;
	written = 0;
	/* The single request is reused for every block. */
	for (i = 0; i < nblocks; i++) {
		/* Copy data from user space to idal buffer. */
		if (idal_buffer_from_user(device->char_data.idal_buf,
					  data, block_size)) {
			rc = -EFAULT;
			break;
		}
		rc = tape_do_io(device, request);
		if (rc)
			break;
		DBF_EVENT(6, "TCHAR:wbytes: %lx\n",
			  block_size - request->rescnt);
		written += block_size - request->rescnt;
		/* Short write (residual count left): stop here. */
		if (request->rescnt != 0)
			break;
		data += block_size;
	}
	tape_free_request(request);
	if (rc == -ENOSPC) {
		/*
		 * Ok, the device has no more space. It has NOT written
		 * the block.
		 */
		if (device->discipline->process_eov)
			device->discipline->process_eov(device);
		if (written > 0)
			rc = 0;
	}
	/*
	 * After doing a write we always need two tapemarks to correctly
	 * terminate the tape (one to terminate the file, the second to
	 * flag the end of recorded data.
	 * Since process_eov positions the tape in front of the written
	 * tapemark it doesn't hurt to write two marks again.
	 */
	if (!rc)
		device->required_tapemarks = 2;
	return rc ? rc : written;
}
/*
 * Character frontend tape device open function: look up the tape device
 * for the minor number, take a reference and open it.
 */
static int
tapechar_open (struct inode *inode, struct file *filp)
{
	struct tape_device *device;
	int minor, rc;

	DBF_EVENT(6, "TCHAR:open: %i:%i\n",
		  imajor(file_inode(filp)),
		  iminor(file_inode(filp)));

	/* Only accept opens on our own (dynamically assigned) major. */
	if (imajor(file_inode(filp)) != tapechar_major)
		return -ENODEV;
	minor = iminor(file_inode(filp));
	/* Two minors (non-rewinding/rewinding) map to one device. */
	device = tape_find_device(minor / TAPE_MINORS_PER_DEV);
	if (IS_ERR(device)) {
		DBF_EVENT(3, "TCHAR:open: tape_find_device() failed\n");
		return PTR_ERR(device);
	}
	rc = tape_open(device);
	if (rc == 0) {
		filp->private_data = device;
		nonseekable_open(inode, filp);
	} else
		/* Drop the reference taken by tape_find_device(). */
		tape_put_device(device);
	return rc;
}
/*
 * Character frontend tape device release function: terminate the tape
 * (writing tapemarks as needed), free the idal buffer and drop the
 * device reference taken at open time.
 */
static int
tapechar_release(struct inode *inode, struct file *filp)
{
	struct tape_device *device;

	DBF_EVENT(6, "TCHAR:release: %x\n", iminor(inode));
	device = (struct tape_device *) filp->private_data;

	/*
	 * If this is the rewinding tape minor then rewind. In that case we
	 * write all required tapemarks. Otherwise only one to terminate the
	 * file.
	 */
	if ((iminor(inode) & 1) != 0) {
		if (device->required_tapemarks)
			tape_std_terminate_write(device);
		tape_mtop(device, MTREW, 1);
	} else {
		if (device->required_tapemarks > 1) {
			if (tape_mtop(device, MTWEOF, 1) == 0)
				device->required_tapemarks--;
		}
	}
	/* Free the idal buffer; it is reallocated on the next read/write. */
	if (device->char_data.idal_buf != NULL) {
		idal_buffer_free(device->char_data.idal_buf);
		device->char_data.idal_buf = NULL;
	}
	tape_release(device);
	filp->private_data = NULL;
	/* Drop the reference taken in tapechar_open(). */
	tape_put_device(device);
	return 0;
}
/*
* Tape device io controls.
*/
/*
 * Handle the generic magnetic tape ioctls (MTIOCTOP, MTIOCPOS, MTIOCGET)
 * and fall back to the discipline's ioctl handler for anything else.
 * Called with device->mutex held (see tapechar_ioctl).
 *
 * Fix vs. original: removed a doubly-redundant 'get.mt_gstat = 0;'
 * (the struct is already memset to zero AND mt_gstat was immediately
 * overwritten below); marked the intentional switch fallthrough.
 */
static int
__tapechar_ioctl(struct tape_device *device,
		 unsigned int no, unsigned long data)
{
	int rc;

	if (no == MTIOCTOP) {
		struct mtop op;

		if (copy_from_user(&op, (char __user *) data, sizeof(op)) != 0)
			return -EFAULT;
		if (op.mt_count < 0)
			return -EINVAL;
		/*
		 * Operations that change tape position should write final
		 * tapemarks.
		 */
		switch (op.mt_op) {
		case MTFSF:
		case MTBSF:
		case MTFSR:
		case MTBSR:
		case MTREW:
		case MTOFFL:
		case MTEOM:
		case MTRETEN:
		case MTBSFM:
		case MTFSFM:
		case MTSEEK:
			if (device->required_tapemarks)
				tape_std_terminate_write(device);
			/* fallthrough */
		default:
			;
		}
		rc = tape_mtop(device, op.mt_op, op.mt_count);
		/* Tapemarks written explicitly reduce the number that the
		 * driver still owes at close/reposition time. */
		if (op.mt_op == MTWEOF && rc == 0) {
			if (op.mt_count > device->required_tapemarks)
				device->required_tapemarks = 0;
			else
				device->required_tapemarks -= op.mt_count;
		}
		return rc;
	}
	if (no == MTIOCPOS) {
		/* MTIOCPOS: query the tape position. */
		struct mtpos pos;

		rc = tape_mtop(device, MTTELL, 1);
		if (rc < 0)
			return rc;
		pos.mt_blkno = rc;
		if (copy_to_user((char __user *) data, &pos, sizeof(pos)) != 0)
			return -EFAULT;
		return 0;
	}
	if (no == MTIOCGET) {
		/* MTIOCGET: query the tape drive status. */
		struct mtget get;

		memset(&get, 0, sizeof(get));
		get.mt_type = MT_ISUNKNOWN;
		get.mt_resid = 0 /* device->devstat.rescnt */;
		get.mt_dsreg =
			((device->char_data.block_size << MT_ST_BLKSIZE_SHIFT)
			 & MT_ST_BLKSIZE_MASK);
		/* FIXME: mt_gstat, mt_erreg, mt_fileno */
		get.mt_erreg = 0;
		get.mt_fileno = 0;
		get.mt_gstat = device->tape_generic_status;
		if (device->medium_state == MS_LOADED) {
			rc = tape_mtop(device, MTTELL, 1);
			if (rc < 0)
				return rc;
			/* Block 0 means we are at the beginning of tape. */
			if (rc == 0)
				get.mt_gstat |= GMT_BOT(~0);
			get.mt_blkno = rc;
		}
		if (copy_to_user((char __user *) data, &get, sizeof(get)) != 0)
			return -EFAULT;
		return 0;
	}
	/* Try the discipline ioctl function. */
	if (device->discipline->ioctl_fn == NULL)
		return -EINVAL;
	return device->discipline->ioctl_fn(device, no, data);
}
/* ioctl entry point: serialize against other tape commands on this
 * device, then dispatch to the real handler. */
static long
tapechar_ioctl(struct file *filp, unsigned int no, unsigned long data)
{
	struct tape_device *device = filp->private_data;
	long rc;

	DBF_EVENT(6, "TCHAR:ioct\n");
	mutex_lock(&device->mutex);
	rc = __tapechar_ioctl(device, no, data);
	mutex_unlock(&device->mutex);
	return rc;
}
#ifdef CONFIG_COMPAT
static long
tapechar_compat_ioctl(struct file *filp, unsigned int no, unsigned long data)
{
struct tape_device *device = filp->private_data;
int rval = -ENOIOCTLCMD;
unsigned long argp;
/* The 'arg' argument of any ioctl function may only be used for
* pointers because of the compat pointer conversion.
* Consider this when adding new ioctls.
*/
argp = (unsigned long) compat_ptr(data);
if (device->discipline->ioctl_fn) {
mutex_lock(&device->mutex);
rval = device->discipline->ioctl_fn(device, no, argp);
mutex_unlock(&device->mutex);
if (rval == -EINVAL)
rval = -ENOIOCTLCMD;
}
return rval;
}
#endif /* CONFIG_COMPAT */
/*
* Initialize character device frontend.
*/
/*
 * Initialize the character device frontend: reserve 256 minors on a
 * dynamically assigned major. Returns 0 or a negative error code.
 *
 * Fix vs. original: propagate the real error from alloc_chrdev_region()
 * instead of collapsing every failure to -1 (which reads as -EPERM).
 */
int
tapechar_init (void)
{
	dev_t dev;
	int rc;

	rc = alloc_chrdev_region(&dev, 0, 256, "tape");
	if (rc)
		return rc;
	tapechar_major = MAJOR(dev);
	return 0;
}
/*
* cleanup
*/
/* Release the character device region reserved by tapechar_init(). */
void
tapechar_exit(void)
{
	unregister_chrdev_region(MKDEV(tapechar_major, 0), 256);
}

View file

@ -0,0 +1,132 @@
/*
* Copyright IBM Corp. 2004
*
* Tape class device support
*
* Author: Stefan Bader <shbader@de.ibm.com>
* Based on simple class device code by Greg K-H
*/
#define KMSG_COMPONENT "tape"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/slab.h>
#include "tape_class.h"
MODULE_AUTHOR("Stefan Bader <shbader@de.ibm.com>");
MODULE_DESCRIPTION(
"Copyright IBM Corp. 2004 All Rights Reserved.\n"
"tape_class.c"
);
MODULE_LICENSE("GPL");
static struct class *tape_class;
/*
* Register a tape device and return a pointer to the cdev structure.
*
* device
* The pointer to the struct device of the physical (base) device.
* drivername
* The pointer to the drivers name for it's character devices.
* dev
* The intended major/minor number. The major number may be 0 to
* get a dynamic major number.
* fops
* The pointer to the drivers file operations for the tape device.
* devname
* The pointer to the name of the character device.
*/
/*
 * Create the character device node and sysfs class device for one tape
 * device mode. Returns the new tape_class_device or an ERR_PTR().
 * Caller frees via unregister_tape_dev().
 *
 * Fix vs. original: strncpy() does not NUL-terminate when the source is
 * >= TAPECLASS_NAME_LEN bytes; terminate both name buffers explicitly
 * (CERT STR32-C).
 */
struct tape_class_device *register_tape_dev(
	struct device *		device,
	dev_t			dev,
	const struct file_operations *fops,
	char *			device_name,
	char *			mode_name)
{
	struct tape_class_device *	tcd;
	int		rc;
	char *		s;

	tcd = kzalloc(sizeof(struct tape_class_device), GFP_KERNEL);
	if (!tcd)
		return ERR_PTR(-ENOMEM);

	/* Copy the names (guaranteeing termination) and replace '/',
	 * which is not usable in device/kobject names, by '!'. */
	strncpy(tcd->device_name, device_name, TAPECLASS_NAME_LEN);
	tcd->device_name[TAPECLASS_NAME_LEN - 1] = '\0';
	for (s = strchr(tcd->device_name, '/'); s; s = strchr(s, '/'))
		*s = '!';
	strncpy(tcd->mode_name, mode_name, TAPECLASS_NAME_LEN);
	tcd->mode_name[TAPECLASS_NAME_LEN - 1] = '\0';
	for (s = strchr(tcd->mode_name, '/'); s; s = strchr(s, '/'))
		*s = '!';

	tcd->char_device = cdev_alloc();
	if (!tcd->char_device) {
		rc = -ENOMEM;
		goto fail_with_tcd;
	}
	tcd->char_device->owner = fops->owner;
	tcd->char_device->ops = fops;
	tcd->char_device->dev = dev;
	rc = cdev_add(tcd->char_device, tcd->char_device->dev, 1);
	if (rc)
		goto fail_with_cdev;

	tcd->class_device = device_create(tape_class, device,
					  tcd->char_device->dev, NULL,
					  "%s", tcd->device_name);
	rc = PTR_RET(tcd->class_device);
	if (rc)
		goto fail_with_cdev;
	/* Link <base device>/<mode_name> -> class device. */
	rc = sysfs_create_link(
		&device->kobj,
		&tcd->class_device->kobj,
		tcd->mode_name
	);
	if (rc)
		goto fail_with_class_device;

	return tcd;

fail_with_class_device:
	device_destroy(tape_class, tcd->char_device->dev);

fail_with_cdev:
	cdev_del(tcd->char_device);

fail_with_tcd:
	kfree(tcd);

	return ERR_PTR(rc);
}
EXPORT_SYMBOL(register_tape_dev);
/* Undo register_tape_dev(); tolerates NULL/ERR_PTR so callers can clean
 * up unconditionally. */
void unregister_tape_dev(struct device *device, struct tape_class_device *tcd)
{
	if (tcd == NULL || IS_ERR(tcd))
		return;
	sysfs_remove_link(&device->kobj, tcd->mode_name);
	device_destroy(tape_class, tcd->char_device->dev);
	cdev_del(tcd->char_device);
	kfree(tcd);
}
EXPORT_SYMBOL(unregister_tape_dev);
/*
 * Module init: create the "tape390" device class used by
 * register_tape_dev().
 *
 * Fix vs. original: class_create() returns an ERR_PTR() on failure,
 * which was ignored; check it and propagate the error so later
 * device_create() calls don't operate on an invalid class.
 */
static int __init tape_init(void)
{
	tape_class = class_create(THIS_MODULE, "tape390");
	if (IS_ERR(tape_class)) {
		int rc = PTR_ERR(tape_class);

		tape_class = NULL;
		return rc;
	}
	return 0;
}
/* Module exit: destroy the "tape390" class created in tape_init(). */
static void __exit tape_exit(void)
{
	class_destroy(tape_class);
	tape_class = NULL;
}
postcore_initcall(tape_init);
module_exit(tape_exit);

View file

@ -0,0 +1,58 @@
/*
* Copyright IBM Corp. 2004 All Rights Reserved.
*
* Tape class device support
*
* Author: Stefan Bader <shbader@de.ibm.com>
* Based on simple class device code by Greg K-H
*/
#ifndef __TAPE_CLASS_H__
#define __TAPE_CLASS_H__
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/major.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/kdev_t.h>
#define TAPECLASS_NAME_LEN 32
struct tape_class_device {
struct cdev *char_device;
struct device *class_device;
char device_name[TAPECLASS_NAME_LEN];
char mode_name[TAPECLASS_NAME_LEN];
};
/*
* Register a tape device and return a pointer to the tape class device
* created by the call.
*
* device
* The pointer to the struct device of the physical (base) device.
* dev
* The intended major/minor number. The major number may be 0 to
* get a dynamic major number.
* fops
* The pointer to the drivers file operations for the tape device.
* device_name
* Pointer to the logical device name (will also be used as kobject name
* of the cdev). This can also be called the name of the tape class
* device.
* mode_name
* Points to the name of the tape mode. This creates a link with that
* name from the physical device to the logical device (class).
*/
struct tape_class_device *register_tape_dev(
	struct device *		device,
	dev_t			dev,
	const struct file_operations *fops,
	char *			device_name,
	char *			mode_name	/* was 'node_name'; renamed to match the definition and the doc comment above */
);
void unregister_tape_dev(struct device *device, struct tape_class_device *tcd);
#endif /* __TAPE_CLASS_H__ */

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,143 @@
/*
* tape device driver for S/390 and zSeries tapes.
*
* S390 and zSeries version
* Copyright IBM Corp. 2001
* Author(s): Carsten Otte <cotte@de.ibm.com>
* Michael Holzheu <holzheu@de.ibm.com>
* Tuan Ngo-Anh <ngoanh@de.ibm.com>
*
* PROCFS Functions
*/
#define KMSG_COMPONENT "tape"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#define TAPE_DBF_AREA tape_core_dbf
#include "tape.h"
/* Human-readable medium-state names, indexed by the MS_* enum values. */
static const char *tape_med_st_verbose[MS_SIZE] =
{
	[MS_UNKNOWN] = "UNKNOWN ",
	[MS_LOADED] = "LOADED ",
	[MS_UNLOADED] = "UNLOADED"
};

/* our proc tapedevices entry */
static struct proc_dir_entry *tape_proc_devices;
/*
 * Show function for /proc/tapedevices
 *
 * v is the 1-based device-index cookie produced by tape_proc_start().
 * The column header is emitted together with the first row.
 */
static int tape_proc_show(struct seq_file *m, void *v)
{
	struct tape_device *device;
	struct tape_request *request;
	const char *str;
	unsigned long n;

	/* Convert the 1-based iterator cookie back to a 0-based index. */
	n = (unsigned long) v - 1;
	if (!n) {
		seq_printf(m, "TapeNo\tBusID CuType/Model\t"
			"DevType/Model\tBlkSize\tState\tOp\tMedState\n");
	}
	device = tape_find_device(n);
	if (IS_ERR(device))
		return 0;	/* no device at this index: print nothing */
	/* Serialize against interrupt-time changes of the device state. */
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	seq_printf(m, "%d\t", (int) n);
	seq_printf(m, "%-10.10s ", dev_name(&device->cdev->dev));
	seq_printf(m, "%04X/", device->cdev->id.cu_type);
	seq_printf(m, "%02X\t", device->cdev->id.cu_model);
	seq_printf(m, "%04X/", device->cdev->id.dev_type);
	seq_printf(m, "%02X\t\t", device->cdev->id.dev_model);
	if (device->char_data.block_size == 0)
		seq_printf(m, "auto\t");
	else
		seq_printf(m, "%i\t", device->char_data.block_size);
	if (device->tape_state >= 0 &&
	    device->tape_state < TS_SIZE)
		str = tape_state_verbose[device->tape_state];
	else
		str = "UNKNOWN";
	seq_printf(m, "%s\t", str);
	/* Report the operation of the oldest queued request, if any. */
	if (!list_empty(&device->req_queue)) {
		request = list_entry(device->req_queue.next,
			struct tape_request, list);
		str = tape_op_verbose[request->op];
	} else
		str = "---";
	seq_printf(m, "%s\t", str);
	seq_printf(m, "%s\n", tape_med_st_verbose[device->medium_state]);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	/* presumably drops the reference taken by tape_find_device() */
	tape_put_device(device);
	return 0;
}
/*
 * seq_file start: translate the read position into a non-NULL, 1-based
 * cookie; NULL once all possible tape numbers have been visited.
 */
static void *tape_proc_start(struct seq_file *m, loff_t *pos)
{
	unsigned long devno = (unsigned long) *pos;

	if (devno >= 256 / TAPE_MINORS_PER_DEV)
		return NULL;
	return (void *) (devno + 1);
}
/* seq_file next: advance the position and re-validate via start. */
static void *tape_proc_next(struct seq_file *m, void *v, loff_t *pos)
{
	*pos += 1;
	return tape_proc_start(m, pos);
}
/* seq_file stop: nothing to release, iteration state is just an index. */
static void tape_proc_stop(struct seq_file *m, void *v)
{
}
/* seq_file iterator over all possible tape device numbers. */
static const struct seq_operations tape_proc_seq = {
	.start		= tape_proc_start,
	.next		= tape_proc_next,
	.stop		= tape_proc_stop,
	.show		= tape_proc_show,
};
/* open() handler for /proc/tapedevices: attach the seq_file iterator. */
static int tape_proc_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &tape_proc_seq);
}
/* file operations for /proc/tapedevices (standard seq_file plumbing) */
static const struct file_operations tape_proc_ops =
{
	.owner		= THIS_MODULE,
	.open		= tape_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
/*
 * Initialize procfs stuff on startup
 *
 * Creates /proc/tapedevices. Failure is tolerated: the driver works
 * without the proc entry, so no error is propagated.
 */
void
tape_proc_init(void)
{
	tape_proc_devices = proc_create("tapedevices",
					S_IFREG | S_IRUGO | S_IWUSR, NULL,
					&tape_proc_ops);
}
/*
 * Cleanup all stuff registered to the procfs
 *
 * Removes /proc/tapedevices if tape_proc_init() managed to create it.
 */
void
tape_proc_cleanup(void)
{
	if (tape_proc_devices)
		remove_proc_entry("tapedevices", NULL);
}

View file

@ -0,0 +1,750 @@
/*
* standard tape device functions for ibm tapes.
*
* S390 and zSeries version
* Copyright IBM Corp. 2001, 2002
* Author(s): Carsten Otte <cotte@de.ibm.com>
* Michael Holzheu <holzheu@de.ibm.com>
* Tuan Ngo-Anh <ngoanh@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
* Stefan Bader <shbader@de.ibm.com>
*/
#define KMSG_COMPONENT "tape"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/timer.h>
#include <asm/types.h>
#include <asm/idals.h>
#include <asm/ebcdic.h>
#include <asm/tape390.h>
#define TAPE_DBF_AREA tape_core_dbf
#include "tape.h"
#include "tape_std.h"
/*
 * tape_std_assign
 *
 * Timer callback armed by tape_std_assign(): if the ASSIGN channel
 * program has not completed within the timeout, cancel the I/O so the
 * caller is not blocked forever by a drive held by another host.
 */
static void
tape_std_assign_timeout(unsigned long data)
{
	struct tape_request *	request;
	struct tape_device *	device;
	int rc;

	request = (struct tape_request *) data;
	device = request->device;
	BUG_ON(!device);
	DBF_EVENT(3, "%08x: Assignment timeout. Device busy.\n",
			device->cdev_id);
	rc = tape_cancel_io(device, request);
	if (rc)
		DBF_EVENT(3, "(%08x): Assign timeout: Cancel failed with rc = "
			"%i\n", device->cdev_id, rc);
}
/*
 * Issue an ASSIGN channel program to claim the drive for this host.
 * Returns 0 on success or a negative errno.
 */
int
tape_std_assign(struct tape_device *device)
{
	int                  rc;
	struct timer_list    timeout;
	struct tape_request *request;

	request = tape_alloc_request(2, 11);
	if (IS_ERR(request))
		return PTR_ERR(request);
	request->op = TO_ASSIGN;
	tape_ccw_cc(request->cpaddr, ASSIGN, 11, request->cpdata);
	tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL);
	/*
	 * The assign command sometimes blocks if the device is assigned
	 * to another host (actually this shouldn't happen but it does).
	 * So we set up a timeout for this call.
	 */
	init_timer_on_stack(&timeout);
	timeout.function = tape_std_assign_timeout;
	timeout.data = (unsigned long) request;
	timeout.expires = jiffies + 2 * HZ;
	add_timer(&timeout);
	rc = tape_do_io_interruptible(device, request);
	/* Timer must be fully stopped before the on-stack storage dies. */
	del_timer_sync(&timeout);
	destroy_timer_on_stack(&timeout);
	if (rc != 0) {
		DBF_EVENT(3, "%08x: assign failed - device might be busy\n",
			device->cdev_id);
	} else {
		DBF_EVENT(3, "%08x: Tape assigned\n", device->cdev_id);
	}
	tape_free_request(request);
	return rc;
}
/*
 * tape_std_unassign
 *
 * Release the drive via an UNASSIGN channel program. Refused with -EIO
 * when the device is no longer operational.
 */
int
tape_std_unassign (struct tape_device *device)
{
	struct tape_request *request;
	int rc;

	if (device->tape_state == TS_NOT_OPER) {
		DBF_EVENT(3, "(%08x): Can't unassign device\n",
			device->cdev_id);
		return -EIO;
	}

	request = tape_alloc_request(2, 11);
	if (IS_ERR(request))
		return PTR_ERR(request);

	request->op = TO_UNASSIGN;
	tape_ccw_cc(request->cpaddr, UNASSIGN, 11, request->cpdata);
	tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL);

	rc = tape_do_io(device, request);
	if (rc)
		DBF_EVENT(3, "%08x: Unassign failed\n", device->cdev_id);
	else
		DBF_EVENT(3, "%08x: Tape unassigned\n", device->cdev_id);
	tape_free_request(request);
	return rc;
}
/*
 * TAPE390_DISPLAY: Show a string on the tape display.
 *
 * cpdata layout: 1 control byte followed by two 8-character messages,
 * converted from ASCII to EBCDIC for the control unit.
 */
int
tape_std_display(struct tape_device *device, struct display_struct *disp)
{
	struct tape_request *request;
	int rc;

	request = tape_alloc_request(2, 17);
	if (IS_ERR(request)) {
		DBF_EVENT(3, "TAPE: load display failed\n");
		return PTR_ERR(request);
	}
	request->op = TO_DIS;
	*(unsigned char *) request->cpdata = disp->cntrl;
	DBF_EVENT(5, "TAPE: display cntrl=%04x\n", disp->cntrl);
	memcpy(((unsigned char *) request->cpdata) + 1, disp->message1, 8);
	memcpy(((unsigned char *) request->cpdata) + 9, disp->message2, 8);
	ASCEBC(((unsigned char*) request->cpdata) + 1, 16);
	tape_ccw_cc(request->cpaddr, LOAD_DISPLAY, 17, request->cpdata);
	tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL);
	rc = tape_do_io_interruptible(device, request);
	tape_free_request(request);
	return rc;
}
/*
 * Read block id.
 *
 * Stores the 8-byte block id returned by the control unit in *id.
 * Returns 0 on success or a negative errno.
 */
int
tape_std_read_block_id(struct tape_device *device, __u64 *id)
{
	struct tape_request *request;
	int rc;

	request = tape_alloc_request(3, 8);
	if (IS_ERR(request))
		return PTR_ERR(request);
	request->op = TO_RBI;
	/* setup ccws */
	tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
	tape_ccw_cc(request->cpaddr + 1, READ_BLOCK_ID, 8, request->cpdata);
	tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL);
	/* execute it */
	rc = tape_do_io(device, request);
	if (rc == 0)
		/* Get result from read buffer. */
		*id = *(__u64 *) request->cpdata;
	tape_free_request(request);
	return rc;
}
/*
 * Finish a write phase: append the outstanding tapemarks and step back
 * over the last one so a subsequent write continues in the right place.
 * No-op when no tapemarks are pending.
 */
int
tape_std_terminate_write(struct tape_device *device)
{
	int rc = 0;

	if (device->required_tapemarks) {
		DBF_LH(5, "tape%d: terminate write %dxEOF\n",
		       device->first_minor, device->required_tapemarks);
		rc = tape_mtop(device, MTWEOF, device->required_tapemarks);
		if (rc == 0) {
			device->required_tapemarks = 0;
			rc = tape_mtop(device, MTBSR, 1);
		}
	}
	return rc;
}
/*
 * MTLOAD: Loads the tape.
 * The default implementation just waits until the tape medium state
 * changes to MS_LOADED. Interruptible; returns -ERESTARTSYS on signal.
 */
int
tape_std_mtload(struct tape_device *device, int count)
{
	return wait_event_interruptible(device->state_change_wq,
		(device->medium_state == MS_LOADED));
}
/*
 * MTSETBLK: Set block size.
 *
 * count <= 0 selects variable ("auto") block size; otherwise an idal
 * buffer of exactly count bytes is (re)allocated. Returns 0, -EINVAL
 * for oversized counts, or -ENOMEM.
 */
int
tape_std_mtsetblk(struct tape_device *device, int count)
{
	struct idal_buffer *new;

	DBF_LH(6, "tape_std_mtsetblk(%d)\n", count);
	if (count <= 0) {
		/*
		 * Just set block_size to 0. tapechar_read/tapechar_write
		 * will realloc the idal buffer if a bigger one than the
		 * current is needed.
		 */
		device->char_data.block_size = 0;
		return 0;
	}
	if (device->char_data.idal_buf != NULL &&
	    device->char_data.idal_buf->size == count)
		/* We already have a idal buffer of that size. */
		return 0;
	if (count > MAX_BLOCKSIZE) {
		DBF_EVENT(3, "Invalid block size (%d > %d) given.\n",
			count, MAX_BLOCKSIZE);
		return -EINVAL;
	}
	/* Allocate a new idal buffer. */
	new = idal_buffer_alloc(count, 0);
	if (IS_ERR(new))
		return -ENOMEM;
	/* Replace the old buffer only after the new one exists. */
	if (device->char_data.idal_buf != NULL)
		idal_buffer_free(device->char_data.idal_buf);
	device->char_data.idal_buf = new;
	device->char_data.block_size = count;
	DBF_LH(6, "new blocksize is %d\n", device->char_data.block_size);
	return 0;
}
/*
 * MTRESET: Set block size to 0 (back to variable block size).
 */
int
tape_std_mtreset(struct tape_device *device, int count)
{
	DBF_EVENT(6, "TCHAR:devreset:\n");
	device->char_data.block_size = 0;
	return 0;
}
/*
 * MTFSF: Forward space over 'count' file marks. The tape is positioned
 * at the EOT (End of Tape) side of the file mark.
 */
int
tape_std_mtfsf(struct tape_device *device, int mt_count)
{
	struct tape_request *request;
	struct ccw1 *ccw;

	/* one mode-set CCW + mt_count space CCWs + terminating NOP */
	request = tape_alloc_request(mt_count + 2, 0);
	if (IS_ERR(request))
		return PTR_ERR(request);
	request->op = TO_FSF;
	/* setup ccws */
	ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
			  device->modeset_byte);
	ccw = tape_ccw_repeat(ccw, FORSPACEFILE, mt_count);
	ccw = tape_ccw_end(ccw, NOP, 0, NULL);
	/* execute it (request is freed by tape_do_io_free) */
	return tape_do_io_free(device, request);
}
/*
 * MTFSR: Forward space over 'count' tape blocks (blocksize is set
 * via MTSETBLK).
 *
 * Returns 1 when a tapemark was crossed (residual count > 0),
 * 0 on plain success, or a negative errno.
 */
int
tape_std_mtfsr(struct tape_device *device, int mt_count)
{
	struct tape_request *request;
	struct ccw1 *ccw;
	int rc;

	request = tape_alloc_request(mt_count + 2, 0);
	if (IS_ERR(request))
		return PTR_ERR(request);
	request->op = TO_FSB;
	/* setup ccws */
	ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
			  device->modeset_byte);
	ccw = tape_ccw_repeat(ccw, FORSPACEBLOCK, mt_count);
	ccw = tape_ccw_end(ccw, NOP, 0, NULL);
	/* execute it */
	rc = tape_do_io(device, request);
	if (rc == 0 && request->rescnt > 0) {
		DBF_LH(3, "FSR over tapemark\n");
		rc = 1;
	}
	tape_free_request(request);
	return rc;
}
/*
 * MTBSR: Backward space over 'count' tape blocks.
 * (blocksize is set via MTSETBLK.)
 *
 * Returns 1 when a tapemark was crossed (residual count > 0),
 * 0 on plain success, or a negative errno.
 */
int
tape_std_mtbsr(struct tape_device *device, int mt_count)
{
	struct tape_request *request;
	struct ccw1 *ccw;
	int rc;

	request = tape_alloc_request(mt_count + 2, 0);
	if (IS_ERR(request))
		return PTR_ERR(request);
	request->op = TO_BSB;
	/* setup ccws */
	ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
			  device->modeset_byte);
	ccw = tape_ccw_repeat(ccw, BACKSPACEBLOCK, mt_count);
	ccw = tape_ccw_end(ccw, NOP, 0, NULL);
	/* execute it */
	rc = tape_do_io(device, request);
	if (rc == 0 && request->rescnt > 0) {
		DBF_LH(3, "BSR over tapemark\n");
		rc = 1;
	}
	tape_free_request(request);
	return rc;
}
/*
 * MTWEOF: Write 'count' file marks at the current position.
 */
int
tape_std_mtweof(struct tape_device *device, int mt_count)
{
	struct tape_request *request;
	struct ccw1 *ccw;

	request = tape_alloc_request(mt_count + 2, 0);
	if (IS_ERR(request))
		return PTR_ERR(request);
	request->op = TO_WTM;
	/* setup ccws */
	ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
			  device->modeset_byte);
	ccw = tape_ccw_repeat(ccw, WRITETAPEMARK, mt_count);
	ccw = tape_ccw_end(ccw, NOP, 0, NULL);
	/* execute it (request is freed by tape_do_io_free) */
	return tape_do_io_free(device, request);
}
/*
 * MTBSFM: Backward space over 'count' file marks.
 * The tape is positioned at the BOT (Begin Of Tape) side of the
 * last skipped file mark.
 */
int
tape_std_mtbsfm(struct tape_device *device, int mt_count)
{
	struct tape_request *request;
	struct ccw1 *ccw;

	request = tape_alloc_request(mt_count + 2, 0);
	if (IS_ERR(request))
		return PTR_ERR(request);
	request->op = TO_BSF;
	/* setup ccws */
	ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
			  device->modeset_byte);
	ccw = tape_ccw_repeat(ccw, BACKSPACEFILE, mt_count);
	ccw = tape_ccw_end(ccw, NOP, 0, NULL);
	/* execute it (request is freed by tape_do_io_free) */
	return tape_do_io_free(device, request);
}
/*
 * MTBSF: Backward space over 'count' file marks. The tape is positioned at
 * the EOT (End of Tape) side of the last skipped file mark.
 *
 * Implemented as BSF followed by one forward space record to end up on
 * the EOT side of the mark.
 */
int
tape_std_mtbsf(struct tape_device *device, int mt_count)
{
	struct tape_request *request;
	struct ccw1 *ccw;
	int rc;

	request = tape_alloc_request(mt_count + 2, 0);
	if (IS_ERR(request))
		return PTR_ERR(request);
	request->op = TO_BSF;
	/* setup ccws */
	ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
			  device->modeset_byte);
	ccw = tape_ccw_repeat(ccw, BACKSPACEFILE, mt_count);
	ccw = tape_ccw_end(ccw, NOP, 0, NULL);
	/* execute it */
	rc = tape_do_io_free(device, request);
	if (rc == 0) {
		rc = tape_mtop(device, MTFSR, 1);
		/* MTFSR returns 1 for "crossed a tapemark" - expected here */
		if (rc > 0)
			rc = 0;
	}
	return rc;
}
/*
 * MTFSFM: Forward space over 'count' file marks.
 * The tape is positioned at the BOT (Begin Of Tape) side
 * of the last skipped file mark.
 *
 * Implemented as FSF followed by one backward space record to end up on
 * the BOT side of the mark.
 */
int
tape_std_mtfsfm(struct tape_device *device, int mt_count)
{
	struct tape_request *request;
	struct ccw1 *ccw;
	int rc;

	request = tape_alloc_request(mt_count + 2, 0);
	if (IS_ERR(request))
		return PTR_ERR(request);
	request->op = TO_FSF;
	/* setup ccws */
	ccw = tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
			  device->modeset_byte);
	ccw = tape_ccw_repeat(ccw, FORSPACEFILE, mt_count);
	ccw = tape_ccw_end(ccw, NOP, 0, NULL);
	/* execute it */
	rc = tape_do_io_free(device, request);
	if (rc == 0) {
		rc = tape_mtop(device, MTBSR, 1);
		/* MTBSR returns 1 for "crossed a tapemark" - expected here */
		if (rc > 0)
			rc = 0;
	}
	return rc;
}
/*
 * MTREW: Rewind the tape.
 */
int
tape_std_mtrew(struct tape_device *device, int mt_count)
{
	struct tape_request *request;

	request = tape_alloc_request(3, 0);
	if (IS_ERR(request))
		return PTR_ERR(request);
	request->op = TO_REW;
	/* setup ccws */
	tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1,
		    device->modeset_byte);
	tape_ccw_cc(request->cpaddr + 1, REWIND, 0, NULL);
	tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL);
	/* execute it (request is freed by tape_do_io_free) */
	return tape_do_io_free(device, request);
}
/*
 * MTOFFL: Rewind the tape and put the drive off-line.
 * Implement 'rewind unload'
 */
int
tape_std_mtoffl(struct tape_device *device, int mt_count)
{
	struct tape_request *request;

	request = tape_alloc_request(3, 0);
	if (IS_ERR(request))
		return PTR_ERR(request);
	request->op = TO_RUN;
	/* setup ccws */
	tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
	tape_ccw_cc(request->cpaddr + 1, REWIND_UNLOAD, 0, NULL);
	tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL);
	/* execute it (request is freed by tape_do_io_free) */
	return tape_do_io_free(device, request);
}
/*
 * MTNOP: 'No operation'.
 *
 * Still issues a mode-set/NOP channel program, so it doubles as a
 * cheap "is the device responding" check.
 */
int
tape_std_mtnop(struct tape_device *device, int mt_count)
{
	struct tape_request *request;

	request = tape_alloc_request(2, 0);
	if (IS_ERR(request))
		return PTR_ERR(request);
	request->op = TO_NOP;
	/* setup ccws */
	tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
	tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL);
	/* execute it (request is freed by tape_do_io_free) */
	return tape_do_io_free(device, request);
}
/*
 * MTEOM: positions at the end of the portion of the tape already used
 * for recording data. MTEOM positions after the last file mark, ready for
 * appending another file.
 */
int
tape_std_mteom(struct tape_device *device, int mt_count)
{
	int rc;

	/*
	 * Seek from the beginning of tape (rewind).
	 */
	if ((rc = tape_mtop(device, MTREW, 1)) < 0)
		return rc;
	/*
	 * The logical end of volume is given by two sequential tapemarks.
	 * Look for this by skipping to the next file (over one tapemark)
	 * and then test for another one (fsr returns 1 if a tapemark was
	 * encountered).
	 */
	do {
		if ((rc = tape_mtop(device, MTFSF, 1)) < 0)
			return rc;
		if ((rc = tape_mtop(device, MTFSR, 1)) < 0)
			return rc;
	} while (rc == 0);
	/* Step back in front of the second tapemark, ready to append. */
	return tape_mtop(device, MTBSR, 1);
}
/*
 * MTRETEN: Retension the tape, i.e. forward space to end of tape and rewind.
 */
int
tape_std_mtreten(struct tape_device *device, int mt_count)
{
	struct tape_request *request;

	request = tape_alloc_request(4, 0);
	if (IS_ERR(request))
		return PTR_ERR(request);
	request->op = TO_FSF;
	/* setup ccws */
	tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
	tape_ccw_cc(request->cpaddr + 1,FORSPACEFILE, 0, NULL);
	tape_ccw_cc(request->cpaddr + 2, NOP, 0, NULL);
	/* TIC back to the start: loop forward-spacing until end of tape. */
	tape_ccw_end(request->cpaddr + 3, CCW_CMD_TIC, 0, request->cpaddr);
	/* execute it, MTRETEN rc gets ignored */
	tape_do_io_interruptible(device, request);
	tape_free_request(request);
	return tape_mtop(device, MTREW, 1);
}
/*
 * MTERASE: erases the tape.
 *
 * Rewind, erase gap, data-security erase to end of tape, then rewind
 * again - all in a single channel program.
 */
int
tape_std_mterase(struct tape_device *device, int mt_count)
{
	struct tape_request *request;

	request = tape_alloc_request(6, 0);
	if (IS_ERR(request))
		return PTR_ERR(request);
	request->op = TO_DSE;
	/* setup ccws */
	tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
	tape_ccw_cc(request->cpaddr + 1, REWIND, 0, NULL);
	tape_ccw_cc(request->cpaddr + 2, ERASE_GAP, 0, NULL);
	tape_ccw_cc(request->cpaddr + 3, DATA_SEC_ERASE, 0, NULL);
	tape_ccw_cc(request->cpaddr + 4, REWIND, 0, NULL);
	tape_ccw_end(request->cpaddr + 5, NOP, 0, NULL);
	/* execute it (request is freed by tape_do_io_free) */
	return tape_do_io_free(device, request);
}
/*
 * MTUNLOAD: Rewind the tape and unload it.
 *
 * Identical to MTOFFL here, so simply delegate.
 */
int
tape_std_mtunload(struct tape_device *device, int mt_count)
{
	return tape_mtop(device, MTOFFL, mt_count);
}
/*
 * MTCOMPRESSION: used to enable compression.
 * Sets the IDRC on/off (mt_count 0 = off, 1 = on) via bit 0x08 of the
 * mode-set byte, then issues a mode-set/NOP program to apply it.
 */
int
tape_std_mtcompression(struct tape_device *device, int mt_count)
{
	struct tape_request *request;

	if (mt_count < 0 || mt_count > 1) {
		DBF_EXCEPTION(6, "xcom parm\n");
		return -EINVAL;
	}
	request = tape_alloc_request(2, 0);
	if (IS_ERR(request))
		return PTR_ERR(request);
	request->op = TO_NOP;
	/* setup ccws */
	if (mt_count == 0)
		*device->modeset_byte &= ~0x08;
	else
		*device->modeset_byte |= 0x08;
	tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
	tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL);
	/* execute it (request is freed by tape_do_io_free) */
	return tape_do_io_free(device, request);
}
/*
 * Read Block
 *
 * Builds (but does not start) a READ_FORWARD request into the
 * character-device idal buffer. Returns the request or an ERR_PTR.
 */
struct tape_request *
tape_std_read_block(struct tape_device *device, size_t count)
{
	struct tape_request *request;

	/*
	 * We have to alloc 4 ccws in order to be able to transform request
	 * into a read backward request in error case.
	 */
	request = tape_alloc_request(4, 0);
	if (IS_ERR(request)) {
		DBF_EXCEPTION(6, "xrbl fail");
		return request;
	}
	request->op = TO_RFO;
	tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
	tape_ccw_end_idal(request->cpaddr + 1, READ_FORWARD,
			  device->char_data.idal_buf);
	DBF_EVENT(6, "xrbl ccwg\n");
	return request;
}
/*
 * Read Block backward transformation function.
 *
 * Rewrites a request built by tape_std_read_block() in place into a
 * read-backward program. The four CCWs allocated there leave room for
 * the extra forward-space-block that restores the tape position.
 */
void
tape_std_read_backward(struct tape_device *device, struct tape_request *request)
{
	request->op = TO_RBA;
	tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
	tape_ccw_cc_idal(request->cpaddr + 1, READ_BACKWARD,
			 device->char_data.idal_buf);
	tape_ccw_cc(request->cpaddr + 2, FORSPACEBLOCK, 0, NULL);
	tape_ccw_end(request->cpaddr + 3, NOP, 0, NULL);
	DBF_EVENT(6, "xrop ccwg");
}
/*
 * Write Block
 *
 * Builds (but does not start) a WRITE_CMD request from the
 * character-device idal buffer. Returns the request or an ERR_PTR.
 */
struct tape_request *
tape_std_write_block(struct tape_device *device, size_t count)
{
	struct tape_request *request;

	request = tape_alloc_request(2, 0);
	if (IS_ERR(request)) {
		DBF_EXCEPTION(6, "xwbl fail\n");
		return request;
	}
	request->op = TO_WRI;
	tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
	tape_ccw_end_idal(request->cpaddr + 1, WRITE_CMD,
			  device->char_data.idal_buf);
	DBF_EVENT(6, "xwbl ccwg\n");
	return request;
}
/*
 * This routine is called by frontend after an ENOSP on write
 *
 * End of volume: back up over the record that no longer fit, TRY to
 * write a tapemark, and back up over that mark again. Best effort -
 * failures of the later steps are ignored.
 */
void
tape_std_process_eov(struct tape_device *device)
{
	if (tape_mtop(device, MTBSR, 1) != 0)
		return;
	if (tape_mtop(device, MTWEOF, 1) != 0)
		return;
	tape_mtop(device, MTBSR, 1);
}
/* Exported for use by the device-type specific tape discipline modules. */
EXPORT_SYMBOL(tape_std_assign);
EXPORT_SYMBOL(tape_std_unassign);
EXPORT_SYMBOL(tape_std_display);
EXPORT_SYMBOL(tape_std_read_block_id);
EXPORT_SYMBOL(tape_std_mtload);
EXPORT_SYMBOL(tape_std_mtsetblk);
EXPORT_SYMBOL(tape_std_mtreset);
EXPORT_SYMBOL(tape_std_mtfsf);
EXPORT_SYMBOL(tape_std_mtfsr);
EXPORT_SYMBOL(tape_std_mtbsr);
EXPORT_SYMBOL(tape_std_mtweof);
EXPORT_SYMBOL(tape_std_mtbsfm);
EXPORT_SYMBOL(tape_std_mtbsf);
EXPORT_SYMBOL(tape_std_mtfsfm);
EXPORT_SYMBOL(tape_std_mtrew);
EXPORT_SYMBOL(tape_std_mtoffl);
EXPORT_SYMBOL(tape_std_mtnop);
EXPORT_SYMBOL(tape_std_mteom);
EXPORT_SYMBOL(tape_std_mtreten);
EXPORT_SYMBOL(tape_std_mterase);
EXPORT_SYMBOL(tape_std_mtunload);
EXPORT_SYMBOL(tape_std_mtcompression);
EXPORT_SYMBOL(tape_std_read_block);
EXPORT_SYMBOL(tape_std_read_backward);
EXPORT_SYMBOL(tape_std_write_block);
EXPORT_SYMBOL(tape_std_process_eov);

View file

@ -0,0 +1,154 @@
/*
 * standard tape device functions for ibm tapes.
 *
 * Copyright IBM Corp. 2001, 2006
 * Author(s): Carsten Otte <cotte@de.ibm.com>
 * Tuan Ngo-Anh <ngoanh@de.ibm.com>
 * Martin Schwidefsky <schwidefsky@de.ibm.com>
 */
#ifndef _TAPE_STD_H
#define _TAPE_STD_H

#include <asm/tape390.h>

/*
 * Biggest block size to handle. Currently 64K because we only build
 * channel programs without data chaining.
 */
#define MAX_BLOCKSIZE 65535

/*
 * The CCW commands for the Tape type of command.
 */
#define INVALID_00 0x00 /* Invalid cmd */
#define BACKSPACEBLOCK 0x27 /* Back Space block */
#define BACKSPACEFILE 0x2f /* Back Space file */
#define DATA_SEC_ERASE 0x97 /* Data security erase */
#define ERASE_GAP 0x17 /* Erase Gap */
#define FORSPACEBLOCK 0x37 /* Forward space block */
#define FORSPACEFILE 0x3F /* Forward Space file */
#define FORCE_STREAM_CNT 0xEB /* Forced streaming count # */
#define NOP 0x03 /* No operation */
#define READ_FORWARD 0x02 /* Read forward */
#define REWIND 0x07 /* Rewind */
#define REWIND_UNLOAD 0x0F /* Rewind and Unload */
#define SENSE 0x04 /* Sense */
#define NEW_MODE_SET 0xEB /* Guess it is Mode set */
#define WRITE_CMD 0x01 /* Write */
#define WRITETAPEMARK 0x1F /* Write Tape Mark */
#define ASSIGN 0xB7 /* 3420 REJECT,3480 OK */
#define CONTROL_ACCESS 0xE3 /* Set high speed */
#define DIAG_MODE_SET 0x0B /* 3420 NOP, 3480 REJECT */
#define LOAD_DISPLAY 0x9F /* 3420 REJECT,3480 OK */
#define LOCATE 0x4F /* 3420 REJ, 3480 NOP */
#define LOOP_WRITE_TO_READ 0x8B /* 3480 REJECT */
#define MODE_SET_DB 0xDB /* 3420 REJECT,3480 OK */
#define MODE_SET_C3 0xC3 /* for 3420 */
#define MODE_SET_CB 0xCB /* for 3420 */
#define MODE_SET_D3 0xD3 /* for 3420 */
#define READ_BACKWARD 0x0C /* */
#define READ_BLOCK_ID 0x22 /* 3420 REJECT,3480 OK */
#define READ_BUFFER 0x12 /* 3420 REJECT,3480 OK */
#define READ_BUFF_LOG 0x24 /* 3420 REJECT,3480 OK */
#define RELEASE 0xD4 /* 3420 NOP, 3480 REJECT */
#define REQ_TRK_IN_ERROR 0x1B /* 3420 NOP, 3480 REJECT */
#define RESERVE 0xF4 /* 3420 NOP, 3480 REJECT */
#define SENSE_GROUP_ID 0x34 /* 3420 REJECT,3480 OK */
#define SENSE_ID 0xE4 /* 3420 REJECT,3480 OK */
#define READ_DEV_CHAR 0x64 /* Read device characteristics */
#define SET_DIAGNOSE 0x4B /* 3420 NOP, 3480 REJECT */
#define SET_GROUP_ID 0xAF /* 3420 REJECT,3480 OK */
#define SET_TAPE_WRITE_IMMED 0xC3 /* for 3480 */
#define SUSPEND 0x5B /* 3420 REJ, 3480 NOP */
#define SYNC 0x43 /* Synchronize (flush buffer) */
#define UNASSIGN 0xC7 /* 3420 REJECT,3480 OK */
#define PERF_SUBSYS_FUNC 0x77 /* 3490 CMD */
#define READ_CONFIG_DATA 0xFA /* 3490 CMD */
#define READ_MESSAGE_ID 0x4E /* 3490 CMD */
#define READ_SUBSYS_DATA 0x3E /* 3490 CMD */
#define SET_INTERFACE_ID 0x73 /* 3490 CMD */

/* Sense byte flags: command/status conditions */
#define SENSE_COMMAND_REJECT 0x80
#define SENSE_INTERVENTION_REQUIRED 0x40
#define SENSE_BUS_OUT_CHECK 0x20
#define SENSE_EQUIPMENT_CHECK 0x10
#define SENSE_DATA_CHECK 0x08
#define SENSE_OVERRUN 0x04
#define SENSE_DEFERRED_UNIT_CHECK 0x02
#define SENSE_ASSIGNED_ELSEWHERE 0x01

/* Sense byte flags: positioning/medium conditions */
#define SENSE_LOCATE_FAILURE 0x80
#define SENSE_DRIVE_ONLINE 0x40
#define SENSE_RESERVED 0x20
#define SENSE_RECORD_SEQUENCE_ERR 0x10
#define SENSE_BEGINNING_OF_TAPE 0x08
#define SENSE_WRITE_MODE 0x04
#define SENSE_WRITE_PROTECT 0x02
#define SENSE_NOT_CAPABLE 0x01

/* Sense byte flags: channel adapter / control unit conditions */
#define SENSE_CHANNEL_ADAPTER_CODE 0xE0
#define SENSE_CHANNEL_ADAPTER_LOC 0x10
#define SENSE_REPORTING_CU 0x08
#define SENSE_AUTOMATIC_LOADER 0x04
#define SENSE_TAPE_SYNC_MODE 0x02
#define SENSE_TAPE_POSITIONING 0x01

/* discipline functions */
struct tape_request *tape_std_read_block(struct tape_device *, size_t);
void tape_std_read_backward(struct tape_device *device,
			    struct tape_request *request);
struct tape_request *tape_std_write_block(struct tape_device *, size_t);
void tape_std_check_locate(struct tape_device *, struct tape_request *);

/* Some non-mtop commands. */
int tape_std_assign(struct tape_device *);
int tape_std_unassign(struct tape_device *);
int tape_std_read_block_id(struct tape_device *device, __u64 *id);
int tape_std_display(struct tape_device *, struct display_struct *disp);
int tape_std_terminate_write(struct tape_device *);

/* Standard magnetic tape commands. */
int tape_std_mtbsf(struct tape_device *, int);
int tape_std_mtbsfm(struct tape_device *, int);
int tape_std_mtbsr(struct tape_device *, int);
int tape_std_mtcompression(struct tape_device *, int);
int tape_std_mteom(struct tape_device *, int);
int tape_std_mterase(struct tape_device *, int);
int tape_std_mtfsf(struct tape_device *, int);
int tape_std_mtfsfm(struct tape_device *, int);
int tape_std_mtfsr(struct tape_device *, int);
int tape_std_mtload(struct tape_device *, int);
int tape_std_mtnop(struct tape_device *, int);
int tape_std_mtoffl(struct tape_device *, int);
int tape_std_mtreset(struct tape_device *, int);
int tape_std_mtreten(struct tape_device *, int);
int tape_std_mtrew(struct tape_device *, int);
int tape_std_mtsetblk(struct tape_device *, int);
int tape_std_mtunload(struct tape_device *, int);
int tape_std_mtweof(struct tape_device *, int);

/* Event handlers */
void tape_std_default_handler(struct tape_device *);
void tape_std_unexpect_uchk_handler(struct tape_device *);
void tape_std_irq(struct tape_device *);
void tape_std_process_eov(struct tape_device *);

/* the error recovery stuff: */
void tape_std_error_recovery(struct tape_device *);
void tape_std_error_recovery_has_failed(struct tape_device *,int error_id);
void tape_std_error_recovery_succeded(struct tape_device *);
void tape_std_error_recovery_do_retry(struct tape_device *);
void tape_std_error_recovery_read_opposite(struct tape_device *);
void tape_std_error_recovery_HWBUG(struct tape_device *, int condno);

/* S390 tape types */
enum s390_tape_type {
	tape_3480,
	tape_3490,
	tape_3590,
	tape_3592,
};

#endif /* _TAPE_STD_H */

1918
drivers/s390/char/tty3270.c Normal file

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,14 @@
/*
 * Copyright IBM Corp. 2007
 *
 */
#ifndef __DRIVERS_S390_CHAR_TTY3270_H
#define __DRIVERS_S390_CHAR_TTY3270_H

#include <linux/tty.h>
#include <linux/tty_driver.h>

/* tty driver instance registered by the 3270 tty code. */
extern struct tty_driver *tty3270_driver;

#endif /* __DRIVERS_S390_CHAR_TTY3270_H */

213
drivers/s390/char/vmcp.c Normal file
View file

@ -0,0 +1,213 @@
/*
* Copyright IBM Corp. 2004, 2010
* Interface implementation for communication with the z/VM control program
*
* Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
*
* z/VMs CP offers the possibility to issue commands via the diagnose code 8
* this driver implements a character device that issues these commands and
* returns the answer of CP.
*
* The idea of this driver is based on cpint from Neale Ferguson and #CP in CMS
*/
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <asm/compat.h>
#include <asm/cpcmd.h>
#include <asm/debug.h>
#include <asm/uaccess.h>
#include "vmcp.h"
/* debug feature area recording all CP commands issued via /dev/vmcp */
static debug_info_t *vmcp_debug;

/*
 * Open a vmcp session. Issuing CP commands is privileged, so
 * CAP_SYS_ADMIN is required. The response buffer itself is allocated
 * lazily on the first write.
 */
static int vmcp_open(struct inode *inode, struct file *file)
{
	struct vmcp_session *sess;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	sess = kmalloc(sizeof(*sess), GFP_KERNEL);
	if (!sess)
		return -ENOMEM;

	sess->bufsize = PAGE_SIZE;
	sess->response = NULL;
	sess->resp_size = 0;
	mutex_init(&sess->mutex);
	file->private_data = sess;
	return nonseekable_open(inode, file);
}
/* Tear down a session: release the response pages and the session itself. */
static int vmcp_release(struct inode *inode, struct file *file)
{
	struct vmcp_session *sess = file->private_data;

	file->private_data = NULL;
	/* free_pages() tolerates a NULL (0) address */
	free_pages((unsigned long) sess->response,
		   get_order(sess->bufsize));
	kfree(sess);
	return 0;
}
/*
 * Return (a chunk of) the CP response stored in the session buffer.
 * *ppos-based, so userspace may read the answer in several pieces.
 * Returns 0 (EOF) if no command has been issued yet.
 */
static ssize_t
vmcp_read(struct file *file, char __user *buff, size_t count, loff_t *ppos)
{
	ssize_t ret;
	size_t size;
	struct vmcp_session *session;

	session = file->private_data;
	if (mutex_lock_interruptible(&session->mutex))
		return -ERESTARTSYS;
	if (!session->response) {
		mutex_unlock(&session->mutex);
		return 0;
	}
	/*
	 * NOTE(review): resp_size can apparently exceed bufsize (CP
	 * reporting a larger answer than the buffer held) - clip to the
	 * buffer so we never read past it. Confirm against cpcmd().
	 */
	size = min_t(size_t, session->resp_size, session->bufsize);
	ret = simple_read_from_buffer(buff, count, ppos,
					session->response, size);
	mutex_unlock(&session->mutex);
	return ret;
}
/*
 * Issue a CP command. The command text is taken from the write buffer
 * (at most 240 characters); the answer is stored in the session response
 * buffer for subsequent read() calls.
 */
static ssize_t
vmcp_write(struct file *file, const char __user *buff, size_t count,
	   loff_t *ppos)
{
	char *cmd;
	struct vmcp_session *session;

	/* CP commands are limited to 240 characters here */
	if (count > 240)
		return -EINVAL;
	cmd = kmalloc(count + 1, GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;
	if (copy_from_user(cmd, buff, count)) {
		kfree(cmd);
		return -EFAULT;
	}
	cmd[count] = '\0';
	session = file->private_data;
	if (mutex_lock_interruptible(&session->mutex)) {
		kfree(cmd);
		return -ERESTARTSYS;
	}
	/*
	 * Lazily allocate the response buffer; GFP_DMA because the
	 * diagnose 8 interface wants the pages in real storage
	 * (see the file header comment).
	 */
	if (!session->response)
		session->response = (char *)__get_free_pages(GFP_KERNEL
						| __GFP_REPEAT | GFP_DMA,
						get_order(session->bufsize));
	if (!session->response) {
		mutex_unlock(&session->mutex);
		kfree(cmd);
		return -ENOMEM;
	}
	debug_text_event(vmcp_debug, 1, cmd);
	session->resp_size = cpcmd(cmd, session->response, session->bufsize,
				   &session->resp_code);
	mutex_unlock(&session->mutex);
	kfree(cmd);
	*ppos = 0;		/* reset the file pointer after a command */
	return count;
}
/*
 * These ioctls are available, as the semantics of the diagnose 8 call
 * does not fit very well into a Linux call. Diagnose X'08' is described in
 * CP Programming Services SC24-6084-00
 *
 * VMCP_GETCODE: gives the CP return code back to user space
 * VMCP_SETBUF: sets the response buffer for the next write call. diagnose 8
 * expects adjacent pages in real storage and to make matters worse, we
 * dont know the size of the response. Therefore we default to PAGESIZE and
 * let userspace to change the response size, if userspace expects a bigger
 * response
 */
static long vmcp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct vmcp_session *session;
	int __user *argp;
	int newsize;
	int temp;

	session = file->private_data;
	if (is_compat_task())
		argp = compat_ptr(arg);
	else
		argp = (int __user *)arg;
	if (mutex_lock_interruptible(&session->mutex))
		return -ERESTARTSYS;
	switch (cmd) {
	case VMCP_GETCODE:
		temp = session->resp_code;
		mutex_unlock(&session->mutex);
		return put_user(temp, argp);
	case VMCP_SETBUF:
		free_pages((unsigned long)session->response,
			   get_order(session->bufsize));
		session->response = NULL;
		/*
		 * Fetch the requested size into a local first so that a
		 * faulting get_user() cannot leave session->bufsize in an
		 * unpredictable state, and so the fault is reported as
		 * -EFAULT instead of being turned into -EINVAL by the
		 * sanity check below.
		 */
		temp = get_user(newsize, argp);
		if (temp == 0) {
			session->bufsize = newsize;
			/* reject zero/negative/huge sizes (order > 8) */
			if (get_order(session->bufsize) > 8) {
				session->bufsize = PAGE_SIZE;
				temp = -EINVAL;
			}
		} else {
			/* copy-in faulted: fall back to the sane default */
			session->bufsize = PAGE_SIZE;
		}
		mutex_unlock(&session->mutex);
		return temp;
	case VMCP_GETSIZE:
		temp = session->resp_size;
		mutex_unlock(&session->mutex);
		return put_user(temp, argp);
	default:
		mutex_unlock(&session->mutex);
		return -ENOIOCTLCMD;
	}
}
/* file operations for /dev/vmcp; the same ioctl handler serves compat */
static const struct file_operations vmcp_fops = {
	.owner		= THIS_MODULE,
	.open		= vmcp_open,
	.release	= vmcp_release,
	.read		= vmcp_read,
	.write		= vmcp_write,
	.unlocked_ioctl	= vmcp_ioctl,
	.compat_ioctl	= vmcp_ioctl,
	.llseek		= no_llseek,
};
/* misc device node /dev/vmcp with a dynamically assigned minor */
static struct miscdevice vmcp_dev = {
	.name	= "vmcp",
	.minor	= MISC_DYNAMIC_MINOR,
	.fops	= &vmcp_fops,
};
/*
 * Boot-time initialization: register the debug feature area and the
 * /dev/vmcp misc device. Does nothing (successfully) when not running
 * as a z/VM guest, since diagnose 8 only exists under z/VM.
 */
static int __init vmcp_init(void)
{
	int ret;

	if (!MACHINE_IS_VM)
		return 0;	/* not under z/VM: nothing to register */

	vmcp_debug = debug_register("vmcp", 1, 1, 240);
	if (!vmcp_debug)
		return -ENOMEM;
	ret = debug_register_view(vmcp_debug, &debug_hex_ascii_view);
	if (ret) {
		debug_unregister(vmcp_debug);
		return ret;
	}
	ret = misc_register(&vmcp_dev);
	if (ret)
		debug_unregister(vmcp_debug);
	return ret;
}
device_initcall(vmcp_init);

30
drivers/s390/char/vmcp.h Normal file
View file

@ -0,0 +1,30 @@
/*
* Copyright IBM Corp. 2004, 2005
* Interface implementation for communication with the z/VM control program
* Version 1.0
* Author(s): Christian Borntraeger <cborntra@de.ibm.com>
*
*
 * z/VM's CP offers the possibility to issue commands via diagnose code 8.
 * This driver implements a character device that issues these commands and
 * returns the answer of CP.
*
* The idea of this driver is based on cpint from Neale Ferguson
*/
#include <linux/ioctl.h>
#include <linux/mutex.h>
#define VMCP_GETCODE _IOR(0x10, 1, int)
#define VMCP_SETBUF _IOW(0x10, 2, int)
#define VMCP_GETSIZE _IOR(0x10, 3, int)
/* Per-open-file state of /dev/vmcp. */
struct vmcp_session {
	unsigned int bufsize;	/* size of the response buffer */
	char *response;		/* CP response; pages allocated on write */
	int resp_size;		/* length of the last CP response */
	int resp_code;		/* CP return code of the last command */
	/* Protects all fields.  A mutex (not a spinlock) because
	 * copy_from/to_user is used, which might sleep. */
	struct mutex mutex;
};

View file

@ -0,0 +1,907 @@
/*
* character device driver for reading z/VM system service records
*
*
* Copyright IBM Corp. 2004, 2009
* character device driver for reading z/VM system service records,
* Version 1.0
* Author(s): Xenia Tkatschow <xenia@us.ibm.com>
* Stefan Weinhuber <wein@de.ibm.com>
*
*/
#define KMSG_COMPONENT "vmlogrdr"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <asm/uaccess.h>
#include <asm/cpcmd.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <net/iucv/iucv.h>
#include <linux/kmod.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/string.h>
MODULE_AUTHOR
("(C) 2004 IBM Corporation by Xenia Tkatschow (xenia@us.ibm.com)\n"
" Stefan Weinhuber (wein@de.ibm.com)");
MODULE_DESCRIPTION ("Character device driver for reading z/VM "
"system service records.");
MODULE_LICENSE("GPL");
/*
* The size of the buffer for iucv data transfer is one page,
* but in addition to the data we read from iucv we also
* place an integer and some characters into that buffer,
 * so the maximum size for record data is a little less than
* one page.
*/
#define NET_BUFFER_SIZE (PAGE_SIZE - sizeof(int) - sizeof(FENCE))
/*
* The elements that are concurrently accessed by bottom halves are
* connection_established, iucv_path_severed, local_interrupt_buffer
* and receive_ready. The first three can be protected by
* priv_lock. receive_ready is atomic, so it can be incremented and
* decremented without holding a lock.
* The variable dev_in_use needs to be protected by the lock, since
* it's a flag used by open to make sure that the device is opened only
* by one user at the same time.
*/
/* Per-minor state for one z/VM system service reader. */
struct vmlogrdr_priv_t {
	char system_service[8];		/* IUCV target, e.g. "*LOGREC " */
	char internal_name[8];		/* device name shown in sysfs */
	char recording_name[8];		/* name used in CP RECORDING cmds */
	struct iucv_path *path;		/* IUCV path; NULL when severed */
	int connection_established;	/* protected by priv_lock */
	int iucv_path_severed;		/* protected by priv_lock */
	struct iucv_message local_interrupt_buffer; /* protected by priv_lock */
	atomic_t receive_ready;		/* # of pending IUCV messages */
	int minor_num;
	char * buffer;			/* one page, holds a record chunk */
	char * current_position;	/* read cursor within buffer */
	int remaining;			/* bytes left to hand to userspace */
	ulong residual_length;		/* record bytes still held by IUCV */
	int buffer_free;		/* 1: buffer may be refilled */
	int dev_in_use; /* 1: already opened, 0: not opened*/
	spinlock_t priv_lock;
	struct device *device;
	struct device *class_device;
	int autorecording;		/* start/stop recording in open/close */
	int autopurge;			/* purge queued records as well */
};
/*
* File operation structure for vmlogrdr devices
*/
static int vmlogrdr_open(struct inode *, struct file *);
static int vmlogrdr_release(struct inode *, struct file *);
static ssize_t vmlogrdr_read (struct file *filp, char __user *data,
size_t count, loff_t * ppos);
static const struct file_operations vmlogrdr_fops = {
.owner = THIS_MODULE,
.open = vmlogrdr_open,
.release = vmlogrdr_release,
.read = vmlogrdr_read,
.llseek = no_llseek,
};
static void vmlogrdr_iucv_path_complete(struct iucv_path *, u8 ipuser[16]);
static void vmlogrdr_iucv_path_severed(struct iucv_path *, u8 ipuser[16]);
static void vmlogrdr_iucv_message_pending(struct iucv_path *,
struct iucv_message *);
static struct iucv_handler vmlogrdr_iucv_handler = {
.path_complete = vmlogrdr_iucv_path_complete,
.path_severed = vmlogrdr_iucv_path_severed,
.message_pending = vmlogrdr_iucv_message_pending,
};
static DECLARE_WAIT_QUEUE_HEAD(conn_wait_queue);
static DECLARE_WAIT_QUEUE_HEAD(read_wait_queue);
/*
* pointer to system service private structure
* minor number 0 --> logrec
* minor number 1 --> account
* minor number 2 --> symptom
*/
/*
 * Static table of the supported z/VM system services, indexed by minor
 * number (0: logrec, 1: account, 2: symptom).  All connections start
 * with a free buffer and auto-recording/auto-purge enabled.
 */
static struct vmlogrdr_priv_t sys_ser[] = {
	{ .system_service = "*LOGREC ",
	  .internal_name  = "logrec",
	  .recording_name = "EREP",
	  .minor_num      = 0,
	  .buffer_free    = 1,
	  .priv_lock	  = __SPIN_LOCK_UNLOCKED(sys_ser[0].priv_lock),
	  .autorecording  = 1,
	  .autopurge      = 1,
	},
	{ .system_service = "*ACCOUNT",
	  .internal_name  = "account",
	  .recording_name = "ACCOUNT",
	  .minor_num      = 1,
	  .buffer_free    = 1,
	  .priv_lock	  = __SPIN_LOCK_UNLOCKED(sys_ser[1].priv_lock),
	  .autorecording  = 1,
	  .autopurge      = 1,
	},
	{ .system_service = "*SYMPTOM",
	  .internal_name  = "symptom",
	  .recording_name = "SYMPTOM",
	  .minor_num      = 2,
	  .buffer_free    = 1,
	  .priv_lock	  = __SPIN_LOCK_UNLOCKED(sys_ser[2].priv_lock),
	  .autorecording  = 1,
	  .autopurge      = 1,
	}
};

/* Number of supported services == number of minors. */
#define MAXMINOR ARRAY_SIZE(sys_ser)

/* End-of-record marker appended behind every complete record. */
static char FENCE[] = "EOR";
/* Statics are zero-initialized; no explicit = 0 / = NULL needed. */
static int vmlogrdr_major;
static struct cdev *vmlogrdr_cdev;
static int recording_class_AB;
/*
 * IUCV bottom half: the connection to the system service is up.
 * Record that fact under priv_lock and wake any opener blocked in
 * vmlogrdr_open().
 */
static void vmlogrdr_iucv_path_complete(struct iucv_path *path, u8 ipuser[16])
{
	struct vmlogrdr_priv_t *priv = path->private;

	spin_lock(&priv->priv_lock);
	priv->connection_established = 1;
	spin_unlock(&priv->priv_lock);
	wake_up(&conn_wait_queue);
}
/*
 * IUCV bottom half: the peer severed the connection.  Release the path,
 * update the connection state under priv_lock and wake both the opener
 * and any sleeping reader so they can observe the failure.
 */
static void vmlogrdr_iucv_path_severed(struct iucv_path *path, u8 ipuser[16])
{
	struct vmlogrdr_priv_t *priv = path->private;
	u8 reason = (u8) ipuser[8];

	pr_err("vmlogrdr: connection severed with reason %i\n", reason);

	iucv_path_sever(path, NULL);
	kfree(path);
	priv->path = NULL;

	spin_lock(&priv->priv_lock);
	priv->connection_established = 0;
	priv->iucv_path_severed = 1;
	spin_unlock(&priv->priv_lock);

	wake_up(&conn_wait_queue);
	/* just in case we're sleeping waiting for a record */
	wake_up_interruptible(&read_wait_queue);
}
/*
 * IUCV bottom half: a new record is available.  Stash the message
 * descriptor, bump receive_ready and wake a sleeping reader; the actual
 * data transfer happens later in vmlogrdr_receive_data().
 */
static void vmlogrdr_iucv_message_pending(struct iucv_path *path,
					  struct iucv_message *msg)
{
	struct vmlogrdr_priv_t * logptr = path->private;
	/*
	 * This function is the bottom half so it should be quick.
	 * Copy the external interrupt data into our local eib and increment
	 * the usage count
	 */
	spin_lock(&logptr->priv_lock);
	memcpy(&logptr->local_interrupt_buffer, msg, sizeof(*msg));
	atomic_inc(&logptr->receive_ready);
	spin_unlock(&logptr->priv_lock);
	wake_up_interruptible(&read_wait_queue);
}
/*
 * Ask CP whether this guest may use the RECORDING command with privilege
 * class A or B.  Returns 1 if so (the QID option must then be used),
 * 0 otherwise.
 */
static int vmlogrdr_get_recording_class_AB(void)
{
	static const char cp_command[] = "QUERY COMMAND RECORDING ";
	char cp_response[80];
	char *equals;
	int len, i;

	cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
	len = strnlen(cp_response, sizeof(cp_response));
	/* The interesting part of the answer follows the '=' sign. */
	equals = strnchr(cp_response, len, '=');
	if (!equals)
		return 0;
	equals++;
	if (strncmp("ANY", equals, 3) == 0)
		return 1;
	if (strncmp("NONE", equals, 4) == 0)
		return 0;
	/*
	 * Otherwise a comma separated list of classes follows; privilege
	 * class A or B anywhere in it is what we are looking for.
	 */
	for (i = equals - cp_response; i < len; i++)
		if (cp_response[i] == 'A' || cp_response[i] == 'B')
			return 1;
	return 0;
}
/*
 * Turn CP recording for this service on (action == 1) or off
 * (action == 0), optionally purging queued records.  Returns 0 on
 * success, -EIO if CP did not answer with 'Command complete'.
 */
static int vmlogrdr_recording(struct vmlogrdr_priv_t * logptr,
			      int action, int purge)
{

	char cp_command[80];
	char cp_response[160];
	char *onoff, *qid_string;
	int rc;

	onoff = ((action == 1) ? "ON" : "OFF");
	qid_string = ((recording_class_AB == 1) ? " QID * " : "");

	/*
	 * The recording commands needs to be called with option QID
	 * for guests that have privilege classes A or B.
	 * Purging has to be done as separate step, because recording
	 * can't be switched on as long as records are on the queue.
	 * Doing both at the same time doesn't work.
	 */
	if (purge && (action == 1)) {
		memset(cp_command, 0x00, sizeof(cp_command));
		memset(cp_response, 0x00, sizeof(cp_response));
		snprintf(cp_command, sizeof(cp_command),
			 "RECORDING %s PURGE %s",
			 logptr->recording_name,
			 qid_string);
		cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
	}

	memset(cp_command, 0x00, sizeof(cp_command));
	memset(cp_response, 0x00, sizeof(cp_response));
	snprintf(cp_command, sizeof(cp_command), "RECORDING %s %s %s",
		logptr->recording_name,
		onoff,
		qid_string);
	cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
	/* The recording command will usually answer with 'Command complete'
	 * on success, but when the specific service was never connected
	 * before then there might be an additional informational message
	 * 'HCPCRC8072I Recording entry not found' before the
	 * 'Command complete'. So I use strstr rather than the strncmp.
	 */
	if (strstr(cp_response,"Command complete"))
		rc = 0;
	else
		rc = -EIO;
	/*
	 * If we turn recording off, we have to purge any remaining records
	 * afterwards, as a large number of queued records may impact z/VM
	 * performance.
	 */
	if (purge && (action == 0)) {
		memset(cp_command, 0x00, sizeof(cp_command));
		memset(cp_response, 0x00, sizeof(cp_response));
		snprintf(cp_command, sizeof(cp_command),
			 "RECORDING %s PURGE %s",
			 logptr->recording_name,
			 qid_string);
		cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
	}

	return rc;
}
/*
 * open(): enforce single-opener and blocking mode, optionally start CP
 * recording, then connect to the system service via IUCV and wait for
 * the connection to be confirmed or severed.  Returns 0 on success,
 * -ENODEV/-EOPNOTSUPP/-EBUSY for usage errors, -EIO when the IUCV
 * connection cannot be established.
 */
static int vmlogrdr_open (struct inode *inode, struct file *filp)
{
	int dev_num = 0;
	struct vmlogrdr_priv_t * logptr = NULL;
	int connect_rc = 0;
	int ret;

	dev_num = iminor(inode);
	if (dev_num >= MAXMINOR)
		return -ENODEV;
	logptr = &sys_ser[dev_num];

	/*
	 * only allow for blocking reads to be open
	 */
	if (filp->f_flags & O_NONBLOCK)
		return -EOPNOTSUPP;

	/* Be sure this device hasn't already been opened */
	spin_lock_bh(&logptr->priv_lock);
	if (logptr->dev_in_use)	{
		spin_unlock_bh(&logptr->priv_lock);
		return -EBUSY;
	}
	logptr->dev_in_use = 1;
	logptr->connection_established = 0;
	logptr->iucv_path_severed = 0;
	atomic_set(&logptr->receive_ready, 0);
	logptr->buffer_free = 1;
	spin_unlock_bh(&logptr->priv_lock);

	/* set the file options */
	filp->private_data = logptr;

	/* start recording for this service*/
	if (logptr->autorecording) {
		ret = vmlogrdr_recording(logptr,1,logptr->autopurge);
		if (ret)
			pr_warning("vmlogrdr: failed to start "
				   "recording automatically\n");
	}

	/* create connection to the system service */
	logptr->path = iucv_path_alloc(10, 0, GFP_KERNEL);
	if (!logptr->path)
		goto out_dev;
	connect_rc = iucv_path_connect(logptr->path, &vmlogrdr_iucv_handler,
				       logptr->system_service, NULL, NULL,
				       logptr);
	if (connect_rc) {
		pr_err("vmlogrdr: iucv connection to %s "
		       "failed with rc %i \n",
		       logptr->system_service, connect_rc);
		goto out_path;
	}

	/* We've issued the connect and now we must wait for a
	 * ConnectionComplete or ConnectionSevered Interrupt
	 * before we can continue to process.
	 */
	wait_event(conn_wait_queue, (logptr->connection_established)
		   || (logptr->iucv_path_severed));
	if (logptr->iucv_path_severed)
		goto out_record;
	nonseekable_open(inode, filp);
	return 0;

out_record:
	/* undo the auto-start of recording from above */
	if (logptr->autorecording)
		vmlogrdr_recording(logptr,0,logptr->autopurge);
out_path:
	kfree(logptr->path);	/* kfree(NULL) is ok. */
	logptr->path = NULL;
out_dev:
	logptr->dev_in_use = 0;
	return -EIO;
}
/*
 * release(): sever the IUCV connection, stop recording again if open()
 * started it, and mark the device as free for the next opener.
 */
static int vmlogrdr_release (struct inode *inode, struct file *filp)
{
	struct vmlogrdr_priv_t *priv = filp->private_data;
	int rc;

	/* tear down the IUCV path established in open() */
	iucv_path_sever(priv->path, NULL);
	kfree(priv->path);
	priv->path = NULL;

	if (priv->autorecording) {
		rc = vmlogrdr_recording(priv, 0, priv->autopurge);
		if (rc)
			pr_warning("vmlogrdr: failed to stop "
				   "recording automatically\n");
	}

	priv->dev_in_use = 0;
	return 0;
}
/*
 * Pull the next chunk of record data from IUCV into priv->buffer.
 * A record may be larger than the buffer; residual_length then tracks
 * how much of it IUCV still holds for a later call.  The first chunk of
 * each record is prefixed with an int holding total record length plus
 * sizeof(FENCE), and the final chunk is terminated with the FENCE
 * marker.  Returns 0 when the buffer was (re)filled, nonzero otherwise.
 */
static int vmlogrdr_receive_data(struct vmlogrdr_priv_t *priv)
{
	int rc, *temp;
	/* we need to keep track of two data sizes here:
	 * The number of bytes we need to receive from iucv and
	 * the total number of bytes we actually write into the buffer.
	 */
	int user_data_count, iucv_data_count;
	char * buffer;

	if (atomic_read(&priv->receive_ready)) {
		spin_lock_bh(&priv->priv_lock);
		if (priv->residual_length){
			/* receive second half of a record */
			iucv_data_count = priv->residual_length;
			user_data_count = 0;
			buffer = priv->buffer;
		} else {
			/* receive a new record:
			 * We need to return the total length of the record
                         * + size of FENCE in the first 4 bytes of the buffer.
		         */
			iucv_data_count = priv->local_interrupt_buffer.length;
			user_data_count = sizeof(int);
			temp = (int*)priv->buffer;
			*temp= iucv_data_count + sizeof(FENCE);
			buffer = priv->buffer + sizeof(int);
		}
		/*
		 * If the record is bigger than our buffer, we receive only
		 * a part of it. We can get the rest later.
		 */
		if (iucv_data_count > NET_BUFFER_SIZE)
			iucv_data_count = NET_BUFFER_SIZE;
		rc = iucv_message_receive(priv->path,
					  &priv->local_interrupt_buffer,
					  0, buffer, iucv_data_count,
					  &priv->residual_length);
		spin_unlock_bh(&priv->priv_lock);
		/* An rc of 5 indicates that the record was bigger than
		 * the buffer, which is OK for us. A 9 indicates that the
		 * record was purged before we could receive it.
		 */
		if (rc == 5)
			rc = 0;
		if (rc == 9)
			atomic_set(&priv->receive_ready, 0);
	} else {
		rc = 1;
	}
	if (!rc) {
		priv->buffer_free = 0;
		user_data_count += iucv_data_count;
		priv->current_position = priv->buffer;
		if (priv->residual_length == 0){
			/* the whole record has been captured,
			 * now add the fence */
			atomic_dec(&priv->receive_ready);
			buffer = priv->buffer + user_data_count;
			memcpy(buffer, FENCE, sizeof(FENCE));
			user_data_count += sizeof(FENCE);
		}
		priv->remaining = user_data_count;
	}

	return rc;
}
/*
 * read(): hand out the buffered record chunk, refilling the buffer from
 * IUCV first if it is empty (sleeping interruptibly until a message is
 * pending).  Reads never cross a record boundary; the buffer is marked
 * free again once it has been fully consumed.
 */
static ssize_t vmlogrdr_read(struct file *filp, char __user *data,
			     size_t count, loff_t * ppos)
{
	int rc;
	struct vmlogrdr_priv_t * priv = filp->private_data;

	while (priv->buffer_free) {
		rc = vmlogrdr_receive_data(priv);
		if (rc) {
			rc = wait_event_interruptible(read_wait_queue,
					atomic_read(&priv->receive_ready));
			if (rc)
				return rc;
		}
	}
	/* copy only up to end of record */
	if (count > priv->remaining)
		count = priv->remaining;

	if (copy_to_user(data, priv->current_position, count))
		return -EFAULT;

	*ppos += count;
	priv->current_position += count;
	priv->remaining -= count;

	/* if all data has been transferred, set buffer free */
	if (priv->remaining == 0)
		priv->buffer_free = 1;

	return count;
}
/*
 * sysfs store for "autopurge": accepts '0' or '1' as first character,
 * anything else is rejected with -EINVAL.
 */
static ssize_t vmlogrdr_autopurge_store(struct device * dev,
					struct device_attribute *attr,
					const char * buf, size_t count)
{
	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);

	if (buf[0] == '0')
		priv->autopurge = 0;
	else if (buf[0] == '1')
		priv->autopurge = 1;
	else
		return -EINVAL;
	return count;
}
/* sysfs show for "autopurge": renders the flag as "0\n" or "1\n". */
static ssize_t vmlogrdr_autopurge_show(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);

	return sprintf(buf, "%u\n", priv->autopurge);
}
static DEVICE_ATTR(autopurge, 0644, vmlogrdr_autopurge_show,
vmlogrdr_autopurge_store);
/*
 * sysfs store for "purge": writing '1' issues a CP RECORDING ... PURGE
 * command for this service; any other input is rejected.  The CP
 * response is ignored.
 */
static ssize_t vmlogrdr_purge_store(struct device * dev,
				    struct device_attribute *attr,
				    const char * buf, size_t count)
{

	char cp_command[80];
	char cp_response[80];
	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);

	if (buf[0] != '1')
		return -EINVAL;

	memset(cp_command, 0x00, sizeof(cp_command));
	memset(cp_response, 0x00, sizeof(cp_response));

	/*
	 * The recording command needs to be called with option QID
	 * for guests that have privilege classes A or B.
	 * Other guests will not recognize the command and we have to
	 * issue the same command without the QID parameter.
	 */
	if (recording_class_AB)
		snprintf(cp_command, sizeof(cp_command),
			 "RECORDING %s PURGE QID * ",
			 priv->recording_name);
	else
		snprintf(cp_command, sizeof(cp_command),
			 "RECORDING %s PURGE ",
			 priv->recording_name);

	cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);

	return count;
}
static DEVICE_ATTR(purge, 0200, NULL, vmlogrdr_purge_store);
/*
 * sysfs store for "autorecording": accepts '0' or '1' as first
 * character, anything else is rejected with -EINVAL.
 */
static ssize_t vmlogrdr_autorecording_store(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf, size_t count)
{
	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);

	if (buf[0] == '0')
		priv->autorecording = 0;
	else if (buf[0] == '1')
		priv->autorecording = 1;
	else
		return -EINVAL;
	return count;
}
/* sysfs show for "autorecording": renders the flag as "0\n" or "1\n". */
static ssize_t vmlogrdr_autorecording_show(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);

	return sprintf(buf, "%u\n", priv->autorecording);
}
static DEVICE_ATTR(autorecording, 0644, vmlogrdr_autorecording_show,
vmlogrdr_autorecording_store);
/*
 * sysfs store for "recording": '1' switches CP recording on, '0'
 * switches it off (no purge either way).  Propagates the error from
 * vmlogrdr_recording(), otherwise reports the full count as consumed.
 */
static ssize_t vmlogrdr_recording_store(struct device * dev,
					struct device_attribute *attr,
					const char * buf, size_t count)
{
	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
	ssize_t rc;

	if (buf[0] == '0')
		rc = vmlogrdr_recording(priv, 0, 0);
	else if (buf[0] == '1')
		rc = vmlogrdr_recording(priv, 1, 0);
	else
		rc = -EINVAL;

	return rc ? rc : count;
}
static DEVICE_ATTR(recording, 0200, NULL, vmlogrdr_recording_store);
/*
 * Driver-level sysfs attribute: dump CP's "QUERY RECORDING" answer
 * straight into the sysfs buffer (one page).
 */
static ssize_t vmlogrdr_recording_status_show(struct device_driver *driver,
					      char *buf)
{
	static const char cp_command[] = "QUERY RECORDING ";

	cpcmd(cp_command, buf, 4096, NULL);
	return strlen(buf);
}
static DRIVER_ATTR(recording_status, 0444, vmlogrdr_recording_status_show,
NULL);
/* Attributes attached to the vmlogrdr driver object itself. */
static struct attribute *vmlogrdr_drv_attrs[] = {
	&driver_attr_recording_status.attr,
	NULL,
};
static struct attribute_group vmlogrdr_drv_attr_group = {
	.attrs = vmlogrdr_drv_attrs,
};
static const struct attribute_group *vmlogrdr_drv_attr_groups[] = {
	&vmlogrdr_drv_attr_group,
	NULL,
};

/* Attributes attached to each individual vmlogrdr device. */
static struct attribute *vmlogrdr_attrs[] = {
	&dev_attr_autopurge.attr,
	&dev_attr_purge.attr,
	&dev_attr_autorecording.attr,
	&dev_attr_recording.attr,
	NULL,
};
static struct attribute_group vmlogrdr_attr_group = {
	.attrs = vmlogrdr_attrs,
};
static const struct attribute_group *vmlogrdr_attr_groups[] = {
	&vmlogrdr_attr_group,
	NULL,
};
/*
 * PM prepare callback: refuse to suspend while the device is open,
 * since an active IUCV connection would not survive the suspend.
 */
static int vmlogrdr_pm_prepare(struct device *dev)
{
	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
	int rc = 0;

	if (priv) {
		spin_lock_bh(&priv->priv_lock);
		if (priv->dev_in_use)
			rc = -EBUSY;
		spin_unlock_bh(&priv->priv_lock);
	}
	if (rc)
		pr_err("vmlogrdr: device %s is busy. Refuse to suspend.\n",
		       dev_name(dev));
	return rc;
}
/* Power management ops: only suspend-refusal via prepare is needed. */
static const struct dev_pm_ops vmlogrdr_pm_ops = {
	.prepare = vmlogrdr_pm_prepare,
};

static struct class *vmlogrdr_class;
/* Driver object on the iucv bus; carries the recording_status attr. */
static struct device_driver vmlogrdr_driver = {
	.name = "vmlogrdr",
	.bus  = &iucv_bus,
	.pm = &vmlogrdr_pm_ops,
	.groups = vmlogrdr_drv_attr_groups,
};
/*
 * Register the IUCV handler, the driver on the iucv bus and the device
 * class, unwinding in reverse order on failure.
 */
static int vmlogrdr_register_driver(void)
{
	int rc;

	/* Register with iucv driver */
	rc = iucv_register(&vmlogrdr_iucv_handler, 1);
	if (rc)
		return rc;

	rc = driver_register(&vmlogrdr_driver);
	if (rc)
		goto unreg_iucv;

	vmlogrdr_class = class_create(THIS_MODULE, "vmlogrdr");
	if (IS_ERR(vmlogrdr_class)) {
		rc = PTR_ERR(vmlogrdr_class);
		vmlogrdr_class = NULL;
		goto unreg_driver;
	}
	return 0;

unreg_driver:
	driver_unregister(&vmlogrdr_driver);
unreg_iucv:
	iucv_unregister(&vmlogrdr_iucv_handler, 1);
	return rc;
}
/* Undo vmlogrdr_register_driver() in reverse order. */
static void vmlogrdr_unregister_driver(void)
{
	class_destroy(vmlogrdr_class);
	vmlogrdr_class = NULL;
	driver_unregister(&vmlogrdr_driver);
	iucv_unregister(&vmlogrdr_iucv_handler, 1);
}
/*
 * Create the struct device on the iucv bus and the corresponding class
 * device (which provides the /dev node) for one system service.
 * On success priv->device and priv->class_device are set.
 */
static int vmlogrdr_register_device(struct vmlogrdr_priv_t *priv)
{
	struct device *dev;
	int ret;

	dev = kzalloc(sizeof(struct device), GFP_KERNEL);
	if (dev) {
		dev_set_name(dev, "%s", priv->internal_name);
		dev->bus = &iucv_bus;
		dev->parent = iucv_root;
		dev->driver = &vmlogrdr_driver;
		dev->groups = vmlogrdr_attr_groups;
		dev_set_drvdata(dev, priv);
		/*
		 * The release function could be called after the
		 * module has been unloaded. It's _only_ task is to
		 * free the struct. Therefore, we specify kfree()
		 * directly here. (Probably a little bit obfuscating
		 * but legitime ...).
		 */
		dev->release = (void (*)(struct device *))kfree;
	} else
		return -ENOMEM;
	ret = device_register(dev);
	if (ret) {
		/* device_register took a reference; drop it, the
		 * release callback then frees dev for us. */
		put_device(dev);
		return ret;
	}

	priv->class_device = device_create(vmlogrdr_class, dev,
					   MKDEV(vmlogrdr_major,
						 priv->minor_num),
					   priv, "%s", dev_name(dev));
	if (IS_ERR(priv->class_device)) {
		ret = PTR_ERR(priv->class_device);
		priv->class_device=NULL;
		device_unregister(dev);
		return ret;
	}
	priv->device = dev;
	return 0;
}
/* Tear down the class device and the bus device created in
 * vmlogrdr_register_device(); safe to call when nothing was created. */
static int vmlogrdr_unregister_device(struct vmlogrdr_priv_t *priv)
{
	device_destroy(vmlogrdr_class, MKDEV(vmlogrdr_major, priv->minor_num));

	if (priv->device) {
		device_unregister(priv->device);
		priv->device = NULL;
	}
	return 0;
}
/* Allocate and add the character device covering all MAXMINOR minors. */
static int vmlogrdr_register_cdev(dev_t dev)
{
	int rc;

	vmlogrdr_cdev = cdev_alloc();
	if (!vmlogrdr_cdev)
		return -ENOMEM;

	vmlogrdr_cdev->owner = THIS_MODULE;
	vmlogrdr_cdev->ops = &vmlogrdr_fops;
	vmlogrdr_cdev->dev = dev;
	rc = cdev_add(vmlogrdr_cdev, vmlogrdr_cdev->dev, MAXMINOR);
	if (rc) {
		/* cdev is not fully registered, no cdev_del here! */
		kobject_put(&vmlogrdr_cdev->kobj);
		vmlogrdr_cdev = NULL;
	}
	return rc;
}
/*
 * Common teardown for module exit and init failure: remove the cdev,
 * the per-service devices and buffers, the driver registration and the
 * chrdev region.  Each step is guarded so partial initialization is
 * handled correctly.
 */
static void vmlogrdr_cleanup(void)
{
        int i;

	if (vmlogrdr_cdev) {
		cdev_del(vmlogrdr_cdev);
		vmlogrdr_cdev=NULL;
	}
	for (i=0; i < MAXMINOR; ++i ) {
		vmlogrdr_unregister_device(&sys_ser[i]);
		free_page((unsigned long)sys_ser[i].buffer);
	}
	vmlogrdr_unregister_driver();
	if (vmlogrdr_major) {
		unregister_chrdev_region(MKDEV(vmlogrdr_major, 0), MAXMINOR);
		vmlogrdr_major=0;
	}
}
/*
 * Module init: allocate the chrdev region, register driver and per-
 * service devices/buffers, then add the cdev last so no file operation
 * can run before everything is set up.  Any failure funnels through
 * vmlogrdr_cleanup().
 */
static int __init vmlogrdr_init(void)
{
	int rc;
	int i;
	dev_t dev;

	if (! MACHINE_IS_VM) {
		pr_err("not running under VM, driver not loaded.\n");
		return -ENODEV;
	}

        recording_class_AB = vmlogrdr_get_recording_class_AB();

	rc = alloc_chrdev_region(&dev, 0, MAXMINOR, "vmlogrdr");
	if (rc)
		return rc;
	vmlogrdr_major = MAJOR(dev);

	rc=vmlogrdr_register_driver();
	if (rc)
		goto cleanup;

	for (i=0; i < MAXMINOR; ++i ) {
		sys_ser[i].buffer = (char *) get_zeroed_page(GFP_KERNEL);
		if (!sys_ser[i].buffer) {
			rc = -ENOMEM;
			break;
		}
		sys_ser[i].current_position = sys_ser[i].buffer;
		rc=vmlogrdr_register_device(&sys_ser[i]);
		if (rc)
			break;
	}
	if (rc)
		goto cleanup;

	rc = vmlogrdr_register_cdev(dev);
	if (rc)
		goto cleanup;
	return 0;

cleanup:
	vmlogrdr_cleanup();
	return rc;
}
/* Module exit: everything is handled by the common cleanup helper.
 * (Redundant trailing `return;` removed.) */
static void __exit vmlogrdr_exit(void)
{
	vmlogrdr_cleanup();
}
module_init(vmlogrdr_init);
module_exit(vmlogrdr_exit);

1070
drivers/s390/char/vmur.c Normal file

File diff suppressed because it is too large Load diff

110
drivers/s390/char/vmur.h Normal file
View file

@ -0,0 +1,110 @@
/*
* Linux driver for System z and s390 unit record devices
* (z/VM virtual punch, reader, printer)
*
* Copyright IBM Corp. 2001, 2007
* Authors: Malcolm Beattie <beattiem@uk.ibm.com>
* Michael Holzheu <holzheu@de.ibm.com>
* Frank Munzert <munzert@de.ibm.com>
*/
#ifndef _VMUR_H_
#define _VMUR_H_
#define DEV_CLASS_UR_I 0x20 /* diag210 unit record input device class */
#define DEV_CLASS_UR_O 0x10 /* diag210 unit record output device class */
/*
* we only support z/VM's default unit record devices:
* both in SPOOL directory control statement and in CP DEFINE statement
* RDR defaults to 2540 reader
* PUN defaults to 2540 punch
* PRT defaults to 1403 printer
*/
#define READER_PUNCH_DEVTYPE 0x2540
#define PRINTER_DEVTYPE 0x1403
/* z/VM spool file control block SFBLOK */
/* z/VM spool file control block SFBLOK.  Field names mirror the z/VM
 * control block; layout must stay byte-exact (packed), see the
 * FILE_RECLEN_OFFSET constant below for how it is located in spool
 * data.  NOTE(review): individual field semantics follow the z/VM
 * documentation and are not derivable from this header alone. */
struct file_control_block {
	char reserved_1[8];
	char user_owner[8];	/* owning user id */
	char user_orig[8];	/* originating user id */
	__s32 data_recs;	/* number of data records */
	__s16 rec_len;		/* record length */
	__s16 file_num;		/* spool file number */
	__u8  file_stat;	/* status flags, see FLG_* below */
	__u8  dev_type;
	char  reserved_2[6];
	char  file_name[12];
	char  file_type[12];
	char  create_date[8];
	char  create_time[8];
	char  reserved_3[6];
	__u8  file_class;
	__u8  sfb_lok;
	__u64 distr_code;
	__u32 reserved_4;
	__u8  current_starting_copy_number;
	__u8  sfblock_cntrl_flags;
	__u8  reserved_5;
	__u8  more_status_flags;
	char  rest[200];
} __attribute__ ((packed));
#define FLG_SYSTEM_HOLD 0x04
#define FLG_CP_DUMP 0x10
#define FLG_USER_HOLD 0x20
#define FLG_IN_USE 0x80
/*
* A struct urdev is created for each ur device that is made available
* via the ccw_device driver model.
*/
struct urdev {
	struct ccw_device *cdev;	/* Backpointer to ccw device */
	struct mutex io_mutex;		/* Serialises device IO */
	struct completion *io_done;	/* do_ur_io waits; irq completes */
	struct device *device;
	struct cdev *char_device;
	struct ccw_dev_id dev_id;	/* device id */
	size_t reclen;			/* Record length for *write* CCWs */
	int class;			/* VM device class */
	int io_request_rc;		/* return code from I/O request */
	atomic_t ref_count;		/* reference counter */
	wait_queue_head_t wait;		/* wait queue to serialize open */
	int open_flag;			/* "urdev is open" flag */
	spinlock_t open_lock;		/* serialize critical sections */
};

/*
 * A struct urfile is allocated at open() time for each device and
 * freed on release().
 */
struct urfile {
	struct urdev *urd;	/* backpointer to the device */
	unsigned int flags;	/* spool file status flags (FLG_*) */
	size_t dev_reclen;	/* record length of the device */
	__u16 file_reclen;	/* record length of the spool file */
};
/*
* Device major/minor definitions.
*/
#define UR_MAJOR 0 /* get dynamic major */
/*
* We map minor numbers directly to device numbers (0-FFFF) for simplicity.
* This avoids having to allocate (and manage) slot numbers.
*/
#define NUM_MINORS 65536
/* Limiting each I/O to 511 records limits chan prog to 4KB (511 r/w + 1 NOP) */
#define MAX_RECS_PER_IO 511
#define WRITE_CCW_CMD 0x01
#define TRACE(x...) debug_sprintf_event(vmur_dbf, 1, x)
#define CCWDEV_CU_DI(cutype, di) \
CCW_DEVICE(cutype, 0x00), .driver_info = (di)
#define FILE_RECLEN_OFFSET 4064 /* reclen offset in spool data block */
#endif /* _VMUR_H_ */

782
drivers/s390/char/zcore.c Normal file
View file

@ -0,0 +1,782 @@
/*
* zcore module to export memory content and register sets for creating system
* dumps on SCSI disks (zfcpdump). The "zcore/mem" debugfs file shows the same
* dump format as s390 standalone dumps.
*
* For more information please refer to Documentation/s390/zfcpdump.txt
*
* Copyright IBM Corp. 2003, 2008
* Author(s): Michael Holzheu
*/
#define KMSG_COMPONENT "zdump"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/miscdevice.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/memblock.h>
#include <asm/asm-offsets.h>
#include <asm/ipl.h>
#include <asm/sclp.h>
#include <asm/setup.h>
#include <asm/uaccess.h>
#include <asm/debug.h>
#include <asm/processor.h>
#include <asm/irqflags.h>
#include <asm/checksum.h>
#include <asm/switch_to.h>
#include "sclp.h"
#define TRACE(x...) debug_sprintf_event(zcore_dbf, 1, x)
#define TO_USER 1
#define TO_KERNEL 0
#define CHUNK_INFO_SIZE 34 /* 2 16-byte char, each followed by blank */
/* Architecture id as stored in the dump header. */
enum arch_id {
	ARCH_S390	= 0,
	ARCH_S390X	= 1,
};

/* dump system info, filled once at init and used by the read path */
struct sys_info {
	enum arch_id	 arch;		/* dumped system's architecture */
	unsigned long	 sa_base;	/* base of register save area */
	u32		 sa_size;	/* size of register save area */
	int		 cpu_map[NR_CPUS];
	unsigned long	 mem_size;	/* dumped memory size */
	struct save_area lc_mask;	/* nonzero bytes = register fields */
};

/* IPL information block address plus its checksum (for re-IPL). */
struct ipib_info {
	unsigned long	ipib;
	u32		checksum;
}  __attribute__((packed));
static struct sys_info sys_info;
static struct debug_info *zcore_dbf;
static int hsa_available;
static struct dentry *zcore_dir;
static struct dentry *zcore_file;
static struct dentry *zcore_memmap_file;
static struct dentry *zcore_reipl_file;
static struct dentry *zcore_hsa_file;
static struct ipl_parameter_block *ipl_block;
/*
* Copy memory from HSA to kernel or user memory (not reentrant):
*
* @dest: Kernel or user buffer where memory should be copied to
* @src: Start address within HSA where data should be copied
* @count: Size of buffer, which should be copied
* @mode: Either TO_KERNEL or TO_USER
*/
int memcpy_hsa(void *dest, unsigned long src, size_t count, int mode)
{
	int offs, blk_num;
	/* one bounce page; static buffer is why this is not reentrant */
	static char buf[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));

	if (!hsa_available)
		return -ENODATA;
	if (count == 0)
		return 0;

	/* copy first block: align up to the next page boundary.
	 * HSA blocks are numbered from 1 and block 1 is a header page,
	 * hence the "+ 2" when converting an address to a block number. */
	offs = 0;
	if ((src % PAGE_SIZE) != 0) {
		blk_num = src / PAGE_SIZE + 2;
		if (sclp_sdias_copy(buf, blk_num, 1)) {
			TRACE("sclp_sdias_copy() failed\n");
			return -EIO;
		}
		offs = min((PAGE_SIZE - (src % PAGE_SIZE)), count);
		if (mode == TO_USER) {
			if (copy_to_user((__force __user void*) dest,
					 buf + (src % PAGE_SIZE), offs))
				return -EFAULT;
		} else
			memcpy(dest, buf + (src % PAGE_SIZE), offs);
	}
	if (offs == count)
		goto out;

	/* copy middle: whole pages, one block at a time */
	for (; (offs + PAGE_SIZE) <= count; offs += PAGE_SIZE) {
		blk_num = (src + offs) / PAGE_SIZE + 2;
		if (sclp_sdias_copy(buf, blk_num, 1)) {
			TRACE("sclp_sdias_copy() failed\n");
			return -EIO;
		}
		if (mode == TO_USER) {
			if (copy_to_user((__force __user void*) dest + offs,
					 buf, PAGE_SIZE))
				return -EFAULT;
		} else
			memcpy(dest + offs, buf, PAGE_SIZE);
	}
	if (offs == count)
		goto out;

	/* copy last block: the remaining partial page */
	blk_num = (src + offs) / PAGE_SIZE + 2;
	if (sclp_sdias_copy(buf, blk_num, 1)) {
		TRACE("sclp_sdias_copy() failed\n");
		return -EIO;
	}
	if (mode == TO_USER) {
		if (copy_to_user((__force __user void*) dest + offs, buf,
				 count - offs))
			return -EFAULT;
	} else
		memcpy(dest + offs, buf, count - offs);
out:
	return 0;
}
/* Convenience wrapper: copy HSA data to a userspace buffer. */
static int memcpy_hsa_user(void __user *dest, unsigned long src, size_t count)
{
	return memcpy_hsa((void __force *) dest, src, count, TO_USER);
}

/* Convenience wrapper: copy HSA data to a kernel buffer. */
static int memcpy_hsa_kernel(void *dest, unsigned long src, size_t count)
{
	return memcpy_hsa(dest, src, count, TO_KERNEL);
}
/*
 * Create the save area for the boot cpu and fill it with the register
 * set preserved in the HSA; vector registers are captured live since
 * they are not part of the lowcore save area.
 */
static int __init init_cpu_info(enum arch_id arch)
{
	struct save_area_ext *sa_ext;

	/* get info for boot cpu from lowcore, stored in the HSA */
	sa_ext = dump_save_area_create(0);
	if (!sa_ext)
		return -ENOMEM;
	if (memcpy_hsa_kernel(&sa_ext->sa, sys_info.sa_base,
			      sys_info.sa_size) < 0) {
		TRACE("could not copy from HSA\n");
		kfree(sa_ext);
		return -EIO;
	}
	if (MACHINE_HAS_VX)
		save_vx_regs_safe(sa_ext->vx_regs);
	return 0;
}
static DEFINE_MUTEX(zcore_mutex);
#define DUMP_VERSION 0x5
#define DUMP_MAGIC 0xa8190173618f23fdULL
#define DUMP_ARCH_S390X 2
#define DUMP_ARCH_S390 1
#define HEADER_SIZE 4096
/* dump header dumped according to s390 crash dump format */
/* dump header dumped according to s390 crash dump format.  The padding
 * arrays pin the layout so that sizeof(struct zcore_header) == 4096;
 * the header is emitted as the first HEADER_SIZE bytes of zcore/mem. */
struct zcore_header {
	u64 magic;
	u32 version;
	u32 header_size;
	u32 dump_level;
	u32 page_size;
	u64 mem_size;
	u64 mem_start;
	u64 mem_end;
	u32 num_pages;
	u32 pad1;
	u64 tod;
	struct cpuid cpu_id;
	u32 arch_id;
	u32 volnr;
	u32 build_arch;
	u64 rmem_size;
	u8 mvdump;
	u16 cpu_cnt;
	u16 real_cpu_cnt;
	u8 end_pad1[0x200-0x061];
	u64 mvdump_sign;
	u64 mvdump_zipl_time;
	u8 end_pad2[0x800-0x210];
	u32 lc_vec[512];
} __attribute__((packed,__aligned__(16)));

/* Compile-time constant parts of the header; the rest is filled at
 * init time. */
static struct zcore_header zcore_header = {
	.magic		= DUMP_MAGIC,
	.version	= DUMP_VERSION,
	.header_size	= 4096,
	.dump_level	= 0,
	.page_size	= PAGE_SIZE,
	.mem_start	= 0,
#ifdef CONFIG_64BIT
	.build_arch	= DUMP_ARCH_S390X,
#else
	.build_arch	= DUMP_ARCH_S390,
#endif
};
/*
* Copy lowcore info to buffer. Use map in order to copy only register parts.
*
* @buf: User buffer
* @sa: Pointer to save area
* @sa_off: Offset in save area to copy
* @len: Number of bytes to copy
*/
/*
 * Copy lowcore info to the user buffer, byte by byte, skipping every
 * byte whose position is not flagged in sys_info.lc_mask (i.e. only
 * register fields are transferred).
 */
static int copy_lc(void __user *buf, void *sa, int sa_off, int len)
{
	char *mask = (char *) &sys_info.lc_mask;
	int idx;

	for (idx = 0; idx < len; idx++) {
		if (!mask[idx + sa_off])
			continue;
		if (copy_to_user(buf + idx, sa + sa_off + idx, 1))
			return -EFAULT;
	}
	return 0;
}
/*
* Copy lowcores info to memory, if necessary
*
* @buf: User buffer
* @addr: Start address of buffer in dump memory
* @count: Size of buffer
*/
/*
 * For every dumped cpu whose prefix-page save area overlaps the range
 * [start, start+count), overwrite the corresponding part of the user
 * buffer with the masked register contents (see copy_lc()).
 */
static int zcore_add_lc(char __user *buf, unsigned long start, size_t count)
{
	unsigned long end;
	int i;

	if (count == 0)
		return 0;

	end = start + count;
	for (i = 0; i < dump_save_areas.count; i++) {
		unsigned long cp_start, cp_end; /* copy range */
		unsigned long sa_start, sa_end; /* save area range */
		unsigned long prefix;
		unsigned long sa_off, len, buf_off;
		struct save_area *save_area = &dump_save_areas.areas[i]->sa;

		prefix = save_area->pref_reg;
		sa_start = prefix + sys_info.sa_base;
		sa_end = prefix + sys_info.sa_base + sys_info.sa_size;

		/* no overlap with this cpu's save area? */
		if ((end < sa_start) || (start > sa_end))
			continue;
		cp_start = max(start, sa_start);
		cp_end = min(end, sa_end);

		buf_off = cp_start - start;
		sa_off = cp_start - sa_start;
		len = cp_end - cp_start;

		TRACE("copy_lc for: %lx\n", start);
		if (copy_lc(buf + buf_off, save_area, sa_off, len))
			return -EFAULT;
	}
	return 0;
}
/*
* Release the HSA
*/
/* Hand the HSA back via diagnose 308; no further HSA reads possible. */
static void release_hsa(void)
{
	diag308(DIAG308_REL_HSA, NULL);
	hsa_available = 0;
}
/*
* Read routine for zcore character device
* First 4K are dump header
* Next 32MB are HSA Memory
* Rest is read from absolute Memory
*/
/*
 * read() for the zcore debugfs file.  The virtual file layout is:
 * header (HEADER_SIZE bytes), then HSA memory, then absolute memory,
 * optionally patched with the per-cpu register save areas.
 *
 * Fix: the header copy used `&zcore_header + *ppos`, which advances the
 * pointer in units of sizeof(struct zcore_header) (4 KiB) rather than
 * bytes, so any read starting at 0 < *ppos < HEADER_SIZE copied from
 * far beyond the header.  Cast to char * before adding the offset.
 */
static ssize_t zcore_read(struct file *file, char __user *buf, size_t count,
			  loff_t *ppos)
{
	unsigned long mem_start; /* Start address in memory */
	size_t mem_offs;	 /* Offset in dump memory */
	size_t hdr_count;	 /* Size of header part of output buffer */
	size_t size;
	int rc;

	mutex_lock(&zcore_mutex);

	if (*ppos > (sys_info.mem_size + HEADER_SIZE)) {
		rc = -EINVAL;
		goto fail;
	}

	count = min(count, (size_t) (sys_info.mem_size + HEADER_SIZE - *ppos));

	/* Copy dump header */
	if (*ppos < HEADER_SIZE) {
		size = min(count, (size_t) (HEADER_SIZE - *ppos));
		/* byte-wise offset into the header, hence the char * cast */
		if (copy_to_user(buf, (char *) &zcore_header + *ppos, size)) {
			rc = -EFAULT;
			goto fail;
		}
		hdr_count = size;
		mem_start = 0;
	} else {
		hdr_count = 0;
		mem_start = *ppos - HEADER_SIZE;
	}

	mem_offs = 0;

	/* Copy from HSA data */
	if (*ppos < sclp_get_hsa_size() + HEADER_SIZE) {
		size = min((count - hdr_count),
			   (size_t) (sclp_get_hsa_size() - mem_start));
		rc = memcpy_hsa_user(buf + hdr_count, mem_start, size);
		if (rc)
			goto fail;

		mem_offs += size;
	}

	/* Copy from real mem */
	size = count - mem_offs - hdr_count;
	rc = copy_to_user_real(buf + hdr_count + mem_offs,
			       (void *) mem_start + mem_offs, size);
	if (rc)
		goto fail;

	/*
	 * Since s390 dump analysis tools like lcrash or crash
	 * expect register sets in the prefix pages of the cpus,
	 * we copy them into the read buffer, if necessary.
	 * buf + hdr_count: Start of memory part of output buffer
	 * mem_start: Start memory address to copy from
	 * count - hdr_count: Size of memory area to copy
	 */
	if (zcore_add_lc(buf + hdr_count, mem_start, count - hdr_count)) {
		rc = -EFAULT;
		goto fail;
	}
	*ppos += count;
fail:
	mutex_unlock(&zcore_mutex);
	return (rc < 0) ? rc : count;
}
/*
 * Open the zcore "mem" device: require the HSA to still be available and
 * the caller to hold CAP_SYS_RAWIO.
 */
static int zcore_open(struct inode *inode, struct file *filp)
{
	if (!hsa_available)
		return -ENODATA;
	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;
	return 0;
}
/* Release the HSA (if still held) when the last reader closes the device. */
static int zcore_release(struct inode *inode, struct file *filep)
{
	if (!hsa_available)
		return 0;
	release_hsa();
	return 0;
}
/*
 * Seek on the zcore device. Only SEEK_SET (0) and SEEK_CUR (1) are
 * supported; everything else yields -EINVAL. f_pos is updated under
 * zcore_mutex, consistent with zcore_read().
 */
static loff_t zcore_lseek(struct file *file, loff_t offset, int orig)
{
	loff_t ret;

	mutex_lock(&zcore_mutex);
	if (orig == 0) {
		file->f_pos = offset;
		ret = file->f_pos;
	} else if (orig == 1) {
		file->f_pos += offset;
		ret = file->f_pos;
	} else {
		ret = -EINVAL;
	}
	mutex_unlock(&zcore_mutex);
	return ret;
}
/* File operations for the debugfs "mem" file (created in zcore_init()). */
static const struct file_operations zcore_fops = {
	.owner		= THIS_MODULE,
	.llseek		= zcore_lseek,
	.read		= zcore_read,
	.open		= zcore_open,
	.release	= zcore_release,
};
/*
 * Read the memory-chunk table prepared by zcore_memmap_open() out of
 * filp->private_data.
 */
static ssize_t zcore_memmap_read(struct file *filp, char __user *buf,
				 size_t count, loff_t *ppos)
{
	size_t table_len = memblock.memory.cnt * CHUNK_INFO_SIZE;

	return simple_read_from_buffer(buf, count, ppos, filp->private_data,
				       table_len);
}
/*
 * Build a textual table of all memblock memory regions
 * ("<base> <size> " per region, CHUNK_INFO_SIZE bytes each) and stash it
 * in filp->private_data for zcore_memmap_read(); freed on release.
 */
static int zcore_memmap_open(struct inode *inode, struct file *filp)
{
	struct memblock_region *reg;
	char *table;
	int idx = 0;

	table = kzalloc(memblock.memory.cnt * CHUNK_INFO_SIZE, GFP_KERNEL);
	if (!table)
		return -ENOMEM;
	for_each_memblock(memory, reg) {
		sprintf(table + idx * CHUNK_INFO_SIZE, "%016llx %016llx ",
			(unsigned long long) reg->base,
			(unsigned long long) reg->size);
		idx++;
	}
	filp->private_data = table;
	return nonseekable_open(inode, filp);
}
/* Free the chunk table allocated by zcore_memmap_open(). */
static int zcore_memmap_release(struct inode *inode, struct file *filp)
{
	kfree(filp->private_data);
	return 0;
}
/* File operations for the debugfs "memmap" file (created in zcore_init()). */
static const struct file_operations zcore_memmap_fops = {
	.owner		= THIS_MODULE,
	.read		= zcore_memmap_read,
	.open		= zcore_memmap_open,
	.release	= zcore_memmap_release,
	.llseek		= no_llseek,
};
/*
 * Writing anything to the "reipl" file re-IPLs the system from the IPL
 * parameter block saved by zcore_reipl_init(); silently a no-op when no
 * valid block was found. The written data itself is ignored.
 */
static ssize_t zcore_reipl_write(struct file *filp, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	if (ipl_block) {
		/* Order matters: first set the IPL parameters, then IPL. */
		diag308(DIAG308_SET, ipl_block);
		diag308(DIAG308_IPL, NULL);
	}
	return count;
}
static int zcore_reipl_open(struct inode *inode, struct file *filp)
{
return nonseekable_open(inode, filp);
}
static int zcore_reipl_release(struct inode *inode, struct file *filp)
{
return 0;
}
/* File operations for the debugfs "reipl" file (created in zcore_init()). */
static const struct file_operations zcore_reipl_fops = {
	.owner		= THIS_MODULE,
	.write		= zcore_reipl_write,
	.open		= zcore_reipl_open,
	.release	= zcore_reipl_release,
	.llseek		= no_llseek,
};
/*
 * Read handler for the "hsa" file: report the HSA size in hex, or "0\n"
 * once the HSA has been released.
 */
static ssize_t zcore_hsa_read(struct file *filp, char __user *buf,
			      size_t count, loff_t *ppos)
{
	/*
	 * Fix: the buffer was 'static', i.e. shared by all concurrent
	 * readers without any locking - two parallel reads could hand
	 * out a half-overwritten string. An automatic buffer (18 bytes
	 * fit "%lx\n" for a 64-bit value plus NUL) removes the race.
	 */
	char str[18];

	if (hsa_available)
		snprintf(str, sizeof(str), "%lx\n", sclp_get_hsa_size());
	else
		snprintf(str, sizeof(str), "0\n");
	return simple_read_from_buffer(buf, count, ppos, str, strlen(str));
}
/*
 * Write handler for the "hsa" file: writing '0' releases the HSA.
 * Any other value, a non-zero offset, or an empty write is rejected.
 */
static ssize_t zcore_hsa_write(struct file *filp, const char __user *buf,
			       size_t count, loff_t *ppos)
{
	char value;

	if (*ppos != 0)
		return -EPIPE;
	/*
	 * Fix: reject empty writes up front. The code always copied one
	 * byte from the user buffer, even when count == 0, i.e. it read
	 * a byte the caller never supplied.
	 */
	if (count < 1)
		return -EINVAL;
	if (copy_from_user(&value, buf, 1))
		return -EFAULT;
	if (value != '0')
		return -EINVAL;
	release_hsa();
	return count;
}
/*
 * File operations for the debugfs "hsa" file (created in zcore_init()).
 * No release hook needed; open only marks the file non-seekable.
 */
static const struct file_operations zcore_hsa_fops = {
	.owner		= THIS_MODULE,
	.write		= zcore_hsa_write,
	.read		= zcore_hsa_read,
	.open		= nonseekable_open,
	.llseek		= no_llseek,
};
#ifdef CONFIG_32BIT
/*
 * Mark all register fields of the lowcore mask with 0xff. copy_lc() copies
 * only bytes whose mask byte is non-zero, so this selects exactly the
 * register parts of a save area. The field set differs between 31- and
 * 64-bit save-area layouts, hence the two variants.
 */
static void __init set_lc_mask(struct save_area *map)
{
	memset(&map->ext_save, 0xff, sizeof(map->ext_save));
	memset(&map->timer, 0xff, sizeof(map->timer));
	memset(&map->clk_cmp, 0xff, sizeof(map->clk_cmp));
	memset(&map->psw, 0xff, sizeof(map->psw));
	memset(&map->pref_reg, 0xff, sizeof(map->pref_reg));
	memset(&map->acc_regs, 0xff, sizeof(map->acc_regs));
	memset(&map->fp_regs, 0xff, sizeof(map->fp_regs));
	memset(&map->gp_regs, 0xff, sizeof(map->gp_regs));
	memset(&map->ctrl_regs, 0xff, sizeof(map->ctrl_regs));
}
#else /* CONFIG_32BIT */
/* 64-bit variant: additionally covers fp_ctrl_reg and tod_reg. */
static void __init set_lc_mask(struct save_area *map)
{
	memset(&map->fp_regs, 0xff, sizeof(map->fp_regs));
	memset(&map->gp_regs, 0xff, sizeof(map->gp_regs));
	memset(&map->psw, 0xff, sizeof(map->psw));
	memset(&map->pref_reg, 0xff, sizeof(map->pref_reg));
	memset(&map->fp_ctrl_reg, 0xff, sizeof(map->fp_ctrl_reg));
	memset(&map->tod_reg, 0xff, sizeof(map->tod_reg));
	memset(&map->timer, 0xff, sizeof(map->timer));
	memset(&map->clk_cmp, 0xff, sizeof(map->clk_cmp));
	memset(&map->acc_regs, 0xff, sizeof(map->acc_regs));
	memset(&map->ctrl_regs, 0xff, sizeof(map->ctrl_regs));
}
#endif /* CONFIG_32BIT */
/*
 * Initialize dump globals for a given architecture
 *
 * Validates the detected architecture, fills in the global sys_info
 * (save-area geometry, lowcore mask, memory size) and initializes the
 * per-CPU information.
 *
 * Returns 0 on success, -EINVAL for an unknown architecture, or the
 * error from init_cpu_info().
 */
static int __init sys_info_init(enum arch_id arch, unsigned long mem_end)
{
	int rc;

	if (arch == ARCH_S390X) {
		pr_alert("DETECTED 'S390X (64 bit) OS'\n");
	} else if (arch == ARCH_S390) {
		pr_alert("DETECTED 'S390 (32 bit) OS'\n");
	} else {
		pr_alert("0x%x is an unknown architecture.\n", arch);
		return -EINVAL;
	}
	sys_info.sa_base = SAVE_AREA_BASE;
	sys_info.sa_size = sizeof(struct save_area);
	sys_info.arch = arch;
	set_lc_mask(&sys_info.lc_mask);
	rc = init_cpu_info(arch);
	if (rc)
		return rc;
	sys_info.mem_size = mem_end;
	return 0;
}
/* Verify that SCLP reports a usable (non-zero) HSA size. */
static int __init check_sdias(void)
{
	if (sclp_get_hsa_size())
		return 0;
	TRACE("Could not determine HSA size\n");
	return -ENODEV;
}
/*
 * Accumulate the total size of all memblock memory regions into *mem and
 * the highest end address into *end. Always returns 0.
 */
static int __init get_mem_info(unsigned long *mem, unsigned long *end)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		unsigned long top = reg->base + reg->size;

		*mem += reg->size;
		if (top > *end)
			*end = top;
	}
	return 0;
}
/*
 * Fill the zcore dump header: architecture id, memory sizes, page count,
 * TOD clock, CPU id, and the list of CPU prefix registers.
 */
static void __init zcore_header_init(int arch, struct zcore_header *hdr,
				     unsigned long mem_size)
{
	u32 prefix;
	int i;
	if (arch == ARCH_S390X)
		hdr->arch_id = DUMP_ARCH_S390X;
	else
		hdr->arch_id = DUMP_ARCH_S390;
	hdr->mem_size = mem_size;
	hdr->rmem_size = mem_size;
	hdr->mem_end = sys_info.mem_size;
	hdr->num_pages = mem_size / PAGE_SIZE;
	hdr->tod = get_tod_clock();
	get_cpu_id(&hdr->cpu_id);
	for (i = 0; i < dump_save_areas.count; i++) {
		prefix = dump_save_areas.areas[i]->sa.pref_reg;
		/* real_cpu_cnt counts every save area ... */
		hdr->real_cpu_cnt++;
		/* ... while cpu_cnt/lc_vec only record CPUs with a
		 * non-zero prefix register. */
		if (!prefix)
			continue;
		hdr->lc_vec[hdr->cpu_cnt] = prefix;
		hdr->cpu_cnt++;
	}
}
/*
 * Provide IPL parameter information block from either HSA or memory
 * for future reipl
 *
 * Reads the IPIB pointer/checksum from the HSA, then fetches the IPIB
 * itself from HSA or real memory depending on its address, and keeps it
 * in ipl_block only if the checksum matches. A missing or invalid IPIB
 * is not an error - reipl simply stays unavailable (ipl_block == NULL).
 */
static int __init zcore_reipl_init(void)
{
	struct ipib_info ipib_info;
	int rc;
	rc = memcpy_hsa_kernel(&ipib_info, __LC_DUMP_REIPL, sizeof(ipib_info));
	if (rc)
		return rc;
	/* No IPIB was saved by the previous system. */
	if (ipib_info.ipib == 0)
		return 0;
	ipl_block = (void *) __get_free_page(GFP_KERNEL);
	if (!ipl_block)
		return -ENOMEM;
	/* The IPIB may live below the HSA boundary (then it must be
	 * fetched via SCLP) or in real memory. */
	if (ipib_info.ipib < sclp_get_hsa_size())
		rc = memcpy_hsa_kernel(ipl_block, ipib_info.ipib, PAGE_SIZE);
	else
		rc = memcpy_real(ipl_block, (void *) ipib_info.ipib, PAGE_SIZE);
	/* Discard the block on copy failure or checksum mismatch. */
	if (rc || csum_partial(ipl_block, ipl_block->hdr.len, 0) !=
	    ipib_info.checksum) {
		TRACE("Checksum does not match\n");
		free_page((unsigned long) ipl_block);
		ipl_block = NULL;
	}
	return 0;
}
/*
 * Module init: only active after an FCP dump IPL. Sets up debug tracing,
 * the SCLP SDIAS interface, dump globals and the debugfs files
 * zcore/{mem,memmap,reipl,hsa}. On any failure the HSA is released and
 * already-created debugfs entries are removed (reverse order unwind).
 */
static int __init zcore_init(void)
{
	unsigned long mem_size, mem_end;
	unsigned char arch;
	int rc;
	mem_size = mem_end = 0;
	/* zcore is only meaningful for an FCP dump kernel ... */
	if (ipl_info.type != IPL_TYPE_FCP_DUMP)
		return -ENODATA;
	/* ... and not when running as a kdump kernel. */
	if (OLDMEM_BASE)
		return -ENODATA;
	zcore_dbf = debug_register("zcore", 4, 1, 4 * sizeof(long));
	debug_register_view(zcore_dbf, &debug_sprintf_view);
	debug_set_level(zcore_dbf, 6);
	TRACE("devno: %x\n", ipl_info.data.fcp.dev_id.devno);
	TRACE("wwpn: %llx\n", (unsigned long long) ipl_info.data.fcp.wwpn);
	TRACE("lun: %llx\n", (unsigned long long) ipl_info.data.fcp.lun);
	rc = sclp_sdias_init();
	if (rc)
		goto fail;
	rc = check_sdias();
	if (rc)
		goto fail;
	hsa_available = 1;
	/* Read the architecture id of the dumped system from the HSA. */
	rc = memcpy_hsa_kernel(&arch, __LC_AR_MODE_ID, 1);
	if (rc)
		goto fail;
#ifdef CONFIG_64BIT
	if (arch == ARCH_S390) {
		pr_alert("The 64-bit dump tool cannot be used for a "
			 "32-bit system\n");
		rc = -EINVAL;
		goto fail;
	}
#else /* CONFIG_64BIT */
	if (arch == ARCH_S390X) {
		pr_alert("The 32-bit dump tool cannot be used for a "
			 "64-bit system\n");
		rc = -EINVAL;
		goto fail;
	}
#endif /* CONFIG_64BIT */
	rc = get_mem_info(&mem_size, &mem_end);
	if (rc)
		goto fail;
	rc = sys_info_init(arch, mem_end);
	if (rc)
		goto fail;
	zcore_header_init(arch, &zcore_header, mem_size);
	rc = zcore_reipl_init();
	if (rc)
		goto fail;
	/* Create the debugfs interface; unwind in reverse on failure. */
	zcore_dir = debugfs_create_dir("zcore" , NULL);
	if (!zcore_dir) {
		rc = -ENOMEM;
		goto fail;
	}
	zcore_file = debugfs_create_file("mem", S_IRUSR, zcore_dir, NULL,
					 &zcore_fops);
	if (!zcore_file) {
		rc = -ENOMEM;
		goto fail_dir;
	}
	zcore_memmap_file = debugfs_create_file("memmap", S_IRUSR, zcore_dir,
						NULL, &zcore_memmap_fops);
	if (!zcore_memmap_file) {
		rc = -ENOMEM;
		goto fail_file;
	}
	zcore_reipl_file = debugfs_create_file("reipl", S_IRUSR, zcore_dir,
						NULL, &zcore_reipl_fops);
	if (!zcore_reipl_file) {
		rc = -ENOMEM;
		goto fail_memmap_file;
	}
	zcore_hsa_file = debugfs_create_file("hsa", S_IRUSR|S_IWUSR, zcore_dir,
					     NULL, &zcore_hsa_fops);
	if (!zcore_hsa_file) {
		rc = -ENOMEM;
		goto fail_reipl_file;
	}
	return 0;
fail_reipl_file:
	debugfs_remove(zcore_reipl_file);
fail_memmap_file:
	debugfs_remove(zcore_memmap_file);
fail_file:
	debugfs_remove(zcore_file);
fail_dir:
	debugfs_remove(zcore_dir);
fail:
	/* Nothing useful can run anymore - give the HSA back. */
	diag308(DIAG308_REL_HSA, NULL);
	return rc;
}
/*
 * Module exit: tear down in reverse order of zcore_init() - debug
 * tracing, SDIAS, saved IPIB page, debugfs files - and release the HSA.
 */
static void __exit zcore_exit(void)
{
	debug_unregister(zcore_dbf);
	sclp_sdias_exit();
	/* free_page(0) is harmless when zcore_reipl_init() kept no block. */
	free_page((unsigned long) ipl_block);
	debugfs_remove(zcore_hsa_file);
	debugfs_remove(zcore_reipl_file);
	debugfs_remove(zcore_memmap_file);
	debugfs_remove(zcore_file);
	debugfs_remove(zcore_dir);
	diag308(DIAG308_REL_HSA, NULL);
}
/* Module metadata; init runs at subsys level so the dump device is
 * available early. */
MODULE_AUTHOR("Copyright IBM Corp. 2003,2008");
MODULE_DESCRIPTION("zcore module for zfcpdump support");
MODULE_LICENSE("GPL");
subsys_initcall(zcore_init);
module_exit(zcore_exit);