Fixed MTP to work with TWRP

awab228 2018-06-19 23:16:04 +02:00
commit f6dfaef42e
50820 changed files with 20846062 additions and 0 deletions

111
drivers/s390/net/Kconfig Normal file

@@ -0,0 +1,111 @@
menu "S/390 network device drivers"
depends on NETDEVICES && S390
config LCS
def_tristate m
prompt "Lan Channel Station Interface"
depends on CCW && NETDEVICES && (ETHERNET || FDDI)
help
Select this option if you want to use LCS networking on IBM System z.
This device driver supports FDDI (IEEE 802.7) and Ethernet.
To compile as a module, choose M. The module name is lcs.
If you do not know what it is, it's safe to choose Y.
config CTCM
def_tristate m
prompt "CTC and MPC SNA device support"
depends on CCW && NETDEVICES
help
Select this option if you want to use channel-to-channel
point-to-point networking on IBM System z.
This device driver supports real CTC coupling using ESCON.
It also supports virtual CTCs when running under VM.
This driver also supports channel-to-channel MPC SNA devices.
MPC is an SNA protocol device used by Communication Server for Linux.
To compile as a module, choose M. The module name is ctcm.
To compile into the kernel, choose Y.
If you do not need any channel-to-channel connection, choose N.
config NETIUCV
def_tristate m
prompt "IUCV network device support (VM only)"
depends on IUCV && NETDEVICES
help
Select this option if you want to use inter-user communication
vehicle networking under VM or VIF. It enables a fast communication
link between VM guests. Using ifconfig a point-to-point connection
can be established to the Linux on IBM System z
running on the other VM guest. To compile as a module, choose M.
The module name is netiucv. If unsure, choose Y.
config SMSGIUCV
def_tristate m
prompt "IUCV special message support (VM only)"
depends on IUCV
help
Select this option if you want to be able to receive SMSG messages
from other VM guest systems.
config SMSGIUCV_EVENT
def_tristate m
prompt "Deliver IUCV special messages as uevents (VM only)"
depends on SMSGIUCV
help
Select this option to deliver CP special messages (SMSGs) as
uevents. The driver handles only those special messages that
start with "APP".
To compile as a module, choose M. The module name is "smsgiucv_app".
config CLAW
def_tristate m
prompt "CLAW device support"
depends on CCW && NETDEVICES
help
This driver supports channel attached CLAW devices.
CLAW is Common Link Access for Workstation. Common devices
that use CLAW are RS/6000s, Cisco Routers (CIP) and 3172 devices.
To compile as a module, choose M. The module name is claw.
To compile into the kernel, choose Y.
config QETH
def_tristate y
prompt "Gigabit Ethernet device support"
depends on CCW && NETDEVICES && IP_MULTICAST && QDIO
help
This driver supports the IBM System z OSA Express adapters
in QDIO mode (all media types), HiperSockets interfaces and z/VM
virtual NICs for Guest LAN and VSWITCH.
For details please refer to the documentation provided by IBM at
<http://www.ibm.com/developerworks/linux/linux390>
To compile this driver as a module, choose M.
The module name is qeth.
config QETH_L2
def_tristate y
prompt "qeth layer 2 device support"
depends on QETH
help
Select this option to be able to run qeth devices in layer 2 mode.
To compile as a module, choose M. The module name is qeth_l2.
If unsure, choose y.
config QETH_L3
def_tristate y
prompt "qeth layer 3 device support"
depends on QETH
help
Select this option to be able to run qeth devices in layer 3 mode.
To compile as a module choose M. The module name is qeth_l3.
If unsure, choose Y.
config QETH_IPV6
def_bool y if (QETH_L3 = IPV6) || (QETH_L3 && IPV6 = 'y')
config CCWGROUP
tristate
default (LCS || CTCM || QETH || CLAW)
endmenu
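
The QETH_IPV6 and CCWGROUP entries above are derived symbols rather than user prompts: QETH_IPV6 is forced to y only when the qeth layer 3 code and IPv6 end up in compatible build forms, and CCWGROUP is pulled in by any of the channel drivers. A minimal sketch of how driver code would typically consume such a derived bool, assuming the usual IS_ENABLED() idiom (the helper name is hypothetical):

#include <linux/errno.h>
#include <linux/kconfig.h>

/* Hypothetical helper: take the IPv6 path only when CONFIG_QETH_IPV6
 * was derived as y by the Kconfig rule above. */
static int qeth_example_setup_ipv6(void)
{
	if (!IS_ENABLED(CONFIG_QETH_IPV6))
		return -EOPNOTSUPP;	/* layer 3 IPv6 support not built */

	/* IPv6-specific initialisation would go here */
	return 0;
}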

17
drivers/s390/net/Makefile Normal file

@@ -0,0 +1,17 @@
#
# S/390 network devices
#
ctcm-y += ctcm_main.o ctcm_fsms.o ctcm_mpc.o ctcm_sysfs.o ctcm_dbug.o
obj-$(CONFIG_CTCM) += ctcm.o fsm.o
obj-$(CONFIG_NETIUCV) += netiucv.o fsm.o
obj-$(CONFIG_SMSGIUCV) += smsgiucv.o
obj-$(CONFIG_SMSGIUCV_EVENT) += smsgiucv_app.o
obj-$(CONFIG_LCS) += lcs.o
obj-$(CONFIG_CLAW) += claw.o
qeth-y += qeth_core_sys.o qeth_core_main.o qeth_core_mpc.o
obj-$(CONFIG_QETH) += qeth.o
qeth_l2-y += qeth_l2_main.o qeth_l2_sys.o
obj-$(CONFIG_QETH_L2) += qeth_l2.o
qeth_l3-y += qeth_l3_main.o qeth_l3_sys.o
obj-$(CONFIG_QETH_L3) += qeth_l3.o

3379
drivers/s390/net/claw.c Normal file

File diff suppressed because it is too large.

348
drivers/s390/net/claw.h Normal file

@@ -0,0 +1,348 @@
/*******************************************************
* Define constants *
* *
********************************************************/
/*-----------------------------------------------------*
* CCW command codes for CLAW protocol *
*------------------------------------------------------*/
#define CCW_CLAW_CMD_WRITE 0x01 /* write - not including link */
#define CCW_CLAW_CMD_READ 0x02 /* read */
#define CCW_CLAW_CMD_NOP 0x03 /* NOP */
#define CCW_CLAW_CMD_SENSE 0x04 /* Sense */
#define CCW_CLAW_CMD_SIGNAL_SMOD 0x05 /* Signal Status Modifier */
#define CCW_CLAW_CMD_TIC 0x08 /* TIC */
#define CCW_CLAW_CMD_READHEADER 0x12 /* read header data */
#define CCW_CLAW_CMD_READFF 0x22 /* read an FF */
#define CCW_CLAW_CMD_SENSEID 0xe4 /* Sense ID */
/*-----------------------------------------------------*
* CLAW Unique constants *
*------------------------------------------------------*/
#define MORE_to_COME_FLAG 0x04 /* OR with write CCW in case of m-t-c */
#define CLAW_IDLE 0x00 /* flag to indicate CLAW is idle */
#define CLAW_BUSY 0xff /* flag to indicate CLAW is busy */
#define CLAW_PENDING 0x00 /* flag to indicate i/o is pending */
#define CLAW_COMPLETE 0xff /* flag to indicate i/o completed */
/*-----------------------------------------------------*
* CLAW control command code *
*------------------------------------------------------*/
#define SYSTEM_VALIDATE_REQUEST 0x01 /* System Validate request */
#define SYSTEM_VALIDATE_RESPONSE 0x02 /* System Validate response */
#define CONNECTION_REQUEST 0x21 /* Connection request */
#define CONNECTION_RESPONSE 0x22 /* Connection response */
#define CONNECTION_CONFIRM 0x23 /* Connection confirm */
#define DISCONNECT 0x24 /* Disconnect */
#define CLAW_ERROR 0x41 /* CLAW error message */
#define CLAW_VERSION_ID 2 /* CLAW version ID */
/*-----------------------------------------------------*
* CLAW adapter sense bytes *
*------------------------------------------------------*/
#define CLAW_ADAPTER_SENSE_BYTE 0x41 /* Stop command issued to adapter */
/*-----------------------------------------------------*
* CLAW control command return codes *
*------------------------------------------------------*/
#define CLAW_RC_NAME_MISMATCH 166 /* names do not match */
#define CLAW_RC_WRONG_VERSION 167 /* wrong CLAW version number */
#define CLAW_RC_HOST_RCV_TOO_SMALL 180 /* Host maximum receive is */
/* less than Linux on zSeries*/
/* transmit size */
/*-----------------------------------------------------*
* CLAW Constants application name *
*------------------------------------------------------*/
#define HOST_APPL_NAME "TCPIP "
#define WS_APPL_NAME_IP_LINK "TCPIP "
#define WS_APPL_NAME_IP_NAME "IP "
#define WS_APPL_NAME_API_LINK "API "
#define WS_APPL_NAME_PACKED "PACKED "
#define WS_NAME_NOT_DEF "NOT_DEF "
#define PACKING_ASK 1
#define PACK_SEND 2
#define DO_PACKED 3
#define MAX_ENVELOPE_SIZE 65536
#define CLAW_DEFAULT_MTU_SIZE 4096
#define DEF_PACK_BUFSIZE 32768
#define READ_CHANNEL 0
#define WRITE_CHANNEL 1
#define TB_TX 0 /* sk buffer handling in process */
#define TB_STOP 1 /* network device stop in process */
#define TB_RETRY 2 /* retry in process */
#define TB_NOBUFFER 3 /* no buffer on free queue */
#define CLAW_MAX_LINK_ID 1
#define CLAW_MAX_DEV 256 /* max claw devices */
#define MAX_NAME_LEN 8 /* host name, adapter name length */
#define CLAW_FRAME_SIZE 4096
#define CLAW_ID_SIZE 20+3
/* state machine codes used in claw_irq_handler */
#define CLAW_STOP 0
#define CLAW_START_HALT_IO 1
#define CLAW_START_SENSEID 2
#define CLAW_START_READ 3
#define CLAW_START_WRITE 4
/*-----------------------------------------------------*
* Lock flag *
*------------------------------------------------------*/
#define LOCK_YES 0
#define LOCK_NO 1
/*-----------------------------------------------------*
* DBF Debug macros *
*------------------------------------------------------*/
#define CLAW_DBF_TEXT(level, name, text) \
do { \
debug_text_event(claw_dbf_##name, level, text); \
} while (0)
#define CLAW_DBF_HEX(level,name,addr,len) \
do { \
debug_event(claw_dbf_##name,level,(void*)(addr),len); \
} while (0)
#define CLAW_DBF_TEXT_(level,name,text...) \
do { \
if (debug_level_enabled(claw_dbf_##name, level)) { \
sprintf(debug_buffer, text); \
debug_text_event(claw_dbf_##name, level, \
debug_buffer); \
} \
} while (0)
/**
* Enum for classifying detected devices.
*/
enum claw_channel_types {
/* Device is not a channel */
claw_channel_type_none,
/* Device is a CLAW channel device */
claw_channel_type_claw
};
/*******************************************************
* Define Control Blocks *
* *
********************************************************/
/*------------------------------------------------------*/
/* CLAW header */
/*------------------------------------------------------*/
struct clawh {
__u16 length; /* length of data read by preceding read CCW */
__u8 opcode; /* equivalent read CCW */
__u8 flag; /* flag of FF to indicate read was completed */
};
/*------------------------------------------------------*/
/* CLAW Packing header 4 bytes */
/*------------------------------------------------------*/
struct clawph {
__u16 len; /* Length of Packed Data Area */
__u8 flag; /* Reserved not used */
__u8 link_num; /* Link ID */
};
/*------------------------------------------------------*/
/* CLAW Ending struct ccwbk */
/*------------------------------------------------------*/
struct endccw {
__u32 real; /* real address of this block */
__u8 write1; /* write 1 is active */
__u8 read1; /* read 1 is active */
__u16 reserved; /* reserved for future use */
struct ccw1 write1_nop1;
struct ccw1 write1_nop2;
struct ccw1 write2_nop1;
struct ccw1 write2_nop2;
struct ccw1 read1_nop1;
struct ccw1 read1_nop2;
struct ccw1 read2_nop1;
struct ccw1 read2_nop2;
};
/*------------------------------------------------------*/
/* CLAW struct ccwbk */
/*------------------------------------------------------*/
struct ccwbk {
void *next; /* pointer to next ccw block */
__u32 real; /* real address of this ccw */
void *p_buffer; /* virtual address of data */
struct clawh header; /* claw header */
struct ccw1 write; /* write CCW */
struct ccw1 w_read_FF; /* read FF */
struct ccw1 w_TIC_1; /* TIC */
struct ccw1 read; /* read CCW */
struct ccw1 read_h; /* read header */
struct ccw1 signal; /* signal SMOD */
struct ccw1 r_TIC_1; /* TIC1 */
struct ccw1 r_read_FF; /* read FF */
struct ccw1 r_TIC_2; /* TIC2 */
};
/*------------------------------------------------------*/
/* CLAW control block */
/*------------------------------------------------------*/
struct clawctl {
__u8 command; /* control command */
__u8 version; /* CLAW protocol version */
__u8 linkid; /* link ID */
__u8 correlator; /* correlator */
__u8 rc; /* return code */
__u8 reserved1; /* reserved */
__u8 reserved2; /* reserved */
__u8 reserved3; /* reserved */
__u8 data[24]; /* command specific fields */
};
/*------------------------------------------------------*/
/* Data for SYSTEMVALIDATE command */
/*------------------------------------------------------*/
struct sysval {
char WS_name[8]; /* Workstation System name */
char host_name[8]; /* Host system name */
__u16 read_frame_size; /* read frame size */
__u16 write_frame_size; /* write frame size */
__u8 reserved[4]; /* reserved */
};
/*------------------------------------------------------*/
/* Data for Connect command */
/*------------------------------------------------------*/
struct conncmd {
char WS_name[8]; /* Workstation application name */
char host_name[8]; /* Host application name */
__u16 reserved1[2]; /* read frame size */
__u8 reserved2[4]; /* reserved */
};
/*------------------------------------------------------*/
/* Data for CLAW error */
/*------------------------------------------------------*/
struct clawwerror {
char reserved1[8]; /* reserved */
char reserved2[8]; /* reserved */
char reserved3[8]; /* reserved */
};
/*------------------------------------------------------*/
/* Data buffer for CLAW */
/*------------------------------------------------------*/
struct clawbuf {
char buffer[MAX_ENVELOPE_SIZE]; /* data buffer */
};
/*------------------------------------------------------*/
/* Channel control block for read and write channel */
/*------------------------------------------------------*/
struct chbk {
unsigned int devno;
int irq;
char id[CLAW_ID_SIZE];
__u32 IO_active;
__u8 claw_state;
struct irb *irb;
struct ccw_device *cdev; /* pointer to the channel device */
struct net_device *ndev;
wait_queue_head_t wait;
struct tasklet_struct tasklet;
struct timer_list timer;
unsigned long flag_a; /* atomic flags */
#define CLAW_BH_ACTIVE 0
unsigned long flag_b; /* atomic flags */
#define CLAW_WRITE_ACTIVE 0
__u8 last_dstat;
__u8 flag;
struct sk_buff_head collect_queue;
spinlock_t collect_lock;
#define CLAW_WRITE 0x02 /* - Set if this is a write channel */
#define CLAW_READ 0x01 /* - Set if this is a read channel */
#define CLAW_TIMER 0x80 /* - Set if timer made the wake_up */
};
/*--------------------------------------------------------------*
* CLAW environment block *
*---------------------------------------------------------------*/
struct claw_env {
unsigned int devno[2]; /* device number */
char host_name[9]; /* Host name */
char adapter_name [9]; /* adapter name */
char api_type[9]; /* TCPIP, API or PACKED */
void *p_priv; /* privptr */
__u16 read_buffers; /* read buffer number */
__u16 write_buffers; /* write buffer number */
__u16 read_size; /* read buffer size */
__u16 write_size; /* write buffer size */
__u16 dev_id; /* device ident */
__u8 packing; /* are we packing? */
__u8 in_use; /* device active flag */
struct net_device *ndev; /* backward ptr to the net dev*/
};
/*--------------------------------------------------------------*
* CLAW main control block *
*---------------------------------------------------------------*/
struct claw_privbk {
void *p_buff_ccw;
__u32 p_buff_ccw_num;
void *p_buff_read;
__u32 p_buff_read_num;
__u32 p_buff_pages_perread;
void *p_buff_write;
__u32 p_buff_write_num;
__u32 p_buff_pages_perwrite;
long active_link_ID; /* Active logical link ID */
struct ccwbk *p_write_free_chain; /* pointer to free ccw chain */
struct ccwbk *p_write_active_first; /* ptr to the first write ccw */
struct ccwbk *p_write_active_last; /* ptr to the last write ccw */
struct ccwbk *p_read_active_first; /* ptr to the first read ccw */
struct ccwbk *p_read_active_last; /* ptr to the last read ccw */
struct endccw *p_end_ccw; /*ptr to ending ccw */
struct ccwbk *p_claw_signal_blk; /* ptr to signal block */
__u32 write_free_count; /* number of free bufs for write */
struct net_device_stats stats; /* device status */
struct chbk channel[2]; /* Channel control blocks */
__u8 mtc_skipping;
int mtc_offset;
int mtc_logical_link;
void *p_mtc_envelope;
struct sk_buff *pk_skb; /* packing buffer */
int pk_cnt;
struct clawctl ctl_bk;
struct claw_env *p_env;
__u8 system_validate_comp;
__u8 release_pend;
__u8 checksum_received_ip_pkts;
__u8 buffs_alloc;
struct endccw end_ccw;
unsigned long tbusy;
};
/************************************************************/
/* define global constants */
/************************************************************/
#define CCWBK_SIZE sizeof(struct ccwbk)
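
The control block definitions above drive the CLAW handshake: struct clawctl carries the command byte plus a 24-byte data area that overlays one of the command-specific structs such as struct sysval. A hedged sketch of how claw.c (whose diff is suppressed above) might fill a system-validate request; the frame sizes and field choices are illustrative only and assume the definitions from this header:

#include <linux/string.h>

/* Illustrative only: build a SYSTEM_VALIDATE_REQUEST control block
 * from the environment block of this device. */
static void claw_example_build_sysval(struct clawctl *ctl,
				      struct claw_env *env)
{
	struct sysval *sv = (struct sysval *)&ctl->data;

	memset(ctl, 0, sizeof(*ctl));
	ctl->command = SYSTEM_VALIDATE_REQUEST;
	ctl->version = CLAW_VERSION_ID;
	ctl->linkid  = 0;			/* link not yet established */

	memcpy(sv->WS_name, env->adapter_name, MAX_NAME_LEN);
	memcpy(sv->host_name, env->host_name, MAX_NAME_LEN);
	sv->read_frame_size  = CLAW_FRAME_SIZE;	/* placeholder values */
	sv->write_frame_size = CLAW_FRAME_SIZE;
}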

77
drivers/s390/net/ctcm_dbug.c Normal file

@@ -0,0 +1,77 @@
/*
* Copyright IBM Corp. 2001, 2007
* Authors: Peter Tiedemann (ptiedem@de.ibm.com)
*
*/
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/ctype.h>
#include <linux/sysctl.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include "ctcm_dbug.h"
/*
* Debug Facility Stuff
*/
struct ctcm_dbf_info ctcm_dbf[CTCM_DBF_INFOS] = {
[CTCM_DBF_SETUP] = {"ctc_setup", 8, 1, 64, CTC_DBF_INFO, NULL},
[CTCM_DBF_ERROR] = {"ctc_error", 8, 1, 64, CTC_DBF_ERROR, NULL},
[CTCM_DBF_TRACE] = {"ctc_trace", 8, 1, 64, CTC_DBF_ERROR, NULL},
[CTCM_DBF_MPC_SETUP] = {"mpc_setup", 8, 1, 80, CTC_DBF_INFO, NULL},
[CTCM_DBF_MPC_ERROR] = {"mpc_error", 8, 1, 80, CTC_DBF_ERROR, NULL},
[CTCM_DBF_MPC_TRACE] = {"mpc_trace", 8, 1, 80, CTC_DBF_ERROR, NULL},
};
void ctcm_unregister_dbf_views(void)
{
int x;
for (x = 0; x < CTCM_DBF_INFOS; x++) {
debug_unregister(ctcm_dbf[x].id);
ctcm_dbf[x].id = NULL;
}
}
int ctcm_register_dbf_views(void)
{
int x;
for (x = 0; x < CTCM_DBF_INFOS; x++) {
/* register the areas */
ctcm_dbf[x].id = debug_register(ctcm_dbf[x].name,
ctcm_dbf[x].pages,
ctcm_dbf[x].areas,
ctcm_dbf[x].len);
if (ctcm_dbf[x].id == NULL) {
ctcm_unregister_dbf_views();
return -ENOMEM;
}
/* register a view */
debug_register_view(ctcm_dbf[x].id, &debug_hex_ascii_view);
/* set a passing level */
debug_set_level(ctcm_dbf[x].id, ctcm_dbf[x].level);
}
return 0;
}
void ctcm_dbf_longtext(enum ctcm_dbf_names dbf_nix, int level, char *fmt, ...)
{
char dbf_txt_buf[64];
va_list args;
if (!debug_level_enabled(ctcm_dbf[dbf_nix].id, level))
return;
va_start(args, fmt);
vsnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), fmt, args);
va_end(args);
debug_text_event(ctcm_dbf[dbf_nix].id, level, dbf_txt_buf);
}
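
ctcm_register_dbf_views() allocates one s390 debug area per entry in ctcm_dbf[], attaches the hex/ascii view and sets the pass level; ctcm_unregister_dbf_views() tears them down again. The real callers live in ctcm_main.c, whose diff is suppressed below; a minimal sketch of the expected init/exit pairing, with purely illustrative module hooks:

#include <linux/module.h>
#include "ctcm_dbug.h"

/* Illustrative only: the real registration happens in ctcm_main.c. */
static int __init ctcm_dbf_example_init(void)
{
	int rc = ctcm_register_dbf_views();

	if (rc)				/* -ENOMEM if any area failed */
		return rc;
	CTCM_DBF_TEXT(SETUP, CTC_DBF_INFO, "dbfinit");
	return 0;
}

static void __exit ctcm_dbf_example_exit(void)
{
	ctcm_unregister_dbf_views();
}

module_init(ctcm_dbf_example_init);
module_exit(ctcm_dbf_example_exit);
MODULE_LICENSE("GPL");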

141
drivers/s390/net/ctcm_dbug.h Normal file

@@ -0,0 +1,141 @@
/*
* Copyright IBM Corp. 2001, 2007
* Authors: Peter Tiedemann (ptiedem@de.ibm.com)
*
*/
#ifndef _CTCM_DBUG_H_
#define _CTCM_DBUG_H_
/*
* Debug Facility stuff
*/
#include <asm/debug.h>
#ifdef DEBUG
#define do_debug 1
#else
#define do_debug 0
#endif
#ifdef DEBUGCCW
#define do_debug_ccw 1
#define DEBUGDATA 1
#else
#define do_debug_ccw 0
#endif
#ifdef DEBUGDATA
#define do_debug_data 1
#else
#define do_debug_data 0
#endif
/* define dbf debug levels similar to kernel msg levels */
#define CTC_DBF_ALWAYS 0 /* always print this */
#define CTC_DBF_EMERG 0 /* system is unusable */
#define CTC_DBF_ALERT 1 /* action must be taken immediately */
#define CTC_DBF_CRIT 2 /* critical conditions */
#define CTC_DBF_ERROR 3 /* error conditions */
#define CTC_DBF_WARN 4 /* warning conditions */
#define CTC_DBF_NOTICE 5 /* normal but significant condition */
#define CTC_DBF_INFO 5 /* informational */
#define CTC_DBF_DEBUG 6 /* debug-level messages */
enum ctcm_dbf_names {
CTCM_DBF_SETUP,
CTCM_DBF_ERROR,
CTCM_DBF_TRACE,
CTCM_DBF_MPC_SETUP,
CTCM_DBF_MPC_ERROR,
CTCM_DBF_MPC_TRACE,
CTCM_DBF_INFOS /* must be last element */
};
struct ctcm_dbf_info {
char name[DEBUG_MAX_NAME_LEN];
int pages;
int areas;
int len;
int level;
debug_info_t *id;
};
extern struct ctcm_dbf_info ctcm_dbf[CTCM_DBF_INFOS];
int ctcm_register_dbf_views(void);
void ctcm_unregister_dbf_views(void);
void ctcm_dbf_longtext(enum ctcm_dbf_names dbf_nix, int level, char *text, ...);
static inline const char *strtail(const char *s, int n)
{
int l = strlen(s);
return (l > n) ? s + (l - n) : s;
}
#define CTCM_FUNTAIL strtail((char *)__func__, 16)
#define CTCM_DBF_TEXT(name, level, text) \
do { \
debug_text_event(ctcm_dbf[CTCM_DBF_##name].id, level, text); \
} while (0)
#define CTCM_DBF_HEX(name, level, addr, len) \
do { \
debug_event(ctcm_dbf[CTCM_DBF_##name].id, \
level, (void *)(addr), len); \
} while (0)
#define CTCM_DBF_TEXT_(name, level, text...) \
ctcm_dbf_longtext(CTCM_DBF_##name, level, text)
/*
* cat : one of {setup, mpc_setup, trace, mpc_trace, error, mpc_error}.
* dev : netdevice with valid name field.
* text: any text string.
*/
#define CTCM_DBF_DEV_NAME(cat, dev, text) \
do { \
CTCM_DBF_TEXT_(cat, CTC_DBF_INFO, "%s(%s) :- %s", \
CTCM_FUNTAIL, dev->name, text); \
} while (0)
#define MPC_DBF_DEV_NAME(cat, dev, text) \
do { \
CTCM_DBF_TEXT_(MPC_##cat, CTC_DBF_INFO, "%s(%s) := %s", \
CTCM_FUNTAIL, dev->name, text); \
} while (0)
#define CTCMY_DBF_DEV_NAME(cat, dev, text) \
do { \
if (IS_MPCDEV(dev)) \
MPC_DBF_DEV_NAME(cat, dev, text); \
else \
CTCM_DBF_DEV_NAME(cat, dev, text); \
} while (0)
/*
* cat : one of {setup, mpc_setup, trace, mpc_trace, error, mpc_error}.
* dev : netdevice.
* text: any text string.
*/
#define CTCM_DBF_DEV(cat, dev, text) \
do { \
CTCM_DBF_TEXT_(cat, CTC_DBF_INFO, "%s(%p) :-: %s", \
CTCM_FUNTAIL, dev, text); \
} while (0)
#define MPC_DBF_DEV(cat, dev, text) \
do { \
CTCM_DBF_TEXT_(MPC_##cat, CTC_DBF_INFO, "%s(%p) :=: %s", \
CTCM_FUNTAIL, dev, text); \
} while (0)
#define CTCMY_DBF_DEV(cat, dev, text) \
do { \
if (IS_MPCDEV(dev)) \
MPC_DBF_DEV(cat, dev, text); \
else \
CTCM_DBF_DEV(cat, dev, text); \
} while (0)
#endif
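
The formatted variant CTCM_DBF_TEXT_ routes through ctcm_dbf_longtext(), which drops the message early via debug_level_enabled() and formats into a 64-byte buffer, so callers can pass printf-style arguments without keeping a local buffer. A hedged usage sketch; the function, device pointer and message text are placeholders:

#include <linux/netdevice.h>
#include "ctcm_dbug.h"

/* Illustrative logging from a device-level code path. */
static void ctcm_example_log_open(struct net_device *dev, int rc)
{
	CTCM_DBF_DEV_NAME(TRACE, dev, "open");
	if (rc)
		CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
			       "%s(%s): open failed, rc=%d",
			       CTCM_FUNTAIL, dev->name, rc);
}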

2304
drivers/s390/net/ctcm_fsms.c Normal file

File diff suppressed because it is too large.

356
drivers/s390/net/ctcm_fsms.h Normal file

@@ -0,0 +1,356 @@
/*
* Copyright IBM Corp. 2001, 2007
* Authors: Fritz Elfert (felfert@millenux.com)
* Peter Tiedemann (ptiedem@de.ibm.com)
* MPC additions :
* Belinda Thompson (belindat@us.ibm.com)
* Andy Richter (richtera@us.ibm.com)
*/
#ifndef _CTCM_FSMS_H_
#define _CTCM_FSMS_H_
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/bitops.h>
#include <linux/signal.h>
#include <linux/string.h>
#include <linux/ip.h>
#include <linux/if_arp.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <linux/ctype.h>
#include <net/dst.h>
#include <linux/io.h>
#include <asm/ccwdev.h>
#include <asm/ccwgroup.h>
#include <linux/uaccess.h>
#include <asm/idals.h>
#include "fsm.h"
#include "ctcm_main.h"
/*
* Definitions for the channel statemachine(s) for ctc and ctcmpc
*
* To allow better kerntyping, prefix-less definitions for channel states
* and channel events have been replaced :
* ch_event... -> ctc_ch_event...
* CH_EVENT... -> CTC_EVENT...
* ch_state... -> ctc_ch_state...
* CH_STATE... -> CTC_STATE...
*/
/*
* Events of the channel statemachine(s) for ctc and ctcmpc
*/
enum ctc_ch_events {
/*
* Events, representing return code of
* I/O operations (ccw_device_start, ccw_device_halt et al.)
*/
CTC_EVENT_IO_SUCCESS,
CTC_EVENT_IO_EBUSY,
CTC_EVENT_IO_ENODEV,
CTC_EVENT_IO_UNKNOWN,
CTC_EVENT_ATTNBUSY,
CTC_EVENT_ATTN,
CTC_EVENT_BUSY,
/*
* Events, representing unit-check
*/
CTC_EVENT_UC_RCRESET,
CTC_EVENT_UC_RSRESET,
CTC_EVENT_UC_TXTIMEOUT,
CTC_EVENT_UC_TXPARITY,
CTC_EVENT_UC_HWFAIL,
CTC_EVENT_UC_RXPARITY,
CTC_EVENT_UC_ZERO,
CTC_EVENT_UC_UNKNOWN,
/*
* Events, representing subchannel-check
*/
CTC_EVENT_SC_UNKNOWN,
/*
* Events, representing machine checks
*/
CTC_EVENT_MC_FAIL,
CTC_EVENT_MC_GOOD,
/*
* Event, representing normal IRQ
*/
CTC_EVENT_IRQ,
CTC_EVENT_FINSTAT,
/*
* Event, representing timer expiry.
*/
CTC_EVENT_TIMER,
/*
* Events, representing commands from upper levels.
*/
CTC_EVENT_START,
CTC_EVENT_STOP,
CTC_NR_EVENTS,
/*
* additional MPC events
*/
CTC_EVENT_SEND_XID = CTC_NR_EVENTS,
CTC_EVENT_RSWEEP_TIMER,
/*
* MUST be always the last element!!
*/
CTC_MPC_NR_EVENTS,
};
/*
* States of the channel statemachine(s) for ctc and ctcmpc.
*/
enum ctc_ch_states {
/*
* Channel not assigned to any device,
* initial state, direction invalid
*/
CTC_STATE_IDLE,
/*
* Channel assigned but not operating
*/
CTC_STATE_STOPPED,
CTC_STATE_STARTWAIT,
CTC_STATE_STARTRETRY,
CTC_STATE_SETUPWAIT,
CTC_STATE_RXINIT,
CTC_STATE_TXINIT,
CTC_STATE_RX,
CTC_STATE_TX,
CTC_STATE_RXIDLE,
CTC_STATE_TXIDLE,
CTC_STATE_RXERR,
CTC_STATE_TXERR,
CTC_STATE_TERM,
CTC_STATE_DTERM,
CTC_STATE_NOTOP,
CTC_NR_STATES, /* MUST be the last element of non-expanded states */
/*
* additional MPC states
*/
CH_XID0_PENDING = CTC_NR_STATES,
CH_XID0_INPROGRESS,
CH_XID7_PENDING,
CH_XID7_PENDING1,
CH_XID7_PENDING2,
CH_XID7_PENDING3,
CH_XID7_PENDING4,
CTC_MPC_NR_STATES, /* MUST be the last element of expanded mpc states */
};
extern const char *ctc_ch_event_names[];
extern const char *ctc_ch_state_names[];
void ctcm_ccw_check_rc(struct channel *ch, int rc, char *msg);
void ctcm_purge_skb_queue(struct sk_buff_head *q);
void fsm_action_nop(fsm_instance *fi, int event, void *arg);
/*
* ----- non-static actions for ctcm channel statemachine -----
*
*/
void ctcm_chx_txidle(fsm_instance *fi, int event, void *arg);
/*
* ----- FSM (state/event/action) of the ctcm channel statemachine -----
*/
extern const fsm_node ch_fsm[];
extern int ch_fsm_len;
/*
* ----- non-static actions for ctcmpc channel statemachine ----
*
*/
/* shared :
void ctcm_chx_txidle(fsm_instance * fi, int event, void *arg);
*/
void ctcmpc_chx_rxidle(fsm_instance *fi, int event, void *arg);
/*
* ----- FSM (state/event/action) of the ctcmpc channel statemachine -----
*/
extern const fsm_node ctcmpc_ch_fsm[];
extern int mpc_ch_fsm_len;
/*
* Definitions for the device interface statemachine for ctc and mpc
*/
/*
* States of the device interface statemachine.
*/
enum dev_states {
DEV_STATE_STOPPED,
DEV_STATE_STARTWAIT_RXTX,
DEV_STATE_STARTWAIT_RX,
DEV_STATE_STARTWAIT_TX,
DEV_STATE_STOPWAIT_RXTX,
DEV_STATE_STOPWAIT_RX,
DEV_STATE_STOPWAIT_TX,
DEV_STATE_RUNNING,
/*
* MUST be always the last element!!
*/
CTCM_NR_DEV_STATES
};
extern const char *dev_state_names[];
/*
* Events of the device interface statemachine.
* ctcm and ctcmpc
*/
enum dev_events {
DEV_EVENT_START,
DEV_EVENT_STOP,
DEV_EVENT_RXUP,
DEV_EVENT_TXUP,
DEV_EVENT_RXDOWN,
DEV_EVENT_TXDOWN,
DEV_EVENT_RESTART,
/*
* MUST be always the last element!!
*/
CTCM_NR_DEV_EVENTS
};
extern const char *dev_event_names[];
/*
* Actions for the device interface statemachine.
* ctc and ctcmpc
*/
/*
static void dev_action_start(fsm_instance * fi, int event, void *arg);
static void dev_action_stop(fsm_instance * fi, int event, void *arg);
static void dev_action_restart(fsm_instance *fi, int event, void *arg);
static void dev_action_chup(fsm_instance * fi, int event, void *arg);
static void dev_action_chdown(fsm_instance * fi, int event, void *arg);
*/
/*
* The (state/event/action) fsm table of the device interface statemachine.
* ctcm and ctcmpc
*/
extern const fsm_node dev_fsm[];
extern int dev_fsm_len;
/*
* Definitions for the MPC Group statemachine
*/
/*
* MPC Group Station FSM States
State Name When In This State
====================== =======================================
MPCG_STATE_RESET Initial State When Driver Loaded
We receive and send NOTHING
MPCG_STATE_INOP INOP Received.
Group level non-recoverable error
MPCG_STATE_READY XID exchanges for at least 1 write and
1 read channel have completed.
Group is ready for data transfer.
States from ctc_mpc_alloc_channel
==============================================================
MPCG_STATE_XID2INITW Awaiting XID2(0) Initiation
ATTN from other side will start
XID negotiations.
Y-side protocol only.
MPCG_STATE_XID2INITX XID2(0) negotiations are in progress.
At least 1, but not all, XID2(0)'s
have been received from partner.
MPCG_STATE_XID7INITW XID2(0) complete
No XID2(7)'s have yet been received.
XID2(7) negotiations pending.
MPCG_STATE_XID7INITX XID2(7) negotiations in progress.
At least 1, but not all, XID2(7)'s
have been received from partner.
MPCG_STATE_XID7INITF XID2(7) negotiations complete.
Transitioning to READY.
MPCG_STATE_READY Ready for Data Transfer.
States from ctc_mpc_establish_connectivity call
==============================================================
MPCG_STATE_XID0IOWAIT Initiating XID2(0) negotiations.
X-side protocol only.
ATTN-BUSY from other side will convert
this to Y-side protocol and the
ctc_mpc_alloc_channel flow will begin.
MPCG_STATE_XID0IOWAIX XID2(0) negotiations are in progress.
At least 1, but not all, XID2(0)'s
have been received from partner.
MPCG_STATE_XID7INITI XID2(0) complete
No XID2(7)'s have yet been received.
XID2(7) negotiations pending.
MPCG_STATE_XID7INITZ XID2(7) negotiations in progress.
At least 1, but not all, XID2(7)'s
have been received from partner.
MPCG_STATE_XID7INITF XID2(7) negotiations complete.
Transitioning to READY.
MPCG_STATE_READY Ready for Data Transfer.
*/
enum mpcg_events {
MPCG_EVENT_INOP,
MPCG_EVENT_DISCONC,
MPCG_EVENT_XID0DO,
MPCG_EVENT_XID2,
MPCG_EVENT_XID2DONE,
MPCG_EVENT_XID7DONE,
MPCG_EVENT_TIMER,
MPCG_EVENT_DOIO,
MPCG_NR_EVENTS,
};
enum mpcg_states {
MPCG_STATE_RESET,
MPCG_STATE_INOP,
MPCG_STATE_XID2INITW,
MPCG_STATE_XID2INITX,
MPCG_STATE_XID7INITW,
MPCG_STATE_XID7INITX,
MPCG_STATE_XID0IOWAIT,
MPCG_STATE_XID0IOWAIX,
MPCG_STATE_XID7INITI,
MPCG_STATE_XID7INITZ,
MPCG_STATE_XID7INITF,
MPCG_STATE_FLOWC,
MPCG_STATE_READY,
MPCG_NR_STATES,
};
#endif
/* --- This is the END my friend --- */
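
The channel FSM is table-driven: ctcm_fsms.c (diff suppressed above) fills ch_fsm[] and ctcmpc_ch_fsm[] with fsm_node entries that bind one (state, event) pair from the enums above to an action function. A hypothetical fragment showing the shape of such a table; the action below is a placeholder, not the driver's real handler:

#include "fsm.h"
#include "ctcm_fsms.h"

/* Placeholder action: move the channel out of STOPPED on START. */
static void example_chx_start(fsm_instance *fi, int event, void *arg)
{
	fsm_newstate(fi, CTC_STATE_STARTWAIT);
}

static const fsm_node example_ch_fsm[] = {
	{ CTC_STATE_STOPPED,   CTC_EVENT_START, example_chx_start },
	{ CTC_STATE_STARTWAIT, CTC_EVENT_TIMER, fsm_action_nop    },
};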

1873
drivers/s390/net/ctcm_main.c Normal file

File diff suppressed because it is too large.

315
drivers/s390/net/ctcm_main.h Normal file

@@ -0,0 +1,315 @@
/*
* Copyright IBM Corp. 2001, 2007
* Authors: Fritz Elfert (felfert@millenux.com)
* Peter Tiedemann (ptiedem@de.ibm.com)
*/
#ifndef _CTCM_MAIN_H_
#define _CTCM_MAIN_H_
#include <asm/ccwdev.h>
#include <asm/ccwgroup.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include "fsm.h"
#include "ctcm_dbug.h"
#include "ctcm_mpc.h"
#define CTC_DRIVER_NAME "ctcm"
#define CTC_DEVICE_NAME "ctc"
#define MPC_DEVICE_NAME "mpc"
#define CTC_DEVICE_GENE CTC_DEVICE_NAME "%d"
#define MPC_DEVICE_GENE MPC_DEVICE_NAME "%d"
#define CHANNEL_FLAGS_READ 0
#define CHANNEL_FLAGS_WRITE 1
#define CHANNEL_FLAGS_INUSE 2
#define CHANNEL_FLAGS_BUFSIZE_CHANGED 4
#define CHANNEL_FLAGS_FAILED 8
#define CHANNEL_FLAGS_WAITIRQ 16
#define CHANNEL_FLAGS_RWMASK 1
#define CHANNEL_DIRECTION(f) (f & CHANNEL_FLAGS_RWMASK)
#define LOG_FLAG_ILLEGALPKT 1
#define LOG_FLAG_ILLEGALSIZE 2
#define LOG_FLAG_OVERRUN 4
#define LOG_FLAG_NOMEM 8
#define ctcm_pr_debug(fmt, arg...) printk(KERN_DEBUG fmt, ##arg)
#define CTCM_PR_DEBUG(fmt, arg...) \
do { \
if (do_debug) \
printk(KERN_DEBUG fmt, ##arg); \
} while (0)
#define CTCM_PR_DBGDATA(fmt, arg...) \
do { \
if (do_debug_data) \
printk(KERN_DEBUG fmt, ##arg); \
} while (0)
#define CTCM_D3_DUMP(buf, len) \
do { \
if (do_debug_data) \
ctcmpc_dumpit(buf, len); \
} while (0)
#define CTCM_CCW_DUMP(buf, len) \
do { \
if (do_debug_ccw) \
ctcmpc_dumpit(buf, len); \
} while (0)
/**
* Enum for classifying detected devices
*/
enum ctcm_channel_types {
/* Device is not a channel */
ctcm_channel_type_none,
/* Device is a CTC/A */
ctcm_channel_type_parallel,
/* Device is a FICON channel */
ctcm_channel_type_ficon,
/* Device is an ESCON channel */
ctcm_channel_type_escon
};
/*
* CCW commands, used in this driver.
*/
#define CCW_CMD_WRITE 0x01
#define CCW_CMD_READ 0x02
#define CCW_CMD_NOOP 0x03
#define CCW_CMD_TIC 0x08
#define CCW_CMD_SENSE_CMD 0x14
#define CCW_CMD_WRITE_CTL 0x17
#define CCW_CMD_SET_EXTENDED 0xc3
#define CCW_CMD_PREPARE 0xe3
#define CTCM_PROTO_S390 0
#define CTCM_PROTO_LINUX 1
#define CTCM_PROTO_LINUX_TTY 2
#define CTCM_PROTO_OS390 3
#define CTCM_PROTO_MPC 4
#define CTCM_PROTO_MAX 4
#define CTCM_BUFSIZE_LIMIT 65535
#define CTCM_BUFSIZE_DEFAULT 32768
#define MPC_BUFSIZE_DEFAULT CTCM_BUFSIZE_LIMIT
#define CTCM_TIME_1_SEC 1000
#define CTCM_TIME_5_SEC 5000
#define CTCM_TIME_10_SEC 10000
#define CTCM_INITIAL_BLOCKLEN 2
#define CTCM_READ 0
#define CTCM_WRITE 1
#define CTCM_ID_SIZE 20+3
struct ctcm_profile {
unsigned long maxmulti;
unsigned long maxcqueue;
unsigned long doios_single;
unsigned long doios_multi;
unsigned long txlen;
unsigned long tx_time;
struct timespec send_stamp;
};
/*
* Definition of one channel
*/
struct channel {
struct channel *next;
char id[CTCM_ID_SIZE];
struct ccw_device *cdev;
/*
* Type of this channel.
* CTC/A or Escon for valid channels.
*/
enum ctcm_channel_types type;
/*
* Misc. flags. See CHANNEL_FLAGS_... below
*/
__u32 flags;
__u16 protocol; /* protocol of this channel (4 = MPC) */
/*
* I/O and irq related stuff
*/
struct ccw1 *ccw;
struct irb *irb;
/*
* RX/TX buffer size
*/
int max_bufsize;
struct sk_buff *trans_skb; /* transmit/receive buffer */
struct sk_buff_head io_queue; /* universal I/O queue */
struct tasklet_struct ch_tasklet; /* MPC ONLY */
/*
* TX queue for collecting skb's during busy.
*/
struct sk_buff_head collect_queue;
/*
* Amount of data in collect_queue.
*/
int collect_len;
/*
* spinlock for collect_queue and collect_len
*/
spinlock_t collect_lock;
/*
* Timer for detecting unresponsive
* I/O operations.
*/
fsm_timer timer;
/* MPC ONLY section begin */
__u32 th_seq_num; /* SNA TH seq number */
__u8 th_seg;
__u32 pdu_seq;
struct sk_buff *xid_skb;
char *xid_skb_data;
struct th_header *xid_th;
struct xid2 *xid;
char *xid_id;
struct th_header *rcvd_xid_th;
struct xid2 *rcvd_xid;
char *rcvd_xid_id;
__u8 in_mpcgroup;
fsm_timer sweep_timer;
struct sk_buff_head sweep_queue;
struct th_header *discontact_th;
struct tasklet_struct ch_disc_tasklet;
/* MPC ONLY section end */
int retry; /* retry counter for misc. operations */
fsm_instance *fsm; /* finite state machine of this channel */
struct net_device *netdev; /* corresponding net_device */
struct ctcm_profile prof;
__u8 *trans_skb_data;
__u16 logflags;
__u8 sense_rc; /* last unit check sense code report control */
};
struct ctcm_priv {
struct net_device_stats stats;
unsigned long tbusy;
/* The MPC group struct of this interface */
struct mpc_group *mpcg; /* MPC only */
struct xid2 *xid; /* MPC only */
/* The finite state machine of this interface */
fsm_instance *fsm;
/* The protocol of this device */
__u16 protocol;
/* Timer for restarting after I/O Errors */
fsm_timer restart_timer;
int buffer_size; /* ctc only */
struct channel *channel[2];
};
int ctcm_open(struct net_device *dev);
int ctcm_close(struct net_device *dev);
extern const struct attribute_group *ctcm_attr_groups[];
/*
* Compatibility macros for busy handling
* of network devices.
*/
static inline void ctcm_clear_busy_do(struct net_device *dev)
{
clear_bit(0, &(((struct ctcm_priv *)dev->ml_priv)->tbusy));
netif_wake_queue(dev);
}
static inline void ctcm_clear_busy(struct net_device *dev)
{
struct mpc_group *grp;
grp = ((struct ctcm_priv *)dev->ml_priv)->mpcg;
if (!(grp && grp->in_sweep))
ctcm_clear_busy_do(dev);
}
static inline int ctcm_test_and_set_busy(struct net_device *dev)
{
netif_stop_queue(dev);
return test_and_set_bit(0,
&(((struct ctcm_priv *)dev->ml_priv)->tbusy));
}
extern int loglevel;
extern struct channel *channels;
void ctcm_unpack_skb(struct channel *ch, struct sk_buff *pskb);
/*
* Functions related to setup and device detection.
*/
static inline int ctcm_less_than(char *id1, char *id2)
{
unsigned long dev1, dev2;
id1 = id1 + 5;
id2 = id2 + 5;
dev1 = simple_strtoul(id1, &id1, 16);
dev2 = simple_strtoul(id2, &id2, 16);
return (dev1 < dev2);
}
int ctcm_ch_alloc_buffer(struct channel *ch);
static inline int ctcm_checkalloc_buffer(struct channel *ch)
{
if (ch->trans_skb == NULL)
return ctcm_ch_alloc_buffer(ch);
if (ch->flags & CHANNEL_FLAGS_BUFSIZE_CHANGED) {
dev_kfree_skb(ch->trans_skb);
return ctcm_ch_alloc_buffer(ch);
}
return 0;
}
struct mpc_group *ctcmpc_init_mpc_group(struct ctcm_priv *priv);
/* test if protocol attribute (of struct ctcm_priv or struct channel)
* has MPC protocol setting. Type is not checked
*/
#define IS_MPC(p) ((p)->protocol == CTCM_PROTO_MPC)
/* test if struct ctcm_priv of struct net_device has MPC protocol setting */
#define IS_MPCDEV(dev) IS_MPC((struct ctcm_priv *)dev->ml_priv)
static inline gfp_t gfp_type(void)
{
return in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;
}
/*
* Definition of our link level header.
*/
struct ll_header {
__u16 length;
__u16 type;
__u16 unused;
};
#define LL_HEADER_LENGTH (sizeof(struct ll_header))
#endif
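
The inline helpers above implement the driver's busy handling and lazy buffer allocation: ctcm_test_and_set_busy() stops the queue and sets the tbusy bit, ctcm_checkalloc_buffer() (re)allocates trans_skb when the configured buffer size changed, and CHANNEL_DIRECTION() extracts the read/write bit from the channel flags. A sketch of how they might combine on a transmit path; the wiring is illustrative, not the code from ctcm_main.c:

#include <linux/errno.h>
#include "ctcm_main.h"

/* Illustrative transmit-side guard built from the helpers above. */
static int ctcm_example_tx_prepare(struct net_device *dev)
{
	struct ctcm_priv *priv = dev->ml_priv;
	struct channel *ch = priv->channel[CTCM_WRITE];

	if (ctcm_test_and_set_busy(dev))
		return -EBUSY;			/* another tx in flight */

	if (CHANNEL_DIRECTION(ch->flags) != CHANNEL_FLAGS_WRITE ||
	    ctcm_checkalloc_buffer(ch)) {
		ctcm_clear_busy(dev);
		return -ENOMEM;
	}
	return 0;
}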

2174
drivers/s390/net/ctcm_mpc.c Normal file

File diff suppressed because it is too large.

238
drivers/s390/net/ctcm_mpc.h Normal file

@@ -0,0 +1,238 @@
/*
* Copyright IBM Corp. 2007
* Authors: Peter Tiedemann (ptiedem@de.ibm.com)
*
* MPC additions:
* Belinda Thompson (belindat@us.ibm.com)
* Andy Richter (richtera@us.ibm.com)
*/
#ifndef _CTC_MPC_H_
#define _CTC_MPC_H_
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include "fsm.h"
/*
* MPC external interface
* Note that ctc_mpc_xyz are called with a lock on ................
*/
/* port_number is the mpc device 0, 1, 2 etc mpc2 is port_number 2 */
/* passive open Just wait for XID2 exchange */
extern int ctc_mpc_alloc_channel(int port,
void (*callback)(int port_num, int max_write_size));
/* active open Alloc then send XID2 */
extern void ctc_mpc_establish_connectivity(int port,
void (*callback)(int port_num, int rc, int max_write_size));
extern void ctc_mpc_dealloc_ch(int port);
extern void ctc_mpc_flow_control(int port, int flowc);
/*
* other MPC Group prototypes and structures
*/
#define ETH_P_SNA_DIX 0x80D5
/*
* Declaration of an XID2
*
*/
#define ALLZEROS 0x0000000000000000
#define XID_FM2 0x20
#define XID2_0 0x00
#define XID2_7 0x07
#define XID2_WRITE_SIDE 0x04
#define XID2_READ_SIDE 0x05
struct xid2 {
__u8 xid2_type_id;
__u8 xid2_len;
__u32 xid2_adj_id;
__u8 xid2_rlen;
__u8 xid2_resv1;
__u8 xid2_flag1;
__u8 xid2_fmtt;
__u8 xid2_flag4;
__u16 xid2_resv2;
__u8 xid2_tgnum;
__u32 xid2_sender_id;
__u8 xid2_flag2;
__u8 xid2_option;
char xid2_resv3[8];
__u16 xid2_resv4;
__u8 xid2_dlc_type;
__u16 xid2_resv5;
__u8 xid2_mpc_flag;
__u8 xid2_resv6;
__u16 xid2_buf_len;
char xid2_buffer[255 - (13 * sizeof(__u8) +
2 * sizeof(__u32) +
4 * sizeof(__u16) +
8 * sizeof(char))];
} __attribute__ ((packed));
#define XID2_LENGTH (sizeof(struct xid2))
struct th_header {
__u8 th_seg;
__u8 th_ch_flag;
#define TH_HAS_PDU 0xf0
#define TH_IS_XID 0x01
#define TH_SWEEP_REQ 0xfe
#define TH_SWEEP_RESP 0xff
__u8 th_blk_flag;
#define TH_DATA_IS_XID 0x80
#define TH_RETRY 0x40
#define TH_DISCONTACT 0xc0
#define TH_SEG_BLK 0x20
#define TH_LAST_SEG 0x10
#define TH_PDU_PART 0x08
__u8 th_is_xid; /* is 0x01 if this is XID */
__u32 th_seq_num;
} __attribute__ ((packed));
struct th_addon {
__u32 th_last_seq;
__u32 th_resvd;
} __attribute__ ((packed));
struct th_sweep {
struct th_header th;
struct th_addon sw;
} __attribute__ ((packed));
#define TH_HEADER_LENGTH (sizeof(struct th_header))
#define TH_SWEEP_LENGTH (sizeof(struct th_sweep))
#define PDU_LAST 0x80
#define PDU_CNTL 0x40
#define PDU_FIRST 0x20
struct pdu {
__u32 pdu_offset;
__u8 pdu_flag;
__u8 pdu_proto; /* 0x01 is APPN SNA */
__u16 pdu_seq;
} __attribute__ ((packed));
#define PDU_HEADER_LENGTH (sizeof(struct pdu))
struct qllc {
__u8 qllc_address;
#define QLLC_REQ 0xFF
#define QLLC_RESP 0x00
__u8 qllc_commands;
#define QLLC_DISCONNECT 0x53
#define QLLC_UNSEQACK 0x73
#define QLLC_SETMODE 0x93
#define QLLC_EXCHID 0xBF
} __attribute__ ((packed));
/*
* Definition of one MPC group
*/
#define MAX_MPCGCHAN 10
#define MPC_XID_TIMEOUT_VALUE 10000
#define MPC_CHANNEL_ADD 0
#define MPC_CHANNEL_REMOVE 1
#define MPC_CHANNEL_ATTN 2
#define XSIDE 1
#define YSIDE 0
struct mpcg_info {
struct sk_buff *skb;
struct channel *ch;
struct xid2 *xid;
struct th_sweep *sweep;
struct th_header *th;
};
struct mpc_group {
struct tasklet_struct mpc_tasklet;
struct tasklet_struct mpc_tasklet2;
int changed_side;
int saved_state;
int channels_terminating;
int out_of_sequence;
int flow_off_called;
int port_num;
int port_persist;
int alloc_called;
__u32 xid2_adj_id;
__u8 xid2_tgnum;
__u32 xid2_sender_id;
int num_channel_paths;
int active_channels[2];
__u16 group_max_buflen;
int outstanding_xid2;
int outstanding_xid7;
int outstanding_xid7_p2;
int sweep_req_pend_num;
int sweep_rsp_pend_num;
struct sk_buff *xid_skb;
char *xid_skb_data;
struct th_header *xid_th;
struct xid2 *xid;
char *xid_id;
struct th_header *rcvd_xid_th;
struct sk_buff *rcvd_xid_skb;
char *rcvd_xid_data;
__u8 in_sweep;
__u8 roll;
struct xid2 *saved_xid2;
void (*allochanfunc)(int, int);
int allocchan_callback_retries;
void (*estconnfunc)(int, int, int);
int estconn_callback_retries;
int estconn_called;
int xidnogood;
int send_qllc_disc;
fsm_timer timer;
fsm_instance *fsm; /* group xid fsm */
};
#ifdef DEBUGDATA
void ctcmpc_dumpit(char *buf, int len);
#else
static inline void ctcmpc_dumpit(char *buf, int len)
{
}
#endif
#ifdef DEBUGDATA
/*
* Dump header and first 16 bytes of an sk_buff for debugging purposes.
*
* skb The struct sk_buff to dump.
* offset Offset relative to skb-data, where to start the dump.
*/
void ctcmpc_dump_skb(struct sk_buff *skb, int offset);
#else
static inline void ctcmpc_dump_skb(struct sk_buff *skb, int offset)
{}
#endif
static inline void ctcmpc_dump32(char *buf, int len)
{
if (len < 32)
ctcmpc_dumpit(buf, len);
else
ctcmpc_dumpit(buf, 32);
}
int ctcmpc_open(struct net_device *);
void ctcm_ccw_check_rc(struct channel *, int, char *);
void mpc_group_ready(unsigned long adev);
void mpc_channel_action(struct channel *ch, int direction, int action);
void mpc_action_send_discontact(unsigned long thischan);
void mpc_action_discontact(fsm_instance *fi, int event, void *arg);
void ctcmpc_bh(unsigned long thischan);
#endif
/* --- This is the END my friend --- */
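
ctc_mpc_alloc_channel() is the passive-open half of the external MPC interface: the caller names an MPC port and supplies a callback that fires once the partner-initiated XID2 exchange has negotiated the maximum write size. A hypothetical consumer; the port number, callback body and globals are placeholders:

#include <linux/kernel.h>
#include "ctcm_mpc.h"

static int example_mpc_write_size;

/* Called by the MPC layer when the XID2 exchange completes. */
static void example_alloc_done(int port_num, int max_write_size)
{
	example_mpc_write_size = max_write_size;
	pr_info("mpc port %d ready, max write %d\n",
		port_num, max_write_size);
}

static int example_mpc_passive_open(void)
{
	/* wait for the partner to start the XID2 exchange on port 0 */
	return ctc_mpc_alloc_channel(0, example_alloc_done);
}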

208
drivers/s390/net/ctcm_sysfs.c Normal file

@@ -0,0 +1,208 @@
/*
* Copyright IBM Corp. 2007, 2007
* Authors: Peter Tiedemann (ptiedem@de.ibm.com)
*
*/
#undef DEBUG
#undef DEBUGDATA
#undef DEBUGCCW
#define KMSG_COMPONENT "ctcm"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/device.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include "ctcm_main.h"
/*
* sysfs attributes
*/
static ssize_t ctcm_buffer_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ctcm_priv *priv = dev_get_drvdata(dev);
if (!priv)
return -ENODEV;
return sprintf(buf, "%d\n", priv->buffer_size);
}
static ssize_t ctcm_buffer_write(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct net_device *ndev;
unsigned int bs1;
struct ctcm_priv *priv = dev_get_drvdata(dev);
int rc;
if (!(priv && priv->channel[CTCM_READ] &&
priv->channel[CTCM_READ]->netdev)) {
CTCM_DBF_TEXT(SETUP, CTC_DBF_ERROR, "bfnondev");
return -ENODEV;
}
ndev = priv->channel[CTCM_READ]->netdev;
rc = sscanf(buf, "%u", &bs1);
if (rc != 1)
goto einval;
if (bs1 > CTCM_BUFSIZE_LIMIT)
goto einval;
if (bs1 < (576 + LL_HEADER_LENGTH + 2))
goto einval;
priv->buffer_size = bs1; /* just to overwrite the default */
if ((ndev->flags & IFF_RUNNING) &&
(bs1 < (ndev->mtu + LL_HEADER_LENGTH + 2)))
goto einval;
priv->channel[CTCM_READ]->max_bufsize = bs1;
priv->channel[CTCM_WRITE]->max_bufsize = bs1;
if (!(ndev->flags & IFF_RUNNING))
ndev->mtu = bs1 - LL_HEADER_LENGTH - 2;
priv->channel[CTCM_READ]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED;
priv->channel[CTCM_WRITE]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED;
CTCM_DBF_DEV(SETUP, ndev, buf);
return count;
einval:
CTCM_DBF_DEV(SETUP, ndev, "buff_err");
return -EINVAL;
}
static void ctcm_print_statistics(struct ctcm_priv *priv)
{
char *sbuf;
char *p;
if (!priv)
return;
sbuf = kmalloc(2048, GFP_KERNEL);
if (sbuf == NULL)
return;
p = sbuf;
p += sprintf(p, " Device FSM state: %s\n",
fsm_getstate_str(priv->fsm));
p += sprintf(p, " RX channel FSM state: %s\n",
fsm_getstate_str(priv->channel[CTCM_READ]->fsm));
p += sprintf(p, " TX channel FSM state: %s\n",
fsm_getstate_str(priv->channel[CTCM_WRITE]->fsm));
p += sprintf(p, " Max. TX buffer used: %ld\n",
priv->channel[WRITE]->prof.maxmulti);
p += sprintf(p, " Max. chained SKBs: %ld\n",
priv->channel[WRITE]->prof.maxcqueue);
p += sprintf(p, " TX single write ops: %ld\n",
priv->channel[WRITE]->prof.doios_single);
p += sprintf(p, " TX multi write ops: %ld\n",
priv->channel[WRITE]->prof.doios_multi);
p += sprintf(p, " Netto bytes written: %ld\n",
priv->channel[WRITE]->prof.txlen);
p += sprintf(p, " Max. TX IO-time: %ld\n",
priv->channel[WRITE]->prof.tx_time);
printk(KERN_INFO "Statistics for %s:\n%s",
priv->channel[CTCM_WRITE]->netdev->name, sbuf);
kfree(sbuf);
return;
}
static ssize_t stats_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
struct ctcm_priv *priv = dev_get_drvdata(dev);
if (!priv || gdev->state != CCWGROUP_ONLINE)
return -ENODEV;
ctcm_print_statistics(priv);
return sprintf(buf, "0\n");
}
static ssize_t stats_write(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct ctcm_priv *priv = dev_get_drvdata(dev);
if (!priv)
return -ENODEV;
/* Reset statistics */
memset(&priv->channel[WRITE]->prof, 0,
sizeof(priv->channel[CTCM_WRITE]->prof));
return count;
}
static ssize_t ctcm_proto_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ctcm_priv *priv = dev_get_drvdata(dev);
if (!priv)
return -ENODEV;
return sprintf(buf, "%d\n", priv->protocol);
}
static ssize_t ctcm_proto_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
int value, rc;
struct ctcm_priv *priv = dev_get_drvdata(dev);
if (!priv)
return -ENODEV;
rc = sscanf(buf, "%d", &value);
if ((rc != 1) ||
!((value == CTCM_PROTO_S390) ||
(value == CTCM_PROTO_LINUX) ||
(value == CTCM_PROTO_MPC) ||
(value == CTCM_PROTO_OS390)))
return -EINVAL;
priv->protocol = value;
CTCM_DBF_DEV(SETUP, dev, buf);
return count;
}
static const char *ctcm_type[] = {
"not a channel",
"CTC/A",
"FICON channel",
"ESCON channel",
"unknown channel type",
"unsupported channel type",
};
static ssize_t ctcm_type_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ccwgroup_device *cgdev;
cgdev = to_ccwgroupdev(dev);
if (!cgdev)
return -ENODEV;
return sprintf(buf, "%s\n",
ctcm_type[cgdev->cdev[0]->id.driver_info]);
}
static DEVICE_ATTR(buffer, 0644, ctcm_buffer_show, ctcm_buffer_write);
static DEVICE_ATTR(protocol, 0644, ctcm_proto_show, ctcm_proto_store);
static DEVICE_ATTR(type, 0444, ctcm_type_show, NULL);
static DEVICE_ATTR(stats, 0644, stats_show, stats_write);
static struct attribute *ctcm_attr[] = {
&dev_attr_protocol.attr,
&dev_attr_type.attr,
&dev_attr_buffer.attr,
&dev_attr_stats.attr,
NULL,
};
static struct attribute_group ctcm_attr_group = {
.attrs = ctcm_attr,
};
const struct attribute_group *ctcm_attr_groups[] = {
&ctcm_attr_group,
NULL,
};
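
The four DEVICE_ATTR entries are collected into ctcm_attr_group and exported through ctcm_attr_groups (declared in ctcm_main.h above) so the probe code in ctcm_main.c, whose diff is suppressed above, can attach them to the ccwgroup device. A hedged sketch of such an attachment using the generic sysfs API; the helper functions are hypothetical:

#include <linux/device.h>
#include <linux/sysfs.h>

/* Hypothetical attach/detach helpers; the real hookup happens during
 * ccwgroup probe/remove in ctcm_main.c. */
static int ctcm_example_add_attrs(struct device *dev)
{
	return sysfs_create_group(&dev->kobj, &ctcm_attr_group);
}

static void ctcm_example_remove_attrs(struct device *dev)
{
	sysfs_remove_group(&dev->kobj, &ctcm_attr_group);
}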

214
drivers/s390/net/fsm.c Normal file

@@ -0,0 +1,214 @@
/**
* A generic FSM based on fsm used in isdn4linux
*
*/
#include "fsm.h"
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
MODULE_AUTHOR("(C) 2000 IBM Corp. by Fritz Elfert (felfert@millenux.com)");
MODULE_DESCRIPTION("Finite state machine helper functions");
MODULE_LICENSE("GPL");
fsm_instance *
init_fsm(char *name, const char **state_names, const char **event_names, int nr_states,
int nr_events, const fsm_node *tmpl, int tmpl_len, gfp_t order)
{
int i;
fsm_instance *this;
fsm_function_t *m;
fsm *f;
this = kzalloc(sizeof(fsm_instance), order);
if (this == NULL) {
printk(KERN_WARNING
"fsm(%s): init_fsm: Couldn't alloc instance\n", name);
return NULL;
}
strlcpy(this->name, name, sizeof(this->name));
init_waitqueue_head(&this->wait_q);
f = kzalloc(sizeof(fsm), order);
if (f == NULL) {
printk(KERN_WARNING
"fsm(%s): init_fsm: Couldn't alloc fsm\n", name);
kfree_fsm(this);
return NULL;
}
f->nr_events = nr_events;
f->nr_states = nr_states;
f->event_names = event_names;
f->state_names = state_names;
this->f = f;
m = kcalloc(nr_states*nr_events, sizeof(fsm_function_t), order);
if (m == NULL) {
printk(KERN_WARNING
"fsm(%s): init_fsm: Couldn't alloc jumptable\n", name);
kfree_fsm(this);
return NULL;
}
f->jumpmatrix = m;
for (i = 0; i < tmpl_len; i++) {
if ((tmpl[i].cond_state >= nr_states) ||
(tmpl[i].cond_event >= nr_events) ) {
printk(KERN_ERR
"fsm(%s): init_fsm: Bad template l=%d st(%ld/%ld) ev(%ld/%ld)\n",
name, i, (long)tmpl[i].cond_state, (long)f->nr_states,
(long)tmpl[i].cond_event, (long)f->nr_events);
kfree_fsm(this);
return NULL;
} else
m[nr_states * tmpl[i].cond_event + tmpl[i].cond_state] =
tmpl[i].function;
}
return this;
}
void
kfree_fsm(fsm_instance *this)
{
if (this) {
if (this->f) {
kfree(this->f->jumpmatrix);
kfree(this->f);
}
kfree(this);
} else
printk(KERN_WARNING
"fsm: kfree_fsm called with NULL argument\n");
}
#if FSM_DEBUG_HISTORY
void
fsm_print_history(fsm_instance *fi)
{
int idx = 0;
int i;
if (fi->history_size >= FSM_HISTORY_SIZE)
idx = fi->history_index;
printk(KERN_DEBUG "fsm(%s): History:\n", fi->name);
for (i = 0; i < fi->history_size; i++) {
int e = fi->history[idx].event;
int s = fi->history[idx++].state;
idx %= FSM_HISTORY_SIZE;
if (e == -1)
printk(KERN_DEBUG " S=%s\n",
fi->f->state_names[s]);
else
printk(KERN_DEBUG " S=%s E=%s\n",
fi->f->state_names[s],
fi->f->event_names[e]);
}
fi->history_size = fi->history_index = 0;
}
void
fsm_record_history(fsm_instance *fi, int state, int event)
{
fi->history[fi->history_index].state = state;
fi->history[fi->history_index++].event = event;
fi->history_index %= FSM_HISTORY_SIZE;
if (fi->history_size < FSM_HISTORY_SIZE)
fi->history_size++;
}
#endif
const char *
fsm_getstate_str(fsm_instance *fi)
{
int st = atomic_read(&fi->state);
if (st >= fi->f->nr_states)
return "Invalid";
return fi->f->state_names[st];
}
static void
fsm_expire_timer(fsm_timer *this)
{
#if FSM_TIMER_DEBUG
printk(KERN_DEBUG "fsm(%s): Timer %p expired\n",
this->fi->name, this);
#endif
fsm_event(this->fi, this->expire_event, this->event_arg);
}
void
fsm_settimer(fsm_instance *fi, fsm_timer *this)
{
this->fi = fi;
this->tl.function = (void *)fsm_expire_timer;
this->tl.data = (long)this;
#if FSM_TIMER_DEBUG
printk(KERN_DEBUG "fsm(%s): Create timer %p\n", fi->name,
this);
#endif
init_timer(&this->tl);
}
void
fsm_deltimer(fsm_timer *this)
{
#if FSM_TIMER_DEBUG
printk(KERN_DEBUG "fsm(%s): Delete timer %p\n", this->fi->name,
this);
#endif
del_timer(&this->tl);
}
int
fsm_addtimer(fsm_timer *this, int millisec, int event, void *arg)
{
#if FSM_TIMER_DEBUG
printk(KERN_DEBUG "fsm(%s): Add timer %p %dms\n",
this->fi->name, this, millisec);
#endif
init_timer(&this->tl);
this->tl.function = (void *)fsm_expire_timer;
this->tl.data = (long)this;
this->expire_event = event;
this->event_arg = arg;
this->tl.expires = jiffies + (millisec * HZ) / 1000;
add_timer(&this->tl);
return 0;
}
/* FIXME: this function is never used, why */
void
fsm_modtimer(fsm_timer *this, int millisec, int event, void *arg)
{
#if FSM_TIMER_DEBUG
printk(KERN_DEBUG "fsm(%s): Restart timer %p %dms\n",
this->fi->name, this, millisec);
#endif
del_timer(&this->tl);
init_timer(&this->tl);
this->tl.function = (void *)fsm_expire_timer;
this->tl.data = (long)this;
this->expire_event = event;
this->event_arg = arg;
this->tl.expires = jiffies + (millisec * HZ) / 1000;
add_timer(&this->tl);
}
EXPORT_SYMBOL(init_fsm);
EXPORT_SYMBOL(kfree_fsm);
EXPORT_SYMBOL(fsm_settimer);
EXPORT_SYMBOL(fsm_deltimer);
EXPORT_SYMBOL(fsm_addtimer);
EXPORT_SYMBOL(fsm_modtimer);
EXPORT_SYMBOL(fsm_getstate_str);
#if FSM_DEBUG_HISTORY
EXPORT_SYMBOL(fsm_print_history);
EXPORT_SYMBOL(fsm_record_history);
#endif
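
fsm_settimer() binds a timer to its owning FSM once; fsm_addtimer() then arms it, and expiry is turned back into an FSM event by fsm_expire_timer(), so timeouts flow through the same jump table as everything else. A usage sketch with placeholder event numbers and arguments:

#include "fsm.h"

/* Arm an illustrative 5 second timeout that feeds timeout_event back
 * into the owning FSM when it fires. */
static void example_arm_timeout(fsm_instance *fi, fsm_timer *timer,
				int timeout_event, void *arg)
{
	fsm_settimer(fi, timer);		/* bind timer to the FSM */
	fsm_addtimer(timer, 5000, timeout_event, arg);
}

static void example_cancel_timeout(fsm_timer *timer)
{
	fsm_deltimer(timer);			/* no event is delivered */
}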

265
drivers/s390/net/fsm.h Normal file

@@ -0,0 +1,265 @@
#ifndef _FSM_H_
#define _FSM_H_
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/time.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/atomic.h>
/**
* Define this to get debugging messages.
*/
#define FSM_DEBUG 0
/**
* Define this to get debugging messages for
* timer handling.
*/
#define FSM_TIMER_DEBUG 0
/**
* Define these to record a history of
* Events/Statechanges and print it if an
* action_function is not found.
*/
#define FSM_DEBUG_HISTORY 0
#define FSM_HISTORY_SIZE 40
struct fsm_instance_t;
/**
* Definition of an action function, called by a FSM
*/
typedef void (*fsm_function_t)(struct fsm_instance_t *, int, void *);
/**
* Internal jump table for a FSM
*/
typedef struct {
fsm_function_t *jumpmatrix;
int nr_events;
int nr_states;
const char **event_names;
const char **state_names;
} fsm;
#if FSM_DEBUG_HISTORY
/**
* Element of State/Event history used for debugging.
*/
typedef struct {
int state;
int event;
} fsm_history;
#endif
/**
* Representation of a FSM
*/
typedef struct fsm_instance_t {
fsm *f;
atomic_t state;
char name[16];
void *userdata;
int userint;
wait_queue_head_t wait_q;
#if FSM_DEBUG_HISTORY
int history_index;
int history_size;
fsm_history history[FSM_HISTORY_SIZE];
#endif
} fsm_instance;
/**
* Description of a state-event combination
*/
typedef struct {
int cond_state;
int cond_event;
fsm_function_t function;
} fsm_node;
/**
* Description of a FSM Timer.
*/
typedef struct {
fsm_instance *fi;
struct timer_list tl;
int expire_event;
void *event_arg;
} fsm_timer;
/**
* Creates an FSM
*
* @param name Name of this instance for logging purposes.
* @param state_names An array of names for all states for logging purposes.
* @param event_names An array of names for all events for logging purposes.
* @param nr_states Number of states for this instance.
* @param nr_events Number of events for this instance.
* @param tmpl An array of fsm_nodes, describing this FSM.
* @param tmpl_len Length of the describing array.
* @param order Parameter for allocation of the FSM data structs.
*/
extern fsm_instance *
init_fsm(char *name, const char **state_names,
const char **event_names,
int nr_states, int nr_events, const fsm_node *tmpl,
int tmpl_len, gfp_t order);
/**
* Releases an FSM
*
* @param fi Pointer to an FSM, previously created with init_fsm.
*/
extern void kfree_fsm(fsm_instance *fi);
#if FSM_DEBUG_HISTORY
extern void
fsm_print_history(fsm_instance *fi);
extern void
fsm_record_history(fsm_instance *fi, int state, int event);
#endif
/**
* Emits an event to a FSM.
* If an action function is defined for the current state/event combination,
* this function is called.
*
* @param fi Pointer to FSM which should receive the event.
* @param event The event to be delivered.
* @param arg A generic argument, handed to the action function.
*
* @return 0 on success,
* 1 if current state or event is out of range
* !0 if state and event in range, but no action defined.
*/
static inline int
fsm_event(fsm_instance *fi, int event, void *arg)
{
fsm_function_t r;
int state = atomic_read(&fi->state);
if ((state >= fi->f->nr_states) ||
(event >= fi->f->nr_events) ) {
printk(KERN_ERR "fsm(%s): Invalid state st(%ld/%ld) ev(%d/%ld)\n",
fi->name, (long)state,(long)fi->f->nr_states, event,
(long)fi->f->nr_events);
#if FSM_DEBUG_HISTORY
fsm_print_history(fi);
#endif
return 1;
}
r = fi->f->jumpmatrix[fi->f->nr_states * event + state];
if (r) {
#if FSM_DEBUG
printk(KERN_DEBUG "fsm(%s): state %s event %s\n",
fi->name, fi->f->state_names[state],
fi->f->event_names[event]);
#endif
#if FSM_DEBUG_HISTORY
fsm_record_history(fi, state, event);
#endif
r(fi, event, arg);
return 0;
} else {
#if FSM_DEBUG || FSM_DEBUG_HISTORY
printk(KERN_DEBUG "fsm(%s): no function for event %s in state %s\n",
fi->name, fi->f->event_names[event],
fi->f->state_names[state]);
#endif
#if FSM_DEBUG_HISTORY
fsm_print_history(fi);
#endif
return !0;
}
}
/**
* Modifies the state of an FSM.
* This does <em>not</em> trigger an event or call an action function.
*
* @param fi Pointer to FSM
* @param state The new state for this FSM.
*/
static inline void
fsm_newstate(fsm_instance *fi, int newstate)
{
atomic_set(&fi->state,newstate);
#if FSM_DEBUG_HISTORY
fsm_record_history(fi, newstate, -1);
#endif
#if FSM_DEBUG
printk(KERN_DEBUG "fsm(%s): New state %s\n", fi->name,
fi->f->state_names[newstate]);
#endif
wake_up(&fi->wait_q);
}
/**
* Retrieves the state of an FSM
*
* @param fi Pointer to FSM
*
* @return The current state of the FSM.
*/
static inline int
fsm_getstate(fsm_instance *fi)
{
return atomic_read(&fi->state);
}
/**
* Retrieves the name of the state of an FSM
*
* @param fi Pointer to FSM
*
* @return The current state of the FSM in a human readable form.
*/
extern const char *fsm_getstate_str(fsm_instance *fi);
/**
* Initializes a timer for an FSM.
* This prepares an fsm_timer for usage with fsm_addtimer.
*
* @param fi Pointer to FSM
* @param timer The timer to be initialized.
*/
extern void fsm_settimer(fsm_instance *fi, fsm_timer *);
/**
* Clears a pending timer of an FSM instance.
*
* @param timer The timer to clear.
*/
extern void fsm_deltimer(fsm_timer *timer);
/**
* Adds and starts a timer to an FSM instance.
*
* @param timer The timer to be added. The field fi of that timer
* must have been set to point to the instance.
* @param millisec Duration, after which the timer should expire.
* @param event Event, to trigger if timer expires.
* @param arg Generic argument, provided to expiry function.
*
* @return 0 on success, -1 if timer is already active.
*/
extern int fsm_addtimer(fsm_timer *timer, int millisec, int event, void *arg);
/**
* Modifies a timer of an FSM.
*
* @param timer The timer to modify.
* @param millisec Duration, after which the timer should expire.
* @param event Event, to trigger if timer expires.
* @param arg Generic argument, provided to expiry function.
*/
extern void fsm_modtimer(fsm_timer *timer, int millisec, int event, void *arg);
#endif /* _FSM_H_ */
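
Putting the pieces together, an FSM is built from parallel state/event name arrays plus an fsm_node template, and then driven entirely through fsm_event() and fsm_newstate(). A minimal self-contained sketch; names, states and events are invented for illustration, not taken from the drivers above:

#include <linux/errno.h>
#include <linux/kernel.h>
#include "fsm.h"

enum { EX_STATE_DOWN, EX_STATE_UP, EX_NR_STATES };
enum { EX_EVENT_START, EX_NR_EVENTS };

static char ex_name[] = "example";
static const char *ex_state_names[] = { "Down", "Up" };
static const char *ex_event_names[] = { "Start" };

/* Single action: the START event brings the example FSM up. */
static void ex_action_start(fsm_instance *fi, int event, void *arg)
{
	fsm_newstate(fi, EX_STATE_UP);
}

static const fsm_node ex_fsm[] = {
	{ EX_STATE_DOWN, EX_EVENT_START, ex_action_start },
};

static int example_run(void)
{
	fsm_instance *fi = init_fsm(ex_name, ex_state_names,
				    ex_event_names, EX_NR_STATES,
				    EX_NR_EVENTS, ex_fsm,
				    ARRAY_SIZE(ex_fsm), GFP_KERNEL);
	if (!fi)
		return -ENOMEM;
	fsm_newstate(fi, EX_STATE_DOWN);
	fsm_event(fi, EX_EVENT_START, NULL);	/* moves the FSM to "Up" */
	kfree_fsm(fi);
	return 0;
}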

2493
drivers/s390/net/lcs.c Normal file

File diff suppressed because it is too large.

339
drivers/s390/net/lcs.h Normal file

@@ -0,0 +1,339 @@
/*lcs.h*/
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <asm/ccwdev.h>
#define LCS_DBF_TEXT(level, name, text) \
do { \
debug_text_event(lcs_dbf_##name, level, text); \
} while (0)
#define LCS_DBF_HEX(level,name,addr,len) \
do { \
debug_event(lcs_dbf_##name,level,(void*)(addr),len); \
} while (0)
#define LCS_DBF_TEXT_(level,name,text...) \
do { \
if (debug_level_enabled(lcs_dbf_##name, level)) { \
sprintf(debug_buffer, text); \
debug_text_event(lcs_dbf_##name, level, debug_buffer); \
} \
} while (0)
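/*
 * Illustration: LCS_DBF_TEXT(4, trace, "haltsch") expands to
 * debug_text_event(lcs_dbf_trace, 4, "haltsch"); the middle argument selects
 * the lcs_dbf_* debug area and the first one the debug level.
 */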
/**
* sysfs related stuff
*/
#define CARD_FROM_DEV(cdev) \
(struct lcs_card *) dev_get_drvdata( \
&((struct ccwgroup_device *)dev_get_drvdata(&cdev->dev))->dev);
/**
* Enum for classifying detected devices.
*/
enum lcs_channel_types {
/* Device is not a channel */
lcs_channel_type_none,
/* Device is a parallel channel */
lcs_channel_type_parallel,
/* Device is a 2216 channel */
lcs_channel_type_2216,
/* Device is an OSA2 card */
lcs_channel_type_osa2
};
/**
* CCW commands used in this driver
*/
#define LCS_CCW_WRITE 0x01
#define LCS_CCW_READ 0x02
#define LCS_CCW_TRANSFER 0x08
/**
* LCS device status primitives
*/
#define LCS_CMD_STARTLAN 0x01
#define LCS_CMD_STOPLAN 0x02
#define LCS_CMD_LANSTAT 0x04
#define LCS_CMD_STARTUP 0x07
#define LCS_CMD_SHUTDOWN 0x08
#define LCS_CMD_QIPASSIST 0xb2
#define LCS_CMD_SETIPM 0xb4
#define LCS_CMD_DELIPM 0xb5
#define LCS_INITIATOR_TCPIP 0x00
#define LCS_INITIATOR_LGW 0x01
#define LCS_STD_CMD_SIZE 16
#define LCS_MULTICAST_CMD_SIZE 404
/**
* LCS IPASSIST MASKS, only used when multicast is switched on
*/
/* Not supported by LCS */
#define LCS_IPASS_ARP_PROCESSING 0x0001
#define LCS_IPASS_IN_CHECKSUM_SUPPORT 0x0002
#define LCS_IPASS_OUT_CHECKSUM_SUPPORT 0x0004
#define LCS_IPASS_IP_FRAG_REASSEMBLY 0x0008
#define LCS_IPASS_IP_FILTERING 0x0010
/* Supported by lcs 3172 */
#define LCS_IPASS_IPV6_SUPPORT 0x0020
#define LCS_IPASS_MULTICAST_SUPPORT 0x0040
/**
* LCS sense byte definitions
*/
#define LCS_SENSE_BYTE_0 0
#define LCS_SENSE_BYTE_1 1
#define LCS_SENSE_BYTE_2 2
#define LCS_SENSE_BYTE_3 3
#define LCS_SENSE_INTERFACE_DISCONNECT 0x01
#define LCS_SENSE_EQUIPMENT_CHECK 0x10
#define LCS_SENSE_BUS_OUT_CHECK 0x20
#define LCS_SENSE_INTERVENTION_REQUIRED 0x40
#define LCS_SENSE_CMD_REJECT 0x80
#define LCS_SENSE_RESETTING_EVENT 0x80
#define LCS_SENSE_DEVICE_ONLINE 0x20
/**
* LCS packet type definitions
*/
#define LCS_FRAME_TYPE_CONTROL 0
#define LCS_FRAME_TYPE_ENET 1
#define LCS_FRAME_TYPE_TR 2
#define LCS_FRAME_TYPE_FDDI 7
#define LCS_FRAME_TYPE_AUTO -1
/**
* Some more definitions; we will sort them later.
*/
#define LCS_ILLEGAL_OFFSET 0xffff
#define LCS_IOBUFFERSIZE 0x5000
#define LCS_NUM_BUFFS 32 /* needs to be power of 2 */
#define LCS_MAC_LENGTH 6
#define LCS_INVALID_PORT_NO -1
#define LCS_LANCMD_TIMEOUT_DEFAULT 5
/**
* Multicast state
*/
#define LCS_IPM_STATE_SET_REQUIRED 0
#define LCS_IPM_STATE_DEL_REQUIRED 1
#define LCS_IPM_STATE_ON_CARD 2
/**
* LCS IP Assist declarations
* these seem to be used only for multicast
*/
#define LCS_IPASS_ARP_PROCESSING 0x0001
#define LCS_IPASS_INBOUND_CSUM_SUPP 0x0002
#define LCS_IPASS_OUTBOUND_CSUM_SUPP 0x0004
#define LCS_IPASS_IP_FRAG_REASSEMBLY 0x0008
#define LCS_IPASS_IP_FILTERING 0x0010
#define LCS_IPASS_IPV6_SUPPORT 0x0020
#define LCS_IPASS_MULTICAST_SUPPORT 0x0040
/**
* LCS Buffer states
*/
enum lcs_buffer_states {
LCS_BUF_STATE_EMPTY, /* buffer is empty */
LCS_BUF_STATE_LOCKED, /* buffer is locked, don't touch */
LCS_BUF_STATE_READY, /* buffer is ready for read/write */
LCS_BUF_STATE_PROCESSED,
};
/**
* LCS Channel State Machine declarations
*/
enum lcs_channel_states {
LCS_CH_STATE_INIT,
LCS_CH_STATE_HALTED,
LCS_CH_STATE_STOPPED,
LCS_CH_STATE_RUNNING,
LCS_CH_STATE_SUSPENDED,
LCS_CH_STATE_CLEARED,
LCS_CH_STATE_ERROR,
};
/**
* LCS device state machine
*/
enum lcs_dev_states {
DEV_STATE_DOWN,
DEV_STATE_UP,
DEV_STATE_RECOVER,
};
enum lcs_threads {
LCS_SET_MC_THREAD = 1,
LCS_RECOVERY_THREAD = 2,
};
/**
* LCS struct declarations
*/
struct lcs_header {
__u16 offset;
__u8 type;
__u8 slot;
} __attribute__ ((packed));
struct lcs_ip_mac_pair {
__be32 ip_addr;
__u8 mac_addr[LCS_MAC_LENGTH];
__u8 reserved[2];
} __attribute__ ((packed));
struct lcs_ipm_list {
struct list_head list;
struct lcs_ip_mac_pair ipm;
__u8 ipm_state;
};
struct lcs_cmd {
__u16 offset;
__u8 type;
__u8 slot;
__u8 cmd_code;
__u8 initiator;
__u16 sequence_no;
__u16 return_code;
union {
struct {
__u8 lan_type;
__u8 portno;
__u16 parameter_count;
__u8 operator_flags[3];
__u8 reserved[3];
} lcs_std_cmd;
struct {
__u16 unused1;
__u16 buff_size;
__u8 unused2[6];
} lcs_startup;
struct {
__u8 lan_type;
__u8 portno;
__u8 unused[10];
__u8 mac_addr[LCS_MAC_LENGTH];
__u32 num_packets_deblocked;
__u32 num_packets_blocked;
__u32 num_packets_tx_on_lan;
__u32 num_tx_errors_detected;
__u32 num_tx_packets_disgarded;
__u32 num_packets_rx_from_lan;
__u32 num_rx_errors_detected;
__u32 num_rx_discarded_nobuffs_avail;
__u32 num_rx_packets_too_large;
} lcs_lanstat_cmd;
#ifdef CONFIG_IP_MULTICAST
struct {
__u8 lan_type;
__u8 portno;
__u16 num_ip_pairs;
__u16 ip_assists_supported;
__u16 ip_assists_enabled;
__u16 version;
struct {
struct lcs_ip_mac_pair
ip_mac_pair[32];
__u32 response_data;
} lcs_ipass_ctlmsg __attribute__ ((packed));
} lcs_qipassist __attribute__ ((packed));
#endif /*CONFIG_IP_MULTICAST */
} cmd __attribute__ ((packed));
} __attribute__ ((packed));
/**
* Forward declarations.
*/
struct lcs_card;
struct lcs_channel;
/**
* Definition of an lcs buffer.
*/
struct lcs_buffer {
enum lcs_buffer_states state;
void *data;
int count;
/* Callback for completion notification. */
void (*callback)(struct lcs_channel *, struct lcs_buffer *);
};
struct lcs_reply {
struct list_head list;
__u16 sequence_no;
atomic_t refcnt;
/* Callback for completion notification. */
void (*callback)(struct lcs_card *, struct lcs_cmd *);
wait_queue_head_t wait_q;
struct lcs_card *card;
int received;
int rc;
};
/**
* Definition of an lcs channel
*/
struct lcs_channel {
enum lcs_channel_states state;
struct ccw_device *ccwdev;
struct ccw1 ccws[LCS_NUM_BUFFS + 1];
wait_queue_head_t wait_q;
struct tasklet_struct irq_tasklet;
struct lcs_buffer iob[LCS_NUM_BUFFS];
int io_idx;
int buf_idx;
};
/**
* definition of the lcs card
*/
struct lcs_card {
spinlock_t lock;
spinlock_t ipm_lock;
enum lcs_dev_states state;
struct net_device *dev;
struct net_device_stats stats;
__be16 (*lan_type_trans)(struct sk_buff *skb,
struct net_device *dev);
struct ccwgroup_device *gdev;
struct lcs_channel read;
struct lcs_channel write;
struct lcs_buffer *tx_buffer;
int tx_emitted;
struct list_head lancmd_waiters;
int lancmd_timeout;
struct work_struct kernel_thread_starter;
spinlock_t mask_lock;
unsigned long thread_start_mask;
unsigned long thread_running_mask;
unsigned long thread_allowed_mask;
wait_queue_head_t wait_q;
#ifdef CONFIG_IP_MULTICAST
struct list_head ipm_list;
#endif
__u8 mac[LCS_MAC_LENGTH];
__u16 ip_assists_supported;
__u16 ip_assists_enabled;
__s8 lan_type;
__u32 pkt_seq;
__u16 sequence_no;
__s16 portno;
/* Some info copied from probeinfo */
u8 device_forced;
u8 max_port_no;
u8 hint_port_no;
s16 port_protocol_no;
} __attribute__ ((aligned(8)));
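
The structures above describe the on-the-wire framing: every frame in an I/O buffer starts with a struct lcs_header, whose offset field gives the position of the next header within the buffer, and LCS_ILLEGAL_OFFSET marks a header slot as invalid or already consumed. The loop below is only an illustrative sketch of such a walk (helper names are hypothetical); the actual deblocking code in lcs.c additionally handles control frames, statistics and buffer recycling.

/* Illustrative only -- not the driver's actual receive path (see lcs.c). */
#include "lcs.h"

static void demo_lcs_walk_frames(struct lcs_buffer *buffer,
				 void (*deliver)(struct lcs_header *hdr))
{
	struct lcs_header *hdr = (struct lcs_header *) buffer->data;
	unsigned int offset = 0;

	while (hdr->offset != 0 && hdr->offset != LCS_ILLEGAL_OFFSET) {
		/* each offset must move forward and stay inside the buffer */
		if (hdr->offset <= offset || hdr->offset > LCS_IOBUFFERSIZE)
			break;
		switch (hdr->type) {
		case LCS_FRAME_TYPE_ENET:
		case LCS_FRAME_TYPE_TR:
		case LCS_FRAME_TYPE_FDDI:
			deliver(hdr);		/* hand network frames upwards */
			break;
		case LCS_FRAME_TYPE_CONTROL:
		default:
			break;			/* control/unknown: ignored here */
		}
		offset = hdr->offset;
		hdr = (struct lcs_header *) ((char *) buffer->data + offset);
	}
}
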

2259
drivers/s390/net/netiucv.c Normal file

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,995 @@
/*
* Copyright IBM Corp. 2007
* Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
* Frank Pavlic <fpavlic@de.ibm.com>,
* Thomas Spatzier <tspat@de.ibm.com>,
* Frank Blaschka <frank.blaschka@de.ibm.com>
*/
#ifndef __QETH_CORE_H__
#define __QETH_CORE_H__
#include <linux/if.h>
#include <linux/if_arp.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/ctype.h>
#include <linux/in6.h>
#include <linux/bitops.h>
#include <linux/seq_file.h>
#include <linux/ethtool.h>
#include <net/ipv6.h>
#include <net/if_inet6.h>
#include <net/addrconf.h>
#include <asm/debug.h>
#include <asm/qdio.h>
#include <asm/ccwdev.h>
#include <asm/ccwgroup.h>
#include <asm/sysinfo.h>
#include "qeth_core_mpc.h"
/**
* Debug Facility stuff
*/
enum qeth_dbf_names {
QETH_DBF_SETUP,
QETH_DBF_MSG,
QETH_DBF_CTRL,
QETH_DBF_INFOS /* must be last element */
};
struct qeth_dbf_info {
char name[DEBUG_MAX_NAME_LEN];
int pages;
int areas;
int len;
int level;
struct debug_view *view;
debug_info_t *id;
};
#define QETH_DBF_CTRL_LEN 256
#define QETH_DBF_TEXT(name, level, text) \
debug_text_event(qeth_dbf[QETH_DBF_##name].id, level, text)
#define QETH_DBF_HEX(name, level, addr, len) \
debug_event(qeth_dbf[QETH_DBF_##name].id, level, (void *)(addr), len)
#define QETH_DBF_MESSAGE(level, text...) \
debug_sprintf_event(qeth_dbf[QETH_DBF_MSG].id, level, text)
#define QETH_DBF_TEXT_(name, level, text...) \
qeth_dbf_longtext(qeth_dbf[QETH_DBF_##name].id, level, text)
#define QETH_CARD_TEXT(card, level, text) \
debug_text_event(card->debug, level, text)
#define QETH_CARD_HEX(card, level, addr, len) \
debug_event(card->debug, level, (void *)(addr), len)
#define QETH_CARD_MESSAGE(card, text...) \
debug_sprintf_event(card->debug, level, text)
#define QETH_CARD_TEXT_(card, level, text...) \
qeth_dbf_longtext(card->debug, level, text)
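/*
 * Illustration: QETH_CARD_TEXT(card, 2, "setonlin") logs the literal
 * "setonlin" to the per-card debug area at level 2, while the QETH_DBF_*
 * variants log to the driver-global areas in qeth_dbf[].
 */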
#define SENSE_COMMAND_REJECT_BYTE 0
#define SENSE_COMMAND_REJECT_FLAG 0x80
#define SENSE_RESETTING_EVENT_BYTE 1
#define SENSE_RESETTING_EVENT_FLAG 0x80
/*
* Common IO related definitions
*/
#define CARD_RDEV(card) card->read.ccwdev
#define CARD_WDEV(card) card->write.ccwdev
#define CARD_DDEV(card) card->data.ccwdev
#define CARD_BUS_ID(card) dev_name(&card->gdev->dev)
#define CARD_RDEV_ID(card) dev_name(&card->read.ccwdev->dev)
#define CARD_WDEV_ID(card) dev_name(&card->write.ccwdev->dev)
#define CARD_DDEV_ID(card) dev_name(&card->data.ccwdev->dev)
#define CHANNEL_ID(channel) dev_name(&channel->ccwdev->dev)
/**
* card stuff
*/
struct qeth_perf_stats {
unsigned int bufs_rec;
unsigned int bufs_sent;
unsigned int skbs_sent_pack;
unsigned int bufs_sent_pack;
unsigned int sc_dp_p;
unsigned int sc_p_dp;
/* qdio_cq_handler: number of times called, time spent in */
__u64 cq_start_time;
unsigned int cq_cnt;
unsigned int cq_time;
/* qdio_input_handler: number of times called, time spent in */
__u64 inbound_start_time;
unsigned int inbound_cnt;
unsigned int inbound_time;
/* qeth_send_packet: number of times called, time spent in */
__u64 outbound_start_time;
unsigned int outbound_cnt;
unsigned int outbound_time;
/* qdio_output_handler: number of times called, time spent in */
__u64 outbound_handler_start_time;
unsigned int outbound_handler_cnt;
unsigned int outbound_handler_time;
/* number of calls to and time spent in do_QDIO for inbound queue */
__u64 inbound_do_qdio_start_time;
unsigned int inbound_do_qdio_cnt;
unsigned int inbound_do_qdio_time;
/* number of calls to and time spent in do_QDIO for outbound queues */
__u64 outbound_do_qdio_start_time;
unsigned int outbound_do_qdio_cnt;
unsigned int outbound_do_qdio_time;
unsigned int large_send_bytes;
unsigned int large_send_cnt;
unsigned int sg_skbs_sent;
unsigned int sg_frags_sent;
/* initial values when measuring starts */
unsigned long initial_rx_packets;
unsigned long initial_tx_packets;
/* inbound scatter gather data */
unsigned int sg_skbs_rx;
unsigned int sg_frags_rx;
unsigned int sg_alloc_page_rx;
unsigned int tx_csum;
unsigned int tx_lin;
};
/* Routing stuff */
struct qeth_routing_info {
enum qeth_routing_types type;
};
/* IPA stuff */
struct qeth_ipa_info {
__u32 supported_funcs;
__u32 enabled_funcs;
};
/* SETBRIDGEPORT stuff */
enum qeth_sbp_roles {
QETH_SBP_ROLE_NONE = 0,
QETH_SBP_ROLE_PRIMARY = 1,
QETH_SBP_ROLE_SECONDARY = 2,
};
enum qeth_sbp_states {
QETH_SBP_STATE_INACTIVE = 0,
QETH_SBP_STATE_STANDBY = 1,
QETH_SBP_STATE_ACTIVE = 2,
};
#define QETH_SBP_HOST_NOTIFICATION 1
struct qeth_sbp_info {
__u32 supported_funcs;
enum qeth_sbp_roles role;
__u32 hostnotification:1;
};
static inline int qeth_is_ipa_supported(struct qeth_ipa_info *ipa,
enum qeth_ipa_funcs func)
{
return (ipa->supported_funcs & func);
}
static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa,
enum qeth_ipa_funcs func)
{
return (ipa->supported_funcs & ipa->enabled_funcs & func);
}
#define qeth_adp_supported(c, f) \
qeth_is_ipa_supported(&c->options.adp, f)
#define qeth_adp_enabled(c, f) \
qeth_is_ipa_enabled(&c->options.adp, f)
#define qeth_is_supported(c, f) \
qeth_is_ipa_supported(&c->options.ipa4, f)
#define qeth_is_enabled(c, f) \
qeth_is_ipa_enabled(&c->options.ipa4, f)
#define qeth_is_supported6(c, f) \
qeth_is_ipa_supported(&c->options.ipa6, f)
#define qeth_is_enabled6(c, f) \
qeth_is_ipa_enabled(&c->options.ipa6, f)
#define qeth_is_ipafunc_supported(c, prot, f) \
((prot == QETH_PROT_IPV6) ? \
qeth_is_supported6(c, f) : qeth_is_supported(c, f))
#define qeth_is_ipafunc_enabled(c, prot, f) \
((prot == QETH_PROT_IPV6) ? \
qeth_is_enabled6(c, f) : qeth_is_enabled(c, f))
#define QETH_IDX_FUNC_LEVEL_OSD 0x0101
#define QETH_IDX_FUNC_LEVEL_IQD 0x4108
#define QETH_MODELLIST_ARRAY \
{{0x1731, 0x01, 0x1732, QETH_CARD_TYPE_OSD, QETH_MAX_QUEUES, 0}, \
{0x1731, 0x05, 0x1732, QETH_CARD_TYPE_IQD, QETH_MAX_QUEUES, 0x103}, \
{0x1731, 0x06, 0x1732, QETH_CARD_TYPE_OSN, QETH_MAX_QUEUES, 0}, \
{0x1731, 0x02, 0x1732, QETH_CARD_TYPE_OSM, QETH_MAX_QUEUES, 0}, \
{0x1731, 0x02, 0x1732, QETH_CARD_TYPE_OSX, QETH_MAX_QUEUES, 0}, \
{0, 0, 0, 0, 0, 0} }
#define QETH_CU_TYPE_IND 0
#define QETH_CU_MODEL_IND 1
#define QETH_DEV_TYPE_IND 2
#define QETH_DEV_MODEL_IND 3
#define QETH_QUEUE_NO_IND 4
#define QETH_MULTICAST_IND 5
#define QETH_REAL_CARD 1
#define QETH_VLAN_CARD 2
#define QETH_BUFSIZE 4096
/**
* some more defs
*/
#define QETH_TX_TIMEOUT 100 * HZ
#define QETH_RCD_TIMEOUT 60 * HZ
#define QETH_RECLAIM_WORK_TIME HZ
#define QETH_HEADER_SIZE 32
#define QETH_MAX_PORTNO 15
/*IPv6 address autoconfiguration stuff*/
#define UNIQUE_ID_IF_CREATE_ADDR_FAILED 0xfffe
#define UNIQUE_ID_NOT_BY_CARD 0x10000
/*****************************************************************************/
/* QDIO queue and buffer handling */
/*****************************************************************************/
#define QETH_MAX_QUEUES 4
#define QETH_IN_BUF_SIZE_DEFAULT 65536
#define QETH_IN_BUF_COUNT_DEFAULT 64
#define QETH_IN_BUF_COUNT_HSDEFAULT 128
#define QETH_IN_BUF_COUNT_MIN 8
#define QETH_IN_BUF_COUNT_MAX 128
#define QETH_MAX_BUFFER_ELEMENTS(card) ((card)->qdio.in_buf_size >> 12)
#define QETH_IN_BUF_REQUEUE_THRESHOLD(card) \
((card)->qdio.in_buf_pool.buf_count / 2)
/* buffers we have to be behind before we get a PCI */
#define QETH_PCI_THRESHOLD_A(card) ((card)->qdio.in_buf_pool.buf_count+1)
/*enqueued free buffers left before we get a PCI*/
#define QETH_PCI_THRESHOLD_B(card) 0
/*not used unless the microcode gets patched*/
#define QETH_PCI_TIMER_VALUE(card) 3
/* priority queueing */
#define QETH_PRIOQ_DEFAULT QETH_NO_PRIO_QUEUEING
#define QETH_DEFAULT_QUEUE 2
#define QETH_NO_PRIO_QUEUEING 0
#define QETH_PRIO_Q_ING_PREC 1
#define QETH_PRIO_Q_ING_TOS 2
#define QETH_PRIO_Q_ING_SKB 3
#define QETH_PRIO_Q_ING_VLAN 4
/* Packing */
#define QETH_LOW_WATERMARK_PACK 2
#define QETH_HIGH_WATERMARK_PACK 5
#define QETH_WATERMARK_PACK_FUZZ 1
#define QETH_IP_HEADER_SIZE 40
/* large receive scatter gather copy break */
#define QETH_RX_SG_CB (PAGE_SIZE >> 1)
#define QETH_RX_PULL_LEN 256
struct qeth_hdr_layer3 {
__u8 id;
__u8 flags;
__u16 inbound_checksum; /*TSO:__u16 seqno */
__u32 token; /*TSO: __u32 reserved */
__u16 length;
__u8 vlan_prio;
__u8 ext_flags;
__u16 vlan_id;
__u16 frame_offset;
__u8 dest_addr[16];
} __attribute__ ((packed));
struct qeth_hdr_layer2 {
__u8 id;
__u8 flags[3];
__u8 port_no;
__u8 hdr_length;
__u16 pkt_length;
__u16 seq_no;
__u16 vlan_id;
__u32 reserved;
__u8 reserved2[16];
} __attribute__ ((packed));
struct qeth_hdr_osn {
__u8 id;
__u8 reserved;
__u16 seq_no;
__u16 reserved2;
__u16 control_flags;
__u16 pdu_length;
__u8 reserved3[18];
__u32 ccid;
} __attribute__ ((packed));
struct qeth_hdr {
union {
struct qeth_hdr_layer2 l2;
struct qeth_hdr_layer3 l3;
struct qeth_hdr_osn osn;
} hdr;
} __attribute__ ((packed));
/*TCP Segmentation Offload header*/
struct qeth_hdr_ext_tso {
__u16 hdr_tot_len;
__u8 imb_hdr_no;
__u8 reserved;
__u8 hdr_type;
__u8 hdr_version;
__u16 hdr_len;
__u32 payload_len;
__u16 mss;
__u16 dg_hdr_len;
__u8 padding[16];
} __attribute__ ((packed));
struct qeth_hdr_tso {
struct qeth_hdr hdr; /*hdr->hdr.l3.xxx*/
struct qeth_hdr_ext_tso ext;
} __attribute__ ((packed));
/* flags for qeth_hdr.flags */
#define QETH_HDR_PASSTHRU 0x10
#define QETH_HDR_IPV6 0x80
#define QETH_HDR_CAST_MASK 0x07
enum qeth_cast_flags {
QETH_CAST_UNICAST = 0x06,
QETH_CAST_MULTICAST = 0x04,
QETH_CAST_BROADCAST = 0x05,
QETH_CAST_ANYCAST = 0x07,
QETH_CAST_NOCAST = 0x00,
};
enum qeth_layer2_frame_flags {
QETH_LAYER2_FLAG_MULTICAST = 0x01,
QETH_LAYER2_FLAG_BROADCAST = 0x02,
QETH_LAYER2_FLAG_UNICAST = 0x04,
QETH_LAYER2_FLAG_VLAN = 0x10,
};
enum qeth_header_ids {
QETH_HEADER_TYPE_LAYER3 = 0x01,
QETH_HEADER_TYPE_LAYER2 = 0x02,
QETH_HEADER_TYPE_TSO = 0x03,
QETH_HEADER_TYPE_OSN = 0x04,
};
/* flags for qeth_hdr.ext_flags */
#define QETH_HDR_EXT_VLAN_FRAME 0x01
#define QETH_HDR_EXT_TOKEN_ID 0x02
#define QETH_HDR_EXT_INCLUDE_VLAN_TAG 0x04
#define QETH_HDR_EXT_SRC_MAC_ADDR 0x08
#define QETH_HDR_EXT_CSUM_HDR_REQ 0x10
#define QETH_HDR_EXT_CSUM_TRANSP_REQ 0x20
#define QETH_HDR_EXT_UDP 0x40 /*bit off for TCP*/
static inline int qeth_is_last_sbale(struct qdio_buffer_element *sbale)
{
return (sbale->eflags & SBAL_EFLAGS_LAST_ENTRY);
}
enum qeth_qdio_buffer_states {
/*
* inbound: read out by driver; owned by hardware in order to be filled
* outbound: owned by driver in order to be filled
*/
QETH_QDIO_BUF_EMPTY,
/*
* inbound: filled by hardware; owned by driver in order to be read out
* outbound: filled by driver; owned by hardware in order to be sent
*/
QETH_QDIO_BUF_PRIMED,
/*
* inbound: not applicable
* outbound: identified to be pending in TPQ
*/
QETH_QDIO_BUF_PENDING,
/*
* inbound: not applicable
* outbound: found in completion queue
*/
QETH_QDIO_BUF_IN_CQ,
/*
* inbound: not applicable
* outbound: handled via transfer pending / completion queue
*/
QETH_QDIO_BUF_HANDLED_DELAYED,
};
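/*
 * Typical outbound lifecycle implied by the states above:
 * EMPTY -> PRIMED (handed to the hardware), then either straight back to
 * EMPTY on completion, or PRIMED -> PENDING -> IN_CQ -> HANDLED_DELAYED
 * when the buffer is completed later via the completion queue.
 */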
enum qeth_qdio_info_states {
QETH_QDIO_UNINITIALIZED,
QETH_QDIO_ALLOCATED,
QETH_QDIO_ESTABLISHED,
QETH_QDIO_CLEANING
};
struct qeth_buffer_pool_entry {
struct list_head list;
struct list_head init_list;
void *elements[QDIO_MAX_ELEMENTS_PER_BUFFER];
};
struct qeth_qdio_buffer_pool {
struct list_head entry_list;
int buf_count;
};
struct qeth_qdio_buffer {
struct qdio_buffer *buffer;
/* the buffer pool entry currently associated to this buffer */
struct qeth_buffer_pool_entry *pool_entry;
struct sk_buff *rx_skb;
};
struct qeth_qdio_q {
struct qdio_buffer *qdio_bufs[QDIO_MAX_BUFFERS_PER_Q];
struct qeth_qdio_buffer bufs[QDIO_MAX_BUFFERS_PER_Q];
int next_buf_to_init;
};
struct qeth_qdio_out_buffer {
struct qdio_buffer *buffer;
atomic_t state;
int next_element_to_fill;
struct sk_buff_head skb_list;
int is_header[16];
struct qaob *aob;
struct qeth_qdio_out_q *q;
struct qeth_qdio_out_buffer *next_pending;
};
struct qeth_card;
enum qeth_out_q_states {
QETH_OUT_Q_UNLOCKED,
QETH_OUT_Q_LOCKED,
QETH_OUT_Q_LOCKED_FLUSH,
};
struct qeth_qdio_out_q {
struct qdio_buffer *qdio_bufs[QDIO_MAX_BUFFERS_PER_Q];
struct qeth_qdio_out_buffer *bufs[QDIO_MAX_BUFFERS_PER_Q];
struct qdio_outbuf_state *bufstates; /* convenience pointer */
int queue_no;
struct qeth_card *card;
atomic_t state;
int do_pack;
/*
* index of buffer to be filled by driver; state EMPTY or PACKING
*/
int next_buf_to_fill;
/*
* number of buffers that are currently filled (PRIMED)
* -> these buffers are hardware-owned
*/
atomic_t used_buffers;
/* indicates whether PCI flag must be set (or if one is outstanding) */
atomic_t set_pci_flags_count;
};
struct qeth_qdio_info {
atomic_t state;
/* input */
int no_in_queues;
struct qeth_qdio_q *in_q;
struct qeth_qdio_q *c_q;
struct qeth_qdio_buffer_pool in_buf_pool;
struct qeth_qdio_buffer_pool init_pool;
int in_buf_size;
/* output */
int no_out_queues;
struct qeth_qdio_out_q **out_qs;
struct qdio_outbuf_state *out_bufstates;
/* priority queueing */
int do_prio_queueing;
int default_out_queue;
};
enum qeth_send_errors {
QETH_SEND_ERROR_NONE,
QETH_SEND_ERROR_LINK_FAILURE,
QETH_SEND_ERROR_RETRY,
QETH_SEND_ERROR_KICK_IT,
};
#define QETH_ETH_MAC_V4 0x0100 /* like v4 */
#define QETH_ETH_MAC_V6 0x3333 /* like v6 */
/* the Token Ring multicast MAC prefix is longer, but this is enough to detect mc frames */
#define QETH_TR_MAC_NC 0xc000 /* non-canonical */
#define QETH_TR_MAC_C 0x0300 /* canonical */
#define DEFAULT_ADD_HHLEN 0
#define MAX_ADD_HHLEN 1024
/**
* buffer stuff for read channel
*/
#define QETH_CMD_BUFFER_NO 8
/**
* channel state machine
*/
enum qeth_channel_states {
CH_STATE_UP,
CH_STATE_DOWN,
CH_STATE_ACTIVATING,
CH_STATE_HALTED,
CH_STATE_STOPPED,
CH_STATE_RCD,
CH_STATE_RCD_DONE,
};
/**
* card state machine
*/
enum qeth_card_states {
CARD_STATE_DOWN,
CARD_STATE_HARDSETUP,
CARD_STATE_SOFTSETUP,
CARD_STATE_UP,
CARD_STATE_RECOVER,
};
/**
* Protocol versions
*/
enum qeth_prot_versions {
QETH_PROT_IPV4 = 0x0004,
QETH_PROT_IPV6 = 0x0006,
};
enum qeth_ip_types {
QETH_IP_TYPE_NORMAL,
QETH_IP_TYPE_VIPA,
QETH_IP_TYPE_RXIP,
QETH_IP_TYPE_DEL_ALL_MC,
};
enum qeth_cmd_buffer_state {
BUF_STATE_FREE,
BUF_STATE_LOCKED,
BUF_STATE_PROCESSED,
};
enum qeth_cq {
QETH_CQ_DISABLED = 0,
QETH_CQ_ENABLED = 1,
QETH_CQ_NOTAVAILABLE = 2,
};
struct qeth_ipato {
int enabled;
int invert4;
int invert6;
struct list_head entries;
};
struct qeth_channel;
struct qeth_cmd_buffer {
enum qeth_cmd_buffer_state state;
struct qeth_channel *channel;
unsigned char *data;
int rc;
void (*callback) (struct qeth_channel *, struct qeth_cmd_buffer *);
};
/**
* definition of a qeth channel, used for read and write
*/
struct qeth_channel {
enum qeth_channel_states state;
struct ccw1 ccw;
spinlock_t iob_lock;
wait_queue_head_t wait_q;
struct tasklet_struct irq_tasklet;
struct ccw_device *ccwdev;
/*command buffer for control data*/
struct qeth_cmd_buffer iob[QETH_CMD_BUFFER_NO];
atomic_t irq_pending;
int io_buf_no;
int buf_no;
};
/**
* OSA card related definitions
*/
struct qeth_token {
__u32 issuer_rm_w;
__u32 issuer_rm_r;
__u32 cm_filter_w;
__u32 cm_filter_r;
__u32 cm_connection_w;
__u32 cm_connection_r;
__u32 ulp_filter_w;
__u32 ulp_filter_r;
__u32 ulp_connection_w;
__u32 ulp_connection_r;
};
struct qeth_seqno {
__u32 trans_hdr;
__u32 pdu_hdr;
__u32 pdu_hdr_ack;
__u16 ipa;
__u32 pkt_seqno;
};
struct qeth_reply {
struct list_head list;
wait_queue_head_t wait_q;
int (*callback)(struct qeth_card *, struct qeth_reply *,
unsigned long);
u32 seqno;
unsigned long offset;
atomic_t received;
int rc;
void *param;
struct qeth_card *card;
atomic_t refcnt;
};
struct qeth_card_blkt {
int time_total;
int inter_packet;
int inter_packet_jumbo;
};
#define QETH_BROADCAST_WITH_ECHO 0x01
#define QETH_BROADCAST_WITHOUT_ECHO 0x02
#define QETH_LAYER2_MAC_READ 0x01
#define QETH_LAYER2_MAC_REGISTERED 0x02
struct qeth_card_info {
unsigned short unit_addr2;
unsigned short cula;
unsigned short chpid;
__u16 func_level;
char mcl_level[QETH_MCL_LENGTH + 1];
int guestlan;
int mac_bits;
int portname_required;
int portno;
char portname[9];
enum qeth_card_types type;
enum qeth_link_types link_type;
int is_multicast_different;
int initial_mtu;
int max_mtu;
int broadcast_capable;
int unique_id;
struct qeth_card_blkt blkt;
__u32 csum_mask;
__u32 tx_csum_mask;
enum qeth_ipa_promisc_modes promisc_mode;
__u32 diagass_support;
__u32 hwtrap;
};
struct qeth_card_options {
struct qeth_routing_info route4;
struct qeth_ipa_info ipa4;
struct qeth_ipa_info adp; /*Adapter parameters*/
struct qeth_routing_info route6;
struct qeth_ipa_info ipa6;
struct qeth_sbp_info sbp; /* SETBRIDGEPORT options */
int fake_broadcast;
int add_hhlen;
int layer2;
int performance_stats;
int rx_sg_cb;
enum qeth_ipa_isolation_modes isolation;
enum qeth_ipa_isolation_modes prev_isolation;
int sniffer;
enum qeth_cq cq;
char hsuid[9];
};
/*
* thread bits for qeth_card thread masks
*/
enum qeth_threads {
QETH_RECOVER_THREAD = 1,
};
struct qeth_osn_info {
int (*assist_cb)(struct net_device *dev, void *data);
int (*data_cb)(struct sk_buff *skb);
};
enum qeth_discipline_id {
QETH_DISCIPLINE_LAYER3 = 0,
QETH_DISCIPLINE_LAYER2 = 1,
};
struct qeth_discipline {
void (*start_poll)(struct ccw_device *, int, unsigned long);
qdio_handler_t *input_handler;
qdio_handler_t *output_handler;
int (*recover)(void *ptr);
int (*setup) (struct ccwgroup_device *);
void (*remove) (struct ccwgroup_device *);
int (*set_online) (struct ccwgroup_device *);
int (*set_offline) (struct ccwgroup_device *);
void (*shutdown)(struct ccwgroup_device *);
int (*prepare) (struct ccwgroup_device *);
void (*complete) (struct ccwgroup_device *);
int (*freeze)(struct ccwgroup_device *);
int (*thaw) (struct ccwgroup_device *);
int (*restore)(struct ccwgroup_device *);
int (*control_event_handler)(struct qeth_card *card,
struct qeth_ipa_cmd *cmd);
};
struct qeth_vlan_vid {
struct list_head list;
unsigned short vid;
};
struct qeth_mc_mac {
struct list_head list;
__u8 mc_addr[MAX_ADDR_LEN];
unsigned char mc_addrlen;
int is_vmac;
};
struct qeth_rx {
int b_count;
int b_index;
struct qdio_buffer_element *b_element;
int e_offset;
int qdio_err;
};
struct carrier_info {
__u8 card_type;
__u16 port_mode;
__u32 port_speed;
};
struct qeth_switch_info {
__u32 capabilities;
__u32 settings;
};
#define QETH_NAPI_WEIGHT NAPI_POLL_WEIGHT
struct qeth_card {
struct list_head list;
enum qeth_card_states state;
int lan_online;
spinlock_t lock;
struct ccwgroup_device *gdev;
struct qeth_channel read;
struct qeth_channel write;
struct qeth_channel data;
struct net_device *dev;
struct net_device_stats stats;
struct qeth_card_info info;
struct qeth_token token;
struct qeth_seqno seqno;
struct qeth_card_options options;
wait_queue_head_t wait_q;
spinlock_t vlanlock;
spinlock_t mclock;
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
struct list_head vid_list;
struct list_head mc_list;
struct work_struct kernel_thread_starter;
spinlock_t thread_mask_lock;
unsigned long thread_start_mask;
unsigned long thread_allowed_mask;
unsigned long thread_running_mask;
struct task_struct *recovery_task;
spinlock_t ip_lock;
struct list_head ip_list;
struct list_head *ip_tbd_list;
struct qeth_ipato ipato;
struct list_head cmd_waiter_list;
/* QDIO buffer handling */
struct qeth_qdio_info qdio;
struct qeth_perf_stats perf_stats;
int read_or_write_problem;
struct qeth_osn_info osn_info;
struct qeth_discipline *discipline;
atomic_t force_alloc_skb;
struct service_level qeth_service_level;
struct qdio_ssqd_desc ssqd;
debug_info_t *debug;
struct mutex conf_mutex;
struct mutex discipline_mutex;
struct napi_struct napi;
struct qeth_rx rx;
struct delayed_work buffer_reclaim_work;
int reclaim_index;
struct work_struct close_dev_work;
};
struct qeth_card_list_struct {
struct list_head list;
rwlock_t rwlock;
};
struct qeth_trap_id {
__u16 lparnr;
char vmname[8];
__u8 chpid;
__u8 ssid;
__u16 devno;
} __packed;
/*some helper functions*/
#define QETH_CARD_IFNAME(card) (((card)->dev)? (card)->dev->name : "")
static inline struct qeth_card *CARD_FROM_CDEV(struct ccw_device *cdev)
{
struct qeth_card *card = dev_get_drvdata(&((struct ccwgroup_device *)
dev_get_drvdata(&cdev->dev))->dev);
return card;
}
static inline int qeth_get_micros(void)
{
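/* bit 51 of the s390 TOD clock ticks once per microsecond, so
 * shifting right by 12 yields a microsecond counter */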
return (int) (get_tod_clock() >> 12);
}
static inline int qeth_get_ip_version(struct sk_buff *skb)
{
__be16 *p = &((struct ethhdr *)skb->data)->h_proto;
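/* step over a VLAN tag, if present, to reach the encapsulated EtherType */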
if (*p == ETH_P_8021Q)
p += 2;
switch (*p) {
case ETH_P_IPV6:
return 6;
case ETH_P_IP:
return 4;
default:
return 0;
}
}
static inline void qeth_put_buffer_pool_entry(struct qeth_card *card,
struct qeth_buffer_pool_entry *entry)
{
list_add_tail(&entry->list, &card->qdio.in_buf_pool.entry_list);
}
static inline int qeth_is_diagass_supported(struct qeth_card *card,
enum qeth_diags_cmds cmd)
{
return card->info.diagass_support & (__u32)cmd;
}
extern struct qeth_discipline qeth_l2_discipline;
extern struct qeth_discipline qeth_l3_discipline;
extern const struct attribute_group *qeth_generic_attr_groups[];
extern const struct attribute_group *qeth_osn_attr_groups[];
extern struct workqueue_struct *qeth_wq;
int qeth_card_hw_is_reachable(struct qeth_card *);
const char *qeth_get_cardname_short(struct qeth_card *);
int qeth_realloc_buffer_pool(struct qeth_card *, int);
int qeth_core_load_discipline(struct qeth_card *, enum qeth_discipline_id);
void qeth_core_free_discipline(struct qeth_card *);
void qeth_buffer_reclaim_work(struct work_struct *);
/* exports for qeth discipline device drivers */
extern struct qeth_card_list_struct qeth_core_card_list;
extern struct kmem_cache *qeth_core_header_cache;
extern struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS];
void qeth_set_recovery_task(struct qeth_card *);
void qeth_clear_recovery_task(struct qeth_card *);
void qeth_set_allowed_threads(struct qeth_card *, unsigned long , int);
int qeth_threads_running(struct qeth_card *, unsigned long);
int qeth_wait_for_threads(struct qeth_card *, unsigned long);
int qeth_do_run_thread(struct qeth_card *, unsigned long);
void qeth_clear_thread_start_bit(struct qeth_card *, unsigned long);
void qeth_clear_thread_running_bit(struct qeth_card *, unsigned long);
int qeth_core_hardsetup_card(struct qeth_card *);
void qeth_print_status_message(struct qeth_card *);
int qeth_init_qdio_queues(struct qeth_card *);
int qeth_send_startlan(struct qeth_card *);
int qeth_send_stoplan(struct qeth_card *);
int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *,
int (*reply_cb)
(struct qeth_card *, struct qeth_reply *, unsigned long),
void *);
struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *,
enum qeth_ipa_cmds, enum qeth_prot_versions);
int qeth_query_setadapterparms(struct qeth_card *);
int qeth_check_qdio_errors(struct qeth_card *, struct qdio_buffer *,
unsigned int, const char *);
void qeth_queue_input_buffer(struct qeth_card *, int);
struct sk_buff *qeth_core_get_next_skb(struct qeth_card *,
struct qeth_qdio_buffer *, struct qdio_buffer_element **, int *,
struct qeth_hdr **);
void qeth_schedule_recovery(struct qeth_card *);
void qeth_qdio_start_poll(struct ccw_device *, int, unsigned long);
void qeth_qdio_input_handler(struct ccw_device *,
unsigned int, unsigned int, int,
int, unsigned long);
void qeth_qdio_output_handler(struct ccw_device *, unsigned int,
int, int, int, unsigned long);
void qeth_clear_ipacmd_list(struct qeth_card *);
int qeth_qdio_clear_card(struct qeth_card *, int);
void qeth_clear_working_pool_list(struct qeth_card *);
void qeth_clear_cmd_buffers(struct qeth_channel *);
void qeth_clear_qdio_buffers(struct qeth_card *);
void qeth_setadp_promisc_mode(struct qeth_card *);
struct net_device_stats *qeth_get_stats(struct net_device *);
int qeth_change_mtu(struct net_device *, int);
int qeth_setadpparms_change_macaddr(struct qeth_card *);
void qeth_tx_timeout(struct net_device *);
void qeth_prepare_control_data(struct qeth_card *, int,
struct qeth_cmd_buffer *);
void qeth_release_buffer(struct qeth_channel *, struct qeth_cmd_buffer *);
void qeth_prepare_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *, char);
struct qeth_cmd_buffer *qeth_wait_for_buffer(struct qeth_channel *);
int qeth_mdio_read(struct net_device *, int, int);
int qeth_snmp_command(struct qeth_card *, char __user *);
int qeth_query_oat_command(struct qeth_card *, char __user *);
int qeth_query_switch_attributes(struct qeth_card *card,
struct qeth_switch_info *sw_info);
int qeth_query_card_info(struct qeth_card *card,
struct carrier_info *carrier_info);
int qeth_send_control_data(struct qeth_card *, int, struct qeth_cmd_buffer *,
int (*reply_cb)(struct qeth_card *, struct qeth_reply*, unsigned long),
void *reply_param);
int qeth_bridgeport_query_ports(struct qeth_card *card,
enum qeth_sbp_roles *role, enum qeth_sbp_states *state);
int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role);
int qeth_bridgeport_an_set(struct qeth_card *card, int enable);
int qeth_get_priority_queue(struct qeth_card *, struct sk_buff *, int, int);
int qeth_get_elements_no(struct qeth_card *, struct sk_buff *, int);
int qeth_get_elements_for_frags(struct sk_buff *);
int qeth_do_send_packet_fast(struct qeth_card *, struct qeth_qdio_out_q *,
struct sk_buff *, struct qeth_hdr *, int, int, int);
int qeth_do_send_packet(struct qeth_card *, struct qeth_qdio_out_q *,
struct sk_buff *, struct qeth_hdr *, int);
int qeth_core_get_sset_count(struct net_device *, int);
void qeth_core_get_ethtool_stats(struct net_device *,
struct ethtool_stats *, u64 *);
void qeth_core_get_strings(struct net_device *, u32, u8 *);
void qeth_core_get_drvinfo(struct net_device *, struct ethtool_drvinfo *);
void qeth_dbf_longtext(debug_info_t *id, int level, char *text, ...);
int qeth_core_ethtool_get_settings(struct net_device *, struct ethtool_cmd *);
int qeth_set_access_ctrl_online(struct qeth_card *card, int fallback);
int qeth_hdr_chk_and_bounce(struct sk_buff *, struct qeth_hdr **, int);
int qeth_configure_cq(struct qeth_card *, enum qeth_cq);
int qeth_hw_trap(struct qeth_card *, enum qeth_diags_trap_action);
int qeth_query_ipassists(struct qeth_card *, enum qeth_prot_versions prot);
void qeth_trace_features(struct qeth_card *);
void qeth_close_dev(struct qeth_card *);
/* exports for OSN */
int qeth_osn_assist(struct net_device *, void *, int);
int qeth_osn_register(unsigned char *read_dev_no, struct net_device **,
int (*assist_cb)(struct net_device *, void *),
int (*data_cb)(struct sk_buff *));
void qeth_osn_deregister(struct net_device *);
#endif /* __QETH_CORE_H__ */
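
The qeth_is_supported()/qeth_is_enabled() macro family and their IPv6 and adapter-parameter variants defined above are the intended way to test a card's IPA capability bits (enum qeth_ipa_funcs and enum qeth_ipa_setadp_cmd in qeth_core_mpc.h). A small, hypothetical illustration of the pattern:

/* Hypothetical capability report -- illustration of the macros only. */
#include <linux/printk.h>
#include "qeth_core.h"

static void demo_show_caps(struct qeth_card *card)
{
	/* IPv6 IP assist, tracked per protocol in card->options.ipa6 */
	if (qeth_is_supported6(card, IPA_IPV6))
		pr_info("%s: IPv6 IP assist available\n",
			QETH_CARD_IFNAME(card));

	/* outbound TCP segmentation offload (IPv4 IPA function bit) */
	if (qeth_is_supported(card, IPA_OUTBOUND_TSO) &&
	    qeth_is_enabled(card, IPA_OUTBOUND_TSO))
		pr_info("%s: TSO active\n", QETH_CARD_IFNAME(card));

	/* SETADAPTERPARMS subcommands live in card->options.adp */
	if (qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE))
		pr_info("%s: promiscuous mode can be set on the adapter\n",
			QETH_CARD_IFNAME(card));
}
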

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,270 @@
/*
* Copyright IBM Corp. 2007
* Author(s): Frank Pavlic <fpavlic@de.ibm.com>,
* Thomas Spatzier <tspat@de.ibm.com>,
* Frank Blaschka <frank.blaschka@de.ibm.com>
*/
#include <linux/module.h>
#include <asm/cio.h>
#include "qeth_core_mpc.h"
unsigned char IDX_ACTIVATE_READ[] = {
0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00,
0x19, 0x01, 0x01, 0x80, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc8, 0xc1,
0xd3, 0xd3, 0xd6, 0xd3, 0xc5, 0x40, 0x00, 0x00,
0x00, 0x00
};
unsigned char IDX_ACTIVATE_WRITE[] = {
0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00,
0x15, 0x01, 0x01, 0x80, 0x00, 0x00, 0x00, 0x00,
0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0xc8, 0xc1,
0xd3, 0xd3, 0xd6, 0xd3, 0xc5, 0x40, 0x00, 0x00,
0x00, 0x00
};
unsigned char CM_ENABLE[] = {
0x00, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x63,
0x10, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0x00,
0x81, 0x7e, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x24, 0x00, 0x23,
0x00, 0x00, 0x23, 0x05, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x01, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00, 0x40,
0x00, 0x0c, 0x41, 0x02, 0x00, 0x17, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x0b, 0x04, 0x01,
0x7e, 0x04, 0x05, 0x00, 0x01, 0x01, 0x0f,
0x00,
0x0c, 0x04, 0x02, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff
};
unsigned char CM_SETUP[] = {
0x00, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02,
0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x64,
0x10, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0x00,
0x81, 0x7e, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x24, 0x00, 0x24,
0x00, 0x00, 0x24, 0x05, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x01, 0x00, 0x00, 0x24, 0x00, 0x00, 0x00, 0x40,
0x00, 0x0c, 0x41, 0x04, 0x00, 0x18, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x09, 0x04, 0x04,
0x05, 0x00, 0x01, 0x01, 0x11,
0x00, 0x09, 0x04,
0x05, 0x05, 0x00, 0x00, 0x00, 0x00,
0x00, 0x06,
0x04, 0x06, 0xc8, 0x00
};
unsigned char ULP_ENABLE[] = {
0x00, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03,
0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x6b,
0x10, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0x00,
0x41, 0x7e, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0x00, 0x00, 0x24, 0x00, 0x2b,
0x00, 0x00, 0x2b, 0x05, 0x20, 0x01, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x01, 0x00, 0x00, 0x2b, 0x00, 0x00, 0x00, 0x40,
0x00, 0x0c, 0x41, 0x02, 0x00, 0x1f, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x0b, 0x04, 0x01,
0x03, 0x04, 0x05, 0x00, 0x01, 0x01, 0x12,
0x00,
0x14, 0x04, 0x0a, 0x00, 0x20, 0x00, 0x00, 0xff,
0xff, 0x00, 0x08, 0xc8, 0xe8, 0xc4, 0xf1, 0xc7,
0xf1, 0x00, 0x00
};
unsigned char ULP_SETUP[] = {
0x00, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04,
0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x6c,
0x10, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0x00,
0x41, 0x7e, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02,
0x00, 0x00, 0x00, 0x01, 0x00, 0x24, 0x00, 0x2c,
0x00, 0x00, 0x2c, 0x05, 0x20, 0x01, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x01, 0x00, 0x00, 0x2c, 0x00, 0x00, 0x00, 0x40,
0x00, 0x0c, 0x41, 0x04, 0x00, 0x20, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x09, 0x04, 0x04,
0x05, 0x00, 0x01, 0x01, 0x14,
0x00, 0x09, 0x04,
0x05, 0x05, 0x30, 0x01, 0x00, 0x00,
0x00, 0x06,
0x04, 0x06, 0x40, 0x00,
0x00, 0x08, 0x04, 0x0b,
0x00, 0x00, 0x00, 0x00
};
unsigned char DM_ACT[] = {
0x00, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x55,
0x10, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0x00,
0x41, 0x7e, 0x00, 0x01, 0x00, 0x00, 0x00, 0x03,
0x00, 0x00, 0x00, 0x02, 0x00, 0x24, 0x00, 0x15,
0x00, 0x00, 0x2c, 0x05, 0x20, 0x01, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x01, 0x00, 0x00, 0x15, 0x00, 0x00, 0x00, 0x40,
0x00, 0x0c, 0x43, 0x60, 0x00, 0x09, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x09, 0x04, 0x04,
0x05, 0x40, 0x01, 0x01, 0x00
};
unsigned char IPA_PDU_HEADER[] = {
0x00, 0xe0, 0x00, 0x00, 0x77, 0x77, 0x77, 0x77,
0x00, 0x00, 0x00, 0x14, 0x00, 0x00,
(IPA_PDU_HEADER_SIZE+sizeof(struct qeth_ipa_cmd)) / 256,
(IPA_PDU_HEADER_SIZE+sizeof(struct qeth_ipa_cmd)) % 256,
0x10, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
0xc1, 0x03, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x24,
sizeof(struct qeth_ipa_cmd) / 256,
sizeof(struct qeth_ipa_cmd) % 256,
0x00,
sizeof(struct qeth_ipa_cmd) / 256,
sizeof(struct qeth_ipa_cmd) % 256,
0x05,
0x77, 0x77, 0x77, 0x77,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x01, 0x00,
sizeof(struct qeth_ipa_cmd) / 256,
sizeof(struct qeth_ipa_cmd) % 256,
0x00, 0x00, 0x00, 0x40,
};
EXPORT_SYMBOL_GPL(IPA_PDU_HEADER);
unsigned char WRITE_CCW[] = {
0x01, CCW_FLAG_SLI, 0, 0,
0, 0, 0, 0
};
unsigned char READ_CCW[] = {
0x02, CCW_FLAG_SLI, 0, 0,
0, 0, 0, 0
};
struct ipa_rc_msg {
enum qeth_ipa_return_codes rc;
char *msg;
};
static struct ipa_rc_msg qeth_ipa_rc_msg[] = {
{IPA_RC_SUCCESS, "success"},
{IPA_RC_NOTSUPP, "Command not supported"},
{IPA_RC_IP_TABLE_FULL, "Add Addr IP Table Full - ipv6"},
{IPA_RC_UNKNOWN_ERROR, "IPA command failed - reason unknown"},
{IPA_RC_UNSUPPORTED_COMMAND, "Command not supported"},
{IPA_RC_DUP_IPV6_REMOTE, "ipv6 address already registered remote"},
{IPA_RC_DUP_IPV6_HOME, "ipv6 address already registered"},
{IPA_RC_UNREGISTERED_ADDR, "Address not registered"},
{IPA_RC_NO_ID_AVAILABLE, "No identifiers available"},
{IPA_RC_ID_NOT_FOUND, "Identifier not found"},
{IPA_RC_INVALID_IP_VERSION, "IP version incorrect"},
{IPA_RC_LAN_FRAME_MISMATCH, "LAN and frame mismatch"},
{IPA_RC_L2_UNSUPPORTED_CMD, "Unsupported layer 2 command"},
{IPA_RC_L2_DUP_MAC, "Duplicate MAC address"},
{IPA_RC_L2_ADDR_TABLE_FULL, "Layer2 address table full"},
{IPA_RC_L2_DUP_LAYER3_MAC, "Duplicate with layer 3 MAC"},
{IPA_RC_L2_GMAC_NOT_FOUND, "GMAC not found"},
{IPA_RC_L2_MAC_NOT_AUTH_BY_HYP, "L2 mac not authorized by hypervisor"},
{IPA_RC_L2_MAC_NOT_AUTH_BY_ADP, "L2 mac not authorized by adapter"},
{IPA_RC_L2_MAC_NOT_FOUND, "L2 mac address not found"},
{IPA_RC_L2_INVALID_VLAN_ID, "L2 invalid vlan id"},
{IPA_RC_L2_DUP_VLAN_ID, "L2 duplicate vlan id"},
{IPA_RC_L2_VLAN_ID_NOT_FOUND, "L2 vlan id not found"},
{IPA_RC_DATA_MISMATCH, "Data field mismatch (v4/v6 mixed)"},
{IPA_RC_INVALID_MTU_SIZE, "Invalid MTU size"},
{IPA_RC_INVALID_LANTYPE, "Invalid LAN type"},
{IPA_RC_INVALID_LANNUM, "Invalid LAN num"},
{IPA_RC_DUPLICATE_IP_ADDRESS, "Address already registered"},
{IPA_RC_IP_ADDR_TABLE_FULL, "IP address table full"},
{IPA_RC_LAN_PORT_STATE_ERROR, "LAN port state error"},
{IPA_RC_SETIP_NO_STARTLAN, "Setip no startlan received"},
{IPA_RC_SETIP_ALREADY_RECEIVED, "Setip already received"},
{IPA_RC_IP_ADDR_ALREADY_USED, "IP address already in use on LAN"},
{IPA_RC_MC_ADDR_NOT_FOUND, "Multicast address not found"},
{IPA_RC_SETIP_INVALID_VERSION, "SETIP invalid IP version"},
{IPA_RC_UNSUPPORTED_SUBCMD, "Unsupported assist subcommand"},
{IPA_RC_ARP_ASSIST_NO_ENABLE, "Only partial success, no enable"},
{IPA_RC_PRIMARY_ALREADY_DEFINED, "Primary already defined"},
{IPA_RC_SECOND_ALREADY_DEFINED, "Secondary already defined"},
{IPA_RC_INVALID_SETRTG_INDICATOR, "Invalid SETRTG indicator"},
{IPA_RC_MC_ADDR_ALREADY_DEFINED, "Multicast address already defined"},
{IPA_RC_LAN_OFFLINE, "STRTLAN_LAN_DISABLED - LAN offline"},
{IPA_RC_VEPA_TO_VEB_TRANSITION, "Adj. switch disabled port mode RR"},
{IPA_RC_INVALID_IP_VERSION2, "Invalid IP version"},
{IPA_RC_ENOMEM, "Memory problem"},
{IPA_RC_FFFF, "Unknown Error"}
};
char *qeth_get_ipa_msg(enum qeth_ipa_return_codes rc)
{
int x = 0;
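/* write rc into the catch-all last entry ("Unknown Error") so that
 * the scan below is guaranteed to terminate */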
qeth_ipa_rc_msg[sizeof(qeth_ipa_rc_msg) /
sizeof(struct ipa_rc_msg) - 1].rc = rc;
while (qeth_ipa_rc_msg[x].rc != rc)
x++;
return qeth_ipa_rc_msg[x].msg;
}
struct ipa_cmd_names {
enum qeth_ipa_cmds cmd;
char *name;
};
static struct ipa_cmd_names qeth_ipa_cmd_names[] = {
{IPA_CMD_STARTLAN, "startlan"},
{IPA_CMD_STOPLAN, "stoplan"},
{IPA_CMD_SETVMAC, "setvmac"},
{IPA_CMD_DELVMAC, "delvmac"},
{IPA_CMD_SETGMAC, "setgmac"},
{IPA_CMD_DELGMAC, "delgmac"},
{IPA_CMD_SETVLAN, "setvlan"},
{IPA_CMD_DELVLAN, "delvlan"},
{IPA_CMD_SETCCID, "setccid"},
{IPA_CMD_DELCCID, "delccid"},
{IPA_CMD_MODCCID, "modccid"},
{IPA_CMD_SETIP, "setip"},
{IPA_CMD_QIPASSIST, "qipassist"},
{IPA_CMD_SETASSPARMS, "setassparms"},
{IPA_CMD_SETIPM, "setipm"},
{IPA_CMD_DELIPM, "delipm"},
{IPA_CMD_SETRTG, "setrtg"},
{IPA_CMD_DELIP, "delip"},
{IPA_CMD_SETADAPTERPARMS, "setadapterparms"},
{IPA_CMD_SET_DIAG_ASS, "set_diag_ass"},
{IPA_CMD_SETBRIDGEPORT, "set_bridge_port"},
{IPA_CMD_CREATE_ADDR, "create_addr"},
{IPA_CMD_DESTROY_ADDR, "destroy_addr"},
{IPA_CMD_REGISTER_LOCAL_ADDR, "register_local_addr"},
{IPA_CMD_UNREGISTER_LOCAL_ADDR, "unregister_local_addr"},
{IPA_CMD_ADDRESS_CHANGE_NOTIF, "address_change_notification"},
{IPA_CMD_UNKNOWN, "unknown"},
};
char *qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd)
{
int x = 0;
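/* use the last entry ("unknown") as a sentinel so the loop always
 * terminates, even for unlisted command codes */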
qeth_ipa_cmd_names[
sizeof(qeth_ipa_cmd_names) /
sizeof(struct ipa_cmd_names)-1].cmd = cmd;
while (qeth_ipa_cmd_names[x].cmd != cmd)
x++;
return qeth_ipa_cmd_names[x].name;
}

View file

@ -0,0 +1,807 @@
/*
* Copyright IBM Corp. 2007
* Author(s): Frank Pavlic <fpavlic@de.ibm.com>,
* Thomas Spatzier <tspat@de.ibm.com>,
* Frank Blaschka <frank.blaschka@de.ibm.com>
*/
#ifndef __QETH_CORE_MPC_H__
#define __QETH_CORE_MPC_H__
#include <asm/qeth.h>
#define IPA_PDU_HEADER_SIZE 0x40
#define QETH_IPA_PDU_LEN_TOTAL(buffer) (buffer + 0x0e)
#define QETH_IPA_PDU_LEN_PDU1(buffer) (buffer + 0x26)
#define QETH_IPA_PDU_LEN_PDU2(buffer) (buffer + 0x29)
#define QETH_IPA_PDU_LEN_PDU3(buffer) (buffer + 0x3a)
extern unsigned char IPA_PDU_HEADER[];
#define QETH_IPA_CMD_DEST_ADDR(buffer) (buffer + 0x2c)
#define IPA_CMD_LENGTH (IPA_PDU_HEADER_SIZE + sizeof(struct qeth_ipa_cmd))
#define QETH_SEQ_NO_LENGTH 4
#define QETH_MPC_TOKEN_LENGTH 4
#define QETH_MCL_LENGTH 4
#define OSA_ADDR_LEN 6
#define QETH_TIMEOUT (10 * HZ)
#define QETH_IPA_TIMEOUT (45 * HZ)
#define QETH_IDX_COMMAND_SEQNO 0xffff0000
#define SR_INFO_LEN 16
#define QETH_CLEAR_CHANNEL_PARM -10
#define QETH_HALT_CHANNEL_PARM -11
#define QETH_RCD_PARM -12
/*****************************************************************************/
/* IP Assist related definitions */
/*****************************************************************************/
#define IPA_CMD_INITIATOR_HOST 0x00
#define IPA_CMD_INITIATOR_OSA 0x01
#define IPA_CMD_INITIATOR_HOST_REPLY 0x80
#define IPA_CMD_INITIATOR_OSA_REPLY 0x81
#define IPA_CMD_PRIM_VERSION_NO 0x01
enum qeth_card_types {
QETH_CARD_TYPE_UNKNOWN = 0,
QETH_CARD_TYPE_OSD = 1,
QETH_CARD_TYPE_IQD = 5,
QETH_CARD_TYPE_OSN = 6,
QETH_CARD_TYPE_OSM = 3,
QETH_CARD_TYPE_OSX = 2,
};
#define QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE 0x18
/* only the first two bytes are looked at in qeth_get_cardname_short */
enum qeth_link_types {
QETH_LINK_TYPE_FAST_ETH = 0x01,
QETH_LINK_TYPE_HSTR = 0x02,
QETH_LINK_TYPE_GBIT_ETH = 0x03,
QETH_LINK_TYPE_OSN = 0x04,
QETH_LINK_TYPE_10GBIT_ETH = 0x10,
QETH_LINK_TYPE_LANE_ETH100 = 0x81,
QETH_LINK_TYPE_LANE_TR = 0x82,
QETH_LINK_TYPE_LANE_ETH1000 = 0x83,
QETH_LINK_TYPE_LANE = 0x88,
QETH_LINK_TYPE_ATM_NATIVE = 0x90,
};
/*
* Routing stuff
*/
#define RESET_ROUTING_FLAG 0x10 /* indicate that routing type shall be set */
enum qeth_routing_types {
/* TODO: set to bit flag used in IPA Command */
NO_ROUTER = 0,
PRIMARY_ROUTER = 1,
SECONDARY_ROUTER = 2,
MULTICAST_ROUTER = 3,
PRIMARY_CONNECTOR = 4,
SECONDARY_CONNECTOR = 5,
};
/* IPA Commands */
enum qeth_ipa_cmds {
IPA_CMD_STARTLAN = 0x01,
IPA_CMD_STOPLAN = 0x02,
IPA_CMD_SETVMAC = 0x21,
IPA_CMD_DELVMAC = 0x22,
IPA_CMD_SETGMAC = 0x23,
IPA_CMD_DELGMAC = 0x24,
IPA_CMD_SETVLAN = 0x25,
IPA_CMD_DELVLAN = 0x26,
IPA_CMD_SETCCID = 0x41,
IPA_CMD_DELCCID = 0x42,
IPA_CMD_MODCCID = 0x43,
IPA_CMD_SETIP = 0xb1,
IPA_CMD_QIPASSIST = 0xb2,
IPA_CMD_SETASSPARMS = 0xb3,
IPA_CMD_SETIPM = 0xb4,
IPA_CMD_DELIPM = 0xb5,
IPA_CMD_SETRTG = 0xb6,
IPA_CMD_DELIP = 0xb7,
IPA_CMD_SETADAPTERPARMS = 0xb8,
IPA_CMD_SET_DIAG_ASS = 0xb9,
IPA_CMD_SETBRIDGEPORT = 0xbe,
IPA_CMD_CREATE_ADDR = 0xc3,
IPA_CMD_DESTROY_ADDR = 0xc4,
IPA_CMD_REGISTER_LOCAL_ADDR = 0xd1,
IPA_CMD_UNREGISTER_LOCAL_ADDR = 0xd2,
IPA_CMD_ADDRESS_CHANGE_NOTIF = 0xd3,
IPA_CMD_UNKNOWN = 0x00
};
enum qeth_ip_ass_cmds {
IPA_CMD_ASS_START = 0x0001,
IPA_CMD_ASS_STOP = 0x0002,
IPA_CMD_ASS_CONFIGURE = 0x0003,
IPA_CMD_ASS_ENABLE = 0x0004,
};
enum qeth_arp_process_subcmds {
IPA_CMD_ASS_ARP_SET_NO_ENTRIES = 0x0003,
IPA_CMD_ASS_ARP_QUERY_CACHE = 0x0004,
IPA_CMD_ASS_ARP_ADD_ENTRY = 0x0005,
IPA_CMD_ASS_ARP_REMOVE_ENTRY = 0x0006,
IPA_CMD_ASS_ARP_FLUSH_CACHE = 0x0007,
IPA_CMD_ASS_ARP_QUERY_INFO = 0x0104,
IPA_CMD_ASS_ARP_QUERY_STATS = 0x0204,
};
/* Return Codes for IPA Commands
* according to OSA card Specs */
enum qeth_ipa_return_codes {
IPA_RC_SUCCESS = 0x0000,
IPA_RC_NOTSUPP = 0x0001,
IPA_RC_IP_TABLE_FULL = 0x0002,
IPA_RC_UNKNOWN_ERROR = 0x0003,
IPA_RC_UNSUPPORTED_COMMAND = 0x0004,
IPA_RC_TRACE_ALREADY_ACTIVE = 0x0005,
IPA_RC_INVALID_FORMAT = 0x0006,
IPA_RC_DUP_IPV6_REMOTE = 0x0008,
IPA_RC_DUP_IPV6_HOME = 0x0010,
IPA_RC_UNREGISTERED_ADDR = 0x0011,
IPA_RC_NO_ID_AVAILABLE = 0x0012,
IPA_RC_ID_NOT_FOUND = 0x0013,
IPA_RC_INVALID_IP_VERSION = 0x0020,
IPA_RC_LAN_FRAME_MISMATCH = 0x0040,
IPA_RC_L2_UNSUPPORTED_CMD = 0x2003,
IPA_RC_L2_DUP_MAC = 0x2005,
IPA_RC_L2_ADDR_TABLE_FULL = 0x2006,
IPA_RC_L2_DUP_LAYER3_MAC = 0x200a,
IPA_RC_L2_GMAC_NOT_FOUND = 0x200b,
IPA_RC_L2_MAC_NOT_AUTH_BY_HYP = 0x200c,
IPA_RC_L2_MAC_NOT_AUTH_BY_ADP = 0x200d,
IPA_RC_L2_MAC_NOT_FOUND = 0x2010,
IPA_RC_L2_INVALID_VLAN_ID = 0x2015,
IPA_RC_L2_DUP_VLAN_ID = 0x2016,
IPA_RC_L2_VLAN_ID_NOT_FOUND = 0x2017,
IPA_RC_DATA_MISMATCH = 0xe001,
IPA_RC_INVALID_MTU_SIZE = 0xe002,
IPA_RC_INVALID_LANTYPE = 0xe003,
IPA_RC_INVALID_LANNUM = 0xe004,
IPA_RC_DUPLICATE_IP_ADDRESS = 0xe005,
IPA_RC_IP_ADDR_TABLE_FULL = 0xe006,
IPA_RC_LAN_PORT_STATE_ERROR = 0xe007,
IPA_RC_SETIP_NO_STARTLAN = 0xe008,
IPA_RC_SETIP_ALREADY_RECEIVED = 0xe009,
IPA_RC_IP_ADDR_ALREADY_USED = 0xe00a,
IPA_RC_MC_ADDR_NOT_FOUND = 0xe00b,
IPA_RC_SETIP_INVALID_VERSION = 0xe00d,
IPA_RC_UNSUPPORTED_SUBCMD = 0xe00e,
IPA_RC_ARP_ASSIST_NO_ENABLE = 0xe00f,
IPA_RC_PRIMARY_ALREADY_DEFINED = 0xe010,
IPA_RC_SECOND_ALREADY_DEFINED = 0xe011,
IPA_RC_INVALID_SETRTG_INDICATOR = 0xe012,
IPA_RC_MC_ADDR_ALREADY_DEFINED = 0xe013,
IPA_RC_LAN_OFFLINE = 0xe080,
IPA_RC_VEPA_TO_VEB_TRANSITION = 0xe090,
IPA_RC_INVALID_IP_VERSION2 = 0xf001,
IPA_RC_ENOMEM = 0xfffe,
IPA_RC_FFFF = 0xffff
};
/* for DELIP */
#define IPA_RC_IP_ADDRESS_NOT_DEFINED IPA_RC_PRIMARY_ALREADY_DEFINED
/* for SET_DIAGNOSTIC_ASSIST */
#define IPA_RC_INVALID_SUBCMD IPA_RC_IP_TABLE_FULL
#define IPA_RC_HARDWARE_AUTH_ERROR IPA_RC_UNKNOWN_ERROR
/* IPA function flags; each flag marks availability of respective function */
enum qeth_ipa_funcs {
IPA_ARP_PROCESSING = 0x00000001L,
IPA_INBOUND_CHECKSUM = 0x00000002L,
IPA_OUTBOUND_CHECKSUM = 0x00000004L,
IPA_IP_FRAGMENTATION = 0x00000008L,
IPA_FILTERING = 0x00000010L,
IPA_IPV6 = 0x00000020L,
IPA_MULTICASTING = 0x00000040L,
IPA_IP_REASSEMBLY = 0x00000080L,
IPA_QUERY_ARP_COUNTERS = 0x00000100L,
IPA_QUERY_ARP_ADDR_INFO = 0x00000200L,
IPA_SETADAPTERPARMS = 0x00000400L,
IPA_VLAN_PRIO = 0x00000800L,
IPA_PASSTHRU = 0x00001000L,
IPA_FLUSH_ARP_SUPPORT = 0x00002000L,
IPA_FULL_VLAN = 0x00004000L,
IPA_INBOUND_PASSTHRU = 0x00008000L,
IPA_SOURCE_MAC = 0x00010000L,
IPA_OSA_MC_ROUTER = 0x00020000L,
IPA_QUERY_ARP_ASSIST = 0x00040000L,
IPA_INBOUND_TSO = 0x00080000L,
IPA_OUTBOUND_TSO = 0x00100000L,
};
/* SETIP/DELIP IPA Command: ***************************************************/
enum qeth_ipa_setdelip_flags {
QETH_IPA_SETDELIP_DEFAULT = 0x00L, /* default */
QETH_IPA_SETIP_VIPA_FLAG = 0x01L, /* no grat. ARP */
QETH_IPA_SETIP_TAKEOVER_FLAG = 0x02L, /* nofail on grat. ARP */
QETH_IPA_DELIP_ADDR_2_B_TAKEN_OVER = 0x20L,
QETH_IPA_DELIP_VIPA_FLAG = 0x40L,
QETH_IPA_DELIP_ADDR_NEEDS_SETIP = 0x80L,
};
/* SETADAPTER IPA Command: ****************************************************/
enum qeth_ipa_setadp_cmd {
IPA_SETADP_QUERY_COMMANDS_SUPPORTED = 0x00000001L,
IPA_SETADP_ALTER_MAC_ADDRESS = 0x00000002L,
IPA_SETADP_ADD_DELETE_GROUP_ADDRESS = 0x00000004L,
IPA_SETADP_ADD_DELETE_FUNCTIONAL_ADDR = 0x00000008L,
IPA_SETADP_SET_ADDRESSING_MODE = 0x00000010L,
IPA_SETADP_SET_CONFIG_PARMS = 0x00000020L,
IPA_SETADP_SET_CONFIG_PARMS_EXTENDED = 0x00000040L,
IPA_SETADP_SET_BROADCAST_MODE = 0x00000080L,
IPA_SETADP_SEND_OSA_MESSAGE = 0x00000100L,
IPA_SETADP_SET_SNMP_CONTROL = 0x00000200L,
IPA_SETADP_QUERY_CARD_INFO = 0x00000400L,
IPA_SETADP_SET_PROMISC_MODE = 0x00000800L,
IPA_SETADP_SET_DIAG_ASSIST = 0x00002000L,
IPA_SETADP_SET_ACCESS_CONTROL = 0x00010000L,
IPA_SETADP_QUERY_OAT = 0x00080000L,
IPA_SETADP_QUERY_SWITCH_ATTRIBUTES = 0x00100000L,
};
enum qeth_ipa_mac_ops {
CHANGE_ADDR_READ_MAC = 0,
CHANGE_ADDR_REPLACE_MAC = 1,
CHANGE_ADDR_ADD_MAC = 2,
CHANGE_ADDR_DEL_MAC = 4,
CHANGE_ADDR_RESET_MAC = 8,
};
enum qeth_ipa_addr_ops {
CHANGE_ADDR_READ_ADDR = 0,
CHANGE_ADDR_ADD_ADDR = 1,
CHANGE_ADDR_DEL_ADDR = 2,
CHANGE_ADDR_FLUSH_ADDR_TABLE = 4,
};
enum qeth_ipa_promisc_modes {
SET_PROMISC_MODE_OFF = 0,
SET_PROMISC_MODE_ON = 1,
};
enum qeth_ipa_isolation_modes {
ISOLATION_MODE_NONE = 0x00000000L,
ISOLATION_MODE_FWD = 0x00000001L,
ISOLATION_MODE_DROP = 0x00000002L,
};
enum qeth_ipa_set_access_mode_rc {
SET_ACCESS_CTRL_RC_SUCCESS = 0x0000,
SET_ACCESS_CTRL_RC_NOT_SUPPORTED = 0x0004,
SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED = 0x0008,
SET_ACCESS_CTRL_RC_ALREADY_ISOLATED = 0x0010,
SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER = 0x0014,
SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF = 0x0018,
SET_ACCESS_CTRL_RC_REFLREL_UNSUPPORTED = 0x0022,
SET_ACCESS_CTRL_RC_REFLREL_FAILED = 0x0024,
SET_ACCESS_CTRL_RC_REFLREL_DEACT_FAILED = 0x0028,
};
enum qeth_card_info_card_type {
CARD_INFO_TYPE_1G_COPPER_A = 0x61,
CARD_INFO_TYPE_1G_FIBRE_A = 0x71,
CARD_INFO_TYPE_10G_FIBRE_A = 0x91,
CARD_INFO_TYPE_1G_COPPER_B = 0xb1,
CARD_INFO_TYPE_1G_FIBRE_B = 0xa1,
CARD_INFO_TYPE_10G_FIBRE_B = 0xc1,
};
enum qeth_card_info_port_mode {
CARD_INFO_PORTM_HALFDUPLEX = 0x0002,
CARD_INFO_PORTM_FULLDUPLEX = 0x0003,
};
enum qeth_card_info_port_speed {
CARD_INFO_PORTS_10M = 0x00000005,
CARD_INFO_PORTS_100M = 0x00000006,
CARD_INFO_PORTS_1G = 0x00000007,
CARD_INFO_PORTS_10G = 0x00000008,
};
/* (SET)DELIP(M) IPA stuff ***************************************************/
struct qeth_ipacmd_setdelip4 {
__u8 ip_addr[4];
__u8 mask[4];
__u32 flags;
} __attribute__ ((packed));
struct qeth_ipacmd_setdelip6 {
__u8 ip_addr[16];
__u8 mask[16];
__u32 flags;
} __attribute__ ((packed));
struct qeth_ipacmd_setdelipm {
__u8 mac[6];
__u8 padding[2];
__u8 ip6[12];
__u8 ip4[4];
} __attribute__ ((packed));
struct qeth_ipacmd_layer2setdelmac {
__u32 mac_length;
__u8 mac[6];
} __attribute__ ((packed));
struct qeth_ipacmd_layer2setdelvlan {
__u16 vlan_id;
} __attribute__ ((packed));
struct qeth_ipacmd_setassparms_hdr {
__u32 assist_no;
__u16 length;
__u16 command_code;
__u16 return_code;
__u8 number_of_replies;
__u8 seq_no;
} __attribute__((packed));
struct qeth_arp_query_data {
__u16 request_bits;
__u16 reply_bits;
__u32 no_entries;
char data; /* only for replies */
} __attribute__((packed));
/* used as parameter for arp_query reply */
struct qeth_arp_query_info {
__u32 udata_len;
__u16 mask_bits;
__u32 udata_offset;
__u32 no_entries;
char *udata;
};
/* SETASSPARMS IPA Command: */
struct qeth_ipacmd_setassparms {
struct qeth_ipacmd_setassparms_hdr hdr;
union {
__u32 flags_32bit;
struct qeth_arp_cache_entry add_arp_entry;
struct qeth_arp_query_data query_arp;
__u8 ip[16];
} data;
} __attribute__ ((packed));
/* SETRTG IPA Command: ****************************************************/
struct qeth_set_routing {
__u8 type;
};
/* SETADAPTERPARMS IPA Command: *******************************************/
struct qeth_query_cmds_supp {
__u32 no_lantypes_supp;
__u8 lan_type;
__u8 reserved1[3];
__u32 supported_cmds;
__u8 reserved2[8];
} __attribute__ ((packed));
struct qeth_change_addr {
__u32 cmd;
__u32 addr_size;
__u32 no_macs;
__u8 addr[OSA_ADDR_LEN];
} __attribute__ ((packed));
struct qeth_snmp_cmd {
__u8 token[16];
__u32 request;
__u32 interface;
__u32 returncode;
__u32 firmwarelevel;
__u32 seqno;
__u8 data;
} __attribute__ ((packed));
struct qeth_snmp_ureq_hdr {
__u32 data_len;
__u32 req_len;
__u32 reserved1;
__u32 reserved2;
} __attribute__ ((packed));
struct qeth_snmp_ureq {
struct qeth_snmp_ureq_hdr hdr;
struct qeth_snmp_cmd cmd;
} __attribute__((packed));
/* SET_ACCESS_CONTROL: same format for request and reply */
struct qeth_set_access_ctrl {
__u32 subcmd_code;
__u8 reserved[8];
} __attribute__((packed));
struct qeth_query_oat {
__u32 subcmd_code;
__u8 reserved[12];
} __packed;
struct qeth_qoat_priv {
__u32 buffer_len;
__u32 response_len;
char *buffer;
};
struct qeth_query_card_info {
__u8 card_type;
__u8 reserved1;
__u16 port_mode;
__u32 port_speed;
__u32 reserved2;
};
#define QETH_SWITCH_FORW_802_1 0x00000001
#define QETH_SWITCH_FORW_REFL_RELAY 0x00000002
#define QETH_SWITCH_CAP_RTE 0x00000004
#define QETH_SWITCH_CAP_ECP 0x00000008
#define QETH_SWITCH_CAP_VDP 0x00000010
struct qeth_query_switch_attributes {
__u8 version;
__u8 reserved1;
__u16 reserved2;
__u32 capabilities;
__u32 settings;
__u8 reserved3[8];
};
struct qeth_ipacmd_setadpparms_hdr {
__u32 supp_hw_cmds;
__u32 reserved1;
__u16 cmdlength;
__u16 reserved2;
__u32 command_code;
__u16 return_code;
__u8 used_total;
__u8 seq_no;
__u32 reserved3;
} __attribute__ ((packed));
struct qeth_ipacmd_setadpparms {
struct qeth_ipacmd_setadpparms_hdr hdr;
union {
struct qeth_query_cmds_supp query_cmds_supp;
struct qeth_change_addr change_addr;
struct qeth_snmp_cmd snmp;
struct qeth_set_access_ctrl set_access_ctrl;
struct qeth_query_oat query_oat;
struct qeth_query_card_info card_info;
struct qeth_query_switch_attributes query_switch_attributes;
__u32 mode;
} data;
} __attribute__ ((packed));
/* CREATE_ADDR IPA Command: ***********************************************/
struct qeth_create_destroy_address {
__u8 unique_id[8];
} __attribute__ ((packed));
/* SET DIAGNOSTIC ASSIST IPA Command: *************************************/
enum qeth_diags_cmds {
QETH_DIAGS_CMD_QUERY = 0x0001,
QETH_DIAGS_CMD_TRAP = 0x0002,
QETH_DIAGS_CMD_TRACE = 0x0004,
QETH_DIAGS_CMD_NOLOG = 0x0008,
QETH_DIAGS_CMD_DUMP = 0x0010,
};
enum qeth_diags_trace_types {
QETH_DIAGS_TYPE_HIPERSOCKET = 0x02,
};
enum qeth_diags_trace_cmds {
QETH_DIAGS_CMD_TRACE_ENABLE = 0x0001,
QETH_DIAGS_CMD_TRACE_DISABLE = 0x0002,
QETH_DIAGS_CMD_TRACE_MODIFY = 0x0004,
QETH_DIAGS_CMD_TRACE_REPLACE = 0x0008,
QETH_DIAGS_CMD_TRACE_QUERY = 0x0010,
};
enum qeth_diags_trap_action {
QETH_DIAGS_TRAP_ARM = 0x01,
QETH_DIAGS_TRAP_DISARM = 0x02,
QETH_DIAGS_TRAP_CAPTURE = 0x04,
};
struct qeth_ipacmd_diagass {
__u32 host_tod2;
__u32:32;
__u16 subcmd_len;
__u16:16;
__u32 subcmd;
__u8 type;
__u8 action;
__u16 options;
__u32 ext;
__u8 cdata[64];
} __attribute__ ((packed));
/* SETBRIDGEPORT IPA Command: *********************************************/
enum qeth_ipa_sbp_cmd {
IPA_SBP_QUERY_COMMANDS_SUPPORTED = 0x00000000L,
IPA_SBP_RESET_BRIDGE_PORT_ROLE = 0x00000001L,
IPA_SBP_SET_PRIMARY_BRIDGE_PORT = 0x00000002L,
IPA_SBP_SET_SECONDARY_BRIDGE_PORT = 0x00000004L,
IPA_SBP_QUERY_BRIDGE_PORTS = 0x00000008L,
IPA_SBP_BRIDGE_PORT_STATE_CHANGE = 0x00000010L,
};
struct net_if_token {
__u16 devnum;
__u8 cssid;
__u8 iid;
__u8 ssid;
__u8 chpid;
__u16 chid;
} __packed;
struct mac_addr_lnid {
__u8 mac[6];
__u16 lnid;
} __packed;
struct qeth_ipacmd_sbp_hdr {
__u32 supported_sbp_cmds;
__u32 enabled_sbp_cmds;
__u16 cmdlength;
__u16 reserved1;
__u32 command_code;
__u16 return_code;
__u8 used_total;
__u8 seq_no;
__u32 reserved2;
} __packed;
struct qeth_sbp_query_cmds_supp {
__u32 supported_cmds;
__u32 reserved;
} __packed;
struct qeth_sbp_reset_role {
} __packed;
struct qeth_sbp_set_primary {
struct net_if_token token;
} __packed;
struct qeth_sbp_set_secondary {
} __packed;
struct qeth_sbp_port_entry {
__u8 role;
__u8 state;
__u8 reserved1;
__u8 reserved2;
struct net_if_token token;
} __packed;
struct qeth_sbp_query_ports {
__u8 primary_bp_supported;
__u8 secondary_bp_supported;
__u8 num_entries;
__u8 entry_length;
struct qeth_sbp_port_entry entry[];
} __packed;
struct qeth_sbp_state_change {
__u8 primary_bp_supported;
__u8 secondary_bp_supported;
__u8 num_entries;
__u8 entry_length;
struct qeth_sbp_port_entry entry[];
} __packed;
struct qeth_ipacmd_setbridgeport {
struct qeth_ipacmd_sbp_hdr hdr;
union {
struct qeth_sbp_query_cmds_supp query_cmds_supp;
struct qeth_sbp_reset_role reset_role;
struct qeth_sbp_set_primary set_primary;
struct qeth_sbp_set_secondary set_secondary;
struct qeth_sbp_query_ports query_ports;
struct qeth_sbp_state_change state_change;
} data;
} __packed;
/* ADDRESS_CHANGE_NOTIFICATION adapter-initiated "command" *******************/
/* Bitmask for entry->change_code. Both bits may be raised. */
enum qeth_ipa_addr_change_code {
IPA_ADDR_CHANGE_CODE_VLANID = 0x01,
IPA_ADDR_CHANGE_CODE_MACADDR = 0x02,
IPA_ADDR_CHANGE_CODE_REMOVAL = 0x80, /* else addition */
};
enum qeth_ipa_addr_change_retcode {
IPA_ADDR_CHANGE_RETCODE_OK = 0x0000,
IPA_ADDR_CHANGE_RETCODE_LOSTEVENTS = 0x0010,
};
enum qeth_ipa_addr_change_lostmask {
IPA_ADDR_CHANGE_MASK_OVERFLOW = 0x01,
IPA_ADDR_CHANGE_MASK_STATECHANGE = 0x02,
};
struct qeth_ipacmd_addr_change_entry {
struct net_if_token token;
struct mac_addr_lnid addr_lnid;
__u8 change_code;
__u8 reserved1;
__u16 reserved2;
} __packed;
struct qeth_ipacmd_addr_change {
__u8 lost_event_mask;
__u8 reserved;
__u16 num_entries;
struct qeth_ipacmd_addr_change_entry entry[];
} __packed;
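/*
 * Illustrative sketch only, not part of the driver: one way a consumer
 * could walk the flexible entry[] array above and decode the change_code
 * bitmask (VLANID and MACADDR may both be raised; REMOVAL distinguishes
 * removal from addition). The helper name is made up for this example.
 */
static inline void qeth_example_decode_addr_change(
				const struct qeth_ipacmd_addr_change *ac)
{
	int i;

	for (i = 0; i < ac->num_entries; i++) {
		const struct qeth_ipacmd_addr_change_entry *e = &ac->entry[i];
		int removal  = e->change_code & IPA_ADDR_CHANGE_CODE_REMOVAL;
		int has_vlan = e->change_code & IPA_ADDR_CHANGE_CODE_VLANID;
		int has_mac  = e->change_code & IPA_ADDR_CHANGE_CODE_MACADDR;

		/* a real consumer would update its address tables here */
		(void)removal;
		(void)has_vlan;
		(void)has_mac;
	}
}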
/* Header for each IPA command */
struct qeth_ipacmd_hdr {
__u8 command;
__u8 initiator;
__u16 seqno;
__u16 return_code;
__u8 adapter_type;
__u8 rel_adapter_no;
__u8 prim_version_no;
__u8 param_count;
__u16 prot_version;
__u32 ipa_supported;
__u32 ipa_enabled;
} __attribute__ ((packed));
/* The IPA command itself */
struct qeth_ipa_cmd {
struct qeth_ipacmd_hdr hdr;
union {
struct qeth_ipacmd_setdelip4 setdelip4;
struct qeth_ipacmd_setdelip6 setdelip6;
struct qeth_ipacmd_setdelipm setdelipm;
struct qeth_ipacmd_setassparms setassparms;
struct qeth_ipacmd_layer2setdelmac setdelmac;
struct qeth_ipacmd_layer2setdelvlan setdelvlan;
struct qeth_create_destroy_address create_destroy_addr;
struct qeth_ipacmd_setadpparms setadapterparms;
struct qeth_set_routing setrtg;
struct qeth_ipacmd_diagass diagass;
struct qeth_ipacmd_setbridgeport sbp;
struct qeth_ipacmd_addr_change addrchange;
} data;
} __attribute__ ((packed));
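/*
 * Illustrative sketch only, not part of the driver: locating the IPA
 * command inside a raw control buffer. This assumes IPA_PDU_HEADER_SIZE
 * (defined earlier in this header) is the offset of the command within
 * the buffer, which is also what QETH_IPA_ARP_DATA_POS() below builds on.
 * The helper name is made up for this example.
 */
static inline struct qeth_ipa_cmd *qeth_example_ipa_cmd(unsigned char *buffer)
{
	return (struct qeth_ipa_cmd *)(buffer + IPA_PDU_HEADER_SIZE);
}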
/*
 * Special command for ARP processing.
 * It is not folded into the setassparms command above, because doing so
 * would inflate the size of struct qeth_ipacmd_setassparms.
 */
enum qeth_ipa_arp_return_codes {
QETH_IPA_ARP_RC_SUCCESS = 0x0000,
QETH_IPA_ARP_RC_FAILED = 0x0001,
QETH_IPA_ARP_RC_NOTSUPP = 0x0002,
QETH_IPA_ARP_RC_OUT_OF_RANGE = 0x0003,
QETH_IPA_ARP_RC_Q_NOTSUPP = 0x0004,
QETH_IPA_ARP_RC_Q_NO_DATA = 0x0008,
};
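/*
 * Illustrative sketch only, not part of the driver: one plausible mapping
 * of the ARP return codes above to errno values (assuming <linux/errno.h>
 * is reachable here). The actual mapping used by the driver may differ;
 * the helper name is made up for this example.
 */
static inline int qeth_example_arp_errno(int rc)
{
	switch (rc) {
	case QETH_IPA_ARP_RC_SUCCESS:
		return 0;
	case QETH_IPA_ARP_RC_NOTSUPP:
	case QETH_IPA_ARP_RC_Q_NOTSUPP:
		return -EOPNOTSUPP;
	case QETH_IPA_ARP_RC_OUT_OF_RANGE:
		return -EINVAL;
	case QETH_IPA_ARP_RC_Q_NO_DATA:
		return -ENOENT;
	default:
		return -EIO;
	}
}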
extern char *qeth_get_ipa_msg(enum qeth_ipa_return_codes rc);
extern char *qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd);
#define QETH_SETASS_BASE_LEN (sizeof(struct qeth_ipacmd_hdr) + \
sizeof(struct qeth_ipacmd_setassparms_hdr))
#define QETH_IPA_ARP_DATA_POS(buffer) (buffer + IPA_PDU_HEADER_SIZE + \
QETH_SETASS_BASE_LEN)
#define QETH_SETADP_BASE_LEN (sizeof(struct qeth_ipacmd_hdr) + \
sizeof(struct qeth_ipacmd_setadpparms_hdr))
#define QETH_SNMP_SETADP_CMDLENGTH 16
#define QETH_ARP_DATA_SIZE 3968
#define QETH_ARP_CMD_LEN (QETH_ARP_DATA_SIZE + 8)
/* Helper functions */
#define IS_IPA_REPLY(cmd) ((cmd->hdr.initiator == IPA_CMD_INITIATOR_HOST) || \
(cmd->hdr.initiator == IPA_CMD_INITIATOR_OSA_REPLY))
/*****************************************************************************/
/* END OF IP Assist related definitions */
/*****************************************************************************/
extern unsigned char WRITE_CCW[];
extern unsigned char READ_CCW[];
extern unsigned char CM_ENABLE[];
#define CM_ENABLE_SIZE 0x63
#define QETH_CM_ENABLE_ISSUER_RM_TOKEN(buffer) (buffer + 0x2c)
#define QETH_CM_ENABLE_FILTER_TOKEN(buffer) (buffer + 0x53)
#define QETH_CM_ENABLE_USER_DATA(buffer) (buffer + 0x5b)
#define QETH_CM_ENABLE_RESP_FILTER_TOKEN(buffer) \
(PDU_ENCAPSULATION(buffer) + 0x13)
extern unsigned char CM_SETUP[];
#define CM_SETUP_SIZE 0x64
#define QETH_CM_SETUP_DEST_ADDR(buffer) (buffer + 0x2c)
#define QETH_CM_SETUP_CONNECTION_TOKEN(buffer) (buffer + 0x51)
#define QETH_CM_SETUP_FILTER_TOKEN(buffer) (buffer + 0x5a)
#define QETH_CM_SETUP_RESP_DEST_ADDR(buffer) \
(PDU_ENCAPSULATION(buffer) + 0x1a)
extern unsigned char ULP_ENABLE[];
#define ULP_ENABLE_SIZE 0x6b
#define QETH_ULP_ENABLE_LINKNUM(buffer) (buffer + 0x61)
#define QETH_ULP_ENABLE_DEST_ADDR(buffer) (buffer + 0x2c)
#define QETH_ULP_ENABLE_FILTER_TOKEN(buffer) (buffer + 0x53)
#define QETH_ULP_ENABLE_PORTNAME_AND_LL(buffer) (buffer + 0x62)
#define QETH_ULP_ENABLE_RESP_FILTER_TOKEN(buffer) \
(PDU_ENCAPSULATION(buffer) + 0x13)
#define QETH_ULP_ENABLE_RESP_MAX_MTU(buffer) \
(PDU_ENCAPSULATION(buffer) + 0x1f)
#define QETH_ULP_ENABLE_RESP_DIFINFO_LEN(buffer) \
(PDU_ENCAPSULATION(buffer) + 0x17)
#define QETH_ULP_ENABLE_RESP_LINK_TYPE(buffer) \
(PDU_ENCAPSULATION(buffer) + 0x2b)
/* Layer 2 definitions */
#define QETH_PROT_LAYER2 0x08
#define QETH_PROT_TCPIP 0x03
#define QETH_PROT_OSN2 0x0a
#define QETH_ULP_ENABLE_PROT_TYPE(buffer) (buffer + 0x50)
#define QETH_IPA_CMD_PROT_TYPE(buffer) (buffer + 0x19)
extern unsigned char ULP_SETUP[];
#define ULP_SETUP_SIZE 0x6c
#define QETH_ULP_SETUP_DEST_ADDR(buffer) (buffer + 0x2c)
#define QETH_ULP_SETUP_CONNECTION_TOKEN(buffer) (buffer + 0x51)
#define QETH_ULP_SETUP_FILTER_TOKEN(buffer) (buffer + 0x5a)
#define QETH_ULP_SETUP_CUA(buffer) (buffer + 0x68)
#define QETH_ULP_SETUP_REAL_DEVADDR(buffer) (buffer + 0x6a)
#define QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(buffer) \
(PDU_ENCAPSULATION(buffer) + 0x1a)
extern unsigned char DM_ACT[];
#define DM_ACT_SIZE 0x55
#define QETH_DM_ACT_DEST_ADDR(buffer) (buffer + 0x2c)
#define QETH_DM_ACT_CONNECTION_TOKEN(buffer) (buffer + 0x51)
#define QETH_TRANSPORT_HEADER_SEQ_NO(buffer) (buffer + 4)
#define QETH_PDU_HEADER_SEQ_NO(buffer) (buffer + 0x1c)
#define QETH_PDU_HEADER_ACK_SEQ_NO(buffer) (buffer + 0x20)
extern unsigned char IDX_ACTIVATE_READ[];
extern unsigned char IDX_ACTIVATE_WRITE[];
#define IDX_ACTIVATE_SIZE 0x22
#define QETH_IDX_ACT_PNO(buffer) (buffer+0x0b)
#define QETH_IDX_ACT_ISSUER_RM_TOKEN(buffer) (buffer + 0x0c)
#define QETH_IDX_NO_PORTNAME_REQUIRED(buffer) ((buffer)[0x0b] & 0x80)
#define QETH_IDX_ACT_FUNC_LEVEL(buffer) (buffer + 0x10)
#define QETH_IDX_ACT_DATASET_NAME(buffer) (buffer + 0x16)
#define QETH_IDX_ACT_QDIO_DEV_CUA(buffer) (buffer + 0x1e)
#define QETH_IDX_ACT_QDIO_DEV_REALADDR(buffer) (buffer + 0x20)
#define QETH_IS_IDX_ACT_POS_REPLY(buffer) (((buffer)[0x08] & 3) == 2)
#define QETH_IDX_REPLY_LEVEL(buffer) (buffer + 0x12)
#define QETH_IDX_ACT_CAUSE_CODE(buffer) (buffer)[0x09]
#define QETH_IDX_ACT_ERR_EXCL 0x19
#define QETH_IDX_ACT_ERR_AUTH 0x1E
#define QETH_IDX_ACT_ERR_AUTH_USER 0x20
#define PDU_ENCAPSULATION(buffer) \
(buffer + *(buffer + (*(buffer + 0x0b)) + \
*(buffer + *(buffer + 0x0b) + 0x11) + 0x07))
#define IS_IPA(buffer) \
((buffer) && \
(*(buffer + ((*(buffer + 0x0b)) + 4)) == 0xc1))
#define ADDR_FRAME_TYPE_DIX 1
#define ADDR_FRAME_TYPE_802_3 2
#define ADDR_FRAME_TYPE_TR_WITHOUT_SR 0x10
#define ADDR_FRAME_TYPE_TR_WITH_SR 0x20
#endif

View file

@ -0,0 +1,794 @@
/*
* Copyright IBM Corp. 2007
* Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
* Frank Pavlic <fpavlic@de.ibm.com>,
* Thomas Spatzier <tspat@de.ibm.com>,
* Frank Blaschka <frank.blaschka@de.ibm.com>
*/
#define KMSG_COMPONENT "qeth"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/list.h>
#include <linux/rwsem.h>
#include <asm/ebcdic.h>
#include "qeth_core.h"
static ssize_t qeth_dev_state_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qeth_card *card = dev_get_drvdata(dev);
if (!card)
return -EINVAL;
switch (card->state) {
case CARD_STATE_DOWN:
return sprintf(buf, "DOWN\n");
case CARD_STATE_HARDSETUP:
return sprintf(buf, "HARDSETUP\n");
case CARD_STATE_SOFTSETUP:
return sprintf(buf, "SOFTSETUP\n");
case CARD_STATE_UP:
if (card->lan_online)
return sprintf(buf, "UP (LAN ONLINE)\n");
else
return sprintf(buf, "UP (LAN OFFLINE)\n");
case CARD_STATE_RECOVER:
return sprintf(buf, "RECOVER\n");
default:
return sprintf(buf, "UNKNOWN\n");
}
}
static DEVICE_ATTR(state, 0444, qeth_dev_state_show, NULL);
static ssize_t qeth_dev_chpid_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qeth_card *card = dev_get_drvdata(dev);
if (!card)
return -EINVAL;
return sprintf(buf, "%02X\n", card->info.chpid);
}
static DEVICE_ATTR(chpid, 0444, qeth_dev_chpid_show, NULL);
static ssize_t qeth_dev_if_name_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qeth_card *card = dev_get_drvdata(dev);
if (!card)
return -EINVAL;
return sprintf(buf, "%s\n", QETH_CARD_IFNAME(card));
}
static DEVICE_ATTR(if_name, 0444, qeth_dev_if_name_show, NULL);
static ssize_t qeth_dev_card_type_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qeth_card *card = dev_get_drvdata(dev);
if (!card)
return -EINVAL;
return sprintf(buf, "%s\n", qeth_get_cardname_short(card));
}
static DEVICE_ATTR(card_type, 0444, qeth_dev_card_type_show, NULL);
static inline const char *qeth_get_bufsize_str(struct qeth_card *card)
{
if (card->qdio.in_buf_size == 16384)
return "16k";
else if (card->qdio.in_buf_size == 24576)
return "24k";
else if (card->qdio.in_buf_size == 32768)
return "32k";
else if (card->qdio.in_buf_size == 40960)
return "40k";
else
return "64k";
}
static ssize_t qeth_dev_inbuf_size_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qeth_card *card = dev_get_drvdata(dev);
if (!card)
return -EINVAL;
return sprintf(buf, "%s\n", qeth_get_bufsize_str(card));
}
static DEVICE_ATTR(inbuf_size, 0444, qeth_dev_inbuf_size_show, NULL);
static ssize_t qeth_dev_portno_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qeth_card *card = dev_get_drvdata(dev);
if (!card)
return -EINVAL;
return sprintf(buf, "%i\n", card->info.portno);
}
static ssize_t qeth_dev_portno_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct qeth_card *card = dev_get_drvdata(dev);
char *tmp;
unsigned int portno, limit;
int rc = 0;
if (!card)
return -EINVAL;
mutex_lock(&card->conf_mutex);
if ((card->state != CARD_STATE_DOWN) &&
(card->state != CARD_STATE_RECOVER)) {
rc = -EPERM;
goto out;
}
portno = simple_strtoul(buf, &tmp, 16);
if (portno > QETH_MAX_PORTNO) {
rc = -EINVAL;
goto out;
}
limit = (card->ssqd.pcnt ? card->ssqd.pcnt - 1 : card->ssqd.pcnt);
if (portno > limit) {
rc = -EINVAL;
goto out;
}
card->info.portno = portno;
out:
mutex_unlock(&card->conf_mutex);
return rc ? rc : count;
}
static DEVICE_ATTR(portno, 0644, qeth_dev_portno_show, qeth_dev_portno_store);
static ssize_t qeth_dev_portname_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qeth_card *card = dev_get_drvdata(dev);
char portname[9] = {0, };
if (!card)
return -EINVAL;
if (card->info.portname_required) {
memcpy(portname, card->info.portname + 1, 8);
EBCASC(portname, 8);
return sprintf(buf, "%s\n", portname);
} else
return sprintf(buf, "no portname required\n");
}
static ssize_t qeth_dev_portname_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct qeth_card *card = dev_get_drvdata(dev);
char *tmp;
int i, rc = 0;
if (!card)
return -EINVAL;
mutex_lock(&card->conf_mutex);
if ((card->state != CARD_STATE_DOWN) &&
(card->state != CARD_STATE_RECOVER)) {
rc = -EPERM;
goto out;
}
tmp = strsep((char **) &buf, "\n");
if ((strlen(tmp) > 8) || (strlen(tmp) == 0)) {
rc = -EINVAL;
goto out;
}
card->info.portname[0] = strlen(tmp);
/* blank-pad the port name to its full 8 characters */
for (i = 1; i < 9; i++)
card->info.portname[i] = ' ';
strcpy(card->info.portname + 1, tmp);
ASCEBC(card->info.portname + 1, 8);
out:
mutex_unlock(&card->conf_mutex);
return rc ? rc : count;
}
static DEVICE_ATTR(portname, 0644, qeth_dev_portname_show,
qeth_dev_portname_store);
static ssize_t qeth_dev_prioqing_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qeth_card *card = dev_get_drvdata(dev);
if (!card)
return -EINVAL;
switch (card->qdio.do_prio_queueing) {
case QETH_PRIO_Q_ING_PREC:
return sprintf(buf, "%s\n", "by precedence");
case QETH_PRIO_Q_ING_TOS:
return sprintf(buf, "%s\n", "by type of service");
case QETH_PRIO_Q_ING_SKB:
return sprintf(buf, "%s\n", "by skb-priority");
case QETH_PRIO_Q_ING_VLAN:
return sprintf(buf, "%s\n", "by VLAN headers");
default:
return sprintf(buf, "always queue %i\n",
card->qdio.default_out_queue);
}
}
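/*
 * Values accepted by the store function below: "prio_queueing_prec",
 * "prio_queueing_skb", "prio_queueing_tos", "prio_queueing_vlan"
 * (layer 2 only), "no_prio_queueing", and "no_prio_queueing:0" ..
 * "no_prio_queueing:3" to pin a fixed default outbound queue.
 */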
static ssize_t qeth_dev_prioqing_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct qeth_card *card = dev_get_drvdata(dev);
char *tmp;
int rc = 0;
if (!card)
return -EINVAL;
mutex_lock(&card->conf_mutex);
if ((card->state != CARD_STATE_DOWN) &&
(card->state != CARD_STATE_RECOVER)) {
rc = -EPERM;
goto out;
}
/* check whether the adapter has only one outbound queue (1920 devices);
 * in that case priority queueing cannot be permitted
 */
if (card->qdio.no_out_queues == 1) {
card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
rc = -EPERM;
goto out;
}
tmp = strsep((char **) &buf, "\n");
if (!strcmp(tmp, "prio_queueing_prec")) {
card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_PREC;
card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
} else if (!strcmp(tmp, "prio_queueing_skb")) {
card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_SKB;
card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
} else if (!strcmp(tmp, "prio_queueing_tos")) {
card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_TOS;
card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
} else if (!strcmp(tmp, "prio_queueing_vlan")) {
if (!card->options.layer2) {
rc = -ENOTSUPP;
goto out;
}
card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_VLAN;
card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
} else if (!strcmp(tmp, "no_prio_queueing:0")) {
card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
card->qdio.default_out_queue = 0;
} else if (!strcmp(tmp, "no_prio_queueing:1")) {
card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
card->qdio.default_out_queue = 1;
} else if (!strcmp(tmp, "no_prio_queueing:2")) {
card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
card->qdio.default_out_queue = 2;
} else if (!strcmp(tmp, "no_prio_queueing:3")) {
card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
card->qdio.default_out_queue = 3;
} else if (!strcmp(tmp, "no_prio_queueing")) {
card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
} else
rc = -EINVAL;
out:
mutex_unlock(&card->conf_mutex);
return rc ? rc : count;
}
static DEVICE_ATTR(priority_queueing, 0644, qeth_dev_prioqing_show,
qeth_dev_prioqing_store);
static ssize_t qeth_dev_bufcnt_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qeth_card *card = dev_get_drvdata(dev);
if (!card)
return -EINVAL;
return sprintf(buf, "%i\n", card->qdio.in_buf_pool.buf_count);
}
static ssize_t qeth_dev_bufcnt_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct qeth_card *card = dev_get_drvdata(dev);
char *tmp;
int cnt, old_cnt;
int rc = 0;
if (!card)
return -EINVAL;
mutex_lock(&card->conf_mutex);
if ((card->state != CARD_STATE_DOWN) &&
(card->state != CARD_STATE_RECOVER)) {
rc = -EPERM;
goto out;
}
old_cnt = card->qdio.in_buf_pool.buf_count;
cnt = simple_strtoul(buf, &tmp, 10);
cnt = (cnt < QETH_IN_BUF_COUNT_MIN) ? QETH_IN_BUF_COUNT_MIN :
((cnt > QETH_IN_BUF_COUNT_MAX) ? QETH_IN_BUF_COUNT_MAX : cnt);
if (old_cnt != cnt) {
rc = qeth_realloc_buffer_pool(card, cnt);
}
out:
mutex_unlock(&card->conf_mutex);
return rc ? rc : count;
}
static DEVICE_ATTR(buffer_count, 0644, qeth_dev_bufcnt_show,
qeth_dev_bufcnt_store);
static ssize_t qeth_dev_recover_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct qeth_card *card = dev_get_drvdata(dev);
char *tmp;
int i;
if (!card)
return -EINVAL;
if (card->state != CARD_STATE_UP)
return -EPERM;
i = simple_strtoul(buf, &tmp, 16);
if (i == 1)
qeth_schedule_recovery(card);
return count;
}
static DEVICE_ATTR(recover, 0200, NULL, qeth_dev_recover_store);
static ssize_t qeth_dev_performance_stats_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qeth_card *card = dev_get_drvdata(dev);
if (!card)
return -EINVAL;
return sprintf(buf, "%i\n", card->options.performance_stats ? 1:0);
}
static ssize_t qeth_dev_performance_stats_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct qeth_card *card = dev_get_drvdata(dev);
char *tmp;
int i, rc = 0;
if (!card)
return -EINVAL;
mutex_lock(&card->conf_mutex);
i = simple_strtoul(buf, &tmp, 16);
if ((i == 0) || (i == 1)) {
if (i == card->options.performance_stats)
goto out;
card->options.performance_stats = i;
if (i == 0)
memset(&card->perf_stats, 0,
sizeof(struct qeth_perf_stats));
card->perf_stats.initial_rx_packets = card->stats.rx_packets;
card->perf_stats.initial_tx_packets = card->stats.tx_packets;
} else
rc = -EINVAL;
out:
mutex_unlock(&card->conf_mutex);
return rc ? rc : count;
}
static DEVICE_ATTR(performance_stats, 0644, qeth_dev_performance_stats_show,
qeth_dev_performance_stats_store);
static ssize_t qeth_dev_layer2_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qeth_card *card = dev_get_drvdata(dev);
if (!card)
return -EINVAL;
return sprintf(buf, "%i\n", card->options.layer2);
}
static ssize_t qeth_dev_layer2_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct qeth_card *card = dev_get_drvdata(dev);
char *tmp;
int i, rc = 0;
enum qeth_discipline_id newdis;
if (!card)
return -EINVAL;
mutex_lock(&card->discipline_mutex);
if (card->state != CARD_STATE_DOWN) {
rc = -EPERM;
goto out;
}
i = simple_strtoul(buf, &tmp, 16);
switch (i) {
case 0:
newdis = QETH_DISCIPLINE_LAYER3;
break;
case 1:
newdis = QETH_DISCIPLINE_LAYER2;
break;
default:
rc = -EINVAL;
goto out;
}
if (card->options.layer2 == newdis)
goto out;
else {
card->info.mac_bits = 0;
if (card->discipline) {
card->discipline->remove(card->gdev);
qeth_core_free_discipline(card);
}
}
rc = qeth_core_load_discipline(card, newdis);
if (rc)
goto out;
rc = card->discipline->setup(card->gdev);
out:
mutex_unlock(&card->discipline_mutex);
return rc ? rc : count;
}
static DEVICE_ATTR(layer2, 0644, qeth_dev_layer2_show,
qeth_dev_layer2_store);
#define ATTR_QETH_ISOLATION_NONE ("none")
#define ATTR_QETH_ISOLATION_FWD ("forward")
#define ATTR_QETH_ISOLATION_DROP ("drop")
static ssize_t qeth_dev_isolation_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qeth_card *card = dev_get_drvdata(dev);
if (!card)
return -EINVAL;
switch (card->options.isolation) {
case ISOLATION_MODE_NONE:
return snprintf(buf, 6, "%s\n", ATTR_QETH_ISOLATION_NONE);
case ISOLATION_MODE_FWD:
return snprintf(buf, 9, "%s\n", ATTR_QETH_ISOLATION_FWD);
case ISOLATION_MODE_DROP:
return snprintf(buf, 6, "%s\n", ATTR_QETH_ISOLATION_DROP);
default:
return snprintf(buf, 5, "%s\n", "N/A");
}
}
static ssize_t qeth_dev_isolation_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct qeth_card *card = dev_get_drvdata(dev);
enum qeth_ipa_isolation_modes isolation;
int rc = 0;
char *tmp, *curtoken;
curtoken = (char *) buf;
if (!card)
return -EINVAL;
mutex_lock(&card->conf_mutex);
/* check for unknown, too, in case we do not yet know who we are */
if (card->info.type != QETH_CARD_TYPE_OSD &&
card->info.type != QETH_CARD_TYPE_OSX &&
card->info.type != QETH_CARD_TYPE_UNKNOWN) {
rc = -EOPNOTSUPP;
dev_err(&card->gdev->dev, "Adapter does not "
"support QDIO data connection isolation\n");
goto out;
}
/* parse input into isolation mode */
tmp = strsep(&curtoken, "\n");
if (!strcmp(tmp, ATTR_QETH_ISOLATION_NONE)) {
isolation = ISOLATION_MODE_NONE;
} else if (!strcmp(tmp, ATTR_QETH_ISOLATION_FWD)) {
isolation = ISOLATION_MODE_FWD;
} else if (!strcmp(tmp, ATTR_QETH_ISOLATION_DROP)) {
isolation = ISOLATION_MODE_DROP;
} else {
rc = -EINVAL;
goto out;
}
rc = count;
/* defer IP assist if device is offline (until discipline->set_online) */
card->options.prev_isolation = card->options.isolation;
card->options.isolation = isolation;
if (card->state == CARD_STATE_SOFTSETUP ||
card->state == CARD_STATE_UP) {
int ipa_rc = qeth_set_access_ctrl_online(card, 1);
if (ipa_rc != 0)
rc = ipa_rc;
}
out:
mutex_unlock(&card->conf_mutex);
return rc;
}
static DEVICE_ATTR(isolation, 0644, qeth_dev_isolation_show,
qeth_dev_isolation_store);
static ssize_t qeth_dev_switch_attrs_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qeth_card *card = dev_get_drvdata(dev);
struct qeth_switch_info sw_info;
int rc = 0;
if (!card)
return -EINVAL;
if (card->state != CARD_STATE_SOFTSETUP && card->state != CARD_STATE_UP)
return sprintf(buf, "n/a\n");
rc = qeth_query_switch_attributes(card, &sw_info);
if (rc)
return rc;
if (!sw_info.capabilities)
rc = sprintf(buf, "unknown");
if (sw_info.capabilities & QETH_SWITCH_FORW_802_1)
rc = sprintf(buf, (sw_info.settings & QETH_SWITCH_FORW_802_1 ?
"[802.1]" : "802.1"));
if (sw_info.capabilities & QETH_SWITCH_FORW_REFL_RELAY)
rc += sprintf(buf + rc,
(sw_info.settings & QETH_SWITCH_FORW_REFL_RELAY ?
" [rr]" : " rr"));
rc += sprintf(buf + rc, "\n");
return rc;
}
static DEVICE_ATTR(switch_attrs, 0444,
qeth_dev_switch_attrs_show, NULL);
static ssize_t qeth_hw_trap_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qeth_card *card = dev_get_drvdata(dev);
if (!card)
return -EINVAL;
if (card->info.hwtrap)
return snprintf(buf, 5, "arm\n");
else
return snprintf(buf, 8, "disarm\n");
}
static ssize_t qeth_hw_trap_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct qeth_card *card = dev_get_drvdata(dev);
int rc = 0;
char *tmp, *curtoken;
int state = 0;
curtoken = (char *)buf;
if (!card)
return -EINVAL;
mutex_lock(&card->conf_mutex);
if (card->state == CARD_STATE_SOFTSETUP || card->state == CARD_STATE_UP)
state = 1;
tmp = strsep(&curtoken, "\n");
if (!strcmp(tmp, "arm") && !card->info.hwtrap) {
if (state) {
if (qeth_is_diagass_supported(card,
QETH_DIAGS_CMD_TRAP)) {
rc = qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM);
if (!rc)
card->info.hwtrap = 1;
} else
rc = -EINVAL;
} else
card->info.hwtrap = 1;
} else if (!strcmp(tmp, "disarm") && card->info.hwtrap) {
if (state) {
rc = qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
if (!rc)
card->info.hwtrap = 0;
} else
card->info.hwtrap = 0;
} else if (!strcmp(tmp, "trap") && state && card->info.hwtrap)
rc = qeth_hw_trap(card, QETH_DIAGS_TRAP_CAPTURE);
else
rc = -EINVAL;
mutex_unlock(&card->conf_mutex);
return rc ? rc : count;
}
static DEVICE_ATTR(hw_trap, 0644, qeth_hw_trap_show,
qeth_hw_trap_store);
static ssize_t qeth_dev_blkt_show(char *buf, struct qeth_card *card, int value)
{
if (!card)
return -EINVAL;
return sprintf(buf, "%i\n", value);
}
static ssize_t qeth_dev_blkt_store(struct qeth_card *card,
const char *buf, size_t count, int *value, int max_value)
{
char *tmp;
int i, rc = 0;
if (!card)
return -EINVAL;
mutex_lock(&card->conf_mutex);
if ((card->state != CARD_STATE_DOWN) &&
(card->state != CARD_STATE_RECOVER)) {
rc = -EPERM;
goto out;
}
i = simple_strtoul(buf, &tmp, 10);
if (i <= max_value)
*value = i;
else
rc = -EINVAL;
out:
mutex_unlock(&card->conf_mutex);
return rc ? rc : count;
}
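/*
 * The blkt attribute group below exposes the block-time settings:
 * "total" accepts values up to 5000, "inter" and "inter_jumbo" accept
 * values up to 1000 (the max_value arguments passed to
 * qeth_dev_blkt_store() above).
 */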
static ssize_t qeth_dev_blkt_total_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qeth_card *card = dev_get_drvdata(dev);
return qeth_dev_blkt_show(buf, card, card->info.blkt.time_total);
}
static ssize_t qeth_dev_blkt_total_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct qeth_card *card = dev_get_drvdata(dev);
return qeth_dev_blkt_store(card, buf, count,
&card->info.blkt.time_total, 5000);
}
static DEVICE_ATTR(total, 0644, qeth_dev_blkt_total_show,
qeth_dev_blkt_total_store);
static ssize_t qeth_dev_blkt_inter_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qeth_card *card = dev_get_drvdata(dev);
return qeth_dev_blkt_show(buf, card, card->info.blkt.inter_packet);
}
static ssize_t qeth_dev_blkt_inter_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct qeth_card *card = dev_get_drvdata(dev);
return qeth_dev_blkt_store(card, buf, count,
&card->info.blkt.inter_packet, 1000);
}
static DEVICE_ATTR(inter, 0644, qeth_dev_blkt_inter_show,
qeth_dev_blkt_inter_store);
static ssize_t qeth_dev_blkt_inter_jumbo_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qeth_card *card = dev_get_drvdata(dev);
return qeth_dev_blkt_show(buf, card,
card->info.blkt.inter_packet_jumbo);
}
static ssize_t qeth_dev_blkt_inter_jumbo_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct qeth_card *card = dev_get_drvdata(dev);
return qeth_dev_blkt_store(card, buf, count,
&card->info.blkt.inter_packet_jumbo, 1000);
}
static DEVICE_ATTR(inter_jumbo, 0644, qeth_dev_blkt_inter_jumbo_show,
qeth_dev_blkt_inter_jumbo_store);
static struct attribute *qeth_blkt_device_attrs[] = {
&dev_attr_total.attr,
&dev_attr_inter.attr,
&dev_attr_inter_jumbo.attr,
NULL,
};
static struct attribute_group qeth_device_blkt_group = {
.name = "blkt",
.attrs = qeth_blkt_device_attrs,
};
static struct attribute *qeth_device_attrs[] = {
&dev_attr_state.attr,
&dev_attr_chpid.attr,
&dev_attr_if_name.attr,
&dev_attr_card_type.attr,
&dev_attr_inbuf_size.attr,
&dev_attr_portno.attr,
&dev_attr_portname.attr,
&dev_attr_priority_queueing.attr,
&dev_attr_buffer_count.attr,
&dev_attr_recover.attr,
&dev_attr_performance_stats.attr,
&dev_attr_layer2.attr,
&dev_attr_isolation.attr,
&dev_attr_hw_trap.attr,
&dev_attr_switch_attrs.attr,
NULL,
};
static struct attribute_group qeth_device_attr_group = {
.attrs = qeth_device_attrs,
};
const struct attribute_group *qeth_generic_attr_groups[] = {
&qeth_device_attr_group,
&qeth_device_blkt_group,
NULL,
};
static struct attribute *qeth_osn_device_attrs[] = {
&dev_attr_state.attr,
&dev_attr_chpid.attr,
&dev_attr_if_name.attr,
&dev_attr_card_type.attr,
&dev_attr_buffer_count.attr,
&dev_attr_recover.attr,
NULL,
};
static struct attribute_group qeth_osn_device_attr_group = {
.attrs = qeth_osn_device_attrs,
};
const struct attribute_group *qeth_osn_attr_groups[] = {
&qeth_osn_device_attr_group,
NULL,
};

View file

@ -0,0 +1,15 @@
/*
* Copyright IBM Corp. 2013
* Author(s): Eugene Crosser <eugene.crosser@ru.ibm.com>
*/
#ifndef __QETH_L2_H__
#define __QETH_L2_H__
#include "qeth_core.h"
int qeth_l2_create_device_attributes(struct device *);
void qeth_l2_remove_device_attributes(struct device *);
void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card);
#endif /* __QETH_L2_H__ */

File diff suppressed because it is too large

View file

@ -0,0 +1,218 @@
/*
* Copyright IBM Corp. 2013
* Author(s): Eugene Crosser <eugene.crosser@ru.ibm.com>
*/
#include <linux/slab.h>
#include <asm/ebcdic.h>
#include "qeth_core.h"
#include "qeth_l2.h"
#define QETH_DEVICE_ATTR(_id, _name, _mode, _show, _store) \
struct device_attribute dev_attr_##_id = __ATTR(_name, _mode, _show, _store)
static ssize_t qeth_bridge_port_role_state_show(struct device *dev,
struct device_attribute *attr, char *buf,
int show_state)
{
struct qeth_card *card = dev_get_drvdata(dev);
enum qeth_sbp_states state = QETH_SBP_STATE_INACTIVE;
int rc = 0;
char *word;
if (!card)
return -EINVAL;
mutex_lock(&card->conf_mutex);
if (qeth_card_hw_is_reachable(card) &&
card->options.sbp.supported_funcs)
rc = qeth_bridgeport_query_ports(card,
&card->options.sbp.role, &state);
if (!rc) {
if (show_state)
switch (state) {
case QETH_SBP_STATE_INACTIVE:
word = "inactive"; break;
case QETH_SBP_STATE_STANDBY:
word = "standby"; break;
case QETH_SBP_STATE_ACTIVE:
word = "active"; break;
default:
rc = -EIO;
}
else
switch (card->options.sbp.role) {
case QETH_SBP_ROLE_NONE:
word = "none"; break;
case QETH_SBP_ROLE_PRIMARY:
word = "primary"; break;
case QETH_SBP_ROLE_SECONDARY:
word = "secondary"; break;
default:
rc = -EIO;
}
if (rc)
QETH_CARD_TEXT_(card, 2, "SBP%02x:%02x",
card->options.sbp.role, state);
else
rc = sprintf(buf, "%s\n", word);
}
mutex_unlock(&card->conf_mutex);
return rc;
}
static ssize_t qeth_bridge_port_role_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return qeth_bridge_port_role_state_show(dev, attr, buf, 0);
}
static ssize_t qeth_bridge_port_role_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct qeth_card *card = dev_get_drvdata(dev);
int rc = 0;
enum qeth_sbp_roles role;
if (!card)
return -EINVAL;
if (sysfs_streq(buf, "primary"))
role = QETH_SBP_ROLE_PRIMARY;
else if (sysfs_streq(buf, "secondary"))
role = QETH_SBP_ROLE_SECONDARY;
else if (sysfs_streq(buf, "none"))
role = QETH_SBP_ROLE_NONE;
else
return -EINVAL;
mutex_lock(&card->conf_mutex);
if (qeth_card_hw_is_reachable(card)) {
rc = qeth_bridgeport_setrole(card, role);
if (!rc)
card->options.sbp.role = role;
} else
card->options.sbp.role = role;
mutex_unlock(&card->conf_mutex);
return rc ? rc : count;
}
static DEVICE_ATTR(bridge_role, 0644, qeth_bridge_port_role_show,
qeth_bridge_port_role_store);
static ssize_t qeth_bridge_port_state_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return qeth_bridge_port_role_state_show(dev, attr, buf, 1);
}
static DEVICE_ATTR(bridge_state, 0644, qeth_bridge_port_state_show,
NULL);
static ssize_t qeth_bridgeport_hostnotification_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qeth_card *card = dev_get_drvdata(dev);
int enabled;
if (!card)
return -EINVAL;
mutex_lock(&card->conf_mutex);
enabled = card->options.sbp.hostnotification;
mutex_unlock(&card->conf_mutex);
return sprintf(buf, "%d\n", enabled);
}
static ssize_t qeth_bridgeport_hostnotification_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct qeth_card *card = dev_get_drvdata(dev);
int rc = 0;
int enable;
if (!card)
return -EINVAL;
if (sysfs_streq(buf, "0"))
enable = 0;
else if (sysfs_streq(buf, "1"))
enable = 1;
else
return -EINVAL;
mutex_lock(&card->conf_mutex);
if (qeth_card_hw_is_reachable(card)) {
rc = qeth_bridgeport_an_set(card, enable);
if (!rc)
card->options.sbp.hostnotification = enable;
} else
card->options.sbp.hostnotification = enable;
mutex_unlock(&card->conf_mutex);
return rc ? rc : count;
}
static DEVICE_ATTR(bridge_hostnotify, 0644,
qeth_bridgeport_hostnotification_show,
qeth_bridgeport_hostnotification_store);
static struct attribute *qeth_l2_bridgeport_attrs[] = {
&dev_attr_bridge_role.attr,
&dev_attr_bridge_state.attr,
&dev_attr_bridge_hostnotify.attr,
NULL,
};
static struct attribute_group qeth_l2_bridgeport_attr_group = {
.attrs = qeth_l2_bridgeport_attrs,
};
int qeth_l2_create_device_attributes(struct device *dev)
{
return sysfs_create_group(&dev->kobj, &qeth_l2_bridgeport_attr_group);
}
void qeth_l2_remove_device_attributes(struct device *dev)
{
sysfs_remove_group(&dev->kobj, &qeth_l2_bridgeport_attr_group);
}
/**
* qeth_l2_setup_bridgeport_attrs() - set/restore attrs when turning online.
* @card: qeth_card structure pointer
*
* Note: this function is called with conf_mutex held by the caller
*/
void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card)
{
int rc;
if (!card)
return;
if (!card->options.sbp.supported_funcs)
return;
if (card->options.sbp.role != QETH_SBP_ROLE_NONE) {
/* Conditional to avoid spurious error messages */
qeth_bridgeport_setrole(card, card->options.sbp.role);
/* Let the callback function refresh the stored role value. */
qeth_bridgeport_query_ports(card,
&card->options.sbp.role, NULL);
}
if (card->options.sbp.hostnotification) {
rc = qeth_bridgeport_an_set(card, 1);
if (rc)
card->options.sbp.hostnotification = 0;
} else
qeth_bridgeport_an_set(card, 0);
}

View file

@ -0,0 +1,69 @@
/*
* Copyright IBM Corp. 2007
* Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
* Frank Pavlic <fpavlic@de.ibm.com>,
* Thomas Spatzier <tspat@de.ibm.com>,
* Frank Blaschka <frank.blaschka@de.ibm.com>
*/
#ifndef __QETH_L3_H__
#define __QETH_L3_H__
#include "qeth_core.h"
#define QETH_SNIFF_AVAIL 0x0008
struct qeth_ipaddr {
struct list_head entry;
enum qeth_ip_types type;
enum qeth_ipa_setdelip_flags set_flags;
enum qeth_ipa_setdelip_flags del_flags;
int is_multicast;
int users;
enum qeth_prot_versions proto;
unsigned char mac[OSA_ADDR_LEN];
union {
struct {
unsigned int addr;
unsigned int mask;
} a4;
struct {
struct in6_addr addr;
unsigned int pfxlen;
} a6;
} u;
};
struct qeth_ipato_entry {
struct list_head entry;
enum qeth_prot_versions proto;
char addr[16];
int mask_bits;
};
void qeth_l3_ipaddr4_to_string(const __u8 *, char *);
int qeth_l3_string_to_ipaddr4(const char *, __u8 *);
void qeth_l3_ipaddr6_to_string(const __u8 *, char *);
int qeth_l3_string_to_ipaddr6(const char *, __u8 *);
void qeth_l3_ipaddr_to_string(enum qeth_prot_versions, const __u8 *, char *);
int qeth_l3_string_to_ipaddr(const char *, enum qeth_prot_versions, __u8 *);
int qeth_l3_create_device_attributes(struct device *);
void qeth_l3_remove_device_attributes(struct device *);
int qeth_l3_setrouting_v4(struct qeth_card *);
int qeth_l3_setrouting_v6(struct qeth_card *);
int qeth_l3_add_ipato_entry(struct qeth_card *, struct qeth_ipato_entry *);
void qeth_l3_del_ipato_entry(struct qeth_card *, enum qeth_prot_versions,
u8 *, int);
int qeth_l3_add_vipa(struct qeth_card *, enum qeth_prot_versions, const u8 *);
void qeth_l3_del_vipa(struct qeth_card *, enum qeth_prot_versions, const u8 *);
int qeth_l3_add_rxip(struct qeth_card *, enum qeth_prot_versions, const u8 *);
void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions,
const u8 *);
int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *, struct qeth_ipaddr *);
struct qeth_ipaddr *qeth_l3_get_addr_buffer(enum qeth_prot_versions);
int qeth_l3_add_ip(struct qeth_card *, struct qeth_ipaddr *);
int qeth_l3_delete_ip(struct qeth_card *, struct qeth_ipaddr *);
void qeth_l3_set_ip_addr_list(struct qeth_card *);
#endif /* __QETH_L3_H__ */

File diff suppressed because it is too large

File diff suppressed because it is too large

259
drivers/s390/net/smsgiucv.c Normal file
View file

@ -0,0 +1,259 @@
/*
* IUCV special message driver
*
* Copyright IBM Corp. 2003, 2009
*
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <net/iucv/iucv.h>
#include <asm/cpcmd.h>
#include <asm/ebcdic.h>
#include "smsgiucv.h"
struct smsg_callback {
struct list_head list;
const char *prefix;
int len;
void (*callback)(const char *from, char *str);
};
MODULE_AUTHOR("(C) 2003 IBM Corporation by Martin Schwidefsky (schwidefsky@de.ibm.com)");
MODULE_DESCRIPTION("Linux for S/390 IUCV special message driver");
static struct iucv_path *smsg_path;
/* dummy device used as trigger for PM functions */
static struct device *smsg_dev;
static DEFINE_SPINLOCK(smsg_list_lock);
static LIST_HEAD(smsg_list);
static int iucv_path_connected;
static int smsg_path_pending(struct iucv_path *, u8 ipvmid[8], u8 ipuser[16]);
static void smsg_message_pending(struct iucv_path *, struct iucv_message *);
static struct iucv_handler smsg_handler = {
.path_pending = smsg_path_pending,
.message_pending = smsg_message_pending,
};
static int smsg_path_pending(struct iucv_path *path, u8 ipvmid[8],
u8 ipuser[16])
{
if (strncmp(ipvmid, "*MSG    ", 8) != 0)
return -EINVAL;
/* Path pending from *MSG. */
return iucv_path_accept(path, &smsg_handler, "SMSGIUCV ", NULL);
}
static void smsg_message_pending(struct iucv_path *path,
struct iucv_message *msg)
{
struct smsg_callback *cb;
unsigned char *buffer;
unsigned char sender[9];
int rc, i;
buffer = kmalloc(msg->length + 1, GFP_ATOMIC | GFP_DMA);
if (!buffer) {
iucv_message_reject(path, msg);
return;
}
rc = iucv_message_receive(path, msg, 0, buffer, msg->length, NULL);
if (rc == 0) {
buffer[msg->length] = 0;
EBCASC(buffer, msg->length);
memcpy(sender, buffer, 8);
sender[8] = 0;
/* Remove trailing whitespace from the sender name. */
for (i = 7; i >= 0; i--) {
if (sender[i] != ' ' && sender[i] != '\t')
break;
sender[i] = 0;
}
spin_lock(&smsg_list_lock);
list_for_each_entry(cb, &smsg_list, list)
if (strncmp(buffer + 8, cb->prefix, cb->len) == 0) {
cb->callback(sender, buffer + 8);
break;
}
spin_unlock(&smsg_list_lock);
}
kfree(buffer);
}
int smsg_register_callback(const char *prefix,
void (*callback)(const char *from, char *str))
{
struct smsg_callback *cb;
cb = kmalloc(sizeof(struct smsg_callback), GFP_KERNEL);
if (!cb)
return -ENOMEM;
cb->prefix = prefix;
cb->len = strlen(prefix);
cb->callback = callback;
spin_lock_bh(&smsg_list_lock);
list_add_tail(&cb->list, &smsg_list);
spin_unlock_bh(&smsg_list_lock);
return 0;
}
void smsg_unregister_callback(const char *prefix,
void (*callback)(const char *from,
char *str))
{
struct smsg_callback *cb, *tmp;
spin_lock_bh(&smsg_list_lock);
cb = NULL;
list_for_each_entry(tmp, &smsg_list, list)
if (tmp->callback == callback &&
strcmp(tmp->prefix, prefix) == 0) {
cb = tmp;
list_del(&cb->list);
break;
}
spin_unlock_bh(&smsg_list_lock);
kfree(cb);
}
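/*
 * Illustrative sketch only, not part of this file: how another module
 * might use the callback interface exported at the end of this file.
 * The "DEMO" prefix and the function names are made up for this example.
 */
#if 0
static void demo_smsg_handler(const char *from, char *str)
{
	pr_info("CP SMSG from %s: %s\n", from, str);
}

static int __init demo_init(void)
{
	/* deliver every SMSG starting with "DEMO" to demo_smsg_handler() */
	return smsg_register_callback("DEMO", demo_smsg_handler);
}

static void __exit demo_exit(void)
{
	smsg_unregister_callback("DEMO", demo_smsg_handler);
}
#endif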
static int smsg_pm_freeze(struct device *dev)
{
#ifdef CONFIG_PM_DEBUG
printk(KERN_WARNING "smsg_pm_freeze\n");
#endif
if (smsg_path && iucv_path_connected) {
iucv_path_sever(smsg_path, NULL);
iucv_path_connected = 0;
}
return 0;
}
static int smsg_pm_restore_thaw(struct device *dev)
{
int rc;
#ifdef CONFIG_PM_DEBUG
printk(KERN_WARNING "smsg_pm_restore_thaw\n");
#endif
if (smsg_path && !iucv_path_connected) {
memset(smsg_path, 0, sizeof(*smsg_path));
smsg_path->msglim = 255;
smsg_path->flags = 0;
rc = iucv_path_connect(smsg_path, &smsg_handler, "*MSG    ",
NULL, NULL, NULL);
#ifdef CONFIG_PM_DEBUG
if (rc)
printk(KERN_ERR
"iucv_path_connect returned with rc %i\n", rc);
#endif
if (!rc)
iucv_path_connected = 1;
cpcmd("SET SMSG IUCV", NULL, 0, NULL);
}
return 0;
}
static const struct dev_pm_ops smsg_pm_ops = {
.freeze = smsg_pm_freeze,
.thaw = smsg_pm_restore_thaw,
.restore = smsg_pm_restore_thaw,
};
static struct device_driver smsg_driver = {
.owner = THIS_MODULE,
.name = SMSGIUCV_DRV_NAME,
.bus = &iucv_bus,
.pm = &smsg_pm_ops,
};
static void __exit smsg_exit(void)
{
cpcmd("SET SMSG IUCV", NULL, 0, NULL);
device_unregister(smsg_dev);
iucv_unregister(&smsg_handler, 1);
driver_unregister(&smsg_driver);
}
static int __init smsg_init(void)
{
int rc;
if (!MACHINE_IS_VM) {
rc = -EPROTONOSUPPORT;
goto out;
}
rc = driver_register(&smsg_driver);
if (rc != 0)
goto out;
rc = iucv_register(&smsg_handler, 1);
if (rc)
goto out_driver;
smsg_path = iucv_path_alloc(255, 0, GFP_KERNEL);
if (!smsg_path) {
rc = -ENOMEM;
goto out_register;
}
rc = iucv_path_connect(smsg_path, &smsg_handler, "*MSG    ",
NULL, NULL, NULL);
if (rc)
goto out_free_path;
else
iucv_path_connected = 1;
smsg_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
if (!smsg_dev) {
rc = -ENOMEM;
goto out_free_path;
}
dev_set_name(smsg_dev, "smsg_iucv");
smsg_dev->bus = &iucv_bus;
smsg_dev->parent = iucv_root;
smsg_dev->release = (void (*)(struct device *))kfree;
smsg_dev->driver = &smsg_driver;
rc = device_register(smsg_dev);
if (rc)
goto out_put;
cpcmd("SET SMSG IUCV", NULL, 0, NULL);
return 0;
out_put:
put_device(smsg_dev);
out_free_path:
iucv_path_free(smsg_path);
smsg_path = NULL;
out_register:
iucv_unregister(&smsg_handler, 1);
out_driver:
driver_unregister(&smsg_driver);
out:
return rc;
}
module_init(smsg_init);
module_exit(smsg_exit);
MODULE_LICENSE("GPL");
EXPORT_SYMBOL(smsg_register_callback);
EXPORT_SYMBOL(smsg_unregister_callback);

View file

@ -0,0 +1,14 @@
/*
* IUCV special message driver
*
* Copyright IBM Corp. 2003
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
*/
#define SMSGIUCV_DRV_NAME "SMSGIUCV"
int smsg_register_callback(const char *,
void (*)(const char *, char *));
void smsg_unregister_callback(const char *,
void (*)(const char *, char *));

View file

@ -0,0 +1,218 @@
/*
* Deliver z/VM CP special messages (SMSG) as uevents.
*
* The driver registers for z/VM CP special messages with the
* "APP" prefix. Incoming messages are delivered to user space
* as uevents.
*
* Copyright IBM Corp. 2010
* Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
*
*/
#define KMSG_COMPONENT "smsgiucv_app"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/ctype.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/kobject.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <net/iucv/iucv.h>
#include "smsgiucv.h"
/* prefix used for SMSG registration */
#define SMSG_PREFIX "APP"
/* SMSG related uevent environment variables */
#define ENV_SENDER_STR "SMSG_SENDER="
#define ENV_SENDER_LEN (strlen(ENV_SENDER_STR) + 8 + 1)
#define ENV_PREFIX_STR "SMSG_ID="
#define ENV_PREFIX_LEN (strlen(ENV_PREFIX_STR) + \
strlen(SMSG_PREFIX) + 1)
#define ENV_TEXT_STR "SMSG_TEXT="
#define ENV_TEXT_LEN(msg) (strlen(ENV_TEXT_STR) + strlen((msg)) + 1)
/* z/VM user ID which is permitted to send SMSGs
* If the value is undefined or empty (""), special messages are
* accepted from any z/VM user ID. */
static char *sender;
module_param(sender, charp, 0400);
MODULE_PARM_DESC(sender, "z/VM user ID from which CP SMSGs are accepted");
/* SMSG device representation */
static struct device *smsg_app_dev;
/* list element for queuing received messages for delivery */
struct smsg_app_event {
struct list_head list;
char *buf;
char *envp[4];
};
/* queue for outgoing uevents */
static LIST_HEAD(smsg_event_queue);
static DEFINE_SPINLOCK(smsg_event_queue_lock);
static void smsg_app_event_free(struct smsg_app_event *ev)
{
kfree(ev->buf);
kfree(ev);
}
static struct smsg_app_event *smsg_app_event_alloc(const char *from,
const char *msg)
{
struct smsg_app_event *ev;
ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
if (!ev)
return NULL;
ev->buf = kzalloc(ENV_SENDER_LEN + ENV_PREFIX_LEN +
ENV_TEXT_LEN(msg), GFP_ATOMIC);
if (!ev->buf) {
kfree(ev);
return NULL;
}
/* setting up environment pointers into buf */
ev->envp[0] = ev->buf;
ev->envp[1] = ev->envp[0] + ENV_SENDER_LEN;
ev->envp[2] = ev->envp[1] + ENV_PREFIX_LEN;
ev->envp[3] = NULL;
/* setting up environment: sender, prefix name, and message text */
snprintf(ev->envp[0], ENV_SENDER_LEN, ENV_SENDER_STR "%s", from);
snprintf(ev->envp[1], ENV_PREFIX_LEN, ENV_PREFIX_STR "%s", SMSG_PREFIX);
snprintf(ev->envp[2], ENV_TEXT_LEN(msg), ENV_TEXT_STR "%s", msg);
return ev;
}
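/*
 * Example with made-up values: for a CP special message "APP START GUEST"
 * sent by user ID LINUX01, the environment built above would contain
 *   SMSG_SENDER=LINUX01
 *   SMSG_ID=APP
 *   SMSG_TEXT=START GUEST
 * (smsg_app_callback() below strips the prefix and leading blanks before
 * the text reaches this function).
 */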
static void smsg_event_work_fn(struct work_struct *work)
{
LIST_HEAD(event_queue);
struct smsg_app_event *p, *n;
struct device *dev;
dev = get_device(smsg_app_dev);
if (!dev)
return;
spin_lock_bh(&smsg_event_queue_lock);
list_splice_init(&smsg_event_queue, &event_queue);
spin_unlock_bh(&smsg_event_queue_lock);
list_for_each_entry_safe(p, n, &event_queue, list) {
list_del(&p->list);
kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, p->envp);
smsg_app_event_free(p);
}
put_device(dev);
}
static DECLARE_WORK(smsg_event_work, smsg_event_work_fn);
static void smsg_app_callback(const char *from, char *msg)
{
struct smsg_app_event *se;
/* check if the originating z/VM user ID matches
* the configured sender. */
if (sender && strlen(sender) > 0 && strcmp(from, sender) != 0)
return;
/* get start of message text (skip prefix and leading blanks) */
msg += strlen(SMSG_PREFIX);
while (*msg && isspace(*msg))
msg++;
if (*msg == '\0')
return;
/* allocate event list element and its environment */
se = smsg_app_event_alloc(from, msg);
if (!se)
return;
/* queue event and schedule work function */
spin_lock(&smsg_event_queue_lock);
list_add_tail(&se->list, &smsg_event_queue);
spin_unlock(&smsg_event_queue_lock);
schedule_work(&smsg_event_work);
return;
}
static int __init smsgiucv_app_init(void)
{
struct device_driver *smsgiucv_drv;
int rc;
if (!MACHINE_IS_VM)
return -ENODEV;
smsg_app_dev = kzalloc(sizeof(*smsg_app_dev), GFP_KERNEL);
if (!smsg_app_dev)
return -ENOMEM;
smsgiucv_drv = driver_find(SMSGIUCV_DRV_NAME, &iucv_bus);
if (!smsgiucv_drv) {
kfree(smsg_app_dev);
return -ENODEV;
}
rc = dev_set_name(smsg_app_dev, KMSG_COMPONENT);
if (rc) {
kfree(smsg_app_dev);
goto fail;
}
smsg_app_dev->bus = &iucv_bus;
smsg_app_dev->parent = iucv_root;
smsg_app_dev->release = (void (*)(struct device *)) kfree;
smsg_app_dev->driver = smsgiucv_drv;
rc = device_register(smsg_app_dev);
if (rc) {
put_device(smsg_app_dev);
goto fail;
}
/* convert sender to uppercase characters */
if (sender) {
int len = strlen(sender);
while (len--)
sender[len] = toupper(sender[len]);
}
/* register with the smsgiucv device driver */
rc = smsg_register_callback(SMSG_PREFIX, smsg_app_callback);
if (rc) {
device_unregister(smsg_app_dev);
goto fail;
}
rc = 0;
fail:
return rc;
}
module_init(smsgiucv_app_init);
static void __exit smsgiucv_app_exit(void)
{
/* unregister callback */
smsg_unregister_callback(SMSG_PREFIX, smsg_app_callback);
/* cancel pending work and flush any queued event work */
cancel_work_sync(&smsg_event_work);
smsg_event_work_fn(&smsg_event_work);
device_unregister(smsg_app_dev);
}
module_exit(smsgiucv_app_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Deliver z/VM CP SMSG as uevents");
MODULE_AUTHOR("Hendrik Brueckner <brueckner@linux.vnet.ibm.com>");