Fixed MTP to work with TWRP

awab228 2018-06-19 23:16:04 +02:00
commit f6dfaef42e
50820 changed files with 20846062 additions and 0 deletions


@@ -0,0 +1,35 @@
#
# SEEQ device configuration
#
config NET_VENDOR_SEEQ
bool "SEEQ devices"
default y
depends on HAS_IOMEM
---help---
If you have a network (Ethernet) card belonging to this class, say Y
and read the Ethernet-HOWTO, available from
<http://www.tldp.org/docs.html#howto>.
Note that the answer to this question doesn't directly affect the
kernel: saying N will just cause the configurator to skip all
the questions about SEEQ devices. If you say Y, you will be asked for
your specific card in the following questions.
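# Example (illustrative, not part of the original file): an SGI Indy/Indigo2
# build would typically end up with
#	CONFIG_NET_VENDOR_SEEQ=y
#	CONFIG_SGISEEQ=y
# in its .config, while CONFIG_NET_VENDOR_SEEQ=n hides both driver questions.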
if NET_VENDOR_SEEQ
config ARM_ETHER3
tristate "Acorn/ANT Ether3 support"
depends on ARM && ARCH_ACORN
---help---
If you have an Acorn system with one of these network cards, you
should say Y to this option if you wish to use it with Linux.
config SGISEEQ
tristate "SGI Seeq ethernet controller support"
depends on SGI_HAS_SEEQ
---help---
Say Y here if you have a Seeq based Ethernet network card. This is
used in many Silicon Graphics machines.
endif # NET_VENDOR_SEEQ


@@ -0,0 +1,6 @@
#
# Makefile for the SEEQ network device drivers
#
obj-$(CONFIG_ARM_ETHER3) += ether3.o
obj-$(CONFIG_SGISEEQ) += sgiseeq.o
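# Note (illustrative): with CONFIG_SGISEEQ=m the rule above builds sgiseeq.o
# as the sgiseeq.ko module; with CONFIG_ARM_ETHER3=y, ether3.o is linked
# straight into the kernel image.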


@@ -0,0 +1,899 @@
/*
* linux/drivers/acorn/net/ether3.c
*
* Copyright (C) 1995-2000 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* SEEQ nq8005 ethernet driver for Acorn/ANT Ether3 card
* for Acorn machines
*
* By Russell King, with some suggestions from borris@ant.co.uk
*
* Changelog:
* 1.04 RMK 29/02/1996 Won't pass packets that are from our ethernet
* address up to the higher levels - they're
* silently ignored. I/F can now be put into
* multicast mode. Receiver routine optimised.
* 1.05 RMK 30/02/1996 Now claims interrupt at open when part of
* the kernel rather than when a module.
* 1.06 RMK 02/03/1996 Various code cleanups
* 1.07 RMK 13/10/1996 Optimised interrupt routine and transmit
* routines.
* 1.08 RMK 14/10/1996 Fixed problem with too many packets,
* prevented the kernel message about dropped
* packets appearing too many times a second.
* Now does not disable all IRQs, only the IRQ
* used by this card.
* 1.09 RMK 10/11/1996 Only enables TX irq when buffer space is low,
* but we still service the TX queue if we get a
* RX interrupt.
* 1.10 RMK 15/07/1997 Fixed autoprobing of NQ8004.
* 1.11 RMK 16/11/1997 Fixed autoprobing of NQ8005A.
* 1.12 RMK 31/12/1997 Removed reference to dev_tint for Linux 2.1.
* RMK 27/06/1998 Changed asm/delay.h to linux/delay.h.
* 1.13 RMK 29/06/1998 Fixed problem with transmission of packets.
* Chip seems to have a bug in it, whereby if the
* packet starts two bytes from the end of the
* buffer, it corrupts the receiver chain, and
* never updates the transmit status correctly.
* 1.14 RMK 07/01/1998 Added initial code for ETHERB addressing.
* 1.15 RMK 30/04/1999 More fixes to the transmit routine for buggy
* hardware.
* 1.16 RMK 10/02/2000 Updated for 2.3.43
* 1.17 RMK 13/05/2000 Updated for 2.3.99-pre8
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <asm/ecard.h>
#include <asm/io.h>
static char version[] = "ether3 ethernet driver (c) 1995-2000 R.M.King v1.17\n";
#include "ether3.h"
static unsigned int net_debug = NET_DEBUG;
static void ether3_setmulticastlist(struct net_device *dev);
static int ether3_rx(struct net_device *dev, unsigned int maxcnt);
static void ether3_tx(struct net_device *dev);
static int ether3_open (struct net_device *dev);
static int ether3_sendpacket (struct sk_buff *skb, struct net_device *dev);
static irqreturn_t ether3_interrupt (int irq, void *dev_id);
static int ether3_close (struct net_device *dev);
static void ether3_setmulticastlist (struct net_device *dev);
static void ether3_timeout(struct net_device *dev);
#define BUS_16 2
#define BUS_8 1
#define BUS_UNKNOWN 0
/* --------------------------------------------------------------------------- */
typedef enum {
buffer_write,
buffer_read
} buffer_rw_t;
/*
* ether3 read/write. Slow things down a bit...
* The SEEQ8005 doesn't like us writing to its registers
* too quickly.
*/
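/*
 * Note: each accessor below therefore pairs the MMIO access with udelay(1),
 * e.g. ether3_inw(r) expands to roughly { v = readw(r); udelay(1); }, so
 * back-to-back register accesses are spaced out for the chip.
 */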
static inline void ether3_outb(int v, void __iomem *r)
{
writeb(v, r);
udelay(1);
}
static inline void ether3_outw(int v, void __iomem *r)
{
writew(v, r);
udelay(1);
}
#define ether3_inb(r) ({ unsigned int __v = readb((r)); udelay(1); __v; })
#define ether3_inw(r) ({ unsigned int __v = readw((r)); udelay(1); __v; })
static int
ether3_setbuffer(struct net_device *dev, buffer_rw_t read, int start)
{
int timeout = 1000;
ether3_outw(priv(dev)->regs.config1 | CFG1_LOCBUFMEM, REG_CONFIG1);
ether3_outw(priv(dev)->regs.command | CMD_FIFOWRITE, REG_COMMAND);
while ((ether3_inw(REG_STATUS) & STAT_FIFOEMPTY) == 0) {
if (!timeout--) {
printk("%s: setbuffer broken\n", dev->name);
priv(dev)->broken = 1;
return 1;
}
udelay(1);
}
if (read == buffer_read) {
ether3_outw(start, REG_DMAADDR);
ether3_outw(priv(dev)->regs.command | CMD_FIFOREAD, REG_COMMAND);
} else {
ether3_outw(priv(dev)->regs.command | CMD_FIFOWRITE, REG_COMMAND);
ether3_outw(start, REG_DMAADDR);
}
return 0;
}
/*
* write data to the buffer memory
*/
#define ether3_writebuffer(dev,data,length) \
writesw(REG_BUFWIN, (data), (length) >> 1)
#define ether3_writeword(dev,data) \
writew((data), REG_BUFWIN)
#define ether3_writelong(dev,data) { \
void __iomem *reg_bufwin = REG_BUFWIN; \
writew((data), reg_bufwin); \
writew((data) >> 16, reg_bufwin); \
}
/*
* read data from the buffer memory
*/
#define ether3_readbuffer(dev,data,length) \
readsw(REG_BUFWIN, (data), (length) >> 1)
#define ether3_readword(dev) \
readw(REG_BUFWIN)
#define ether3_readlong(dev) \
readw(REG_BUFWIN) | (readw(REG_BUFWIN) << 16)
/*
* Switch LED off...
*/
static void ether3_ledoff(unsigned long data)
{
struct net_device *dev = (struct net_device *)data;
ether3_outw(priv(dev)->regs.config2 |= CFG2_CTRLO, REG_CONFIG2);
}
/*
* switch LED on...
*/
static inline void ether3_ledon(struct net_device *dev)
{
del_timer(&priv(dev)->timer);
priv(dev)->timer.expires = jiffies + HZ / 50; /* leave on for 1/50th second */
priv(dev)->timer.data = (unsigned long)dev;
priv(dev)->timer.function = ether3_ledoff;
add_timer(&priv(dev)->timer);
if (priv(dev)->regs.config2 & CFG2_CTRLO)
ether3_outw(priv(dev)->regs.config2 &= ~CFG2_CTRLO, REG_CONFIG2);
}
/*
* Read the ethernet address string from the on board rom.
* This is an ascii string!!!
*/
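/*
 * Illustrative example (not taken from a real ROM): the description chunk
 * ends in something like "... (00:00:a4:12:34:56)"; the loop below finds
 * the '(' and parses the six colon-separated hex bytes up to the ')'.
 */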
static int
ether3_addr(char *addr, struct expansion_card *ec)
{
struct in_chunk_dir cd;
char *s;
if (ecard_readchunk(&cd, ec, 0xf5, 0) && (s = strchr(cd.d.string, '('))) {
int i;
for (i = 0; i<6; i++) {
addr[i] = simple_strtoul(s + 1, &s, 0x10);
if (*s != (i==5?')' : ':' ))
break;
}
if (i == 6)
return 0;
}
/* I wonder if we should even let the user continue in this case
* - no, it would be better to disable the device
*/
printk(KERN_ERR "ether3: Couldn't read a valid MAC address from card.\n");
return -ENODEV;
}
/* --------------------------------------------------------------------------- */
static int
ether3_ramtest(struct net_device *dev, unsigned char byte)
{
unsigned char *buffer = kmalloc(RX_END, GFP_KERNEL);
int i,ret = 0;
int max_errors = 4;
int bad = -1;
if (!buffer)
return 1;
memset(buffer, byte, RX_END);
ether3_setbuffer(dev, buffer_write, 0);
ether3_writebuffer(dev, buffer, TX_END);
ether3_setbuffer(dev, buffer_write, RX_START);
ether3_writebuffer(dev, buffer + RX_START, RX_LEN);
memset(buffer, byte ^ 0xff, RX_END);
ether3_setbuffer(dev, buffer_read, 0);
ether3_readbuffer(dev, buffer, TX_END);
ether3_setbuffer(dev, buffer_read, RX_START);
ether3_readbuffer(dev, buffer + RX_START, RX_LEN);
for (i = 0; i < RX_END; i++) {
if (buffer[i] != byte) {
if (max_errors > 0 && bad != buffer[i]) {
printk("%s: RAM failed with (%02X instead of %02X) at 0x%04X",
dev->name, buffer[i], byte, i);
ret = 2;
max_errors--;
bad = i;
}
} else {
if (bad != -1) {
if (bad != i - 1)
printk(" - 0x%04X\n", i - 1);
printk("\n");
bad = -1;
}
}
}
if (bad != -1)
printk(" - 0xffff\n");
kfree(buffer);
return ret;
}
/* ------------------------------------------------------------------------------- */
static int ether3_init_2(struct net_device *dev)
{
int i;
priv(dev)->regs.config1 = CFG1_RECVCOMPSTAT0|CFG1_DMABURST8;
priv(dev)->regs.config2 = CFG2_CTRLO|CFG2_RECVCRC|CFG2_ERRENCRC;
priv(dev)->regs.command = 0;
/*
* Set up our hardware address
*/
ether3_outw(priv(dev)->regs.config1 | CFG1_BUFSELSTAT0, REG_CONFIG1);
for (i = 0; i < 6; i++)
ether3_outb(dev->dev_addr[i], REG_BUFWIN);
if (dev->flags & IFF_PROMISC)
priv(dev)->regs.config1 |= CFG1_RECVPROMISC;
else if (dev->flags & IFF_MULTICAST)
priv(dev)->regs.config1 |= CFG1_RECVSPECBRMULTI;
else
priv(dev)->regs.config1 |= CFG1_RECVSPECBROAD;
/*
* There is a problem with the NQ8005 in that it occasionally loses the
* last two bytes. To get round this problem, we receive the CRC as
* well. That way, if we do lose the last two, then it doesn't matter.
*/
ether3_outw(priv(dev)->regs.config1 | CFG1_TRANSEND, REG_CONFIG1);
ether3_outw((TX_END>>8) - 1, REG_BUFWIN);
ether3_outw(priv(dev)->rx_head, REG_RECVPTR);
ether3_outw(0, REG_TRANSMITPTR);
ether3_outw(priv(dev)->rx_head >> 8, REG_RECVEND);
ether3_outw(priv(dev)->regs.config2, REG_CONFIG2);
ether3_outw(priv(dev)->regs.config1 | CFG1_LOCBUFMEM, REG_CONFIG1);
ether3_outw(priv(dev)->regs.command, REG_COMMAND);
i = ether3_ramtest(dev, 0x5A);
if(i)
return i;
i = ether3_ramtest(dev, 0x1E);
if(i)
return i;
ether3_setbuffer(dev, buffer_write, 0);
ether3_writelong(dev, 0);
return 0;
}
static void
ether3_init_for_open(struct net_device *dev)
{
int i;
/* Reset the chip */
ether3_outw(CFG2_RESET, REG_CONFIG2);
udelay(4);
priv(dev)->regs.command = 0;
ether3_outw(CMD_RXOFF|CMD_TXOFF, REG_COMMAND);
while (ether3_inw(REG_STATUS) & (STAT_RXON|STAT_TXON))
barrier();
ether3_outw(priv(dev)->regs.config1 | CFG1_BUFSELSTAT0, REG_CONFIG1);
for (i = 0; i < 6; i++)
ether3_outb(dev->dev_addr[i], REG_BUFWIN);
priv(dev)->tx_head = 0;
priv(dev)->tx_tail = 0;
priv(dev)->regs.config2 |= CFG2_CTRLO;
priv(dev)->rx_head = RX_START;
ether3_outw(priv(dev)->regs.config1 | CFG1_TRANSEND, REG_CONFIG1);
ether3_outw((TX_END>>8) - 1, REG_BUFWIN);
ether3_outw(priv(dev)->rx_head, REG_RECVPTR);
ether3_outw(priv(dev)->rx_head >> 8, REG_RECVEND);
ether3_outw(0, REG_TRANSMITPTR);
ether3_outw(priv(dev)->regs.config2, REG_CONFIG2);
ether3_outw(priv(dev)->regs.config1 | CFG1_LOCBUFMEM, REG_CONFIG1);
ether3_setbuffer(dev, buffer_write, 0);
ether3_writelong(dev, 0);
priv(dev)->regs.command = CMD_ENINTRX | CMD_ENINTTX;
ether3_outw(priv(dev)->regs.command | CMD_RXON, REG_COMMAND);
}
static inline int
ether3_probe_bus_8(struct net_device *dev, int val)
{
int write_low, write_high, read_low, read_high;
write_low = val & 255;
write_high = val >> 8;
printk(KERN_DEBUG "ether3_probe: write8 [%02X:%02X]", write_high, write_low);
ether3_outb(write_low, REG_RECVPTR);
ether3_outb(write_high, REG_RECVPTR + 4);
read_low = ether3_inb(REG_RECVPTR);
read_high = ether3_inb(REG_RECVPTR + 4);
printk(", read8 [%02X:%02X]\n", read_high, read_low);
return read_low == write_low && read_high == write_high;
}
static inline int
ether3_probe_bus_16(struct net_device *dev, int val)
{
int read_val;
ether3_outw(val, REG_RECVPTR);
read_val = ether3_inw(REG_RECVPTR);
printk(KERN_DEBUG "ether3_probe: write16 [%04X], read16 [%04X]\n", val, read_val);
return read_val == val;
}
/*
* Open/initialize the board. This is called (in the current kernel)
* sometime after booting when the 'ifconfig' program is run.
*
* This routine should set everything up anew at each open, even
* registers that "should" only need to be set once at boot, so that
* there is a non-reboot way to recover if something goes wrong.
*/
static int
ether3_open(struct net_device *dev)
{
if (request_irq(dev->irq, ether3_interrupt, 0, "ether3", dev))
return -EAGAIN;
ether3_init_for_open(dev);
netif_start_queue(dev);
return 0;
}
/*
* The inverse routine to ether3_open().
*/
static int
ether3_close(struct net_device *dev)
{
netif_stop_queue(dev);
disable_irq(dev->irq);
ether3_outw(CMD_RXOFF|CMD_TXOFF, REG_COMMAND);
priv(dev)->regs.command = 0;
while (ether3_inw(REG_STATUS) & (STAT_RXON|STAT_TXON))
barrier();
ether3_outb(0x80, REG_CONFIG2 + 4);
ether3_outw(0, REG_COMMAND);
free_irq(dev->irq, dev);
return 0;
}
/*
* Set or clear promiscuous/multicast mode filter for this adaptor.
*
* We don't attempt any packet filtering. The card may have a SEEQ 8004
* in it, which does not have the other ethernet address registers present...
*/
static void ether3_setmulticastlist(struct net_device *dev)
{
priv(dev)->regs.config1 &= ~CFG1_RECVPROMISC;
if (dev->flags & IFF_PROMISC) {
/* promiscuous mode */
priv(dev)->regs.config1 |= CFG1_RECVPROMISC;
} else if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev)) {
priv(dev)->regs.config1 |= CFG1_RECVSPECBRMULTI;
} else
priv(dev)->regs.config1 |= CFG1_RECVSPECBROAD;
ether3_outw(priv(dev)->regs.config1 | CFG1_LOCBUFMEM, REG_CONFIG1);
}
static void ether3_timeout(struct net_device *dev)
{
unsigned long flags;
del_timer(&priv(dev)->timer);
local_irq_save(flags);
printk(KERN_ERR "%s: transmit timed out, network cable problem?\n", dev->name);
printk(KERN_ERR "%s: state: { status=%04X cfg1=%04X cfg2=%04X }\n", dev->name,
ether3_inw(REG_STATUS), ether3_inw(REG_CONFIG1), ether3_inw(REG_CONFIG2));
printk(KERN_ERR "%s: { rpr=%04X rea=%04X tpr=%04X }\n", dev->name,
ether3_inw(REG_RECVPTR), ether3_inw(REG_RECVEND), ether3_inw(REG_TRANSMITPTR));
printk(KERN_ERR "%s: tx head=%X tx tail=%X\n", dev->name,
priv(dev)->tx_head, priv(dev)->tx_tail);
ether3_setbuffer(dev, buffer_read, priv(dev)->tx_tail);
printk(KERN_ERR "%s: packet status = %08X\n", dev->name, ether3_readlong(dev));
local_irq_restore(flags);
priv(dev)->regs.config2 |= CFG2_CTRLO;
dev->stats.tx_errors += 1;
ether3_outw(priv(dev)->regs.config2, REG_CONFIG2);
priv(dev)->tx_head = priv(dev)->tx_tail = 0;
netif_wake_queue(dev);
}
/*
* Transmit a packet
*/
static int
ether3_sendpacket(struct sk_buff *skb, struct net_device *dev)
{
unsigned long flags;
unsigned int length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
unsigned int ptr, next_ptr;
if (priv(dev)->broken) {
dev_kfree_skb(skb);
dev->stats.tx_dropped++;
netif_start_queue(dev);
return NETDEV_TX_OK;
}
length = (length + 1) & ~1;
if (length != skb->len) {
if (skb_padto(skb, length))
goto out;
}
next_ptr = (priv(dev)->tx_head + 1) & 15;
local_irq_save(flags);
if (priv(dev)->tx_tail == next_ptr) {
local_irq_restore(flags);
return NETDEV_TX_BUSY; /* unable to queue */
}
ptr = 0x600 * priv(dev)->tx_head;
priv(dev)->tx_head = next_ptr;
next_ptr *= 0x600;
#define TXHDR_FLAGS (TXHDR_TRANSMIT|TXHDR_CHAINCONTINUE|TXHDR_DATAFOLLOWS|TXHDR_ENSUCCESS)
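/*
 * Sketch of the on-card TX layout assumed by the code below: each of the 16
 * slots is 0x600 bytes, holding a 4-byte { next-pointer, flags } header
 * followed by the frame and a trailing chain header. The headers of the next
 * slot and of this slot are zeroed first, the data and trailing chain header
 * are written, and only then is this slot's real header filled in, so the
 * chip never follows a half-built chain.
 */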
ether3_setbuffer(dev, buffer_write, next_ptr);
ether3_writelong(dev, 0);
ether3_setbuffer(dev, buffer_write, ptr);
ether3_writelong(dev, 0);
ether3_writebuffer(dev, skb->data, length);
ether3_writeword(dev, htons(next_ptr));
ether3_writeword(dev, TXHDR_CHAINCONTINUE >> 16);
ether3_setbuffer(dev, buffer_write, ptr);
ether3_writeword(dev, htons((ptr + length + 4)));
ether3_writeword(dev, TXHDR_FLAGS >> 16);
ether3_ledon(dev);
if (!(ether3_inw(REG_STATUS) & STAT_TXON)) {
ether3_outw(ptr, REG_TRANSMITPTR);
ether3_outw(priv(dev)->regs.command | CMD_TXON, REG_COMMAND);
}
next_ptr = (priv(dev)->tx_head + 1) & 15;
local_irq_restore(flags);
dev_kfree_skb(skb);
if (priv(dev)->tx_tail == next_ptr)
netif_stop_queue(dev);
out:
return NETDEV_TX_OK;
}
static irqreturn_t
ether3_interrupt(int irq, void *dev_id)
{
struct net_device *dev = (struct net_device *)dev_id;
unsigned int status, handled = IRQ_NONE;
#if NET_DEBUG > 1
if(net_debug & DEBUG_INT)
printk("eth3irq: %d ", irq);
#endif
status = ether3_inw(REG_STATUS);
if (status & STAT_INTRX) {
ether3_outw(CMD_ACKINTRX | priv(dev)->regs.command, REG_COMMAND);
ether3_rx(dev, 12);
handled = IRQ_HANDLED;
}
if (status & STAT_INTTX) {
ether3_outw(CMD_ACKINTTX | priv(dev)->regs.command, REG_COMMAND);
ether3_tx(dev);
handled = IRQ_HANDLED;
}
#if NET_DEBUG > 1
if(net_debug & DEBUG_INT)
printk("done\n");
#endif
return handled;
}
/*
* If we have a good packet(s), get it/them out of the buffers.
*/
static int ether3_rx(struct net_device *dev, unsigned int maxcnt)
{
unsigned int next_ptr = priv(dev)->rx_head, received = 0;
ether3_ledon(dev);
do {
unsigned int this_ptr, status;
unsigned char addrs[16];
/*
* read the first 16 bytes from the buffer.
* This contains the status bytes etc and ethernet addresses,
* and we also check the source ethernet address to see if
* it originated from us.
*/
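/*
 * Roughly, each receive entry in buffer memory starts with a 2-byte
 * next-packet pointer and a 2-byte status word, followed by the frame
 * (destination and source addresses first); that is what the reads below
 * pull out into temp_ptr, status and addrs[].
 */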
{
unsigned int temp_ptr;
ether3_setbuffer(dev, buffer_read, next_ptr);
temp_ptr = ether3_readword(dev);
status = ether3_readword(dev);
if ((status & (RXSTAT_DONE | RXHDR_CHAINCONTINUE | RXHDR_RECEIVE)) !=
(RXSTAT_DONE | RXHDR_CHAINCONTINUE) || !temp_ptr)
break;
this_ptr = next_ptr + 4;
next_ptr = ntohs(temp_ptr);
}
ether3_setbuffer(dev, buffer_read, this_ptr);
ether3_readbuffer(dev, addrs+2, 12);
if (next_ptr < RX_START || next_ptr >= RX_END) {
int i;
printk("%s: bad next pointer @%04X: ", dev->name, priv(dev)->rx_head);
printk("%02X %02X %02X %02X ", next_ptr >> 8, next_ptr & 255, status & 255, status >> 8);
for (i = 2; i < 14; i++)
printk("%02X ", addrs[i]);
printk("\n");
next_ptr = priv(dev)->rx_head;
break;
}
/*
* ignore our own packets...
*/
if (!(*(unsigned long *)&dev->dev_addr[0] ^ *(unsigned long *)&addrs[2+6]) &&
!(*(unsigned short *)&dev->dev_addr[4] ^ *(unsigned short *)&addrs[2+10])) {
maxcnt ++; /* compensate for loopedback packet */
ether3_outw(next_ptr >> 8, REG_RECVEND);
} else
if (!(status & (RXSTAT_OVERSIZE|RXSTAT_CRCERROR|RXSTAT_DRIBBLEERROR|RXSTAT_SHORTPACKET))) {
unsigned int length = next_ptr - this_ptr;
struct sk_buff *skb;
if (next_ptr <= this_ptr)
length += RX_END - RX_START;
skb = netdev_alloc_skb(dev, length + 2);
if (skb) {
unsigned char *buf;
skb_reserve(skb, 2);
buf = skb_put(skb, length);
ether3_readbuffer(dev, buf + 12, length - 12);
ether3_outw(next_ptr >> 8, REG_RECVEND);
*(unsigned short *)(buf + 0) = *(unsigned short *)(addrs + 2);
*(unsigned long *)(buf + 2) = *(unsigned long *)(addrs + 4);
*(unsigned long *)(buf + 6) = *(unsigned long *)(addrs + 8);
*(unsigned short *)(buf + 10) = *(unsigned short *)(addrs + 12);
skb->protocol = eth_type_trans(skb, dev);
netif_rx(skb);
received ++;
} else {
ether3_outw(next_ptr >> 8, REG_RECVEND);
dev->stats.rx_dropped++;
goto done;
}
} else {
struct net_device_stats *stats = &dev->stats;
ether3_outw(next_ptr >> 8, REG_RECVEND);
if (status & RXSTAT_OVERSIZE) stats->rx_over_errors ++;
if (status & RXSTAT_CRCERROR) stats->rx_crc_errors ++;
if (status & RXSTAT_DRIBBLEERROR) stats->rx_fifo_errors ++;
if (status & RXSTAT_SHORTPACKET) stats->rx_length_errors ++;
stats->rx_errors++;
}
}
while (-- maxcnt);
done:
dev->stats.rx_packets += received;
priv(dev)->rx_head = next_ptr;
/*
* If rx went off line, then that means that the buffer may be full. We
* have dropped at least one packet.
*/
if (!(ether3_inw(REG_STATUS) & STAT_RXON)) {
dev->stats.rx_dropped++;
ether3_outw(next_ptr, REG_RECVPTR);
ether3_outw(priv(dev)->regs.command | CMD_RXON, REG_COMMAND);
}
return maxcnt;
}
/*
* Update stats for the transmitted packet(s)
*/
static void ether3_tx(struct net_device *dev)
{
unsigned int tx_tail = priv(dev)->tx_tail;
int max_work = 14;
do {
unsigned long status;
/*
* Read the packet header
*/
ether3_setbuffer(dev, buffer_read, tx_tail * 0x600);
status = ether3_readlong(dev);
/*
* Check to see if this packet has been transmitted
*/
if ((status & (TXSTAT_DONE | TXHDR_TRANSMIT)) !=
(TXSTAT_DONE | TXHDR_TRANSMIT))
break;
/*
* Update errors
*/
if (!(status & (TXSTAT_BABBLED | TXSTAT_16COLLISIONS)))
dev->stats.tx_packets++;
else {
dev->stats.tx_errors++;
if (status & TXSTAT_16COLLISIONS)
dev->stats.collisions += 16;
if (status & TXSTAT_BABBLED)
dev->stats.tx_fifo_errors++;
}
tx_tail = (tx_tail + 1) & 15;
} while (--max_work);
if (priv(dev)->tx_tail != tx_tail) {
priv(dev)->tx_tail = tx_tail;
netif_wake_queue(dev);
}
}
static void ether3_banner(void)
{
static unsigned version_printed = 0;
if (net_debug && version_printed++ == 0)
printk(KERN_INFO "%s", version);
}
static const struct net_device_ops ether3_netdev_ops = {
.ndo_open = ether3_open,
.ndo_stop = ether3_close,
.ndo_start_xmit = ether3_sendpacket,
.ndo_set_rx_mode = ether3_setmulticastlist,
.ndo_tx_timeout = ether3_timeout,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
};
static int
ether3_probe(struct expansion_card *ec, const struct ecard_id *id)
{
const struct ether3_data *data = id->data;
struct net_device *dev;
int bus_type, ret;
ether3_banner();
ret = ecard_request_resources(ec);
if (ret)
goto out;
dev = alloc_etherdev(sizeof(struct dev_priv));
if (!dev) {
ret = -ENOMEM;
goto release;
}
SET_NETDEV_DEV(dev, &ec->dev);
priv(dev)->base = ecardm_iomap(ec, ECARD_RES_MEMC, 0, 0);
if (!priv(dev)->base) {
ret = -ENOMEM;
goto free;
}
ec->irqaddr = priv(dev)->base + data->base_offset;
ec->irqmask = 0xf0;
priv(dev)->seeq = priv(dev)->base + data->base_offset;
dev->irq = ec->irq;
ether3_addr(dev->dev_addr, ec);
init_timer(&priv(dev)->timer);
/* Reset card...
*/
ether3_outb(0x80, REG_CONFIG2 + 4);
bus_type = BUS_UNKNOWN;
udelay(4);
/* Test using Receive Pointer (16-bit register) to find out
* how the ether3 is connected to the bus...
*/
if (ether3_probe_bus_8(dev, 0x100) &&
ether3_probe_bus_8(dev, 0x201))
bus_type = BUS_8;
if (bus_type == BUS_UNKNOWN &&
ether3_probe_bus_16(dev, 0x101) &&
ether3_probe_bus_16(dev, 0x201))
bus_type = BUS_16;
switch (bus_type) {
case BUS_UNKNOWN:
printk(KERN_ERR "%s: unable to identify bus width\n", dev->name);
ret = -ENODEV;
goto free;
case BUS_8:
printk(KERN_ERR "%s: %s found, but is an unsupported "
"8-bit card\n", dev->name, data->name);
ret = -ENODEV;
goto free;
default:
break;
}
if (ether3_init_2(dev)) {
ret = -ENODEV;
goto free;
}
dev->netdev_ops = &ether3_netdev_ops;
dev->watchdog_timeo = 5 * HZ / 100;
ret = register_netdev(dev);
if (ret)
goto free;
printk("%s: %s in slot %d, %pM\n",
dev->name, data->name, ec->slot_no, dev->dev_addr);
ecard_set_drvdata(ec, dev);
return 0;
free:
free_netdev(dev);
release:
ecard_release_resources(ec);
out:
return ret;
}
static void ether3_remove(struct expansion_card *ec)
{
struct net_device *dev = ecard_get_drvdata(ec);
ecard_set_drvdata(ec, NULL);
unregister_netdev(dev);
free_netdev(dev);
ecard_release_resources(ec);
}
static struct ether3_data ether3 = {
.name = "ether3",
.base_offset = 0,
};
static struct ether3_data etherb = {
.name = "etherb",
.base_offset = 0x800,
};
static const struct ecard_id ether3_ids[] = {
{ MANU_ANT2, PROD_ANT_ETHER3, &ether3 },
{ MANU_ANT, PROD_ANT_ETHER3, &ether3 },
{ MANU_ANT, PROD_ANT_ETHERB, &etherb },
{ 0xffff, 0xffff }
};
static struct ecard_driver ether3_driver = {
.probe = ether3_probe,
.remove = ether3_remove,
.id_table = ether3_ids,
.drv = {
.name = "ether3",
},
};
static int __init ether3_init(void)
{
return ecard_register_driver(&ether3_driver);
}
static void __exit ether3_exit(void)
{
ecard_remove_driver(&ether3_driver);
}
module_init(ether3_init);
module_exit(ether3_exit);
MODULE_LICENSE("GPL");


@@ -0,0 +1,176 @@
/*
* linux/drivers/acorn/net/ether3.h
*
* Copyright (C) 1995-2000 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* network driver for Acorn/ANT Ether3 cards
*/
#ifndef _LINUX_ether3_H
#define _LINUX_ether3_H
/* use 0 for production, 1 for verification, >2 for debug. debug flags: */
#define DEBUG_TX 2
#define DEBUG_RX 4
#define DEBUG_INT 8
#define DEBUG_IC 16
#ifndef NET_DEBUG
#define NET_DEBUG 0
#endif
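/*
 * Example (illustrative): building with NET_DEBUG defined as
 * (DEBUG_TX | DEBUG_INT), i.e. 10, passes the "NET_DEBUG > 1" checks in
 * ether3.c and enables the printks guarded by the matching net_debug bits,
 * such as the DEBUG_INT trace in ether3_interrupt().
 */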
#define priv(dev) ((struct dev_priv *)netdev_priv(dev))
/* Command register definitions & bits */
#define REG_COMMAND (priv(dev)->seeq + 0x0000)
#define CMD_ENINTDMA 0x0001
#define CMD_ENINTRX 0x0002
#define CMD_ENINTTX 0x0004
#define CMD_ENINTBUFWIN 0x0008
#define CMD_ACKINTDMA 0x0010
#define CMD_ACKINTRX 0x0020
#define CMD_ACKINTTX 0x0040
#define CMD_ACKINTBUFWIN 0x0080
#define CMD_DMAON 0x0100
#define CMD_RXON 0x0200
#define CMD_TXON 0x0400
#define CMD_DMAOFF 0x0800
#define CMD_RXOFF 0x1000
#define CMD_TXOFF 0x2000
#define CMD_FIFOREAD 0x4000
#define CMD_FIFOWRITE 0x8000
/* status register */
#define REG_STATUS (priv(dev)->seeq + 0x0000)
#define STAT_ENINTSTAT 0x0001
#define STAT_ENINTRX 0x0002
#define STAT_ENINTTX 0x0004
#define STAT_ENINTBUFWIN 0x0008
#define STAT_INTDMA 0x0010
#define STAT_INTRX 0x0020
#define STAT_INTTX 0x0040
#define STAT_INTBUFWIN 0x0080
#define STAT_DMAON 0x0100
#define STAT_RXON 0x0200
#define STAT_TXON 0x0400
#define STAT_FIFOFULL 0x2000
#define STAT_FIFOEMPTY 0x4000
#define STAT_FIFODIR 0x8000
/* configuration register 1 */
#define REG_CONFIG1 (priv(dev)->seeq + 0x0040)
#define CFG1_BUFSELSTAT0 0x0000
#define CFG1_BUFSELSTAT1 0x0001
#define CFG1_BUFSELSTAT2 0x0002
#define CFG1_BUFSELSTAT3 0x0003
#define CFG1_BUFSELSTAT4 0x0004
#define CFG1_BUFSELSTAT5 0x0005
#define CFG1_ADDRPROM 0x0006
#define CFG1_TRANSEND 0x0007
#define CFG1_LOCBUFMEM 0x0008
#define CFG1_INTVECTOR 0x0009
#define CFG1_RECVSPECONLY 0x0000
#define CFG1_RECVSPECBROAD 0x4000
#define CFG1_RECVSPECBRMULTI 0x8000
#define CFG1_RECVPROMISC 0xC000
/* The following aren't in 8004 */
#define CFG1_DMABURSTCONT 0x0000
#define CFG1_DMABURST800NS 0x0010
#define CFG1_DMABURST1600NS 0x0020
#define CFG1_DMABURST3200NS 0x0030
#define CFG1_DMABURST1 0x0000
#define CFG1_DMABURST4 0x0040
#define CFG1_DMABURST8 0x0080
#define CFG1_DMABURST16 0x00C0
#define CFG1_RECVCOMPSTAT0 0x0100
#define CFG1_RECVCOMPSTAT1 0x0200
#define CFG1_RECVCOMPSTAT2 0x0400
#define CFG1_RECVCOMPSTAT3 0x0800
#define CFG1_RECVCOMPSTAT4 0x1000
#define CFG1_RECVCOMPSTAT5 0x2000
/* configuration register 2 */
#define REG_CONFIG2 (priv(dev)->seeq + 0x0080)
#define CFG2_BYTESWAP 0x0001
#define CFG2_ERRENCRC 0x0008
#define CFG2_ERRENDRIBBLE 0x0010
#define CFG2_ERRSHORTFRAME 0x0020
#define CFG2_SLOTSELECT 0x0040
#define CFG2_PREAMSELECT 0x0080
#define CFG2_ADDRLENGTH 0x0100
#define CFG2_RECVCRC 0x0200
#define CFG2_XMITNOCRC 0x0400
#define CFG2_LOOPBACK 0x0800
#define CFG2_CTRLO 0x1000
#define CFG2_RESET 0x8000
#define REG_RECVEND (priv(dev)->seeq + 0x00c0)
#define REG_BUFWIN (priv(dev)->seeq + 0x0100)
#define REG_RECVPTR (priv(dev)->seeq + 0x0140)
#define REG_TRANSMITPTR (priv(dev)->seeq + 0x0180)
#define REG_DMAADDR (priv(dev)->seeq + 0x01c0)
/*
* Cards transmit/receive headers
*/
#define TX_NEXT (0xffff)
#define TXHDR_ENBABBLEINT (1 << 16)
#define TXHDR_ENCOLLISIONINT (1 << 17)
#define TXHDR_EN16COLLISION (1 << 18)
#define TXHDR_ENSUCCESS (1 << 19)
#define TXHDR_DATAFOLLOWS (1 << 21)
#define TXHDR_CHAINCONTINUE (1 << 22)
#define TXHDR_TRANSMIT (1 << 23)
#define TXSTAT_BABBLED (1 << 24)
#define TXSTAT_COLLISION (1 << 25)
#define TXSTAT_16COLLISIONS (1 << 26)
#define TXSTAT_DONE (1 << 31)
#define RX_NEXT (0xffff)
#define RXHDR_CHAINCONTINUE (1 << 6)
#define RXHDR_RECEIVE (1 << 7)
#define RXSTAT_OVERSIZE (1 << 8)
#define RXSTAT_CRCERROR (1 << 9)
#define RXSTAT_DRIBBLEERROR (1 << 10)
#define RXSTAT_SHORTPACKET (1 << 11)
#define RXSTAT_DONE (1 << 15)
#define TX_START 0x0000
#define TX_END 0x6000
#define RX_START 0x6000
#define RX_LEN 0xA000
#define RX_END 0x10000
/* must be a power of 2 and greater than MAX_TX_BUFFERED */
#define MAX_TXED 16
#define MAX_TX_BUFFERED 10
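/*
 * In other words, the 64K of on-card buffer memory is split into sixteen
 * 0x600-byte transmit slots at 0x0000-0x5FFF and a 0xA000-byte receive ring
 * at 0x6000-0xFFFF; tx_head/tx_tail in struct dev_priv index the transmit
 * slots.
 */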
struct dev_priv {
void __iomem *base;
void __iomem *seeq;
struct {
unsigned int command;
unsigned int config1;
unsigned int config2;
} regs;
unsigned char tx_head; /* buffer nr to insert next packet */
unsigned char tx_tail; /* buffer nr of transmitting packet */
unsigned int rx_head; /* address to fetch next packet from */
struct timer_list timer;
int broken; /* 0 = ok, 1 = something went wrong */
};
struct ether3_data {
const char name[8];
unsigned long base_offset;
};
#endif


@@ -0,0 +1,838 @@
/*
* sgiseeq.c: Seeq8003 ethernet driver for SGI machines.
*
* Copyright (C) 1996 David S. Miller (davem@davemloft.net)
*/
#undef DEBUG
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <asm/sgi/hpc3.h>
#include <asm/sgi/ip22.h>
#include <asm/sgi/seeq.h>
#include "sgiseeq.h"
static char *sgiseeqstr = "SGI Seeq8003";
/*
* If you want speed, you do something silly, it always has worked for me. So,
* with that in mind, I've decided to make this driver look completely like a
* stupid Lance from a driver architecture perspective. Only difference is that
* here our "ring buffer" looks and acts like a real Lance one does but is
* laid out like how the HPC DMA and the Seeq want it to. You'd be surprised
* how a stupid idea like this can pay off in performance, not to mention
* making this driver 2,000 times easier to write. ;-)
*/
/* Tune these if we tend to run out often etc. */
#define SEEQ_RX_BUFFERS 16
#define SEEQ_TX_BUFFERS 16
#define PKT_BUF_SZ 1584
#define NEXT_RX(i) (((i) + 1) & (SEEQ_RX_BUFFERS - 1))
#define NEXT_TX(i) (((i) + 1) & (SEEQ_TX_BUFFERS - 1))
#define PREV_RX(i) (((i) - 1) & (SEEQ_RX_BUFFERS - 1))
#define PREV_TX(i) (((i) - 1) & (SEEQ_TX_BUFFERS - 1))
#define TX_BUFFS_AVAIL(sp) ((sp->tx_old <= sp->tx_new) ? \
sp->tx_old + (SEEQ_TX_BUFFERS - 1) - sp->tx_new : \
sp->tx_old - sp->tx_new - 1)
#define VIRT_TO_DMA(sp, v) ((sp)->srings_dma + \
(dma_addr_t)((unsigned long)(v) - \
(unsigned long)((sp)->rx_desc)))
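/*
 * Worked example: with SEEQ_TX_BUFFERS = 16, TX_BUFFS_AVAIL() deliberately
 * keeps one slot unused so a full ring and an empty ring can be told apart:
 * tx_old == tx_new gives 15 free slots, and tx_new == PREV_TX(tx_old) gives
 * 0, at which point sgiseeq_start_xmit() stops the queue. VIRT_TO_DMA()
 * simply rebases a descriptor pointer within the srings block onto the DMA
 * address handed to the HPC.
 */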
/* Copy frames shorter than rx_copybreak, otherwise pass on up in
* a full sized sk_buff. Value of 100 stolen from tulip.c (!alpha).
*/
static int rx_copybreak = 100;
#define PAD_SIZE (128 - sizeof(struct hpc_dma_desc) - sizeof(void *))
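/*
 * Note: PAD_SIZE pads each descriptor struct below out to exactly 128 bytes
 * (HPC descriptor + padding + skb pointer), presumably so the per-descriptor
 * dma_sync_desc_cpu()/dma_sync_desc_dev() cache maintenance never overlaps a
 * neighbouring descriptor.
 */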
struct sgiseeq_rx_desc {
volatile struct hpc_dma_desc rdma;
u8 padding[PAD_SIZE];
struct sk_buff *skb;
};
struct sgiseeq_tx_desc {
volatile struct hpc_dma_desc tdma;
u8 padding[PAD_SIZE];
struct sk_buff *skb;
};
/*
* Warning: This structure is laid out in a certain way because HPC dma
* descriptors must be 8-byte aligned. So don't touch this without
* some care.
*/
struct sgiseeq_init_block { /* Note the name ;-) */
struct sgiseeq_rx_desc rxvector[SEEQ_RX_BUFFERS];
struct sgiseeq_tx_desc txvector[SEEQ_TX_BUFFERS];
};
struct sgiseeq_private {
struct sgiseeq_init_block *srings;
dma_addr_t srings_dma;
/* Ptrs to the descriptors in uncached space. */
struct sgiseeq_rx_desc *rx_desc;
struct sgiseeq_tx_desc *tx_desc;
char *name;
struct hpc3_ethregs *hregs;
struct sgiseeq_regs *sregs;
/* Ring entry counters. */
unsigned int rx_new, tx_new;
unsigned int rx_old, tx_old;
int is_edlc;
unsigned char control;
unsigned char mode;
spinlock_t tx_lock;
};
static inline void dma_sync_desc_cpu(struct net_device *dev, void *addr)
{
dma_cache_sync(dev->dev.parent, addr, sizeof(struct sgiseeq_rx_desc),
DMA_FROM_DEVICE);
}
static inline void dma_sync_desc_dev(struct net_device *dev, void *addr)
{
dma_cache_sync(dev->dev.parent, addr, sizeof(struct sgiseeq_rx_desc),
DMA_TO_DEVICE);
}
static inline void hpc3_eth_reset(struct hpc3_ethregs *hregs)
{
hregs->reset = HPC3_ERST_CRESET | HPC3_ERST_CLRIRQ;
udelay(20);
hregs->reset = 0;
}
static inline void reset_hpc3_and_seeq(struct hpc3_ethregs *hregs,
struct sgiseeq_regs *sregs)
{
hregs->rx_ctrl = hregs->tx_ctrl = 0;
hpc3_eth_reset(hregs);
}
#define RSTAT_GO_BITS (SEEQ_RCMD_IGOOD | SEEQ_RCMD_IEOF | SEEQ_RCMD_ISHORT | \
SEEQ_RCMD_IDRIB | SEEQ_RCMD_ICRC)
static inline void seeq_go(struct sgiseeq_private *sp,
struct hpc3_ethregs *hregs,
struct sgiseeq_regs *sregs)
{
sregs->rstat = sp->mode | RSTAT_GO_BITS;
hregs->rx_ctrl = HPC3_ERXCTRL_ACTIVE;
}
static inline void __sgiseeq_set_mac_address(struct net_device *dev)
{
struct sgiseeq_private *sp = netdev_priv(dev);
struct sgiseeq_regs *sregs = sp->sregs;
int i;
sregs->tstat = SEEQ_TCMD_RB0;
for (i = 0; i < 6; i++)
sregs->rw.eth_addr[i] = dev->dev_addr[i];
}
static int sgiseeq_set_mac_address(struct net_device *dev, void *addr)
{
struct sgiseeq_private *sp = netdev_priv(dev);
struct sockaddr *sa = addr;
memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);
spin_lock_irq(&sp->tx_lock);
__sgiseeq_set_mac_address(dev);
spin_unlock_irq(&sp->tx_lock);
return 0;
}
#define TCNTINFO_INIT (HPCDMA_EOX | HPCDMA_ETXD)
#define RCNTCFG_INIT (HPCDMA_OWN | HPCDMA_EORP | HPCDMA_XIE)
#define RCNTINFO_INIT (RCNTCFG_INIT | (PKT_BUF_SZ & HPCDMA_BCNT))
static int seeq_init_ring(struct net_device *dev)
{
struct sgiseeq_private *sp = netdev_priv(dev);
int i;
netif_stop_queue(dev);
sp->rx_new = sp->tx_new = 0;
sp->rx_old = sp->tx_old = 0;
__sgiseeq_set_mac_address(dev);
/* Setup tx ring. */
for(i = 0; i < SEEQ_TX_BUFFERS; i++) {
sp->tx_desc[i].tdma.cntinfo = TCNTINFO_INIT;
dma_sync_desc_dev(dev, &sp->tx_desc[i]);
}
/* And now the rx ring. */
for (i = 0; i < SEEQ_RX_BUFFERS; i++) {
if (!sp->rx_desc[i].skb) {
dma_addr_t dma_addr;
struct sk_buff *skb = netdev_alloc_skb(dev, PKT_BUF_SZ);
if (skb == NULL)
return -ENOMEM;
skb_reserve(skb, 2);
dma_addr = dma_map_single(dev->dev.parent,
skb->data - 2,
PKT_BUF_SZ, DMA_FROM_DEVICE);
sp->rx_desc[i].skb = skb;
sp->rx_desc[i].rdma.pbuf = dma_addr;
}
sp->rx_desc[i].rdma.cntinfo = RCNTINFO_INIT;
dma_sync_desc_dev(dev, &sp->rx_desc[i]);
}
sp->rx_desc[i - 1].rdma.cntinfo |= HPCDMA_EOR;
dma_sync_desc_dev(dev, &sp->rx_desc[i - 1]);
return 0;
}
static void seeq_purge_ring(struct net_device *dev)
{
struct sgiseeq_private *sp = netdev_priv(dev);
int i;
/* clear tx ring. */
for (i = 0; i < SEEQ_TX_BUFFERS; i++) {
if (sp->tx_desc[i].skb) {
dev_kfree_skb(sp->tx_desc[i].skb);
sp->tx_desc[i].skb = NULL;
}
}
/* And now the rx ring. */
for (i = 0; i < SEEQ_RX_BUFFERS; i++) {
if (sp->rx_desc[i].skb) {
dev_kfree_skb(sp->rx_desc[i].skb);
sp->rx_desc[i].skb = NULL;
}
}
}
#ifdef DEBUG
static struct sgiseeq_private *gpriv;
static struct net_device *gdev;
static void sgiseeq_dump_rings(void)
{
static int once;
struct sgiseeq_rx_desc *r = gpriv->rx_desc;
struct sgiseeq_tx_desc *t = gpriv->tx_desc;
struct hpc3_ethregs *hregs = gpriv->hregs;
int i;
if (once)
return;
once++;
printk("RING DUMP:\n");
for (i = 0; i < SEEQ_RX_BUFFERS; i++) {
printk("RX [%d]: @(%p) [%08x,%08x,%08x] ",
i, (&r[i]), r[i].rdma.pbuf, r[i].rdma.cntinfo,
r[i].rdma.pnext);
i += 1;
printk("-- [%d]: @(%p) [%08x,%08x,%08x]\n",
i, (&r[i]), r[i].rdma.pbuf, r[i].rdma.cntinfo,
r[i].rdma.pnext);
}
for (i = 0; i < SEEQ_TX_BUFFERS; i++) {
printk("TX [%d]: @(%p) [%08x,%08x,%08x] ",
i, (&t[i]), t[i].tdma.pbuf, t[i].tdma.cntinfo,
t[i].tdma.pnext);
i += 1;
printk("-- [%d]: @(%p) [%08x,%08x,%08x]\n",
i, (&t[i]), t[i].tdma.pbuf, t[i].tdma.cntinfo,
t[i].tdma.pnext);
}
printk("INFO: [rx_new = %d rx_old=%d] [tx_new = %d tx_old = %d]\n",
gpriv->rx_new, gpriv->rx_old, gpriv->tx_new, gpriv->tx_old);
printk("RREGS: rx_cbptr[%08x] rx_ndptr[%08x] rx_ctrl[%08x]\n",
hregs->rx_cbptr, hregs->rx_ndptr, hregs->rx_ctrl);
printk("TREGS: tx_cbptr[%08x] tx_ndptr[%08x] tx_ctrl[%08x]\n",
hregs->tx_cbptr, hregs->tx_ndptr, hregs->tx_ctrl);
}
#endif
#define TSTAT_INIT_SEEQ (SEEQ_TCMD_IPT|SEEQ_TCMD_I16|SEEQ_TCMD_IC|SEEQ_TCMD_IUF)
#define TSTAT_INIT_EDLC ((TSTAT_INIT_SEEQ) | SEEQ_TCMD_RB2)
static int init_seeq(struct net_device *dev, struct sgiseeq_private *sp,
struct sgiseeq_regs *sregs)
{
struct hpc3_ethregs *hregs = sp->hregs;
int err;
reset_hpc3_and_seeq(hregs, sregs);
err = seeq_init_ring(dev);
if (err)
return err;
/* Setup to field the proper interrupt types. */
if (sp->is_edlc) {
sregs->tstat = TSTAT_INIT_EDLC;
sregs->rw.wregs.control = sp->control;
sregs->rw.wregs.frame_gap = 0;
} else {
sregs->tstat = TSTAT_INIT_SEEQ;
}
hregs->rx_ndptr = VIRT_TO_DMA(sp, sp->rx_desc);
hregs->tx_ndptr = VIRT_TO_DMA(sp, sp->tx_desc);
seeq_go(sp, hregs, sregs);
return 0;
}
static void record_rx_errors(struct net_device *dev, unsigned char status)
{
if (status & SEEQ_RSTAT_OVERF ||
status & SEEQ_RSTAT_SFRAME)
dev->stats.rx_over_errors++;
if (status & SEEQ_RSTAT_CERROR)
dev->stats.rx_crc_errors++;
if (status & SEEQ_RSTAT_DERROR)
dev->stats.rx_frame_errors++;
if (status & SEEQ_RSTAT_REOF)
dev->stats.rx_errors++;
}
static inline void rx_maybe_restart(struct sgiseeq_private *sp,
struct hpc3_ethregs *hregs,
struct sgiseeq_regs *sregs)
{
if (!(hregs->rx_ctrl & HPC3_ERXCTRL_ACTIVE)) {
hregs->rx_ndptr = VIRT_TO_DMA(sp, sp->rx_desc + sp->rx_new);
seeq_go(sp, hregs, sregs);
}
}
static inline void sgiseeq_rx(struct net_device *dev, struct sgiseeq_private *sp,
struct hpc3_ethregs *hregs,
struct sgiseeq_regs *sregs)
{
struct sgiseeq_rx_desc *rd;
struct sk_buff *skb = NULL;
struct sk_buff *newskb;
unsigned char pkt_status;
int len = 0;
unsigned int orig_end = PREV_RX(sp->rx_new);
/* Service every received packet. */
rd = &sp->rx_desc[sp->rx_new];
dma_sync_desc_cpu(dev, rd);
while (!(rd->rdma.cntinfo & HPCDMA_OWN)) {
len = PKT_BUF_SZ - (rd->rdma.cntinfo & HPCDMA_BCNT) - 3;
dma_unmap_single(dev->dev.parent, rd->rdma.pbuf,
PKT_BUF_SZ, DMA_FROM_DEVICE);
pkt_status = rd->skb->data[len];
if (pkt_status & SEEQ_RSTAT_FIG) {
/* Packet is OK. */
/* We don't want to receive our own packets */
if (!ether_addr_equal(rd->skb->data + 6, dev->dev_addr)) {
if (len > rx_copybreak) {
skb = rd->skb;
newskb = netdev_alloc_skb(dev, PKT_BUF_SZ);
if (!newskb) {
newskb = skb;
skb = NULL;
goto memory_squeeze;
}
skb_reserve(newskb, 2);
} else {
skb = netdev_alloc_skb_ip_align(dev, len);
if (skb)
skb_copy_to_linear_data(skb, rd->skb->data, len);
newskb = rd->skb;
}
memory_squeeze:
if (skb) {
skb_put(skb, len);
skb->protocol = eth_type_trans(skb, dev);
netif_rx(skb);
dev->stats.rx_packets++;
dev->stats.rx_bytes += len;
} else {
dev->stats.rx_dropped++;
}
} else {
/* Silently drop my own packets */
newskb = rd->skb;
}
} else {
record_rx_errors(dev, pkt_status);
newskb = rd->skb;
}
rd->skb = newskb;
rd->rdma.pbuf = dma_map_single(dev->dev.parent,
newskb->data - 2,
PKT_BUF_SZ, DMA_FROM_DEVICE);
/* Return the entry to the ring pool. */
rd->rdma.cntinfo = RCNTINFO_INIT;
sp->rx_new = NEXT_RX(sp->rx_new);
dma_sync_desc_dev(dev, rd);
rd = &sp->rx_desc[sp->rx_new];
dma_sync_desc_cpu(dev, rd);
}
dma_sync_desc_cpu(dev, &sp->rx_desc[orig_end]);
sp->rx_desc[orig_end].rdma.cntinfo &= ~(HPCDMA_EOR);
dma_sync_desc_dev(dev, &sp->rx_desc[orig_end]);
dma_sync_desc_cpu(dev, &sp->rx_desc[PREV_RX(sp->rx_new)]);
sp->rx_desc[PREV_RX(sp->rx_new)].rdma.cntinfo |= HPCDMA_EOR;
dma_sync_desc_dev(dev, &sp->rx_desc[PREV_RX(sp->rx_new)]);
rx_maybe_restart(sp, hregs, sregs);
}
static inline void tx_maybe_reset_collisions(struct sgiseeq_private *sp,
struct sgiseeq_regs *sregs)
{
if (sp->is_edlc) {
sregs->rw.wregs.control = sp->control & ~(SEEQ_CTRL_XCNT);
sregs->rw.wregs.control = sp->control;
}
}
static inline void kick_tx(struct net_device *dev,
struct sgiseeq_private *sp,
struct hpc3_ethregs *hregs)
{
struct sgiseeq_tx_desc *td;
int i = sp->tx_old;
/* If the HPC aint doin nothin, and there are more packets
* with ETXD cleared and XIU set we must make very certain
* that we restart the HPC else we risk locking up the
* adapter. The following code is only safe iff the HPCDMA
* is not active!
*/
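/*
 * Put differently: skip forward from tx_old over descriptors that the driver
 * has queued (XIU set) and the engine has already finished (ETXD set); if
 * the descriptor we stop on is still queued but unfinished, point the HPC at
 * it and kick the transmitter back into motion.
 */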
td = &sp->tx_desc[i];
dma_sync_desc_cpu(dev, td);
while ((td->tdma.cntinfo & (HPCDMA_XIU | HPCDMA_ETXD)) ==
(HPCDMA_XIU | HPCDMA_ETXD)) {
i = NEXT_TX(i);
td = &sp->tx_desc[i];
dma_sync_desc_cpu(dev, td);
}
if (td->tdma.cntinfo & HPCDMA_XIU) {
hregs->tx_ndptr = VIRT_TO_DMA(sp, td);
hregs->tx_ctrl = HPC3_ETXCTRL_ACTIVE;
}
}
static inline void sgiseeq_tx(struct net_device *dev, struct sgiseeq_private *sp,
struct hpc3_ethregs *hregs,
struct sgiseeq_regs *sregs)
{
struct sgiseeq_tx_desc *td;
unsigned long status = hregs->tx_ctrl;
int j;
tx_maybe_reset_collisions(sp, sregs);
if (!(status & (HPC3_ETXCTRL_ACTIVE | SEEQ_TSTAT_PTRANS))) {
/* Oops, HPC detected some sort of error. */
if (status & SEEQ_TSTAT_R16)
dev->stats.tx_aborted_errors++;
if (status & SEEQ_TSTAT_UFLOW)
dev->stats.tx_fifo_errors++;
if (status & SEEQ_TSTAT_LCLS)
dev->stats.collisions++;
}
/* Ack 'em... */
for (j = sp->tx_old; j != sp->tx_new; j = NEXT_TX(j)) {
td = &sp->tx_desc[j];
dma_sync_desc_cpu(dev, td);
if (!(td->tdma.cntinfo & (HPCDMA_XIU)))
break;
if (!(td->tdma.cntinfo & (HPCDMA_ETXD))) {
if (!(status & HPC3_ETXCTRL_ACTIVE)) {
hregs->tx_ndptr = VIRT_TO_DMA(sp, td);
hregs->tx_ctrl = HPC3_ETXCTRL_ACTIVE;
}
break;
}
dev->stats.tx_packets++;
sp->tx_old = NEXT_TX(sp->tx_old);
td->tdma.cntinfo &= ~(HPCDMA_XIU | HPCDMA_XIE);
td->tdma.cntinfo |= HPCDMA_EOX;
if (td->skb) {
dev_kfree_skb_any(td->skb);
td->skb = NULL;
}
dma_sync_desc_dev(dev, td);
}
}
static irqreturn_t sgiseeq_interrupt(int irq, void *dev_id)
{
struct net_device *dev = (struct net_device *) dev_id;
struct sgiseeq_private *sp = netdev_priv(dev);
struct hpc3_ethregs *hregs = sp->hregs;
struct sgiseeq_regs *sregs = sp->sregs;
spin_lock(&sp->tx_lock);
/* Ack the IRQ and set software state. */
hregs->reset = HPC3_ERST_CLRIRQ;
/* Always check for received packets. */
sgiseeq_rx(dev, sp, hregs, sregs);
/* Only check for tx acks if we have something queued. */
if (sp->tx_old != sp->tx_new)
sgiseeq_tx(dev, sp, hregs, sregs);
if ((TX_BUFFS_AVAIL(sp) > 0) && netif_queue_stopped(dev)) {
netif_wake_queue(dev);
}
spin_unlock(&sp->tx_lock);
return IRQ_HANDLED;
}
static int sgiseeq_open(struct net_device *dev)
{
struct sgiseeq_private *sp = netdev_priv(dev);
struct sgiseeq_regs *sregs = sp->sregs;
unsigned int irq = dev->irq;
int err;
if (request_irq(irq, sgiseeq_interrupt, 0, sgiseeqstr, dev)) {
printk(KERN_ERR "Seeq8003: Can't get irq %d\n", dev->irq);
return -EAGAIN;
}
err = init_seeq(dev, sp, sregs);
if (err)
goto out_free_irq;
netif_start_queue(dev);
return 0;
out_free_irq:
free_irq(irq, dev);
return err;
}
static int sgiseeq_close(struct net_device *dev)
{
struct sgiseeq_private *sp = netdev_priv(dev);
struct sgiseeq_regs *sregs = sp->sregs;
unsigned int irq = dev->irq;
netif_stop_queue(dev);
/* Shutdown the Seeq. */
reset_hpc3_and_seeq(sp->hregs, sregs);
free_irq(irq, dev);
seeq_purge_ring(dev);
return 0;
}
static inline int sgiseeq_reset(struct net_device *dev)
{
struct sgiseeq_private *sp = netdev_priv(dev);
struct sgiseeq_regs *sregs = sp->sregs;
int err;
err = init_seeq(dev, sp, sregs);
if (err)
return err;
dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
return 0;
}
static int sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct sgiseeq_private *sp = netdev_priv(dev);
struct hpc3_ethregs *hregs = sp->hregs;
unsigned long flags;
struct sgiseeq_tx_desc *td;
int len, entry;
spin_lock_irqsave(&sp->tx_lock, flags);
/* Setup... */
len = skb->len;
if (len < ETH_ZLEN) {
if (skb_padto(skb, ETH_ZLEN)) {
spin_unlock_irqrestore(&sp->tx_lock, flags);
return NETDEV_TX_OK;
}
len = ETH_ZLEN;
}
dev->stats.tx_bytes += len;
entry = sp->tx_new;
td = &sp->tx_desc[entry];
dma_sync_desc_cpu(dev, td);
/* Create entry. There are so many races with adding a new
* descriptor to the chain:
* 1) Assume that the HPC is off processing a DMA chain while
* we are changing all of the following.
* 2) Do not allow the HPC to look at a new descriptor until
* we have completely set up its state. This means, do
* not clear HPCDMA_EOX in the current last descriptor
* until the one we are adding looks consistent and could
* be processed right now.
* 3) The tx interrupt code must notice when we've added a new
* entry and the HPC got to the end of the chain before we
* added this new entry and restarted it.
*/
td->skb = skb;
td->tdma.pbuf = dma_map_single(dev->dev.parent, skb->data,
len, DMA_TO_DEVICE);
td->tdma.cntinfo = (len & HPCDMA_BCNT) |
HPCDMA_XIU | HPCDMA_EOXP | HPCDMA_XIE | HPCDMA_EOX;
dma_sync_desc_dev(dev, td);
if (sp->tx_old != sp->tx_new) {
struct sgiseeq_tx_desc *backend;
backend = &sp->tx_desc[PREV_TX(sp->tx_new)];
dma_sync_desc_cpu(dev, backend);
backend->tdma.cntinfo &= ~HPCDMA_EOX;
dma_sync_desc_dev(dev, backend);
}
sp->tx_new = NEXT_TX(sp->tx_new); /* Advance. */
/* Maybe kick the HPC back into motion. */
if (!(hregs->tx_ctrl & HPC3_ETXCTRL_ACTIVE))
kick_tx(dev, sp, hregs);
if (!TX_BUFFS_AVAIL(sp))
netif_stop_queue(dev);
spin_unlock_irqrestore(&sp->tx_lock, flags);
return NETDEV_TX_OK;
}
static void timeout(struct net_device *dev)
{
printk(KERN_NOTICE "%s: transmit timed out, resetting\n", dev->name);
sgiseeq_reset(dev);
dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
}
static void sgiseeq_set_multicast(struct net_device *dev)
{
struct sgiseeq_private *sp = netdev_priv(dev);
unsigned char oldmode = sp->mode;
if(dev->flags & IFF_PROMISC)
sp->mode = SEEQ_RCMD_RANY;
else if ((dev->flags & IFF_ALLMULTI) || !netdev_mc_empty(dev))
sp->mode = SEEQ_RCMD_RBMCAST;
else
sp->mode = SEEQ_RCMD_RBCAST;
/* XXX I know this sucks, but is there a better way to reprogram
* XXX the receiver? At least, this shouldn't happen too often.
*/
if (oldmode != sp->mode)
sgiseeq_reset(dev);
}
static inline void setup_tx_ring(struct net_device *dev,
struct sgiseeq_tx_desc *buf,
int nbufs)
{
struct sgiseeq_private *sp = netdev_priv(dev);
int i = 0;
while (i < (nbufs - 1)) {
buf[i].tdma.pnext = VIRT_TO_DMA(sp, buf + i + 1);
buf[i].tdma.pbuf = 0;
dma_sync_desc_dev(dev, &buf[i]);
i++;
}
buf[i].tdma.pnext = VIRT_TO_DMA(sp, buf);
dma_sync_desc_dev(dev, &buf[i]);
}
static inline void setup_rx_ring(struct net_device *dev,
struct sgiseeq_rx_desc *buf,
int nbufs)
{
struct sgiseeq_private *sp = netdev_priv(dev);
int i = 0;
while (i < (nbufs - 1)) {
buf[i].rdma.pnext = VIRT_TO_DMA(sp, buf + i + 1);
buf[i].rdma.pbuf = 0;
dma_sync_desc_dev(dev, &buf[i]);
i++;
}
buf[i].rdma.pbuf = 0;
buf[i].rdma.pnext = VIRT_TO_DMA(sp, buf);
dma_sync_desc_dev(dev, &buf[i]);
}
static const struct net_device_ops sgiseeq_netdev_ops = {
.ndo_open = sgiseeq_open,
.ndo_stop = sgiseeq_close,
.ndo_start_xmit = sgiseeq_start_xmit,
.ndo_tx_timeout = timeout,
.ndo_set_rx_mode = sgiseeq_set_multicast,
.ndo_set_mac_address = sgiseeq_set_mac_address,
.ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
};
static int sgiseeq_probe(struct platform_device *pdev)
{
struct sgiseeq_platform_data *pd = dev_get_platdata(&pdev->dev);
struct hpc3_regs *hpcregs = pd->hpc;
struct sgiseeq_init_block *sr;
unsigned int irq = pd->irq;
struct sgiseeq_private *sp;
struct net_device *dev;
int err;
dev = alloc_etherdev(sizeof (struct sgiseeq_private));
if (!dev) {
err = -ENOMEM;
goto err_out;
}
platform_set_drvdata(pdev, dev);
sp = netdev_priv(dev);
/* Make private data page aligned */
sr = dma_alloc_noncoherent(&pdev->dev, sizeof(*sp->srings),
&sp->srings_dma, GFP_KERNEL);
if (!sr) {
printk(KERN_ERR "Sgiseeq: Page alloc failed, aborting.\n");
err = -ENOMEM;
goto err_out_free_dev;
}
sp->srings = sr;
sp->rx_desc = sp->srings->rxvector;
sp->tx_desc = sp->srings->txvector;
spin_lock_init(&sp->tx_lock);
/* A couple calculations now, saves many cycles later. */
setup_rx_ring(dev, sp->rx_desc, SEEQ_RX_BUFFERS);
setup_tx_ring(dev, sp->tx_desc, SEEQ_TX_BUFFERS);
memcpy(dev->dev_addr, pd->mac, ETH_ALEN);
#ifdef DEBUG
gpriv = sp;
gdev = dev;
#endif
sp->sregs = (struct sgiseeq_regs *) &hpcregs->eth_ext[0];
sp->hregs = &hpcregs->ethregs;
sp->name = sgiseeqstr;
sp->mode = SEEQ_RCMD_RBCAST;
/* Setup PIO and DMA transfer timing */
sp->hregs->pconfig = 0x161;
sp->hregs->dconfig = HPC3_EDCFG_FIRQ | HPC3_EDCFG_FEOP |
HPC3_EDCFG_FRXDC | HPC3_EDCFG_PTO | 0x026;
/* Reset the chip. */
hpc3_eth_reset(sp->hregs);
sp->is_edlc = !(sp->sregs->rw.rregs.collision_tx[0] & 0xff);
if (sp->is_edlc)
sp->control = SEEQ_CTRL_XCNT | SEEQ_CTRL_ACCNT |
SEEQ_CTRL_SFLAG | SEEQ_CTRL_ESHORT |
SEEQ_CTRL_ENCARR;
dev->netdev_ops = &sgiseeq_netdev_ops;
dev->watchdog_timeo = (200 * HZ) / 1000;
dev->irq = irq;
if (register_netdev(dev)) {
printk(KERN_ERR "Sgiseeq: Cannot register net device, "
"aborting.\n");
err = -ENODEV;
goto err_out_free_page;
}
printk(KERN_INFO "%s: %s %pM\n", dev->name, sgiseeqstr, dev->dev_addr);
return 0;
err_out_free_page:
free_page((unsigned long) sp->srings);
err_out_free_dev:
free_netdev(dev);
err_out:
return err;
}
static int __exit sgiseeq_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
struct sgiseeq_private *sp = netdev_priv(dev);
unregister_netdev(dev);
dma_free_noncoherent(&pdev->dev, sizeof(*sp->srings), sp->srings,
sp->srings_dma);
free_netdev(dev);
return 0;
}
static struct platform_driver sgiseeq_driver = {
.probe = sgiseeq_probe,
.remove = __exit_p(sgiseeq_remove),
.driver = {
.name = "sgiseeq",
.owner = THIS_MODULE,
}
};
module_platform_driver(sgiseeq_driver);
MODULE_DESCRIPTION("SGI Seeq 8003 driver");
MODULE_AUTHOR("Linux/MIPS Mailing List <linux-mips@linux-mips.org>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:sgiseeq");


@@ -0,0 +1,103 @@
/*
* sgiseeq.h: Defines for the Seeq8003 ethernet controller.
*
* Copyright (C) 1996 David S. Miller (davem@davemloft.net)
*/
#ifndef _SGISEEQ_H
#define _SGISEEQ_H
struct sgiseeq_wregs {
volatile unsigned int multicase_high[2];
volatile unsigned int frame_gap;
volatile unsigned int control;
};
struct sgiseeq_rregs {
volatile unsigned int collision_tx[2];
volatile unsigned int collision_all[2];
volatile unsigned int _unused0;
volatile unsigned int rflags;
};
struct sgiseeq_regs {
union {
volatile unsigned int eth_addr[6];
volatile unsigned int multicast_low[6];
struct sgiseeq_wregs wregs;
struct sgiseeq_rregs rregs;
} rw;
volatile unsigned int rstat;
volatile unsigned int tstat;
};
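/*
 * Note: the rw union mirrors the chip's banked register file. Which view is
 * live depends on the bank selected through the transmit command register,
 * e.g. SEEQ_TCMD_RB0 exposes the station address bytes (see
 * __sgiseeq_set_mac_address in sgiseeq.c) while SEEQ_TCMD_RB2 selects the
 * control and frame_gap write registers used on the EDLC parts.
 */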
/* Seeq8003 receive status register */
#define SEEQ_RSTAT_OVERF 0x001 /* Overflow */
#define SEEQ_RSTAT_CERROR 0x002 /* CRC error */
#define SEEQ_RSTAT_DERROR 0x004 /* Dribble error */
#define SEEQ_RSTAT_SFRAME 0x008 /* Short frame */
#define SEEQ_RSTAT_REOF 0x010 /* Received end of frame */
#define SEEQ_RSTAT_FIG 0x020 /* Frame is good */
#define SEEQ_RSTAT_TIMEO 0x040 /* Timeout, or late receive */
#define SEEQ_RSTAT_WHICH 0x080 /* Which status, 1=old 0=new */
#define SEEQ_RSTAT_LITTLE 0x100 /* DMA is done in little endian format */
#define SEEQ_RSTAT_SDMA 0x200 /* DMA has started */
#define SEEQ_RSTAT_ADMA 0x400 /* DMA is active */
#define SEEQ_RSTAT_ROVERF 0x800 /* Receive buffer overflow */
/* Seeq8003 receive command register */
#define SEEQ_RCMD_RDISAB 0x000 /* Disable receiver on the Seeq8003 */
#define SEEQ_RCMD_IOVERF 0x001 /* IRQ on buffer overflows */
#define SEEQ_RCMD_ICRC 0x002 /* IRQ on CRC errors */
#define SEEQ_RCMD_IDRIB 0x004 /* IRQ on dribble errors */
#define SEEQ_RCMD_ISHORT 0x008 /* IRQ on short frames */
#define SEEQ_RCMD_IEOF 0x010 /* IRQ on end of frame */
#define SEEQ_RCMD_IGOOD 0x020 /* IRQ on good frames */
#define SEEQ_RCMD_RANY 0x040 /* Receive any frame */
#define SEEQ_RCMD_RBCAST 0x080 /* Receive broadcasts */
#define SEEQ_RCMD_RBMCAST 0x0c0 /* Receive broadcasts/multicasts */
/* Seeq8003 transmit status register */
#define SEEQ_TSTAT_UFLOW 0x001 /* Transmit buffer underflow */
#define SEEQ_TSTAT_CLS 0x002 /* Collision detected */
#define SEEQ_TSTAT_R16 0x004 /* Did 16 retries to tx a frame */
#define SEEQ_TSTAT_PTRANS 0x008 /* Packet was transmitted ok */
#define SEEQ_TSTAT_LCLS 0x010 /* Late collision occurred */
#define SEEQ_TSTAT_WHICH 0x080 /* Which status, 1=old 0=new */
#define SEEQ_TSTAT_TLE 0x100 /* DMA is done in little endian format */
#define SEEQ_TSTAT_SDMA 0x200 /* DMA has started */
#define SEEQ_TSTAT_ADMA 0x400 /* DMA is active */
/* Seeq8003 transmit command register */
#define SEEQ_TCMD_RB0 0x00 /* Register bank zero w/station addr */
#define SEEQ_TCMD_IUF 0x01 /* IRQ on tx underflow */
#define SEEQ_TCMD_IC 0x02 /* IRQ on collisions */
#define SEEQ_TCMD_I16 0x04 /* IRQ after 16 failed attempts to tx frame */
#define SEEQ_TCMD_IPT 0x08 /* IRQ when packet successfully transmitted */
#define SEEQ_TCMD_RB1 0x20 /* Register bank one w/multi-cast low byte */
#define SEEQ_TCMD_RB2 0x40 /* Register bank two w/multi-cast high byte */
/* Seeq8003 control register */
#define SEEQ_CTRL_XCNT 0x01
#define SEEQ_CTRL_ACCNT 0x02
#define SEEQ_CTRL_SFLAG 0x04
#define SEEQ_CTRL_EMULTI 0x08
#define SEEQ_CTRL_ESHORT 0x10
#define SEEQ_CTRL_ENCARR 0x20
/* Seeq8003 control registers on the SGI Hollywood HPC. */
#define SEEQ_HPIO_P1BITS 0x00000001 /* cycles to stay in P1 phase for PIO */
#define SEEQ_HPIO_P2BITS 0x00000060 /* cycles to stay in P2 phase for PIO */
#define SEEQ_HPIO_P3BITS 0x00000100 /* cycles to stay in P3 phase for PIO */
#define SEEQ_HDMA_D1BITS 0x00000006 /* cycles to stay in D1 phase for DMA */
#define SEEQ_HDMA_D2BITS 0x00000020 /* cycles to stay in D2 phase for DMA */
#define SEEQ_HDMA_D3BITS 0x00000000 /* cycles to stay in D3 phase for DMA */
#define SEEQ_HDMA_TIMEO 0x00030000 /* cycles for DMA timeout */
#define SEEQ_HCTL_NORM 0x00000000 /* Normal operation mode */
#define SEEQ_HCTL_RESET 0x00000001 /* Reset Seeq8003 and HPC interface */
#define SEEQ_HCTL_IPEND 0x00000002 /* IRQ is pending for the chip */
#define SEEQ_HCTL_IPG 0x00001000 /* Inter-packet gap */
#define SEEQ_HCTL_RFIX 0x00002000 /* At rxdc, clear end-of-packet */
#define SEEQ_HCTL_EFIX 0x00004000 /* fixes intr status bit settings */
#define SEEQ_HCTL_IFIX 0x00008000 /* enable startup timeouts */
#endif /* !(_SGISEEQ_H) */