Fixed MTP to work with TWRP

awab228 2018-06-19 23:16:04 +02:00
commit f6dfaef42e
50820 changed files with 20846062 additions and 0 deletions

drivers/rapidio/Kconfig (new file, 75 lines)

@ -0,0 +1,75 @@
#
# RapidIO configuration
#
source "drivers/rapidio/devices/Kconfig"
config RAPIDIO_DISC_TIMEOUT
int "Discovery timeout duration (seconds)"
depends on RAPIDIO
default "30"
---help---
Amount of time a discovery node waits for a host to complete
enumeration before giving up.
config RAPIDIO_ENABLE_RX_TX_PORTS
bool "Enable RapidIO Input/Output Ports"
depends on RAPIDIO
---help---
The RapidIO specification describes an Output port transmit
enable and an Input port receive enable. The recommended state
for Input ports and Output ports is disabled. When
this switch is set, the RapidIO subsystem will enable all
ports for Input/Output direction to allow traffic other
than Maintenance transfers.
config RAPIDIO_DMA_ENGINE
bool "DMA Engine support for RapidIO"
depends on RAPIDIO
select DMADEVICES
select DMA_ENGINE
help
Say Y here if you want to use the DMA Engine framework for RapidIO data
transfers to/from target RIO devices. RapidIO uses NREAD and
NWRITE (NWRITE_R, SWRITE) requests to transfer data between local
memory and memory on a remote target device. You need a DMA controller
capable of performing data transfers to/from RapidIO.
If you are unsure about this, say Y here.
config RAPIDIO_DEBUG
bool "RapidIO subsystem debug messages"
depends on RAPIDIO
help
Say Y here if you want the RapidIO subsystem to produce a bunch of
debug messages to the system log. Select this if you are having a
problem with the RapidIO subsystem and want to see more of what is
going on.
If you are unsure about this, say N here.
choice
prompt "Enumeration method"
depends on RAPIDIO
default RAPIDIO_ENUM_BASIC
help
There are different enumeration and discovery mechanisms offered
for the RapidIO subsystem. You may select a single built-in method
or any number of methods to be built as modules.
Selecting a built-in method disables the use of loadable methods.
If unsure, select the Basic built-in method.
config RAPIDIO_ENUM_BASIC
tristate "Basic"
help
This option includes a basic RapidIO fabric enumeration and discovery
mechanism similar to the one described in RapidIO specification Annex 1.
endchoice
menu "RapidIO Switch drivers"
depends on RAPIDIO
source "drivers/rapidio/switches/Kconfig"
endmenu

drivers/rapidio/Makefile (new file, 12 lines)

@ -0,0 +1,12 @@
#
# Makefile for RapidIO interconnect services
#
obj-$(CONFIG_RAPIDIO) += rapidio.o
rapidio-y := rio.o rio-access.o rio-driver.o rio-sysfs.o
obj-$(CONFIG_RAPIDIO_ENUM_BASIC) += rio-scan.o
obj-$(CONFIG_RAPIDIO) += switches/
obj-$(CONFIG_RAPIDIO) += devices/
subdir-ccflags-$(CONFIG_RAPIDIO_DEBUG) := -DDEBUG

drivers/rapidio/devices/Kconfig (new file)

@ -0,0 +1,10 @@
#
# RapidIO master port configuration
#
config RAPIDIO_TSI721
tristate "IDT Tsi721 PCI Express SRIO Controller support"
depends on RAPIDIO && PCIEPORTBUS
default "n"
---help---
Include support for IDT Tsi721 PCI Express Serial RapidIO controller.

drivers/rapidio/devices/Makefile (new file)

@ -0,0 +1,7 @@
#
# Makefile for RapidIO devices
#
obj-$(CONFIG_RAPIDIO_TSI721) += tsi721_mport.o
tsi721_mport-y := tsi721.o
tsi721_mport-$(CONFIG_RAPIDIO_DMA_ENGINE) += tsi721_dma.o

drivers/rapidio/devices/tsi721.c (file diff suppressed because it is too large)

drivers/rapidio/devices/tsi721.h (new file)

@ -0,0 +1,855 @@
/*
* Tsi721 PCIExpress-to-SRIO bridge definitions
*
* Copyright 2011, Integrated Device Technology, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59
* Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#ifndef __TSI721_H
#define __TSI721_H
#define DRV_NAME "tsi721"
#define DEFAULT_HOPCOUNT 0xff
#define DEFAULT_DESTID 0xff
/* PCI device ID */
#define PCI_DEVICE_ID_TSI721 0x80ab
#define BAR_0 0
#define BAR_1 1
#define BAR_2 2
#define BAR_4 4
#define TSI721_PC2SR_BARS 2
#define TSI721_PC2SR_WINS 8
#define TSI721_PC2SR_ZONES 8
#define TSI721_MAINT_WIN 0 /* Window for outbound maintenance requests */
#define IDB_QUEUE 0 /* Inbound Doorbell Queue to use */
#define IDB_QSIZE 512 /* Inbound Doorbell Queue size */
/* Memory space sizes */
#define TSI721_REG_SPACE_SIZE (512 * 1024) /* 512K */
#define TSI721_DB_WIN_SIZE (16 * 1024 * 1024) /* 16MB */
#define RIO_TT_CODE_8 0x00000000
#define RIO_TT_CODE_16 0x00000001
#define TSI721_DMA_MAXCH 8
#define TSI721_DMA_MINSTSSZ 32
#define TSI721_DMA_STSBLKSZ 8
#define TSI721_SRIO_MAXCH 8
#define DBELL_SID(buf) (((u8)buf[2] << 8) | (u8)buf[3])
#define DBELL_TID(buf) (((u8)buf[4] << 8) | (u8)buf[5])
#define DBELL_INF(buf) (((u8)buf[0] << 8) | (u8)buf[1])
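/*
 * Illustrative sketch (not part of the original header): a received inbound
 * doorbell sits in the inbound doorbell queue as a raw byte entry, and the
 * DBELL_* accessors above extract its 16-bit fields. A hypothetical dispatch
 * helper could use them like this:
 *
 *	static void handle_idb_entry(u8 *entry)
 *	{
 *		u16 src  = DBELL_SID(entry);	// sending device ID
 *		u16 dst  = DBELL_TID(entry);	// target device ID
 *		u16 info = DBELL_INF(entry);	// 16-bit doorbell payload
 *
 *		pr_debug("doorbell 0x%04x from 0x%04x to 0x%04x\n", info, src, dst);
 *	}
 */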
#define TSI721_RIO_PW_MSG_SIZE 16 /* Tsi721 saves only 16 bytes of PW msg */
/* Register definitions */
/*
* Registers in PCIe configuration space
*/
#define TSI721_PCIECFG_MSIXTBL 0x0a4
#define TSI721_MSIXTBL_OFFSET 0x2c000
#define TSI721_PCIECFG_MSIXPBA 0x0a8
#define TSI721_MSIXPBA_OFFSET 0x2a000
#define TSI721_PCIECFG_EPCTL 0x400
#define MAX_READ_REQUEST_SZ_SHIFT 12
/*
* Event Management Registers
*/
#define TSI721_RIO_EM_INT_STAT 0x10910
#define TSI721_RIO_EM_INT_STAT_PW_RX 0x00010000
#define TSI721_RIO_EM_INT_ENABLE 0x10914
#define TSI721_RIO_EM_INT_ENABLE_PW_RX 0x00010000
#define TSI721_RIO_EM_DEV_INT_EN 0x10930
#define TSI721_RIO_EM_DEV_INT_EN_INT 0x00000001
/*
* Port-Write Block Registers
*/
#define TSI721_RIO_PW_CTL 0x10a04
#define TSI721_RIO_PW_CTL_PW_TIMER 0xf0000000
#define TSI721_RIO_PW_CTL_PWT_DIS (0 << 28)
#define TSI721_RIO_PW_CTL_PWT_103 (1 << 28)
#define TSI721_RIO_PW_CTL_PWT_205 (1 << 29)
#define TSI721_RIO_PW_CTL_PWT_410 (1 << 30)
#define TSI721_RIO_PW_CTL_PWT_820 (1 << 31)
#define TSI721_RIO_PW_CTL_PWC_MODE 0x01000000
#define TSI721_RIO_PW_CTL_PWC_CONT 0x00000000
#define TSI721_RIO_PW_CTL_PWC_REL 0x01000000
#define TSI721_RIO_PW_RX_STAT 0x10a10
#define TSI721_RIO_PW_RX_STAT_WR_SIZE 0x0000f000
#define TSI_RIO_PW_RX_STAT_WDPTR 0x00000100
#define TSI721_RIO_PW_RX_STAT_PW_SHORT 0x00000008
#define TSI721_RIO_PW_RX_STAT_PW_TRUNC 0x00000004
#define TSI721_RIO_PW_RX_STAT_PW_DISC 0x00000002
#define TSI721_RIO_PW_RX_STAT_PW_VAL 0x00000001
#define TSI721_RIO_PW_RX_CAPT(x) (0x10a20 + (x)*4)
/*
* Inbound Doorbells
*/
#define TSI721_IDB_ENTRY_SIZE 64
#define TSI721_IDQ_CTL(x) (0x20000 + (x) * 0x1000)
#define TSI721_IDQ_SUSPEND 0x00000002
#define TSI721_IDQ_INIT 0x00000001
#define TSI721_IDQ_STS(x) (0x20004 + (x) * 0x1000)
#define TSI721_IDQ_RUN 0x00200000
#define TSI721_IDQ_MASK(x) (0x20008 + (x) * 0x1000)
#define TSI721_IDQ_MASK_MASK 0xffff0000
#define TSI721_IDQ_MASK_PATT 0x0000ffff
#define TSI721_IDQ_RP(x) (0x2000c + (x) * 0x1000)
#define TSI721_IDQ_RP_PTR 0x0007ffff
#define TSI721_IDQ_WP(x) (0x20010 + (x) * 0x1000)
#define TSI721_IDQ_WP_PTR 0x0007ffff
#define TSI721_IDQ_BASEL(x) (0x20014 + (x) * 0x1000)
#define TSI721_IDQ_BASEL_ADDR 0xffffffc0
#define TSI721_IDQ_BASEU(x) (0x20018 + (x) * 0x1000)
#define TSI721_IDQ_SIZE(x) (0x2001c + (x) * 0x1000)
#define TSI721_IDQ_SIZE_VAL(size) (__fls(size) - 4)
#define TSI721_IDQ_SIZE_MIN 512
#define TSI721_IDQ_SIZE_MAX (512 * 1024)
#define TSI721_SR_CHINT(x) (0x20040 + (x) * 0x1000)
#define TSI721_SR_CHINTE(x) (0x20044 + (x) * 0x1000)
#define TSI721_SR_CHINTSET(x) (0x20048 + (x) * 0x1000)
#define TSI721_SR_CHINT_ODBOK 0x00000020
#define TSI721_SR_CHINT_IDBQRCV 0x00000010
#define TSI721_SR_CHINT_SUSP 0x00000008
#define TSI721_SR_CHINT_ODBTO 0x00000004
#define TSI721_SR_CHINT_ODBRTRY 0x00000002
#define TSI721_SR_CHINT_ODBERR 0x00000001
#define TSI721_SR_CHINT_ALL 0x0000003f
#define TSI721_IBWIN_NUM 8
#define TSI721_IBWIN_LB(x) (0x29000 + (x) * 0x20)
#define TSI721_IBWIN_LB_BA 0xfffff000
#define TSI721_IBWIN_LB_WEN 0x00000001
#define TSI721_IBWIN_UB(x) (0x29004 + (x) * 0x20)
#define TSI721_IBWIN_SZ(x) (0x29008 + (x) * 0x20)
#define TSI721_IBWIN_SZ_SIZE 0x00001f00
#define TSI721_IBWIN_SIZE(size) (__fls(size) - 12)
#define TSI721_IBWIN_TLA(x) (0x2900c + (x) * 0x20)
#define TSI721_IBWIN_TLA_ADD 0xfffff000
#define TSI721_IBWIN_TUA(x) (0x29010 + (x) * 0x20)
#define TSI721_SR2PC_GEN_INTE 0x29800
#define TSI721_SR2PC_PWE 0x29804
#define TSI721_SR2PC_GEN_INT 0x29808
#define TSI721_DEV_INTE 0x29840
#define TSI721_DEV_INT 0x29844
#define TSI721_DEV_INTSET 0x29848
#define TSI721_DEV_INT_BDMA_CH 0x00002000
#define TSI721_DEV_INT_BDMA_NCH 0x00001000
#define TSI721_DEV_INT_SMSG_CH 0x00000800
#define TSI721_DEV_INT_SMSG_NCH 0x00000400
#define TSI721_DEV_INT_SR2PC_CH 0x00000200
#define TSI721_DEV_INT_SRIO 0x00000020
#define TSI721_DEV_CHAN_INTE 0x2984c
#define TSI721_DEV_CHAN_INT 0x29850
#define TSI721_INT_SR2PC_CHAN_M 0xff000000
#define TSI721_INT_SR2PC_CHAN(x) (1 << (24 + (x)))
#define TSI721_INT_IMSG_CHAN_M 0x00ff0000
#define TSI721_INT_IMSG_CHAN(x) (1 << (16 + (x)))
#define TSI721_INT_OMSG_CHAN_M 0x0000ff00
#define TSI721_INT_OMSG_CHAN(x) (1 << (8 + (x)))
#define TSI721_INT_BDMA_CHAN_M 0x000000ff
#define TSI721_INT_BDMA_CHAN(x) (1 << (x))
/*
* PC2SR block registers
*/
#define TSI721_OBWIN_NUM TSI721_PC2SR_WINS
#define TSI721_OBWINLB(x) (0x40000 + (x) * 0x20)
#define TSI721_OBWINLB_BA 0xffff8000
#define TSI721_OBWINLB_WEN 0x00000001
#define TSI721_OBWINUB(x) (0x40004 + (x) * 0x20)
#define TSI721_OBWINSZ(x) (0x40008 + (x) * 0x20)
#define TSI721_OBWINSZ_SIZE 0x00001f00
#define TSI721_OBWIN_SIZE(size) (__fls(size) - 15)
#define TSI721_ZONE_SEL 0x41300
#define TSI721_ZONE_SEL_RD_WRB 0x00020000
#define TSI721_ZONE_SEL_GO 0x00010000
#define TSI721_ZONE_SEL_WIN 0x00000038
#define TSI721_ZONE_SEL_ZONE 0x00000007
#define TSI721_LUT_DATA0 0x41304
#define TSI721_LUT_DATA0_ADD 0xfffff000
#define TSI721_LUT_DATA0_RDTYPE 0x00000f00
#define TSI721_LUT_DATA0_NREAD 0x00000100
#define TSI721_LUT_DATA0_MNTRD 0x00000200
#define TSI721_LUT_DATA0_RDCRF 0x00000020
#define TSI721_LUT_DATA0_WRCRF 0x00000010
#define TSI721_LUT_DATA0_WRTYPE 0x0000000f
#define TSI721_LUT_DATA0_NWR 0x00000001
#define TSI721_LUT_DATA0_MNTWR 0x00000002
#define TSI721_LUT_DATA0_NWR_R 0x00000004
#define TSI721_LUT_DATA1 0x41308
#define TSI721_LUT_DATA2 0x4130c
#define TSI721_LUT_DATA2_HC 0xff000000
#define TSI721_LUT_DATA2_ADD65 0x000c0000
#define TSI721_LUT_DATA2_TT 0x00030000
#define TSI721_LUT_DATA2_DSTID 0x0000ffff
#define TSI721_PC2SR_INTE 0x41310
#define TSI721_DEVCTL 0x48004
#define TSI721_DEVCTL_SRBOOT_CMPL 0x00000004
#define TSI721_I2C_INT_ENABLE 0x49120
/*
* Block DMA Engine Registers
* x = 0..7
*/
#define TSI721_DMAC_BASE(x) (0x51000 + (x) * 0x1000)
#define TSI721_DMAC_DWRCNT 0x000
#define TSI721_DMAC_DRDCNT 0x004
#define TSI721_DMAC_CTL 0x008
#define TSI721_DMAC_CTL_SUSP 0x00000002
#define TSI721_DMAC_CTL_INIT 0x00000001
#define TSI721_DMAC_INT 0x00c
#define TSI721_DMAC_INT_STFULL 0x00000010
#define TSI721_DMAC_INT_DONE 0x00000008
#define TSI721_DMAC_INT_SUSP 0x00000004
#define TSI721_DMAC_INT_ERR 0x00000002
#define TSI721_DMAC_INT_IOFDONE 0x00000001
#define TSI721_DMAC_INT_ALL 0x0000001f
#define TSI721_DMAC_INTSET 0x010
#define TSI721_DMAC_STS 0x014
#define TSI721_DMAC_STS_ABORT 0x00400000
#define TSI721_DMAC_STS_RUN 0x00200000
#define TSI721_DMAC_STS_CS 0x001f0000
#define TSI721_DMAC_INTE 0x018
#define TSI721_DMAC_DPTRL 0x024
#define TSI721_DMAC_DPTRL_MASK 0xffffffe0
#define TSI721_DMAC_DPTRH 0x028
#define TSI721_DMAC_DSBL 0x02c
#define TSI721_DMAC_DSBL_MASK 0xffffffc0
#define TSI721_DMAC_DSBH 0x030
#define TSI721_DMAC_DSSZ 0x034
#define TSI721_DMAC_DSSZ_SIZE_M 0x0000000f
#define TSI721_DMAC_DSSZ_SIZE(size) (__fls(size) - 4)
#define TSI721_DMAC_DSRP 0x038
#define TSI721_DMAC_DSRP_MASK 0x0007ffff
#define TSI721_DMAC_DSWP 0x03c
#define TSI721_DMAC_DSWP_MASK 0x0007ffff
#define TSI721_BDMA_INTE 0x5f000
/*
* Messaging definitions
*/
#define TSI721_MSG_BUFFER_SIZE RIO_MAX_MSG_SIZE
#define TSI721_MSG_MAX_SIZE RIO_MAX_MSG_SIZE
#define TSI721_IMSG_MAXCH 8
#define TSI721_IMSG_CHNUM TSI721_IMSG_MAXCH
#define TSI721_IMSGD_MIN_RING_SIZE 32
#define TSI721_IMSGD_RING_SIZE 512
#define TSI721_OMSG_CHNUM 4 /* One channel per MBOX */
#define TSI721_OMSGD_MIN_RING_SIZE 32
#define TSI721_OMSGD_RING_SIZE 512
/*
* Outbound Messaging Engine Registers
* x = 0..7
*/
#define TSI721_OBDMAC_DWRCNT(x) (0x61000 + (x) * 0x1000)
#define TSI721_OBDMAC_DRDCNT(x) (0x61004 + (x) * 0x1000)
#define TSI721_OBDMAC_CTL(x) (0x61008 + (x) * 0x1000)
#define TSI721_OBDMAC_CTL_MASK 0x00000007
#define TSI721_OBDMAC_CTL_RETRY_THR 0x00000004
#define TSI721_OBDMAC_CTL_SUSPEND 0x00000002
#define TSI721_OBDMAC_CTL_INIT 0x00000001
#define TSI721_OBDMAC_INT(x) (0x6100c + (x) * 0x1000)
#define TSI721_OBDMAC_INTSET(x) (0x61010 + (x) * 0x1000)
#define TSI721_OBDMAC_INTE(x) (0x61018 + (x) * 0x1000)
#define TSI721_OBDMAC_INT_MASK 0x0000001F
#define TSI721_OBDMAC_INT_ST_FULL 0x00000010
#define TSI721_OBDMAC_INT_DONE 0x00000008
#define TSI721_OBDMAC_INT_SUSPENDED 0x00000004
#define TSI721_OBDMAC_INT_ERROR 0x00000002
#define TSI721_OBDMAC_INT_IOF_DONE 0x00000001
#define TSI721_OBDMAC_INT_ALL TSI721_OBDMAC_INT_MASK
#define TSI721_OBDMAC_STS(x) (0x61014 + (x) * 0x1000)
#define TSI721_OBDMAC_STS_MASK 0x007f0000
#define TSI721_OBDMAC_STS_ABORT 0x00400000
#define TSI721_OBDMAC_STS_RUN 0x00200000
#define TSI721_OBDMAC_STS_CS 0x001f0000
#define TSI721_OBDMAC_PWE(x) (0x6101c + (x) * 0x1000)
#define TSI721_OBDMAC_PWE_MASK 0x00000002
#define TSI721_OBDMAC_PWE_ERROR_EN 0x00000002
#define TSI721_OBDMAC_DPTRL(x) (0x61020 + (x) * 0x1000)
#define TSI721_OBDMAC_DPTRL_MASK 0xfffffff0
#define TSI721_OBDMAC_DPTRH(x) (0x61024 + (x) * 0x1000)
#define TSI721_OBDMAC_DPTRH_MASK 0xffffffff
#define TSI721_OBDMAC_DSBL(x) (0x61040 + (x) * 0x1000)
#define TSI721_OBDMAC_DSBL_MASK 0xffffffc0
#define TSI721_OBDMAC_DSBH(x) (0x61044 + (x) * 0x1000)
#define TSI721_OBDMAC_DSBH_MASK 0xffffffff
#define TSI721_OBDMAC_DSSZ(x) (0x61048 + (x) * 0x1000)
#define TSI721_OBDMAC_DSSZ_MASK 0x0000000f
#define TSI721_OBDMAC_DSRP(x) (0x6104c + (x) * 0x1000)
#define TSI721_OBDMAC_DSRP_MASK 0x0007ffff
#define TSI721_OBDMAC_DSWP(x) (0x61050 + (x) * 0x1000)
#define TSI721_OBDMAC_DSWP_MASK 0x0007ffff
#define TSI721_RQRPTO 0x60010
#define TSI721_RQRPTO_MASK 0x00ffffff
#define TSI721_RQRPTO_VAL 400 /* Response TO value */
/*
* Inbound Messaging Engine Registers
* x = 0..7
*/
#define TSI721_IB_DEVID_GLOBAL 0xffff
#define TSI721_IBDMAC_FQBL(x) (0x61200 + (x) * 0x1000)
#define TSI721_IBDMAC_FQBL_MASK 0xffffffc0
#define TSI721_IBDMAC_FQBH(x) (0x61204 + (x) * 0x1000)
#define TSI721_IBDMAC_FQBH_MASK 0xffffffff
#define TSI721_IBDMAC_FQSZ_ENTRY_INX TSI721_IMSGD_RING_SIZE
#define TSI721_IBDMAC_FQSZ(x) (0x61208 + (x) * 0x1000)
#define TSI721_IBDMAC_FQSZ_MASK 0x0000000f
#define TSI721_IBDMAC_FQRP(x) (0x6120c + (x) * 0x1000)
#define TSI721_IBDMAC_FQRP_MASK 0x0007ffff
#define TSI721_IBDMAC_FQWP(x) (0x61210 + (x) * 0x1000)
#define TSI721_IBDMAC_FQWP_MASK 0x0007ffff
#define TSI721_IBDMAC_FQTH(x) (0x61214 + (x) * 0x1000)
#define TSI721_IBDMAC_FQTH_MASK 0x0007ffff
#define TSI721_IB_DEVID 0x60020
#define TSI721_IB_DEVID_MASK 0x0000ffff
#define TSI721_IBDMAC_CTL(x) (0x61240 + (x) * 0x1000)
#define TSI721_IBDMAC_CTL_MASK 0x00000003
#define TSI721_IBDMAC_CTL_SUSPEND 0x00000002
#define TSI721_IBDMAC_CTL_INIT 0x00000001
#define TSI721_IBDMAC_STS(x) (0x61244 + (x) * 0x1000)
#define TSI721_IBDMAC_STS_MASK 0x007f0000
#define TSI721_IBSMAC_STS_ABORT 0x00400000
#define TSI721_IBSMAC_STS_RUN 0x00200000
#define TSI721_IBSMAC_STS_CS 0x001f0000
#define TSI721_IBDMAC_INT(x) (0x61248 + (x) * 0x1000)
#define TSI721_IBDMAC_INTSET(x) (0x6124c + (x) * 0x1000)
#define TSI721_IBDMAC_INTE(x) (0x61250 + (x) * 0x1000)
#define TSI721_IBDMAC_INT_MASK 0x0000100f
#define TSI721_IBDMAC_INT_SRTO 0x00001000
#define TSI721_IBDMAC_INT_SUSPENDED 0x00000008
#define TSI721_IBDMAC_INT_PC_ERROR 0x00000004
#define TSI721_IBDMAC_INT_FQ_LOW 0x00000002
#define TSI721_IBDMAC_INT_DQ_RCV 0x00000001
#define TSI721_IBDMAC_INT_ALL TSI721_IBDMAC_INT_MASK
#define TSI721_IBDMAC_PWE(x) (0x61254 + (x) * 0x1000)
#define TSI721_IBDMAC_PWE_MASK 0x00001700
#define TSI721_IBDMAC_PWE_SRTO 0x00001000
#define TSI721_IBDMAC_PWE_ILL_FMT 0x00000400
#define TSI721_IBDMAC_PWE_ILL_DEC 0x00000200
#define TSI721_IBDMAC_PWE_IMP_SP 0x00000100
#define TSI721_IBDMAC_DQBL(x) (0x61300 + (x) * 0x1000)
#define TSI721_IBDMAC_DQBL_MASK 0xffffffc0
#define TSI721_IBDMAC_DQBL_ADDR 0xffffffc0
#define TSI721_IBDMAC_DQBH(x) (0x61304 + (x) * 0x1000)
#define TSI721_IBDMAC_DQBH_MASK 0xffffffff
#define TSI721_IBDMAC_DQRP(x) (0x61308 + (x) * 0x1000)
#define TSI721_IBDMAC_DQRP_MASK 0x0007ffff
#define TSI721_IBDMAC_DQWR(x) (0x6130c + (x) * 0x1000)
#define TSI721_IBDMAC_DQWR_MASK 0x0007ffff
#define TSI721_IBDMAC_DQSZ(x) (0x61314 + (x) * 0x1000)
#define TSI721_IBDMAC_DQSZ_MASK 0x0000000f
/*
* Messaging Engine Interrupts
*/
#define TSI721_SMSG_PWE 0x6a004
#define TSI721_SMSG_INTE 0x6a000
#define TSI721_SMSG_INT 0x6a008
#define TSI721_SMSG_INTSET 0x6a010
#define TSI721_SMSG_INT_MASK 0x0086ffff
#define TSI721_SMSG_INT_UNS_RSP 0x00800000
#define TSI721_SMSG_INT_ECC_NCOR 0x00040000
#define TSI721_SMSG_INT_ECC_COR 0x00020000
#define TSI721_SMSG_INT_ECC_NCOR_CH 0x0000ff00
#define TSI721_SMSG_INT_ECC_COR_CH 0x000000ff
#define TSI721_SMSG_ECC_LOG 0x6a014
#define TSI721_SMSG_ECC_LOG_MASK 0x00070007
#define TSI721_SMSG_ECC_LOG_ECC_NCOR_M 0x00070000
#define TSI721_SMSG_ECC_LOG_ECC_COR_M 0x00000007
#define TSI721_RETRY_GEN_CNT 0x6a100
#define TSI721_RETRY_GEN_CNT_MASK 0xffffffff
#define TSI721_RETRY_RX_CNT 0x6a104
#define TSI721_RETRY_RX_CNT_MASK 0xffffffff
#define TSI721_SMSG_ECC_COR_LOG(x) (0x6a300 + (x) * 4)
#define TSI721_SMSG_ECC_COR_LOG_MASK 0x000000ff
#define TSI721_SMSG_ECC_NCOR(x) (0x6a340 + (x) * 4)
#define TSI721_SMSG_ECC_NCOR_MASK 0x000000ff
/*
* Block DMA Descriptors
*/
struct tsi721_dma_desc {
__le32 type_id;
#define TSI721_DMAD_DEVID 0x0000ffff
#define TSI721_DMAD_CRF 0x00010000
#define TSI721_DMAD_PRIO 0x00060000
#define TSI721_DMAD_RTYPE 0x00780000
#define TSI721_DMAD_IOF 0x08000000
#define TSI721_DMAD_DTYPE 0xe0000000
__le32 bcount;
#define TSI721_DMAD_BCOUNT1 0x03ffffff /* if DTYPE == 1 */
#define TSI721_DMAD_BCOUNT2 0x0000000f /* if DTYPE == 2 */
#define TSI721_DMAD_TT 0x0c000000
#define TSI721_DMAD_RADDR0 0xc0000000
union {
__le32 raddr_lo; /* if DTYPE == (1 || 2) */
__le32 next_lo; /* if DTYPE == 3 */
};
#define TSI721_DMAD_CFGOFF 0x00ffffff
#define TSI721_DMAD_HOPCNT 0xff000000
union {
__le32 raddr_hi; /* if DTYPE == (1 || 2) */
__le32 next_hi; /* if DTYPE == 3 */
};
union {
struct { /* if DTYPE == 1 */
__le32 bufptr_lo;
__le32 bufptr_hi;
__le32 s_dist;
__le32 s_size;
} t1;
__le32 data[4]; /* if DTYPE == 2 */
u32 reserved[4]; /* if DTYPE == 3 */
};
} __aligned(32);
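/*
 * Illustrative note (not part of the original header): the last element of a
 * BDMA descriptor ring is a DTYPE3 "link" descriptor whose next_lo/next_hi
 * fields point back at the physical base of the ring; tsi721_bdma_ch_init()
 * in tsi721_dma.c closes the ring exactly this way:
 *
 *	bd[bd_num].type_id = cpu_to_le32(DTYPE3 << 29);
 *	bd[bd_num].next_lo = cpu_to_le32((u64)bd_phys & TSI721_DMAC_DPTRL_MASK);
 *	bd[bd_num].next_hi = cpu_to_le32((u64)bd_phys >> 32);
 */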
/*
* Inbound Messaging Descriptor
*/
struct tsi721_imsg_desc {
__le32 type_id;
#define TSI721_IMD_DEVID 0x0000ffff
#define TSI721_IMD_CRF 0x00010000
#define TSI721_IMD_PRIO 0x00060000
#define TSI721_IMD_TT 0x00180000
#define TSI721_IMD_DTYPE 0xe0000000
__le32 msg_info;
#define TSI721_IMD_BCOUNT 0x00000ff8
#define TSI721_IMD_SSIZE 0x0000f000
#define TSI721_IMD_LETER 0x00030000
#define TSI721_IMD_XMBOX 0x003c0000
#define TSI721_IMD_MBOX 0x00c00000
#define TSI721_IMD_CS 0x78000000
#define TSI721_IMD_HO 0x80000000
__le32 bufptr_lo;
__le32 bufptr_hi;
u32 reserved[12];
} __aligned(64);
/*
* Outbound Messaging Descriptor
*/
struct tsi721_omsg_desc {
__le32 type_id;
#define TSI721_OMD_DEVID 0x0000ffff
#define TSI721_OMD_CRF 0x00010000
#define TSI721_OMD_PRIO 0x00060000
#define TSI721_OMD_IOF 0x08000000
#define TSI721_OMD_DTYPE 0xe0000000
#define TSI721_OMD_RSRVD 0x17f80000
__le32 msg_info;
#define TSI721_OMD_BCOUNT 0x00000ff8
#define TSI721_OMD_SSIZE 0x0000f000
#define TSI721_OMD_LETER 0x00030000
#define TSI721_OMD_XMBOX 0x003c0000
#define TSI721_OMD_MBOX 0x00c00000
#define TSI721_OMD_TT 0x0c000000
union {
__le32 bufptr_lo; /* if DTYPE == 4 */
__le32 next_lo; /* if DTYPE == 5 */
};
union {
__le32 bufptr_hi; /* if DTYPE == 4 */
__le32 next_hi; /* if DTYPE == 5 */
};
} __aligned(16);
struct tsi721_dma_sts {
__le64 desc_sts[8];
} __aligned(64);
struct tsi721_desc_sts_fifo {
union {
__le64 da64;
struct {
__le32 lo;
__le32 hi;
} da32;
} stat[8];
} __aligned(64);
/* Descriptor types for BDMA and Messaging blocks */
enum dma_dtype {
DTYPE1 = 1, /* Data Transfer DMA Descriptor */
DTYPE2 = 2, /* Immediate Data Transfer DMA Descriptor */
DTYPE3 = 3, /* Block Pointer DMA Descriptor */
DTYPE4 = 4, /* Outbound Msg DMA Descriptor */
DTYPE5 = 5, /* OB Messaging Block Pointer Descriptor */
DTYPE6 = 6 /* Inbound Messaging Descriptor */
};
enum dma_rtype {
NREAD = 0,
LAST_NWRITE_R = 1,
ALL_NWRITE = 2,
ALL_NWRITE_R = 3,
MAINT_RD = 4,
MAINT_WR = 5
};
/*
* mport Driver Definitions
*/
#define TSI721_DMA_CHNUM TSI721_DMA_MAXCH
#define TSI721_DMACH_MAINT 0 /* DMA channel for maint requests */
#define TSI721_DMACH_MAINT_NBD 32 /* Number of BDs for maint requests */
#define TSI721_DMACH_DMA 1 /* DMA channel for data transfers */
#define MSG_DMA_ENTRY_INX_TO_SIZE(x) ((0x10 << (x)) & 0xFFFF0)
enum tsi721_smsg_int_flag {
SMSG_INT_NONE = 0x00000000,
SMSG_INT_ECC_COR_CH = 0x000000ff,
SMSG_INT_ECC_NCOR_CH = 0x0000ff00,
SMSG_INT_ECC_COR = 0x00020000,
SMSG_INT_ECC_NCOR = 0x00040000,
SMSG_INT_UNS_RSP = 0x00800000,
SMSG_INT_ALL = 0x0006ffff
};
/* Structures */
#ifdef CONFIG_RAPIDIO_DMA_ENGINE
#define TSI721_BDMA_MAX_BCOUNT (TSI721_DMAD_BCOUNT1 + 1)
struct tsi721_tx_desc {
struct dma_async_tx_descriptor txd;
u16 destid;
/* low 64-bits of 66-bit RIO address */
u64 rio_addr;
/* upper 2-bits of 66-bit RIO address */
u8 rio_addr_u;
enum dma_rtype rtype;
struct list_head desc_node;
struct scatterlist *sg;
unsigned int sg_len;
enum dma_status status;
};
struct tsi721_bdma_chan {
int id;
void __iomem *regs;
int bd_num; /* number of HW buffer descriptors */
void *bd_base; /* start of DMA descriptors */
dma_addr_t bd_phys;
void *sts_base; /* start of DMA BD status FIFO */
dma_addr_t sts_phys;
int sts_size;
u32 sts_rdptr;
u32 wr_count;
u32 wr_count_next;
struct dma_chan dchan;
struct tsi721_tx_desc *tx_desc;
spinlock_t lock;
struct list_head active_list;
struct list_head queue;
struct list_head free_list;
struct tasklet_struct tasklet;
bool active;
};
#endif /* CONFIG_RAPIDIO_DMA_ENGINE */
struct tsi721_bdma_maint {
int ch_id; /* BDMA channel number */
int bd_num; /* number of buffer descriptors */
void *bd_base; /* start of DMA descriptors */
dma_addr_t bd_phys;
void *sts_base; /* start of DMA BD status FIFO */
dma_addr_t sts_phys;
int sts_size;
};
struct tsi721_imsg_ring {
u32 size;
/* VA/PA of data buffers for incoming messages */
void *buf_base;
dma_addr_t buf_phys;
/* VA/PA of circular free buffer list */
void *imfq_base;
dma_addr_t imfq_phys;
/* VA/PA of Inbound message descriptors */
void *imd_base;
dma_addr_t imd_phys;
/* Inbound Queue buffer pointers */
void *imq_base[TSI721_IMSGD_RING_SIZE];
u32 rx_slot;
void *dev_id;
u32 fq_wrptr;
u32 desc_rdptr;
spinlock_t lock;
};
struct tsi721_omsg_ring {
u32 size;
/* VA/PA of OB Msg descriptors */
void *omd_base;
dma_addr_t omd_phys;
/* VA/PA of OB Msg data buffers */
void *omq_base[TSI721_OMSGD_RING_SIZE];
dma_addr_t omq_phys[TSI721_OMSGD_RING_SIZE];
/* VA/PA of OB Msg descriptor status FIFO */
void *sts_base;
dma_addr_t sts_phys;
u32 sts_size; /* # of allocated status entries */
u32 sts_rdptr;
u32 tx_slot;
void *dev_id;
u32 wr_count;
spinlock_t lock;
};
enum tsi721_flags {
TSI721_USING_MSI = (1 << 0),
TSI721_USING_MSIX = (1 << 1),
TSI721_IMSGID_SET = (1 << 2),
};
#ifdef CONFIG_PCI_MSI
/*
* MSI-X Table Entries (0 ... 69)
*/
#define TSI721_MSIX_DMACH_DONE(x) (0 + (x))
#define TSI721_MSIX_DMACH_INT(x) (8 + (x))
#define TSI721_MSIX_BDMA_INT 16
#define TSI721_MSIX_OMSG_DONE(x) (17 + (x))
#define TSI721_MSIX_OMSG_INT(x) (25 + (x))
#define TSI721_MSIX_IMSG_DQ_RCV(x) (33 + (x))
#define TSI721_MSIX_IMSG_INT(x) (41 + (x))
#define TSI721_MSIX_MSG_INT 49
#define TSI721_MSIX_SR2PC_IDBQ_RCV(x) (50 + (x))
#define TSI721_MSIX_SR2PC_CH_INT(x) (58 + (x))
#define TSI721_MSIX_SR2PC_INT 66
#define TSI721_MSIX_PC2SR_INT 67
#define TSI721_MSIX_SRIO_MAC_INT 68
#define TSI721_MSIX_I2C_INT 69
/* MSI-X vector and init table entry indexes */
enum tsi721_msix_vect {
TSI721_VECT_IDB,
TSI721_VECT_PWRX, /* PW_RX is part of SRIO MAC Interrupt reporting */
TSI721_VECT_OMB0_DONE,
TSI721_VECT_OMB1_DONE,
TSI721_VECT_OMB2_DONE,
TSI721_VECT_OMB3_DONE,
TSI721_VECT_OMB0_INT,
TSI721_VECT_OMB1_INT,
TSI721_VECT_OMB2_INT,
TSI721_VECT_OMB3_INT,
TSI721_VECT_IMB0_RCV,
TSI721_VECT_IMB1_RCV,
TSI721_VECT_IMB2_RCV,
TSI721_VECT_IMB3_RCV,
TSI721_VECT_IMB0_INT,
TSI721_VECT_IMB1_INT,
TSI721_VECT_IMB2_INT,
TSI721_VECT_IMB3_INT,
#ifdef CONFIG_RAPIDIO_DMA_ENGINE
TSI721_VECT_DMA0_DONE,
TSI721_VECT_DMA1_DONE,
TSI721_VECT_DMA2_DONE,
TSI721_VECT_DMA3_DONE,
TSI721_VECT_DMA4_DONE,
TSI721_VECT_DMA5_DONE,
TSI721_VECT_DMA6_DONE,
TSI721_VECT_DMA7_DONE,
TSI721_VECT_DMA0_INT,
TSI721_VECT_DMA1_INT,
TSI721_VECT_DMA2_INT,
TSI721_VECT_DMA3_INT,
TSI721_VECT_DMA4_INT,
TSI721_VECT_DMA5_INT,
TSI721_VECT_DMA6_INT,
TSI721_VECT_DMA7_INT,
#endif /* CONFIG_RAPIDIO_DMA_ENGINE */
TSI721_VECT_MAX
};
#define IRQ_DEVICE_NAME_MAX 64
struct msix_irq {
u16 vector;
char irq_name[IRQ_DEVICE_NAME_MAX];
};
#endif /* CONFIG_PCI_MSI */
struct tsi721_device {
struct pci_dev *pdev;
struct rio_mport *mport;
u32 flags;
void __iomem *regs;
#ifdef CONFIG_PCI_MSI
struct msix_irq msix[TSI721_VECT_MAX];
#endif
/* Doorbells */
void __iomem *odb_base;
void *idb_base;
dma_addr_t idb_dma;
struct work_struct idb_work;
u32 db_discard_count;
/* Inbound Port-Write */
struct work_struct pw_work;
struct kfifo pw_fifo;
spinlock_t pw_fifo_lock;
u32 pw_discard_count;
/* BDMA Engine */
struct tsi721_bdma_maint mdma; /* Maintenance rd/wr request channel */
#ifdef CONFIG_RAPIDIO_DMA_ENGINE
struct tsi721_bdma_chan bdma[TSI721_DMA_CHNUM];
#endif
/* Inbound Messaging */
int imsg_init[TSI721_IMSG_CHNUM];
struct tsi721_imsg_ring imsg_ring[TSI721_IMSG_CHNUM];
/* Outbound Messaging */
int omsg_init[TSI721_OMSG_CHNUM];
struct tsi721_omsg_ring omsg_ring[TSI721_OMSG_CHNUM];
};
#ifdef CONFIG_RAPIDIO_DMA_ENGINE
extern void tsi721_bdma_handler(struct tsi721_bdma_chan *bdma_chan);
extern int tsi721_register_dma(struct tsi721_device *priv);
#endif
#endif

drivers/rapidio/devices/tsi721_dma.c (new file)

@ -0,0 +1,911 @@
/*
* DMA Engine support for Tsi721 PCIExpress-to-SRIO bridge
*
* Copyright (c) 2011-2014 Integrated Device Technology, Inc.
* Alexandre Bounine <alexandre.bounine@idt.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* The full GNU General Public License is included in this distribution in the
* file called COPYING.
*/
#include <linux/io.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/rio.h>
#include <linux/rio_drv.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/kfifo.h>
#include <linux/delay.h>
#include "../../dma/dmaengine.h"
#include "tsi721.h"
#define TSI721_DMA_TX_QUEUE_SZ 16 /* number of transaction descriptors */
#ifdef CONFIG_PCI_MSI
static irqreturn_t tsi721_bdma_msix(int irq, void *ptr);
#endif
static int tsi721_submit_sg(struct tsi721_tx_desc *desc);
static unsigned int dma_desc_per_channel = 128;
module_param(dma_desc_per_channel, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(dma_desc_per_channel,
"Number of DMA descriptors per channel (default: 128)");
static inline struct tsi721_bdma_chan *to_tsi721_chan(struct dma_chan *chan)
{
return container_of(chan, struct tsi721_bdma_chan, dchan);
}
static inline struct tsi721_device *to_tsi721(struct dma_device *ddev)
{
return container_of(ddev, struct rio_mport, dma)->priv;
}
static inline
struct tsi721_tx_desc *to_tsi721_desc(struct dma_async_tx_descriptor *txd)
{
return container_of(txd, struct tsi721_tx_desc, txd);
}
static inline
struct tsi721_tx_desc *tsi721_dma_first_active(
struct tsi721_bdma_chan *bdma_chan)
{
return list_first_entry(&bdma_chan->active_list,
struct tsi721_tx_desc, desc_node);
}
static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan, int bd_num)
{
struct tsi721_dma_desc *bd_ptr;
struct device *dev = bdma_chan->dchan.device->dev;
u64 *sts_ptr;
dma_addr_t bd_phys;
dma_addr_t sts_phys;
int sts_size;
#ifdef CONFIG_PCI_MSI
struct tsi721_device *priv = to_tsi721(bdma_chan->dchan.device);
#endif
dev_dbg(dev, "Init Block DMA Engine, CH%d\n", bdma_chan->id);
/*
* Allocate space for DMA descriptors
* (add an extra element for link descriptor)
*/
bd_ptr = dma_zalloc_coherent(dev,
(bd_num + 1) * sizeof(struct tsi721_dma_desc),
&bd_phys, GFP_KERNEL);
if (!bd_ptr)
return -ENOMEM;
bdma_chan->bd_num = bd_num;
bdma_chan->bd_phys = bd_phys;
bdma_chan->bd_base = bd_ptr;
dev_dbg(dev, "DMA descriptors @ %p (phys = %llx)\n",
bd_ptr, (unsigned long long)bd_phys);
/* Allocate space for descriptor status FIFO */
sts_size = ((bd_num + 1) >= TSI721_DMA_MINSTSSZ) ?
(bd_num + 1) : TSI721_DMA_MINSTSSZ;
sts_size = roundup_pow_of_two(sts_size);
sts_ptr = dma_zalloc_coherent(dev,
sts_size * sizeof(struct tsi721_dma_sts),
&sts_phys, GFP_KERNEL);
if (!sts_ptr) {
/* Free space allocated for DMA descriptors */
dma_free_coherent(dev,
(bd_num + 1) * sizeof(struct tsi721_dma_desc),
bd_ptr, bd_phys);
bdma_chan->bd_base = NULL;
return -ENOMEM;
}
bdma_chan->sts_phys = sts_phys;
bdma_chan->sts_base = sts_ptr;
bdma_chan->sts_size = sts_size;
dev_dbg(dev,
"desc status FIFO @ %p (phys = %llx) size=0x%x\n",
sts_ptr, (unsigned long long)sts_phys, sts_size);
/* Initialize DMA descriptors ring using added link descriptor */
bd_ptr[bd_num].type_id = cpu_to_le32(DTYPE3 << 29);
bd_ptr[bd_num].next_lo = cpu_to_le32((u64)bd_phys &
TSI721_DMAC_DPTRL_MASK);
bd_ptr[bd_num].next_hi = cpu_to_le32((u64)bd_phys >> 32);
/* Setup DMA descriptor pointers */
iowrite32(((u64)bd_phys >> 32),
bdma_chan->regs + TSI721_DMAC_DPTRH);
iowrite32(((u64)bd_phys & TSI721_DMAC_DPTRL_MASK),
bdma_chan->regs + TSI721_DMAC_DPTRL);
/* Setup descriptor status FIFO */
iowrite32(((u64)sts_phys >> 32),
bdma_chan->regs + TSI721_DMAC_DSBH);
iowrite32(((u64)sts_phys & TSI721_DMAC_DSBL_MASK),
bdma_chan->regs + TSI721_DMAC_DSBL);
iowrite32(TSI721_DMAC_DSSZ_SIZE(sts_size),
bdma_chan->regs + TSI721_DMAC_DSSZ);
/* Clear interrupt bits */
iowrite32(TSI721_DMAC_INT_ALL,
bdma_chan->regs + TSI721_DMAC_INT);
ioread32(bdma_chan->regs + TSI721_DMAC_INT);
#ifdef CONFIG_PCI_MSI
/* Request interrupt service if we are in MSI-X mode */
if (priv->flags & TSI721_USING_MSIX) {
int rc, idx;
idx = TSI721_VECT_DMA0_DONE + bdma_chan->id;
rc = request_irq(priv->msix[idx].vector, tsi721_bdma_msix, 0,
priv->msix[idx].irq_name, (void *)bdma_chan);
if (rc) {
dev_dbg(dev, "Unable to get MSI-X for BDMA%d-DONE\n",
bdma_chan->id);
goto err_out;
}
idx = TSI721_VECT_DMA0_INT + bdma_chan->id;
rc = request_irq(priv->msix[idx].vector, tsi721_bdma_msix, 0,
priv->msix[idx].irq_name, (void *)bdma_chan);
if (rc) {
dev_dbg(dev, "Unable to get MSI-X for BDMA%d-INT\n",
bdma_chan->id);
free_irq(
priv->msix[TSI721_VECT_DMA0_DONE +
bdma_chan->id].vector,
(void *)bdma_chan);
}
err_out:
if (rc) {
/* Free space allocated for DMA descriptors */
dma_free_coherent(dev,
(bd_num + 1) * sizeof(struct tsi721_dma_desc),
bd_ptr, bd_phys);
bdma_chan->bd_base = NULL;
/* Free space allocated for status descriptors */
dma_free_coherent(dev,
sts_size * sizeof(struct tsi721_dma_sts),
sts_ptr, sts_phys);
bdma_chan->sts_base = NULL;
return -EIO;
}
}
#endif /* CONFIG_PCI_MSI */
/* Toggle DMA channel initialization */
iowrite32(TSI721_DMAC_CTL_INIT, bdma_chan->regs + TSI721_DMAC_CTL);
ioread32(bdma_chan->regs + TSI721_DMAC_CTL);
bdma_chan->wr_count = bdma_chan->wr_count_next = 0;
bdma_chan->sts_rdptr = 0;
udelay(10);
return 0;
}
static int tsi721_bdma_ch_free(struct tsi721_bdma_chan *bdma_chan)
{
u32 ch_stat;
#ifdef CONFIG_PCI_MSI
struct tsi721_device *priv = to_tsi721(bdma_chan->dchan.device);
#endif
if (bdma_chan->bd_base == NULL)
return 0;
/* Check if DMA channel still running */
ch_stat = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
if (ch_stat & TSI721_DMAC_STS_RUN)
return -EFAULT;
/* Put DMA channel into init state */
iowrite32(TSI721_DMAC_CTL_INIT, bdma_chan->regs + TSI721_DMAC_CTL);
#ifdef CONFIG_PCI_MSI
if (priv->flags & TSI721_USING_MSIX) {
free_irq(priv->msix[TSI721_VECT_DMA0_DONE +
bdma_chan->id].vector, (void *)bdma_chan);
free_irq(priv->msix[TSI721_VECT_DMA0_INT +
bdma_chan->id].vector, (void *)bdma_chan);
}
#endif /* CONFIG_PCI_MSI */
/* Free space allocated for DMA descriptors */
dma_free_coherent(bdma_chan->dchan.device->dev,
(bdma_chan->bd_num + 1) * sizeof(struct tsi721_dma_desc),
bdma_chan->bd_base, bdma_chan->bd_phys);
bdma_chan->bd_base = NULL;
/* Free space allocated for status FIFO */
dma_free_coherent(bdma_chan->dchan.device->dev,
bdma_chan->sts_size * sizeof(struct tsi721_dma_sts),
bdma_chan->sts_base, bdma_chan->sts_phys);
bdma_chan->sts_base = NULL;
return 0;
}
static void
tsi721_bdma_interrupt_enable(struct tsi721_bdma_chan *bdma_chan, int enable)
{
if (enable) {
/* Clear pending BDMA channel interrupts */
iowrite32(TSI721_DMAC_INT_ALL,
bdma_chan->regs + TSI721_DMAC_INT);
ioread32(bdma_chan->regs + TSI721_DMAC_INT);
/* Enable BDMA channel interrupts */
iowrite32(TSI721_DMAC_INT_ALL,
bdma_chan->regs + TSI721_DMAC_INTE);
} else {
/* Disable BDMA channel interrupts */
iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE);
/* Clear pending BDMA channel interrupts */
iowrite32(TSI721_DMAC_INT_ALL,
bdma_chan->regs + TSI721_DMAC_INT);
}
}
static bool tsi721_dma_is_idle(struct tsi721_bdma_chan *bdma_chan)
{
u32 sts;
sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
return ((sts & TSI721_DMAC_STS_RUN) == 0);
}
void tsi721_bdma_handler(struct tsi721_bdma_chan *bdma_chan)
{
/* Disable BDMA channel interrupts */
iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE);
if (bdma_chan->active)
tasklet_schedule(&bdma_chan->tasklet);
}
#ifdef CONFIG_PCI_MSI
/**
* tsi721_bdma_msix - MSI-X interrupt handler for BDMA channels
* @irq: Linux interrupt number
* @ptr: Pointer to interrupt-specific data (BDMA channel structure)
*
* Handles BDMA channel interrupts signaled using MSI-X.
*/
static irqreturn_t tsi721_bdma_msix(int irq, void *ptr)
{
struct tsi721_bdma_chan *bdma_chan = ptr;
tsi721_bdma_handler(bdma_chan);
return IRQ_HANDLED;
}
#endif /* CONFIG_PCI_MSI */
/* Must be called with the spinlock held */
static void tsi721_start_dma(struct tsi721_bdma_chan *bdma_chan)
{
if (!tsi721_dma_is_idle(bdma_chan)) {
dev_err(bdma_chan->dchan.device->dev,
"BUG: Attempt to start non-idle channel\n");
return;
}
if (bdma_chan->wr_count == bdma_chan->wr_count_next) {
dev_err(bdma_chan->dchan.device->dev,
"BUG: Attempt to start DMA with no BDs ready\n");
return;
}
dev_dbg(bdma_chan->dchan.device->dev,
"%s: chan_%d (wrc=%d)\n", __func__, bdma_chan->id,
bdma_chan->wr_count_next);
iowrite32(bdma_chan->wr_count_next,
bdma_chan->regs + TSI721_DMAC_DWRCNT);
ioread32(bdma_chan->regs + TSI721_DMAC_DWRCNT);
bdma_chan->wr_count = bdma_chan->wr_count_next;
}
static int
tsi721_desc_fill_init(struct tsi721_tx_desc *desc,
struct tsi721_dma_desc *bd_ptr,
struct scatterlist *sg, u32 sys_size)
{
u64 rio_addr;
if (bd_ptr == NULL)
return -EINVAL;
/* Initialize DMA descriptor */
bd_ptr->type_id = cpu_to_le32((DTYPE1 << 29) |
(desc->rtype << 19) | desc->destid);
bd_ptr->bcount = cpu_to_le32(((desc->rio_addr & 0x3) << 30) |
(sys_size << 26));
rio_addr = (desc->rio_addr >> 2) |
((u64)(desc->rio_addr_u & 0x3) << 62);
bd_ptr->raddr_lo = cpu_to_le32(rio_addr & 0xffffffff);
bd_ptr->raddr_hi = cpu_to_le32(rio_addr >> 32);
bd_ptr->t1.bufptr_lo = cpu_to_le32(
(u64)sg_dma_address(sg) & 0xffffffff);
bd_ptr->t1.bufptr_hi = cpu_to_le32((u64)sg_dma_address(sg) >> 32);
bd_ptr->t1.s_dist = 0;
bd_ptr->t1.s_size = 0;
return 0;
}
static int
tsi721_desc_fill_end(struct tsi721_dma_desc *bd_ptr, u32 bcount, bool interrupt)
{
if (bd_ptr == NULL)
return -EINVAL;
/* Update DMA descriptor */
if (interrupt)
bd_ptr->type_id |= cpu_to_le32(TSI721_DMAD_IOF);
bd_ptr->bcount |= cpu_to_le32(bcount & TSI721_DMAD_BCOUNT1);
return 0;
}
static void tsi721_dma_tx_err(struct tsi721_bdma_chan *bdma_chan,
struct tsi721_tx_desc *desc)
{
struct dma_async_tx_descriptor *txd = &desc->txd;
dma_async_tx_callback callback = txd->callback;
void *param = txd->callback_param;
list_move(&desc->desc_node, &bdma_chan->free_list);
if (callback)
callback(param);
}
static void tsi721_clr_stat(struct tsi721_bdma_chan *bdma_chan)
{
u32 srd_ptr;
u64 *sts_ptr;
int i, j;
/* Check and clear descriptor status FIFO entries */
srd_ptr = bdma_chan->sts_rdptr;
sts_ptr = bdma_chan->sts_base;
j = srd_ptr * 8;
while (sts_ptr[j]) {
for (i = 0; i < 8 && sts_ptr[j]; i++, j++)
sts_ptr[j] = 0;
++srd_ptr;
srd_ptr %= bdma_chan->sts_size;
j = srd_ptr * 8;
}
iowrite32(srd_ptr, bdma_chan->regs + TSI721_DMAC_DSRP);
bdma_chan->sts_rdptr = srd_ptr;
}
/* Must be called with the channel spinlock held */
static int tsi721_submit_sg(struct tsi721_tx_desc *desc)
{
struct dma_chan *dchan = desc->txd.chan;
struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
u32 sys_size;
u64 rio_addr;
dma_addr_t next_addr;
u32 bcount;
struct scatterlist *sg;
unsigned int i;
int err = 0;
struct tsi721_dma_desc *bd_ptr = NULL;
u32 idx, rd_idx;
u32 add_count = 0;
if (!tsi721_dma_is_idle(bdma_chan)) {
dev_err(bdma_chan->dchan.device->dev,
"BUG: Attempt to use non-idle channel\n");
return -EIO;
}
/*
* Fill DMA channel's hardware buffer descriptors.
* (NOTE: RapidIO destination address is limited to 64 bits for now)
*/
rio_addr = desc->rio_addr;
next_addr = -1;
bcount = 0;
sys_size = dma_to_mport(bdma_chan->dchan.device)->sys_size;
rd_idx = ioread32(bdma_chan->regs + TSI721_DMAC_DRDCNT);
rd_idx %= (bdma_chan->bd_num + 1);
idx = bdma_chan->wr_count_next % (bdma_chan->bd_num + 1);
if (idx == bdma_chan->bd_num) {
/* wrap around link descriptor */
idx = 0;
add_count++;
}
dev_dbg(dchan->device->dev, "%s: BD ring status: rdi=%d wri=%d\n",
__func__, rd_idx, idx);
for_each_sg(desc->sg, sg, desc->sg_len, i) {
dev_dbg(dchan->device->dev, "sg%d/%d addr: 0x%llx len: %d\n",
i, desc->sg_len,
(unsigned long long)sg_dma_address(sg), sg_dma_len(sg));
if (sg_dma_len(sg) > TSI721_BDMA_MAX_BCOUNT) {
dev_err(dchan->device->dev,
"%s: SG entry %d is too large\n", __func__, i);
err = -EINVAL;
break;
}
/*
* If this sg entry forms contiguous block with previous one,
* try to merge it into existing DMA descriptor
*/
if (next_addr == sg_dma_address(sg) &&
bcount + sg_dma_len(sg) <= TSI721_BDMA_MAX_BCOUNT) {
/* Adjust byte count of the descriptor */
bcount += sg_dma_len(sg);
goto entry_done;
} else if (next_addr != -1) {
/* Finalize descriptor using total byte count value */
tsi721_desc_fill_end(bd_ptr, bcount, 0);
dev_dbg(dchan->device->dev,
"%s: prev desc final len: %d\n",
__func__, bcount);
}
desc->rio_addr = rio_addr;
if (i && idx == rd_idx) {
dev_dbg(dchan->device->dev,
"%s: HW descriptor ring is full @ %d\n",
__func__, i);
desc->sg = sg;
desc->sg_len -= i;
break;
}
bd_ptr = &((struct tsi721_dma_desc *)bdma_chan->bd_base)[idx];
err = tsi721_desc_fill_init(desc, bd_ptr, sg, sys_size);
if (err) {
dev_err(dchan->device->dev,
"Failed to build desc: err=%d\n", err);
break;
}
dev_dbg(dchan->device->dev, "bd_ptr = %p did=%d raddr=0x%llx\n",
bd_ptr, desc->destid, desc->rio_addr);
next_addr = sg_dma_address(sg);
bcount = sg_dma_len(sg);
add_count++;
if (++idx == bdma_chan->bd_num) {
/* wrap around link descriptor */
idx = 0;
add_count++;
}
entry_done:
if (sg_is_last(sg)) {
tsi721_desc_fill_end(bd_ptr, bcount, 0);
dev_dbg(dchan->device->dev, "%s: last desc final len: %d\n",
__func__, bcount);
desc->sg_len = 0;
} else {
rio_addr += sg_dma_len(sg);
next_addr += sg_dma_len(sg);
}
}
if (!err)
bdma_chan->wr_count_next += add_count;
return err;
}
static void tsi721_advance_work(struct tsi721_bdma_chan *bdma_chan)
{
struct tsi721_tx_desc *desc;
int err;
dev_dbg(bdma_chan->dchan.device->dev, "%s: Enter\n", __func__);
/*
* If there are any new transactions in the queue add them
* into the processing list
*/
if (!list_empty(&bdma_chan->queue))
list_splice_init(&bdma_chan->queue, &bdma_chan->active_list);
/* Start new transaction (if available) */
if (!list_empty(&bdma_chan->active_list)) {
desc = tsi721_dma_first_active(bdma_chan);
err = tsi721_submit_sg(desc);
if (!err)
tsi721_start_dma(bdma_chan);
else {
tsi721_dma_tx_err(bdma_chan, desc);
dev_dbg(bdma_chan->dchan.device->dev,
"ERR: tsi721_submit_sg failed with err=%d\n",
err);
}
}
dev_dbg(bdma_chan->dchan.device->dev, "%s: Exit\n", __func__);
}
static void tsi721_dma_tasklet(unsigned long data)
{
struct tsi721_bdma_chan *bdma_chan = (struct tsi721_bdma_chan *)data;
u32 dmac_int, dmac_sts;
dmac_int = ioread32(bdma_chan->regs + TSI721_DMAC_INT);
dev_dbg(bdma_chan->dchan.device->dev, "%s: DMAC%d_INT = 0x%x\n",
__func__, bdma_chan->id, dmac_int);
/* Clear channel interrupts */
iowrite32(dmac_int, bdma_chan->regs + TSI721_DMAC_INT);
if (dmac_int & TSI721_DMAC_INT_ERR) {
dmac_sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
dev_err(bdma_chan->dchan.device->dev,
"%s: DMA ERROR - DMAC%d_STS = 0x%x\n",
__func__, bdma_chan->id, dmac_sts);
}
if (dmac_int & TSI721_DMAC_INT_STFULL) {
dev_err(bdma_chan->dchan.device->dev,
"%s: DMAC%d descriptor status FIFO is full\n",
__func__, bdma_chan->id);
}
if (dmac_int & (TSI721_DMAC_INT_DONE | TSI721_DMAC_INT_IOFDONE)) {
struct tsi721_tx_desc *desc;
tsi721_clr_stat(bdma_chan);
spin_lock(&bdma_chan->lock);
desc = tsi721_dma_first_active(bdma_chan);
if (desc->sg_len == 0) {
dma_async_tx_callback callback = NULL;
void *param = NULL;
desc->status = DMA_COMPLETE;
dma_cookie_complete(&desc->txd);
if (desc->txd.flags & DMA_PREP_INTERRUPT) {
callback = desc->txd.callback;
param = desc->txd.callback_param;
}
list_move(&desc->desc_node, &bdma_chan->free_list);
spin_unlock(&bdma_chan->lock);
if (callback)
callback(param);
spin_lock(&bdma_chan->lock);
}
tsi721_advance_work(bdma_chan);
spin_unlock(&bdma_chan->lock);
}
/* Re-Enable BDMA channel interrupts */
iowrite32(TSI721_DMAC_INT_ALL, bdma_chan->regs + TSI721_DMAC_INTE);
}
static dma_cookie_t tsi721_tx_submit(struct dma_async_tx_descriptor *txd)
{
struct tsi721_tx_desc *desc = to_tsi721_desc(txd);
struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(txd->chan);
dma_cookie_t cookie;
/* Check if the descriptor is detached from any lists */
if (!list_empty(&desc->desc_node)) {
dev_err(bdma_chan->dchan.device->dev,
"%s: wrong state of descriptor %p\n", __func__, txd);
return -EIO;
}
spin_lock_bh(&bdma_chan->lock);
if (!bdma_chan->active) {
spin_unlock_bh(&bdma_chan->lock);
return -ENODEV;
}
cookie = dma_cookie_assign(txd);
desc->status = DMA_IN_PROGRESS;
list_add_tail(&desc->desc_node, &bdma_chan->queue);
spin_unlock_bh(&bdma_chan->lock);
return cookie;
}
static int tsi721_alloc_chan_resources(struct dma_chan *dchan)
{
struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
struct tsi721_tx_desc *desc = NULL;
int i;
dev_dbg(dchan->device->dev, "%s: for channel %d\n",
__func__, bdma_chan->id);
if (bdma_chan->bd_base)
return TSI721_DMA_TX_QUEUE_SZ;
/* Initialize BDMA channel */
if (tsi721_bdma_ch_init(bdma_chan, dma_desc_per_channel)) {
dev_err(dchan->device->dev, "Unable to initialize data DMA"
" channel %d, aborting\n", bdma_chan->id);
return -ENODEV;
}
/* Allocate queue of transaction descriptors */
desc = kcalloc(TSI721_DMA_TX_QUEUE_SZ, sizeof(struct tsi721_tx_desc),
GFP_KERNEL);
if (!desc) {
dev_err(dchan->device->dev,
"Failed to allocate logical descriptors\n");
tsi721_bdma_ch_free(bdma_chan);
return -ENOMEM;
}
bdma_chan->tx_desc = desc;
for (i = 0; i < TSI721_DMA_TX_QUEUE_SZ; i++) {
dma_async_tx_descriptor_init(&desc[i].txd, dchan);
desc[i].txd.tx_submit = tsi721_tx_submit;
desc[i].txd.flags = DMA_CTRL_ACK;
list_add(&desc[i].desc_node, &bdma_chan->free_list);
}
dma_cookie_init(dchan);
bdma_chan->active = true;
tsi721_bdma_interrupt_enable(bdma_chan, 1);
return TSI721_DMA_TX_QUEUE_SZ;
}
static void tsi721_sync_dma_irq(struct tsi721_bdma_chan *bdma_chan)
{
struct tsi721_device *priv = to_tsi721(bdma_chan->dchan.device);
#ifdef CONFIG_PCI_MSI
if (priv->flags & TSI721_USING_MSIX) {
synchronize_irq(priv->msix[TSI721_VECT_DMA0_DONE +
bdma_chan->id].vector);
synchronize_irq(priv->msix[TSI721_VECT_DMA0_INT +
bdma_chan->id].vector);
} else
#endif
synchronize_irq(priv->pdev->irq);
}
static void tsi721_free_chan_resources(struct dma_chan *dchan)
{
struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
dev_dbg(dchan->device->dev, "%s: for channel %d\n",
__func__, bdma_chan->id);
if (bdma_chan->bd_base == NULL)
return;
BUG_ON(!list_empty(&bdma_chan->active_list));
BUG_ON(!list_empty(&bdma_chan->queue));
tsi721_bdma_interrupt_enable(bdma_chan, 0);
bdma_chan->active = false;
tsi721_sync_dma_irq(bdma_chan);
tasklet_kill(&bdma_chan->tasklet);
INIT_LIST_HEAD(&bdma_chan->free_list);
kfree(bdma_chan->tx_desc);
tsi721_bdma_ch_free(bdma_chan);
}
static
enum dma_status tsi721_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
struct dma_tx_state *txstate)
{
return dma_cookie_status(dchan, cookie, txstate);
}
static void tsi721_issue_pending(struct dma_chan *dchan)
{
struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
dev_dbg(dchan->device->dev, "%s: Enter\n", __func__);
if (tsi721_dma_is_idle(bdma_chan) && bdma_chan->active) {
spin_lock_bh(&bdma_chan->lock);
tsi721_advance_work(bdma_chan);
spin_unlock_bh(&bdma_chan->lock);
}
}
static
struct dma_async_tx_descriptor *tsi721_prep_rio_sg(struct dma_chan *dchan,
struct scatterlist *sgl, unsigned int sg_len,
enum dma_transfer_direction dir, unsigned long flags,
void *tinfo)
{
struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
struct tsi721_tx_desc *desc, *_d;
struct rio_dma_ext *rext = tinfo;
enum dma_rtype rtype;
struct dma_async_tx_descriptor *txd = NULL;
if (!sgl || !sg_len) {
dev_err(dchan->device->dev, "%s: No SG list\n", __func__);
return NULL;
}
dev_dbg(dchan->device->dev, "%s: %s\n", __func__,
(dir == DMA_DEV_TO_MEM)?"READ":"WRITE");
if (dir == DMA_DEV_TO_MEM)
rtype = NREAD;
else if (dir == DMA_MEM_TO_DEV) {
switch (rext->wr_type) {
case RDW_ALL_NWRITE:
rtype = ALL_NWRITE;
break;
case RDW_ALL_NWRITE_R:
rtype = ALL_NWRITE_R;
break;
case RDW_LAST_NWRITE_R:
default:
rtype = LAST_NWRITE_R;
break;
}
} else {
dev_err(dchan->device->dev,
"%s: Unsupported DMA direction option\n", __func__);
return NULL;
}
spin_lock_bh(&bdma_chan->lock);
list_for_each_entry_safe(desc, _d, &bdma_chan->free_list, desc_node) {
if (async_tx_test_ack(&desc->txd)) {
list_del_init(&desc->desc_node);
desc->destid = rext->destid;
desc->rio_addr = rext->rio_addr;
desc->rio_addr_u = 0;
desc->rtype = rtype;
desc->sg_len = sg_len;
desc->sg = sgl;
txd = &desc->txd;
txd->flags = flags;
break;
}
}
spin_unlock_bh(&bdma_chan->lock);
return txd;
}
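/*
 * Illustrative sketch (an assumption, not part of the original driver): a
 * RapidIO client that has obtained one of these channels via the dmaengine
 * API passes a struct rio_dma_ext as the "context" argument so that the
 * destination ID, RapidIO address and write type reach this callback.
 * Variable names below are hypothetical:
 *
 *	struct rio_dma_ext rext = {
 *		.destid   = target_destid,
 *		.rio_addr = target_rio_addr,
 *		.wr_type  = RDW_ALL_NWRITE_R,
 *	};
 *	struct dma_async_tx_descriptor *txd =
 *		dchan->device->device_prep_slave_sg(dchan, sgl, sg_len,
 *						    DMA_MEM_TO_DEV, flags, &rext);
 */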
static int tsi721_device_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
unsigned long arg)
{
struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
struct tsi721_tx_desc *desc, *_d;
u32 dmac_int;
LIST_HEAD(list);
dev_dbg(dchan->device->dev, "%s: Entry\n", __func__);
if (cmd != DMA_TERMINATE_ALL)
return -ENOSYS;
spin_lock_bh(&bdma_chan->lock);
bdma_chan->active = false;
if (!tsi721_dma_is_idle(bdma_chan)) {
/* make sure to stop the transfer */
iowrite32(TSI721_DMAC_CTL_SUSP,
bdma_chan->regs + TSI721_DMAC_CTL);
/* Wait until DMA channel stops */
do {
dmac_int = ioread32(bdma_chan->regs + TSI721_DMAC_INT);
} while ((dmac_int & TSI721_DMAC_INT_SUSP) == 0);
}
list_splice_init(&bdma_chan->active_list, &list);
list_splice_init(&bdma_chan->queue, &list);
list_for_each_entry_safe(desc, _d, &list, desc_node)
tsi721_dma_tx_err(bdma_chan, desc);
spin_unlock_bh(&bdma_chan->lock);
return 0;
}
int tsi721_register_dma(struct tsi721_device *priv)
{
int i;
int nr_channels = 0;
int err;
struct rio_mport *mport = priv->mport;
INIT_LIST_HEAD(&mport->dma.channels);
for (i = 0; i < TSI721_DMA_MAXCH; i++) {
struct tsi721_bdma_chan *bdma_chan = &priv->bdma[i];
if (i == TSI721_DMACH_MAINT)
continue;
bdma_chan->regs = priv->regs + TSI721_DMAC_BASE(i);
bdma_chan->dchan.device = &mport->dma;
bdma_chan->dchan.cookie = 1;
bdma_chan->dchan.chan_id = i;
bdma_chan->id = i;
bdma_chan->active = false;
spin_lock_init(&bdma_chan->lock);
INIT_LIST_HEAD(&bdma_chan->active_list);
INIT_LIST_HEAD(&bdma_chan->queue);
INIT_LIST_HEAD(&bdma_chan->free_list);
tasklet_init(&bdma_chan->tasklet, tsi721_dma_tasklet,
(unsigned long)bdma_chan);
list_add_tail(&bdma_chan->dchan.device_node,
&mport->dma.channels);
nr_channels++;
}
mport->dma.chancnt = nr_channels;
dma_cap_zero(mport->dma.cap_mask);
dma_cap_set(DMA_PRIVATE, mport->dma.cap_mask);
dma_cap_set(DMA_SLAVE, mport->dma.cap_mask);
mport->dma.dev = &priv->pdev->dev;
mport->dma.device_alloc_chan_resources = tsi721_alloc_chan_resources;
mport->dma.device_free_chan_resources = tsi721_free_chan_resources;
mport->dma.device_tx_status = tsi721_tx_status;
mport->dma.device_issue_pending = tsi721_issue_pending;
mport->dma.device_prep_slave_sg = tsi721_prep_rio_sg;
mport->dma.device_control = tsi721_device_control;
err = dma_async_device_register(&mport->dma);
if (err)
dev_err(&priv->pdev->dev, "Failed to register DMA device\n");
return err;
}

drivers/rapidio/rio-access.c (new file)

@ -0,0 +1,175 @@
/*
* RapidIO configuration space access support
*
* Copyright 2005 MontaVista Software, Inc.
* Matt Porter <mporter@kernel.crashing.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/rio.h>
#include <linux/module.h>
/*
* These interrupt-safe spinlocks protect all accesses to RIO
* configuration space and doorbell access.
*/
static DEFINE_SPINLOCK(rio_config_lock);
static DEFINE_SPINLOCK(rio_doorbell_lock);
/*
* Wrappers for all RIO configuration access functions. They just check
* alignment, do locking and call the low-level functions pointed to
* by rio_mport->ops.
*/
#define RIO_8_BAD 0
#define RIO_16_BAD (offset & 1)
#define RIO_32_BAD (offset & 3)
/**
* RIO_LOP_READ - Generate rio_local_read_config_* functions
* @size: Size of configuration space read (8, 16, 32 bits)
* @type: C type of value argument
* @len: Length of configuration space read (1, 2, 4 bytes)
*
* Generates rio_local_read_config_* functions used to access
* configuration space registers on the local device.
*/
#define RIO_LOP_READ(size,type,len) \
int __rio_local_read_config_##size \
(struct rio_mport *mport, u32 offset, type *value) \
{ \
int res; \
unsigned long flags; \
u32 data = 0; \
if (RIO_##size##_BAD) return RIO_BAD_SIZE; \
spin_lock_irqsave(&rio_config_lock, flags); \
res = mport->ops->lcread(mport, mport->id, offset, len, &data); \
*value = (type)data; \
spin_unlock_irqrestore(&rio_config_lock, flags); \
return res; \
}
/**
* RIO_LOP_WRITE - Generate rio_local_write_config_* functions
* @size: Size of configuration space write (8, 16, 32 bits)
* @type: C type of value argument
* @len: Length of configuration space write (1, 2, 4 bytes)
*
* Generates rio_local_write_config_* functions used to access
* configuration space registers on the local device.
*/
#define RIO_LOP_WRITE(size,type,len) \
int __rio_local_write_config_##size \
(struct rio_mport *mport, u32 offset, type value) \
{ \
int res; \
unsigned long flags; \
if (RIO_##size##_BAD) return RIO_BAD_SIZE; \
spin_lock_irqsave(&rio_config_lock, flags); \
res = mport->ops->lcwrite(mport, mport->id, offset, len, value);\
spin_unlock_irqrestore(&rio_config_lock, flags); \
return res; \
}
RIO_LOP_READ(8, u8, 1)
RIO_LOP_READ(16, u16, 2)
RIO_LOP_READ(32, u32, 4)
RIO_LOP_WRITE(8, u8, 1)
RIO_LOP_WRITE(16, u16, 2)
RIO_LOP_WRITE(32, u32, 4)
EXPORT_SYMBOL_GPL(__rio_local_read_config_8);
EXPORT_SYMBOL_GPL(__rio_local_read_config_16);
EXPORT_SYMBOL_GPL(__rio_local_read_config_32);
EXPORT_SYMBOL_GPL(__rio_local_write_config_8);
EXPORT_SYMBOL_GPL(__rio_local_write_config_16);
EXPORT_SYMBOL_GPL(__rio_local_write_config_32);
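/*
 * Illustrative usage (an assumption, not part of the original file): reading
 * a register from the local mport's own RapidIO configuration space, e.g.
 * the Device Identity CAR at offset 0:
 *
 *	u32 dev_id;
 *
 *	__rio_local_read_config_32(mport, RIO_DEV_ID_CAR, &dev_id);
 */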
/**
* RIO_OP_READ - Generate rio_mport_read_config_* functions
* @size: Size of configuration space read (8, 16, 32 bits)
* @type: C type of value argument
* @len: Length of configuration space read (1, 2, 4 bytes)
*
* Generates rio_mport_read_config_* functions used to access
* configuration space registers on a remote RIO device specified by
* destination ID and hop count.
*/
#define RIO_OP_READ(size,type,len) \
int rio_mport_read_config_##size \
(struct rio_mport *mport, u16 destid, u8 hopcount, u32 offset, type *value) \
{ \
int res; \
unsigned long flags; \
u32 data = 0; \
if (RIO_##size##_BAD) return RIO_BAD_SIZE; \
spin_lock_irqsave(&rio_config_lock, flags); \
res = mport->ops->cread(mport, mport->id, destid, hopcount, offset, len, &data); \
*value = (type)data; \
spin_unlock_irqrestore(&rio_config_lock, flags); \
return res; \
}
/**
* RIO_OP_WRITE - Generate rio_mport_write_config_* functions
* @size: Size of configuration space write (8, 16, 32 bits)
* @type: C type of value argument
* @len: Length of configuration space write (1, 2, 4 bytes)
*
* Generates rio_mport_write_config_* functions used to access
* configuration space registers on a remote RIO device specified by
* destination ID and hop count.
*/
#define RIO_OP_WRITE(size,type,len) \
int rio_mport_write_config_##size \
(struct rio_mport *mport, u16 destid, u8 hopcount, u32 offset, type value) \
{ \
int res; \
unsigned long flags; \
if (RIO_##size##_BAD) return RIO_BAD_SIZE; \
spin_lock_irqsave(&rio_config_lock, flags); \
res = mport->ops->cwrite(mport, mport->id, destid, hopcount, offset, len, value); \
spin_unlock_irqrestore(&rio_config_lock, flags); \
return res; \
}
RIO_OP_READ(8, u8, 1)
RIO_OP_READ(16, u16, 2)
RIO_OP_READ(32, u32, 4)
RIO_OP_WRITE(8, u8, 1)
RIO_OP_WRITE(16, u16, 2)
RIO_OP_WRITE(32, u32, 4)
EXPORT_SYMBOL_GPL(rio_mport_read_config_8);
EXPORT_SYMBOL_GPL(rio_mport_read_config_16);
EXPORT_SYMBOL_GPL(rio_mport_read_config_32);
EXPORT_SYMBOL_GPL(rio_mport_write_config_8);
EXPORT_SYMBOL_GPL(rio_mport_write_config_16);
EXPORT_SYMBOL_GPL(rio_mport_write_config_32);
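/*
 * Illustrative usage (an assumption, not part of the original file): the
 * rio_mport_* variants address a remote device by destination ID and hop
 * count, e.g. probing an attached device during enumeration:
 *
 *	u32 result;
 *
 *	if (!rio_mport_read_config_32(mport, destid, hopcount,
 *				      RIO_DEV_ID_CAR, &result))
 *		pr_debug("found device 0x%08x\n", result);
 */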
/**
* rio_mport_send_doorbell - Send a doorbell message
*
* @mport: RIO master port
* @destid: RIO device destination ID
* @data: Doorbell message data
*
* Send a doorbell message to a RIO device. The doorbell message
* has a 16-bit info field provided by the data argument.
*/
int rio_mport_send_doorbell(struct rio_mport *mport, u16 destid, u16 data)
{
int res;
unsigned long flags;
spin_lock_irqsave(&rio_doorbell_lock, flags);
res = mport->ops->dsend(mport, mport->id, destid, data);
spin_unlock_irqrestore(&rio_doorbell_lock, flags);
return res;
}
EXPORT_SYMBOL_GPL(rio_mport_send_doorbell);
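/*
 * Illustrative usage (an assumption, not part of the original file): ring a
 * doorbell on the device with destination ID destid, carrying a 16-bit info
 * value chosen by the caller:
 *
 *	rio_mport_send_doorbell(mport, destid, 0x0001);
 */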

drivers/rapidio/rio-driver.c (new file)

@ -0,0 +1,260 @@
/*
* RapidIO driver support
*
* Copyright 2005 MontaVista Software, Inc.
* Matt Porter <mporter@kernel.crashing.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/rio.h>
#include <linux/rio_ids.h>
#include "rio.h"
/**
* rio_match_device - Tell if a RIO device has a matching RIO device id structure
* @id: the RIO device id structure to match against
* @rdev: the RIO device structure to match against
*
* Used from driver probe and bus matching to check whether a RIO device
* matches a device id structure provided by a RIO driver. Returns the
* matching &struct rio_device_id or %NULL if there is no match.
*/
static const struct rio_device_id *rio_match_device(const struct rio_device_id
*id,
const struct rio_dev *rdev)
{
while (id->vid || id->asm_vid) {
if (((id->vid == RIO_ANY_ID) || (id->vid == rdev->vid)) &&
((id->did == RIO_ANY_ID) || (id->did == rdev->did)) &&
((id->asm_vid == RIO_ANY_ID)
|| (id->asm_vid == rdev->asm_vid))
&& ((id->asm_did == RIO_ANY_ID)
|| (id->asm_did == rdev->asm_did)))
return id;
id++;
}
return NULL;
}
/**
* rio_dev_get - Increments the reference count of the RIO device structure
*
* @rdev: RIO device being referenced
*
* Each live reference to a device should be refcounted.
*
* Drivers for RIO devices should normally record such references in
* their probe() methods, when they bind to a device, and release
* them by calling rio_dev_put(), in their disconnect() methods.
*/
struct rio_dev *rio_dev_get(struct rio_dev *rdev)
{
if (rdev)
get_device(&rdev->dev);
return rdev;
}
/**
* rio_dev_put - Release a use of the RIO device structure
*
* @rdev: RIO device being disconnected
*
* Must be called when a user of a device is finished with it.
* When the last user of the device calls this function, the
* memory of the device is freed.
*/
void rio_dev_put(struct rio_dev *rdev)
{
if (rdev)
put_device(&rdev->dev);
}
/**
* rio_device_probe - Probe a RIO device with a matching driver
* @dev: the RIO device structure to probe
*
* Returns 0 and sets rio_dev->driver when the driver claims @dev,
* otherwise returns a negative error code.
*/
static int rio_device_probe(struct device *dev)
{
struct rio_driver *rdrv = to_rio_driver(dev->driver);
struct rio_dev *rdev = to_rio_dev(dev);
int error = -ENODEV;
const struct rio_device_id *id;
if (!rdev->driver && rdrv->probe) {
if (!rdrv->id_table)
return error;
id = rio_match_device(rdrv->id_table, rdev);
rio_dev_get(rdev);
if (id)
error = rdrv->probe(rdev, id);
if (error >= 0) {
rdev->driver = rdrv;
error = 0;
} else
rio_dev_put(rdev);
}
return error;
}
/**
* rio_device_remove - Remove a RIO device from the system
*
* @dev: the RIO device structure to match against
*
* Remove a RIO device from the system. If it has an associated
* driver, then run the driver remove() method. Then update
* the reference count.
*/
static int rio_device_remove(struct device *dev)
{
struct rio_dev *rdev = to_rio_dev(dev);
struct rio_driver *rdrv = rdev->driver;
if (rdrv) {
if (rdrv->remove)
rdrv->remove(rdev);
rdev->driver = NULL;
}
rio_dev_put(rdev);
return 0;
}
/**
* rio_register_driver - register a new RIO driver
* @rdrv: the RIO driver structure to register
*
* Adds a &struct rio_driver to the list of registered drivers.
* Returns a negative value on error, otherwise 0. If no error
* occurred, the driver remains registered even if no device
* was claimed during registration.
*/
int rio_register_driver(struct rio_driver *rdrv)
{
/* initialize common driver fields */
rdrv->driver.name = rdrv->name;
rdrv->driver.bus = &rio_bus_type;
/* register with core */
return driver_register(&rdrv->driver);
}
/**
* rio_unregister_driver - unregister a RIO driver
* @rdrv: the RIO driver structure to unregister
*
* Deletes the &struct rio_driver from the list of registered RIO
* drivers, gives it a chance to clean up by calling its remove()
* function for each device it was responsible for, and marks those
* devices as driverless.
*/
void rio_unregister_driver(struct rio_driver *rdrv)
{
driver_unregister(&rdrv->driver);
}
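/*
 * A minimal registration sketch built around rio_register_driver() and
 * rio_unregister_driver(). The driver name and callbacks are placeholders;
 * complete working instances are the switch drivers under
 * drivers/rapidio/switches/.
 */
static int example_probe(struct rio_dev *rdev, const struct rio_device_id *id)
{
	/* Claim the device; rio_device_probe() already took a reference. */
	return 0;
}

static void example_remove(struct rio_dev *rdev)
{
	/* Undo probe(); the core drops its device reference afterwards. */
}

static struct rio_driver example_driver = {
	.name = "example_rio_client",
	.id_table = example_id_table,	/* see the sketch above */
	.probe = example_probe,
	.remove = example_remove,
};

static int __init example_init(void)
{
	return rio_register_driver(&example_driver);
}
module_init(example_init);

static void __exit example_exit(void)
{
	rio_unregister_driver(&example_driver);
}
module_exit(example_exit);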
void rio_attach_device(struct rio_dev *rdev)
{
rdev->dev.bus = &rio_bus_type;
}
EXPORT_SYMBOL_GPL(rio_attach_device);
/**
* rio_match_bus - Tell if a RIO device structure has a matching RIO driver device id structure
* @dev: the standard device structure to match against
* @drv: the standard driver structure containing the ids to match against
*
* Used by a driver to check whether a RIO device present in the
* system is in its list of supported devices. Returns 1 if
* there is a matching &struct rio_device_id or 0 if there is
* no match.
*/
static int rio_match_bus(struct device *dev, struct device_driver *drv)
{
struct rio_dev *rdev = to_rio_dev(dev);
struct rio_driver *rdrv = to_rio_driver(drv);
const struct rio_device_id *id = rdrv->id_table;
const struct rio_device_id *found_id;
if (!id)
goto out;
found_id = rio_match_device(id, rdev);
if (found_id)
return 1;
out:
	return 0;
}
static int rio_uevent(struct device *dev, struct kobj_uevent_env *env)
{
struct rio_dev *rdev;
if (!dev)
return -ENODEV;
rdev = to_rio_dev(dev);
if (!rdev)
return -ENODEV;
if (add_uevent_var(env, "MODALIAS=rapidio:v%04Xd%04Xav%04Xad%04X",
rdev->vid, rdev->did, rdev->asm_vid, rdev->asm_did))
return -ENOMEM;
return 0;
}
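/*
 * For reference, an assumed example of the uevent produced above: a device
 * with vid 0x0038, did 0x0374, asm_vid 0x0038 and asm_did 0x0000 yields
 *
 *	MODALIAS=rapidio:v0038d0374av0038ad0000
 *
 * which modprobe matches against the rapidio:v*d*av*ad* aliases generated
 * from driver id tables by MODULE_DEVICE_TABLE(rapidio, ...).
 */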
struct class rio_mport_class = {
.name = "rapidio_port",
.owner = THIS_MODULE,
.dev_groups = rio_mport_groups,
};
EXPORT_SYMBOL_GPL(rio_mport_class);
struct bus_type rio_bus_type = {
.name = "rapidio",
.match = rio_match_bus,
.dev_groups = rio_dev_groups,
.bus_groups = rio_bus_groups,
.probe = rio_device_probe,
.remove = rio_device_remove,
.uevent = rio_uevent,
};
/**
* rio_bus_init - Register the RapidIO bus with the device model
*
* Registers the RIO mport device class and RIO bus type with the Linux
* device model.
*/
static int __init rio_bus_init(void)
{
int ret;
ret = class_register(&rio_mport_class);
if (!ret) {
ret = bus_register(&rio_bus_type);
if (ret)
class_unregister(&rio_mport_class);
}
return ret;
}
postcore_initcall(rio_bus_init);
EXPORT_SYMBOL_GPL(rio_register_driver);
EXPORT_SYMBOL_GPL(rio_unregister_driver);
EXPORT_SYMBOL_GPL(rio_bus_type);
EXPORT_SYMBOL_GPL(rio_dev_get);
EXPORT_SYMBOL_GPL(rio_dev_put);

1201
drivers/rapidio/rio-scan.c Normal file

File diff suppressed because it is too large Load diff

383
drivers/rapidio/rio-sysfs.c Normal file
View file

@ -0,0 +1,383 @@
/*
* RapidIO sysfs attributes and support
*
* Copyright 2005 MontaVista Software, Inc.
* Matt Porter <mporter@kernel.crashing.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/kernel.h>
#include <linux/rio.h>
#include <linux/rio_drv.h>
#include <linux/stat.h>
#include <linux/capability.h>
#include "rio.h"
/* Sysfs support */
#define rio_config_attr(field, format_string) \
static ssize_t \
field##_show(struct device *dev, struct device_attribute *attr, char *buf) \
{ \
struct rio_dev *rdev = to_rio_dev(dev); \
\
return sprintf(buf, format_string, rdev->field); \
} \
static DEVICE_ATTR_RO(field);
rio_config_attr(did, "0x%04x\n");
rio_config_attr(vid, "0x%04x\n");
rio_config_attr(device_rev, "0x%08x\n");
rio_config_attr(asm_did, "0x%04x\n");
rio_config_attr(asm_vid, "0x%04x\n");
rio_config_attr(asm_rev, "0x%04x\n");
rio_config_attr(destid, "0x%04x\n");
rio_config_attr(hopcount, "0x%02x\n");
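/*
 * For clarity, rio_config_attr(did, "0x%04x\n") above expands to roughly:
 *
 *	static ssize_t did_show(struct device *dev,
 *				struct device_attribute *attr, char *buf)
 *	{
 *		struct rio_dev *rdev = to_rio_dev(dev);
 *
 *		return sprintf(buf, "0x%04x\n", rdev->did);
 *	}
 *	static DEVICE_ATTR_RO(did);
 *
 * i.e. one read-only sysfs attribute per listed rio_dev field.
 */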
static ssize_t routes_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct rio_dev *rdev = to_rio_dev(dev);
char *str = buf;
int i;
for (i = 0; i < RIO_MAX_ROUTE_ENTRIES(rdev->net->hport->sys_size);
i++) {
if (rdev->rswitch->route_table[i] == RIO_INVALID_ROUTE)
continue;
str +=
sprintf(str, "%04x %02x\n", i,
rdev->rswitch->route_table[i]);
}
return (str - buf);
}
static DEVICE_ATTR_RO(routes);
static ssize_t lprev_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct rio_dev *rdev = to_rio_dev(dev);
return sprintf(buf, "%s\n",
(rdev->prev) ? rio_name(rdev->prev) : "root");
}
static DEVICE_ATTR_RO(lprev);
static ssize_t lnext_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct rio_dev *rdev = to_rio_dev(dev);
char *str = buf;
int i;
if (rdev->pef & RIO_PEF_SWITCH) {
for (i = 0; i < RIO_GET_TOTAL_PORTS(rdev->swpinfo); i++) {
if (rdev->rswitch->nextdev[i])
str += sprintf(str, "%s\n",
rio_name(rdev->rswitch->nextdev[i]));
else
str += sprintf(str, "null\n");
}
}
return str - buf;
}
static DEVICE_ATTR_RO(lnext);
static ssize_t modalias_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct rio_dev *rdev = to_rio_dev(dev);
return sprintf(buf, "rapidio:v%04Xd%04Xav%04Xad%04X\n",
rdev->vid, rdev->did, rdev->asm_vid, rdev->asm_did);
}
static DEVICE_ATTR_RO(modalias);
static struct attribute *rio_dev_attrs[] = {
&dev_attr_did.attr,
&dev_attr_vid.attr,
&dev_attr_device_rev.attr,
&dev_attr_asm_did.attr,
&dev_attr_asm_vid.attr,
&dev_attr_asm_rev.attr,
&dev_attr_lprev.attr,
&dev_attr_destid.attr,
&dev_attr_modalias.attr,
NULL,
};
static const struct attribute_group rio_dev_group = {
.attrs = rio_dev_attrs,
};
const struct attribute_group *rio_dev_groups[] = {
&rio_dev_group,
NULL,
};
static ssize_t
rio_read_config(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct rio_dev *dev =
to_rio_dev(container_of(kobj, struct device, kobj));
unsigned int size = 0x100;
loff_t init_off = off;
u8 *data = (u8 *) buf;
/* Several chips lock up trying to read undefined config space */
if (capable(CAP_SYS_ADMIN))
size = RIO_MAINT_SPACE_SZ;
if (off >= size)
return 0;
if (off + count > size) {
size -= off;
count = size;
} else {
size = count;
}
if ((off & 1) && size) {
u8 val;
rio_read_config_8(dev, off, &val);
data[off - init_off] = val;
off++;
size--;
}
if ((off & 3) && size > 2) {
u16 val;
rio_read_config_16(dev, off, &val);
data[off - init_off] = (val >> 8) & 0xff;
data[off - init_off + 1] = val & 0xff;
off += 2;
size -= 2;
}
while (size > 3) {
u32 val;
rio_read_config_32(dev, off, &val);
data[off - init_off] = (val >> 24) & 0xff;
data[off - init_off + 1] = (val >> 16) & 0xff;
data[off - init_off + 2] = (val >> 8) & 0xff;
data[off - init_off + 3] = val & 0xff;
off += 4;
size -= 4;
}
if (size >= 2) {
u16 val;
rio_read_config_16(dev, off, &val);
data[off - init_off] = (val >> 8) & 0xff;
data[off - init_off + 1] = val & 0xff;
off += 2;
size -= 2;
}
if (size > 0) {
u8 val;
rio_read_config_8(dev, off, &val);
data[off - init_off] = val;
off++;
--size;
}
return count;
}
static ssize_t
rio_write_config(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct rio_dev *dev =
to_rio_dev(container_of(kobj, struct device, kobj));
unsigned int size = count;
loff_t init_off = off;
u8 *data = (u8 *) buf;
if (off >= RIO_MAINT_SPACE_SZ)
return 0;
if (off + count > RIO_MAINT_SPACE_SZ) {
size = RIO_MAINT_SPACE_SZ - off;
count = size;
}
if ((off & 1) && size) {
rio_write_config_8(dev, off, data[off - init_off]);
off++;
size--;
}
if ((off & 3) && (size > 2)) {
u16 val = data[off - init_off + 1];
val |= (u16) data[off - init_off] << 8;
rio_write_config_16(dev, off, val);
off += 2;
size -= 2;
}
while (size > 3) {
u32 val = data[off - init_off + 3];
val |= (u32) data[off - init_off + 2] << 8;
val |= (u32) data[off - init_off + 1] << 16;
val |= (u32) data[off - init_off] << 24;
rio_write_config_32(dev, off, val);
off += 4;
size -= 4;
}
if (size >= 2) {
u16 val = data[off - init_off + 1];
val |= (u16) data[off - init_off] << 8;
rio_write_config_16(dev, off, val);
off += 2;
size -= 2;
}
if (size) {
rio_write_config_8(dev, off, data[off - init_off]);
off++;
--size;
}
return count;
}
static struct bin_attribute rio_config_attr = {
.attr = {
.name = "config",
.mode = S_IRUGO | S_IWUSR,
},
.size = RIO_MAINT_SPACE_SZ,
.read = rio_read_config,
.write = rio_write_config,
};
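/*
 * A minimal userspace sketch (assumed device name, shown only for
 * illustration) that reads the first maintenance-space bytes through the
 * "config" attribute defined above. Without CAP_SYS_ADMIN only the first
 * 0x100 bytes are readable, as rio_read_config() enforces.
 */
#if 0	/* userspace example, not kernel code */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char car[16];
	int fd = open("/sys/bus/rapidio/devices/00:e:0001/config", O_RDONLY);

	if (fd < 0)
		return 1;
	if (pread(fd, car, sizeof(car), 0) != (ssize_t)sizeof(car))
		return 1;
	/* the first word is the Device Identity CAR */
	printf("%02x%02x%02x%02x\n", car[0], car[1], car[2], car[3]);
	close(fd);
	return 0;
}
#endif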
/**
* rio_create_sysfs_dev_files - create RIO specific sysfs files
* @rdev: device whose entries should be created
*
* Create files when @rdev is added to sysfs.
*/
int rio_create_sysfs_dev_files(struct rio_dev *rdev)
{
int err = 0;
err = device_create_bin_file(&rdev->dev, &rio_config_attr);
if (!err && (rdev->pef & RIO_PEF_SWITCH)) {
err |= device_create_file(&rdev->dev, &dev_attr_routes);
err |= device_create_file(&rdev->dev, &dev_attr_lnext);
err |= device_create_file(&rdev->dev, &dev_attr_hopcount);
}
if (err)
pr_warning("RIO: Failed to create attribute file(s) for %s\n",
rio_name(rdev));
return err;
}
/**
* rio_remove_sysfs_dev_files - cleanup RIO specific sysfs files
* @rdev: device whose entries we should free
*
* Cleanup when @rdev is removed from sysfs.
*/
void rio_remove_sysfs_dev_files(struct rio_dev *rdev)
{
device_remove_bin_file(&rdev->dev, &rio_config_attr);
if (rdev->pef & RIO_PEF_SWITCH) {
device_remove_file(&rdev->dev, &dev_attr_routes);
device_remove_file(&rdev->dev, &dev_attr_lnext);
device_remove_file(&rdev->dev, &dev_attr_hopcount);
}
}
static ssize_t bus_scan_store(struct bus_type *bus, const char *buf,
size_t count)
{
long val;
int rc;
if (kstrtol(buf, 0, &val) < 0)
return -EINVAL;
if (val == RIO_MPORT_ANY) {
rc = rio_init_mports();
goto exit;
}
if (val < 0 || val >= RIO_MAX_MPORTS)
return -EINVAL;
rc = rio_mport_scan((int)val);
exit:
if (!rc)
rc = count;
return rc;
}
static BUS_ATTR(scan, (S_IWUSR|S_IWGRP), NULL, bus_scan_store);
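/*
 * Usage note: writing a master port ID to /sys/bus/rapidio/scan triggers
 * enumeration/discovery on that port via rio_mport_scan(); writing -1
 * (RIO_MPORT_ANY) calls rio_init_mports() and scans every registered port.
 */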
static struct attribute *rio_bus_attrs[] = {
&bus_attr_scan.attr,
NULL,
};
static const struct attribute_group rio_bus_group = {
.attrs = rio_bus_attrs,
};
const struct attribute_group *rio_bus_groups[] = {
&rio_bus_group,
NULL,
};
static ssize_t
port_destid_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct rio_mport *mport = to_rio_mport(dev);
if (mport)
return sprintf(buf, "0x%04x\n", mport->host_deviceid);
else
return -ENODEV;
}
static DEVICE_ATTR_RO(port_destid);
static ssize_t sys_size_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct rio_mport *mport = to_rio_mport(dev);
if (mport)
return sprintf(buf, "%u\n", mport->sys_size);
else
return -ENODEV;
}
static DEVICE_ATTR_RO(sys_size);
static struct attribute *rio_mport_attrs[] = {
&dev_attr_port_destid.attr,
&dev_attr_sys_size.attr,
NULL,
};
static const struct attribute_group rio_mport_group = {
.attrs = rio_mport_attrs,
};
const struct attribute_group *rio_mport_groups[] = {
&rio_mport_group,
NULL,
};

1969
drivers/rapidio/rio.c Normal file

File diff suppressed because it is too large Load diff

56
drivers/rapidio/rio.h Normal file
View file

@ -0,0 +1,56 @@
/*
* RapidIO interconnect services
*
* Copyright 2005 MontaVista Software, Inc.
* Matt Porter <mporter@kernel.crashing.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/device.h>
#include <linux/list.h>
#include <linux/rio.h>
#define RIO_MAX_CHK_RETRY 3
#define RIO_MPORT_ANY (-1)
/* Functions internal to the RIO core code */
extern u32 rio_mport_get_feature(struct rio_mport *mport, int local, u16 destid,
u8 hopcount, int ftr);
extern u32 rio_mport_get_physefb(struct rio_mport *port, int local,
u16 destid, u8 hopcount);
extern u32 rio_mport_get_efb(struct rio_mport *port, int local, u16 destid,
u8 hopcount, u32 from);
extern int rio_mport_chk_dev_access(struct rio_mport *mport, u16 destid,
u8 hopcount);
extern int rio_create_sysfs_dev_files(struct rio_dev *rdev);
extern int rio_lock_device(struct rio_mport *port, u16 destid,
u8 hopcount, int wait_ms);
extern int rio_unlock_device(struct rio_mport *port, u16 destid, u8 hopcount);
extern int rio_route_add_entry(struct rio_dev *rdev,
u16 table, u16 route_destid, u8 route_port, int lock);
extern int rio_route_get_entry(struct rio_dev *rdev, u16 table,
u16 route_destid, u8 *route_port, int lock);
extern int rio_route_clr_table(struct rio_dev *rdev, u16 table, int lock);
extern int rio_set_port_lockout(struct rio_dev *rdev, u32 pnum, int lock);
extern struct rio_dev *rio_get_comptag(u32 comp_tag, struct rio_dev *from);
extern int rio_add_device(struct rio_dev *rdev);
extern int rio_enable_rx_tx_port(struct rio_mport *port, int local, u16 destid,
u8 hopcount, u8 port_num);
extern int rio_register_scan(int mport_id, struct rio_scan *scan_ops);
extern int rio_unregister_scan(int mport_id, struct rio_scan *scan_ops);
extern void rio_attach_device(struct rio_dev *rdev);
extern struct rio_mport *rio_find_mport(int mport_id);
extern int rio_mport_scan(int mport_id);
/* Structures internal to the RIO core code */
extern const struct attribute_group *rio_dev_groups[];
extern const struct attribute_group *rio_bus_groups[];
extern const struct attribute_group *rio_mport_groups[];
#define RIO_GET_DID(size, x) (size ? (x & 0xffff) : ((x & 0x00ff0000) >> 16))
#define RIO_SET_DID(size, x) (size ? (x & 0xffff) : ((x & 0x000000ff) << 16))
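/*
 * These helpers pack/unpack the Base Device ID CSR according to the
 * mport's sys_size flag: with sys_size = 1 (large, 16-bit destIDs) the ID
 * occupies the low halfword, with sys_size = 0 (small, 8-bit destIDs) it
 * sits in bits 16-23 of the register. For example, RIO_SET_DID(0, 0x05)
 * yields 0x00050000 and RIO_GET_DID(0, 0x00050000) returns 0x05.
 */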

View file

@ -0,0 +1,24 @@
#
# RapidIO switches configuration
#
config RAPIDIO_TSI57X
tristate "IDT Tsi57x SRIO switches support"
---help---
Includes support for IDT Tsi57x family of serial RapidIO switches.
config RAPIDIO_CPS_XX
tristate "IDT CPS-xx SRIO switches support"
---help---
Includes support for IDT CPS-16/12/10/8 serial RapidIO switches.
config RAPIDIO_TSI568
tristate "Tsi568 SRIO switch support"
default n
---help---
Includes support for IDT Tsi568 serial RapidIO switch.
config RAPIDIO_CPS_GEN2
tristate "IDT CPS Gen.2 SRIO switch support"
default n
---help---
Includes support for IDT CPS Gen.2 serial RapidIO switches.

View file

@ -0,0 +1,8 @@
#
# Makefile for RIO switches
#
obj-$(CONFIG_RAPIDIO_TSI57X) += tsi57x.o
obj-$(CONFIG_RAPIDIO_CPS_XX) += idtcps.o
obj-$(CONFIG_RAPIDIO_TSI568) += tsi568.o
obj-$(CONFIG_RAPIDIO_CPS_GEN2) += idt_gen2.o

View file

@ -0,0 +1,495 @@
/*
* IDT CPS Gen.2 Serial RapidIO switch family support
*
* Copyright 2010 Integrated Device Technology, Inc.
* Alexandre Bounine <alexandre.bounine@idt.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/stat.h>
#include <linux/module.h>
#include <linux/rio.h>
#include <linux/rio_drv.h>
#include <linux/rio_ids.h>
#include <linux/delay.h>
#include <asm/page.h>
#include "../rio.h"
#define LOCAL_RTE_CONF_DESTID_SEL 0x010070
#define LOCAL_RTE_CONF_DESTID_SEL_PSEL 0x0000001f
#define IDT_LT_ERR_REPORT_EN 0x03100c
#define IDT_PORT_ERR_REPORT_EN(n) (0x031044 + (n)*0x40)
#define IDT_PORT_ERR_REPORT_EN_BC 0x03ff04
#define IDT_PORT_ISERR_REPORT_EN(n) (0x03104C + (n)*0x40)
#define IDT_PORT_ISERR_REPORT_EN_BC 0x03ff0c
#define IDT_PORT_INIT_TX_ACQUIRED 0x00000020
#define IDT_LANE_ERR_REPORT_EN(n) (0x038010 + (n)*0x100)
#define IDT_LANE_ERR_REPORT_EN_BC 0x03ff10
#define IDT_DEV_CTRL_1 0xf2000c
#define IDT_DEV_CTRL_1_GENPW 0x02000000
#define IDT_DEV_CTRL_1_PRSTBEH 0x00000001
#define IDT_CFGBLK_ERR_CAPTURE_EN 0x020008
#define IDT_CFGBLK_ERR_REPORT 0xf20014
#define IDT_CFGBLK_ERR_REPORT_GENPW 0x00000002
#define IDT_AUX_PORT_ERR_CAP_EN 0x020000
#define IDT_AUX_ERR_REPORT_EN 0xf20018
#define IDT_AUX_PORT_ERR_LOG_I2C 0x00000002
#define IDT_AUX_PORT_ERR_LOG_JTAG 0x00000001
#define IDT_ISLTL_ADDRESS_CAP 0x021014
#define IDT_RIO_DOMAIN 0xf20020
#define IDT_RIO_DOMAIN_MASK 0x000000ff
#define IDT_PW_INFO_CSR 0xf20024
#define IDT_SOFT_RESET 0xf20040
#define IDT_SOFT_RESET_REQ 0x00030097
#define IDT_I2C_MCTRL 0xf20050
#define IDT_I2C_MCTRL_GENPW 0x04000000
#define IDT_JTAG_CTRL 0xf2005c
#define IDT_JTAG_CTRL_GENPW 0x00000002
#define IDT_LANE_CTRL(n) (0xff8000 + (n)*0x100)
#define IDT_LANE_CTRL_BC 0xffff00
#define IDT_LANE_CTRL_GENPW 0x00200000
#define IDT_LANE_DFE_1_BC 0xffff18
#define IDT_LANE_DFE_2_BC 0xffff1c
#define IDT_PORT_OPS(n) (0xf40004 + (n)*0x100)
#define IDT_PORT_OPS_GENPW 0x08000000
#define IDT_PORT_OPS_PL_ELOG 0x00000040
#define IDT_PORT_OPS_LL_ELOG 0x00000020
#define IDT_PORT_OPS_LT_ELOG 0x00000010
#define IDT_PORT_OPS_BC 0xf4ff04
#define IDT_PORT_ISERR_DET(n) (0xf40008 + (n)*0x100)
#define IDT_ERR_CAP 0xfd0000
#define IDT_ERR_CAP_LOG_OVERWR 0x00000004
#define IDT_ERR_RD 0xfd0004
#define IDT_DEFAULT_ROUTE 0xde
#define IDT_NO_ROUTE 0xdf
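/*
 * Routing-table access pattern used by the helpers below: CPS Gen.2
 * devices expose one global table plus one table per port. A helper first
 * writes LOCAL_RTE_CONF_DESTID_SEL to pick the table (0 selects the global
 * table, per-port tables follow at index table + 1) and then programs or
 * reads the entry through the standard RIO_STD_RTE_CONF_DESTID_SEL_CSR /
 * RIO_STD_RTE_CONF_PORT_SEL_CSR pair. Entries that read back as
 * IDT_DEFAULT_ROUTE or IDT_NO_ROUTE are reported as RIO_INVALID_ROUTE.
 */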
static int
idtg2_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
u16 table, u16 route_destid, u8 route_port)
{
/*
* Select routing table to update
*/
if (table == RIO_GLOBAL_TABLE)
table = 0;
else
table++;
if (route_port == RIO_INVALID_ROUTE)
route_port = IDT_DEFAULT_ROUTE;
rio_mport_write_config_32(mport, destid, hopcount,
LOCAL_RTE_CONF_DESTID_SEL, table);
/*
* Program destination port for the specified destID
*/
rio_mport_write_config_32(mport, destid, hopcount,
RIO_STD_RTE_CONF_DESTID_SEL_CSR,
(u32)route_destid);
rio_mport_write_config_32(mport, destid, hopcount,
RIO_STD_RTE_CONF_PORT_SEL_CSR,
(u32)route_port);
udelay(10);
return 0;
}
static int
idtg2_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
u16 table, u16 route_destid, u8 *route_port)
{
u32 result;
/*
* Select routing table to read
*/
if (table == RIO_GLOBAL_TABLE)
table = 0;
else
table++;
rio_mport_write_config_32(mport, destid, hopcount,
LOCAL_RTE_CONF_DESTID_SEL, table);
rio_mport_write_config_32(mport, destid, hopcount,
RIO_STD_RTE_CONF_DESTID_SEL_CSR,
route_destid);
rio_mport_read_config_32(mport, destid, hopcount,
RIO_STD_RTE_CONF_PORT_SEL_CSR, &result);
if (IDT_DEFAULT_ROUTE == (u8)result || IDT_NO_ROUTE == (u8)result)
*route_port = RIO_INVALID_ROUTE;
else
*route_port = (u8)result;
return 0;
}
static int
idtg2_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount,
u16 table)
{
u32 i;
/*
* Select routing table to clear
*/
if (table == RIO_GLOBAL_TABLE)
table = 0;
else
table++;
rio_mport_write_config_32(mport, destid, hopcount,
LOCAL_RTE_CONF_DESTID_SEL, table);
for (i = RIO_STD_RTE_CONF_EXTCFGEN;
i <= (RIO_STD_RTE_CONF_EXTCFGEN | 0xff);) {
rio_mport_write_config_32(mport, destid, hopcount,
RIO_STD_RTE_CONF_DESTID_SEL_CSR, i);
rio_mport_write_config_32(mport, destid, hopcount,
RIO_STD_RTE_CONF_PORT_SEL_CSR,
(IDT_DEFAULT_ROUTE << 24) | (IDT_DEFAULT_ROUTE << 16) |
(IDT_DEFAULT_ROUTE << 8) | IDT_DEFAULT_ROUTE);
i += 4;
}
return 0;
}
static int
idtg2_set_domain(struct rio_mport *mport, u16 destid, u8 hopcount,
u8 sw_domain)
{
/*
* Switch domain configuration operates only at global level
*/
rio_mport_write_config_32(mport, destid, hopcount,
IDT_RIO_DOMAIN, (u32)sw_domain);
return 0;
}
static int
idtg2_get_domain(struct rio_mport *mport, u16 destid, u8 hopcount,
u8 *sw_domain)
{
u32 regval;
/*
* Switch domain configuration operates only at global level
*/
rio_mport_read_config_32(mport, destid, hopcount,
IDT_RIO_DOMAIN, &regval);
*sw_domain = (u8)(regval & 0xff);
return 0;
}
static int
idtg2_em_init(struct rio_dev *rdev)
{
u32 regval;
int i, tmp;
/*
* This routine performs device-specific initialization only.
* All standard EM configuration should be performed at upper level.
*/
pr_debug("RIO: %s [%d:%d]\n", __func__, rdev->destid, rdev->hopcount);
/* Set Port-Write info CSR: PRIO=3 and CRF=1 */
rio_write_config_32(rdev, IDT_PW_INFO_CSR, 0x0000e000);
/*
* Configure LT LAYER error reporting.
*/
/* Enable standard (RIO.p8) error reporting */
rio_write_config_32(rdev, IDT_LT_ERR_REPORT_EN,
REM_LTL_ERR_ILLTRAN | REM_LTL_ERR_UNSOLR |
REM_LTL_ERR_UNSUPTR);
/* Use Port-Writes for LT layer error reporting.
* Enable per-port reset
*/
rio_read_config_32(rdev, IDT_DEV_CTRL_1, &regval);
rio_write_config_32(rdev, IDT_DEV_CTRL_1,
regval | IDT_DEV_CTRL_1_GENPW | IDT_DEV_CTRL_1_PRSTBEH);
/*
* Configure PORT error reporting.
*/
/* Report all RIO.p8 errors supported by device */
rio_write_config_32(rdev, IDT_PORT_ERR_REPORT_EN_BC, 0x807e8037);
/* Configure reporting of implementation specific errors/events */
rio_write_config_32(rdev, IDT_PORT_ISERR_REPORT_EN_BC,
IDT_PORT_INIT_TX_ACQUIRED);
/* Use Port-Writes for port error reporting and enable error logging */
tmp = RIO_GET_TOTAL_PORTS(rdev->swpinfo);
for (i = 0; i < tmp; i++) {
rio_read_config_32(rdev, IDT_PORT_OPS(i), &regval);
rio_write_config_32(rdev,
IDT_PORT_OPS(i), regval | IDT_PORT_OPS_GENPW |
IDT_PORT_OPS_PL_ELOG |
IDT_PORT_OPS_LL_ELOG |
IDT_PORT_OPS_LT_ELOG);
}
/* Overwrite error log if full */
rio_write_config_32(rdev, IDT_ERR_CAP, IDT_ERR_CAP_LOG_OVERWR);
/*
* Configure LANE error reporting.
*/
/* Disable line error reporting */
rio_write_config_32(rdev, IDT_LANE_ERR_REPORT_EN_BC, 0);
/* Use Port-Writes for lane error reporting (when enabled)
* (do per-lane update because lanes may have different configuration)
*/
tmp = (rdev->did == RIO_DID_IDTCPS1848) ? 48 : 16;
for (i = 0; i < tmp; i++) {
rio_read_config_32(rdev, IDT_LANE_CTRL(i), &regval);
rio_write_config_32(rdev, IDT_LANE_CTRL(i),
regval | IDT_LANE_CTRL_GENPW);
}
/*
* Configure AUX error reporting.
*/
/* Disable JTAG and I2C Error capture */
rio_write_config_32(rdev, IDT_AUX_PORT_ERR_CAP_EN, 0);
/* Disable JTAG and I2C Error reporting/logging */
rio_write_config_32(rdev, IDT_AUX_ERR_REPORT_EN, 0);
/* Disable Port-Write notification from JTAG */
rio_write_config_32(rdev, IDT_JTAG_CTRL, 0);
/* Disable Port-Write notification from I2C */
rio_read_config_32(rdev, IDT_I2C_MCTRL, &regval);
rio_write_config_32(rdev, IDT_I2C_MCTRL, regval & ~IDT_I2C_MCTRL_GENPW);
/*
* Configure CFG_BLK error reporting.
*/
/* Disable Configuration Block error capture */
rio_write_config_32(rdev, IDT_CFGBLK_ERR_CAPTURE_EN, 0);
/* Disable Port-Writes for Configuration Block error reporting */
rio_read_config_32(rdev, IDT_CFGBLK_ERR_REPORT, &regval);
rio_write_config_32(rdev, IDT_CFGBLK_ERR_REPORT,
regval & ~IDT_CFGBLK_ERR_REPORT_GENPW);
/* set TVAL = ~50us */
rio_write_config_32(rdev,
rdev->phys_efptr + RIO_PORT_LINKTO_CTL_CSR, 0x8e << 8);
return 0;
}
static int
idtg2_em_handler(struct rio_dev *rdev, u8 portnum)
{
u32 regval, em_perrdet, em_ltlerrdet;
rio_read_config_32(rdev,
rdev->em_efptr + RIO_EM_LTL_ERR_DETECT, &em_ltlerrdet);
if (em_ltlerrdet) {
/* Service Logical/Transport Layer Error(s) */
if (em_ltlerrdet & REM_LTL_ERR_IMPSPEC) {
/* Implementation specific error reported */
rio_read_config_32(rdev,
IDT_ISLTL_ADDRESS_CAP, &regval);
pr_debug("RIO: %s Implementation Specific LTL errors" \
" 0x%x @(0x%x)\n",
rio_name(rdev), em_ltlerrdet, regval);
/* Clear implementation specific address capture CSR */
rio_write_config_32(rdev, IDT_ISLTL_ADDRESS_CAP, 0);
}
}
rio_read_config_32(rdev,
rdev->em_efptr + RIO_EM_PN_ERR_DETECT(portnum), &em_perrdet);
if (em_perrdet) {
/* Service Port-Level Error(s) */
if (em_perrdet & REM_PED_IMPL_SPEC) {
/* Implementation Specific port error reported */
/* Get IS errors reported */
rio_read_config_32(rdev,
IDT_PORT_ISERR_DET(portnum), &regval);
pr_debug("RIO: %s Implementation Specific Port" \
" errors 0x%x\n", rio_name(rdev), regval);
/* Clear all implementation specific events */
rio_write_config_32(rdev,
IDT_PORT_ISERR_DET(portnum), 0);
}
}
return 0;
}
static ssize_t
idtg2_show_errlog(struct device *dev, struct device_attribute *attr, char *buf)
{
struct rio_dev *rdev = to_rio_dev(dev);
ssize_t len = 0;
u32 regval;
while (!rio_read_config_32(rdev, IDT_ERR_RD, &regval)) {
if (!regval) /* 0 = end of log */
break;
len += snprintf(buf + len, PAGE_SIZE - len,
"%08x\n", regval);
if (len >= (PAGE_SIZE - 10))
break;
}
return len;
}
static DEVICE_ATTR(errlog, S_IRUGO, idtg2_show_errlog, NULL);
static int idtg2_sysfs(struct rio_dev *rdev, bool create)
{
struct device *dev = &rdev->dev;
int err = 0;
if (create) {
/* Initialize sysfs entries */
err = device_create_file(dev, &dev_attr_errlog);
if (err)
dev_err(dev, "Unable create sysfs errlog file\n");
} else
device_remove_file(dev, &dev_attr_errlog);
return err;
}
static struct rio_switch_ops idtg2_switch_ops = {
.owner = THIS_MODULE,
.add_entry = idtg2_route_add_entry,
.get_entry = idtg2_route_get_entry,
.clr_table = idtg2_route_clr_table,
.set_domain = idtg2_set_domain,
.get_domain = idtg2_get_domain,
.em_init = idtg2_em_init,
.em_handle = idtg2_em_handler,
};
static int idtg2_probe(struct rio_dev *rdev, const struct rio_device_id *id)
{
pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev));
spin_lock(&rdev->rswitch->lock);
if (rdev->rswitch->ops) {
spin_unlock(&rdev->rswitch->lock);
return -EINVAL;
}
rdev->rswitch->ops = &idtg2_switch_ops;
if (rdev->do_enum) {
/* Ensure that default routing is disabled on startup */
rio_write_config_32(rdev,
RIO_STD_RTE_DEFAULT_PORT, IDT_NO_ROUTE);
}
/* Create device-specific sysfs attributes */
idtg2_sysfs(rdev, true);
spin_unlock(&rdev->rswitch->lock);
return 0;
}
static void idtg2_remove(struct rio_dev *rdev)
{
pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev));
spin_lock(&rdev->rswitch->lock);
if (rdev->rswitch->ops != &idtg2_switch_ops) {
spin_unlock(&rdev->rswitch->lock);
return;
}
rdev->rswitch->ops = NULL;
/* Remove device-specific sysfs attributes */
idtg2_sysfs(rdev, false);
spin_unlock(&rdev->rswitch->lock);
}
static struct rio_device_id idtg2_id_table[] = {
{RIO_DEVICE(RIO_DID_IDTCPS1848, RIO_VID_IDT)},
{RIO_DEVICE(RIO_DID_IDTCPS1616, RIO_VID_IDT)},
{RIO_DEVICE(RIO_DID_IDTVPS1616, RIO_VID_IDT)},
{RIO_DEVICE(RIO_DID_IDTSPS1616, RIO_VID_IDT)},
{RIO_DEVICE(RIO_DID_IDTCPS1432, RIO_VID_IDT)},
{ 0, } /* terminate list */
};
static struct rio_driver idtg2_driver = {
.name = "idt_gen2",
.id_table = idtg2_id_table,
.probe = idtg2_probe,
.remove = idtg2_remove,
};
static int __init idtg2_init(void)
{
return rio_register_driver(&idtg2_driver);
}
static void __exit idtg2_exit(void)
{
pr_debug("RIO: %s\n", __func__);
rio_unregister_driver(&idtg2_driver);
pr_debug("RIO: %s done\n", __func__);
}
device_initcall(idtg2_init);
module_exit(idtg2_exit);
MODULE_DESCRIPTION("IDT CPS Gen.2 Serial RapidIO switch family driver");
MODULE_AUTHOR("Integrated Device Technology, Inc.");
MODULE_LICENSE("GPL");

View file

@ -0,0 +1,203 @@
/*
* IDT CPS RapidIO switches support
*
* Copyright 2009-2010 Integrated Device Technology, Inc.
* Alexandre Bounine <alexandre.bounine@idt.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/rio.h>
#include <linux/rio_drv.h>
#include <linux/rio_ids.h>
#include <linux/module.h>
#include "../rio.h"
#define CPS_DEFAULT_ROUTE 0xde
#define CPS_NO_ROUTE 0xdf
#define IDTCPS_RIO_DOMAIN 0xf20020
static int
idtcps_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
u16 table, u16 route_destid, u8 route_port)
{
u32 result;
if (route_port == RIO_INVALID_ROUTE)
route_port = CPS_DEFAULT_ROUTE;
if (table == RIO_GLOBAL_TABLE) {
rio_mport_write_config_32(mport, destid, hopcount,
RIO_STD_RTE_CONF_DESTID_SEL_CSR, route_destid);
rio_mport_read_config_32(mport, destid, hopcount,
RIO_STD_RTE_CONF_PORT_SEL_CSR, &result);
result = (0xffffff00 & result) | (u32)route_port;
rio_mport_write_config_32(mport, destid, hopcount,
RIO_STD_RTE_CONF_PORT_SEL_CSR, result);
}
return 0;
}
static int
idtcps_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
u16 table, u16 route_destid, u8 *route_port)
{
u32 result;
if (table == RIO_GLOBAL_TABLE) {
rio_mport_write_config_32(mport, destid, hopcount,
RIO_STD_RTE_CONF_DESTID_SEL_CSR, route_destid);
rio_mport_read_config_32(mport, destid, hopcount,
RIO_STD_RTE_CONF_PORT_SEL_CSR, &result);
if (CPS_DEFAULT_ROUTE == (u8)result ||
CPS_NO_ROUTE == (u8)result)
*route_port = RIO_INVALID_ROUTE;
else
*route_port = (u8)result;
}
return 0;
}
static int
idtcps_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount,
u16 table)
{
u32 i;
if (table == RIO_GLOBAL_TABLE) {
for (i = 0x80000000; i <= 0x800000ff;) {
rio_mport_write_config_32(mport, destid, hopcount,
RIO_STD_RTE_CONF_DESTID_SEL_CSR, i);
rio_mport_write_config_32(mport, destid, hopcount,
RIO_STD_RTE_CONF_PORT_SEL_CSR,
(CPS_DEFAULT_ROUTE << 24) |
(CPS_DEFAULT_ROUTE << 16) |
(CPS_DEFAULT_ROUTE << 8) | CPS_DEFAULT_ROUTE);
i += 4;
}
}
return 0;
}
static int
idtcps_set_domain(struct rio_mport *mport, u16 destid, u8 hopcount,
u8 sw_domain)
{
/*
* Switch domain configuration operates only at global level
*/
rio_mport_write_config_32(mport, destid, hopcount,
IDTCPS_RIO_DOMAIN, (u32)sw_domain);
return 0;
}
static int
idtcps_get_domain(struct rio_mport *mport, u16 destid, u8 hopcount,
u8 *sw_domain)
{
u32 regval;
/*
* Switch domain configuration operates only at global level
*/
rio_mport_read_config_32(mport, destid, hopcount,
IDTCPS_RIO_DOMAIN, &regval);
*sw_domain = (u8)(regval & 0xff);
return 0;
}
static struct rio_switch_ops idtcps_switch_ops = {
.owner = THIS_MODULE,
.add_entry = idtcps_route_add_entry,
.get_entry = idtcps_route_get_entry,
.clr_table = idtcps_route_clr_table,
.set_domain = idtcps_set_domain,
.get_domain = idtcps_get_domain,
.em_init = NULL,
.em_handle = NULL,
};
static int idtcps_probe(struct rio_dev *rdev, const struct rio_device_id *id)
{
pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev));
spin_lock(&rdev->rswitch->lock);
if (rdev->rswitch->ops) {
spin_unlock(&rdev->rswitch->lock);
return -EINVAL;
}
rdev->rswitch->ops = &idtcps_switch_ops;
if (rdev->do_enum) {
/* set TVAL = ~50us */
rio_write_config_32(rdev,
rdev->phys_efptr + RIO_PORT_LINKTO_CTL_CSR, 0x8e << 8);
/* Ensure that default routing is disabled on startup */
rio_write_config_32(rdev,
RIO_STD_RTE_DEFAULT_PORT, CPS_NO_ROUTE);
}
spin_unlock(&rdev->rswitch->lock);
return 0;
}
static void idtcps_remove(struct rio_dev *rdev)
{
pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev));
spin_lock(&rdev->rswitch->lock);
if (rdev->rswitch->ops != &idtcps_switch_ops) {
spin_unlock(&rdev->rswitch->lock);
return;
}
rdev->rswitch->ops = NULL;
spin_unlock(&rdev->rswitch->lock);
}
static struct rio_device_id idtcps_id_table[] = {
{RIO_DEVICE(RIO_DID_IDTCPS6Q, RIO_VID_IDT)},
{RIO_DEVICE(RIO_DID_IDTCPS8, RIO_VID_IDT)},
{RIO_DEVICE(RIO_DID_IDTCPS10Q, RIO_VID_IDT)},
{RIO_DEVICE(RIO_DID_IDTCPS12, RIO_VID_IDT)},
{RIO_DEVICE(RIO_DID_IDTCPS16, RIO_VID_IDT)},
{RIO_DEVICE(RIO_DID_IDT70K200, RIO_VID_IDT)},
{ 0, } /* terminate list */
};
static struct rio_driver idtcps_driver = {
.name = "idtcps",
.id_table = idtcps_id_table,
.probe = idtcps_probe,
.remove = idtcps_remove,
};
static int __init idtcps_init(void)
{
return rio_register_driver(&idtcps_driver);
}
static void __exit idtcps_exit(void)
{
rio_unregister_driver(&idtcps_driver);
}
device_initcall(idtcps_init);
module_exit(idtcps_exit);
MODULE_DESCRIPTION("IDT CPS Gen.1 Serial RapidIO switch family driver");
MODULE_AUTHOR("Integrated Device Technology, Inc.");
MODULE_LICENSE("GPL");

View file

@ -0,0 +1,199 @@
/*
* RapidIO Tsi568 switch support
*
* Copyright 2009-2010 Integrated Device Technology, Inc.
* Alexandre Bounine <alexandre.bounine@idt.com>
* - Added EM support
* - Modified switch operations initialization.
*
* Copyright 2005 MontaVista Software, Inc.
* Matt Porter <mporter@kernel.crashing.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/rio.h>
#include <linux/rio_drv.h>
#include <linux/rio_ids.h>
#include <linux/delay.h>
#include <linux/module.h>
#include "../rio.h"
/* Global (broadcast) route registers */
#define SPBC_ROUTE_CFG_DESTID 0x10070
#define SPBC_ROUTE_CFG_PORT 0x10074
/* Per port route registers */
#define SPP_ROUTE_CFG_DESTID(n) (0x11070 + 0x100*n)
#define SPP_ROUTE_CFG_PORT(n) (0x11074 + 0x100*n)
#define TSI568_SP_MODE(n) (0x11004 + 0x100*n)
#define TSI568_SP_MODE_PW_DIS 0x08000000
static int
tsi568_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
u16 table, u16 route_destid, u8 route_port)
{
if (table == RIO_GLOBAL_TABLE) {
rio_mport_write_config_32(mport, destid, hopcount,
SPBC_ROUTE_CFG_DESTID, route_destid);
rio_mport_write_config_32(mport, destid, hopcount,
SPBC_ROUTE_CFG_PORT, route_port);
} else {
rio_mport_write_config_32(mport, destid, hopcount,
SPP_ROUTE_CFG_DESTID(table),
route_destid);
rio_mport_write_config_32(mport, destid, hopcount,
SPP_ROUTE_CFG_PORT(table), route_port);
}
udelay(10);
return 0;
}
static int
tsi568_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
u16 table, u16 route_destid, u8 *route_port)
{
int ret = 0;
u32 result;
if (table == RIO_GLOBAL_TABLE) {
rio_mport_write_config_32(mport, destid, hopcount,
SPBC_ROUTE_CFG_DESTID, route_destid);
rio_mport_read_config_32(mport, destid, hopcount,
SPBC_ROUTE_CFG_PORT, &result);
} else {
rio_mport_write_config_32(mport, destid, hopcount,
SPP_ROUTE_CFG_DESTID(table),
route_destid);
rio_mport_read_config_32(mport, destid, hopcount,
SPP_ROUTE_CFG_PORT(table), &result);
}
*route_port = result;
if (*route_port > 15)
ret = -1;
return ret;
}
static int
tsi568_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount,
u16 table)
{
u32 route_idx;
u32 lut_size;
lut_size = (mport->sys_size) ? 0x1ff : 0xff;
if (table == RIO_GLOBAL_TABLE) {
rio_mport_write_config_32(mport, destid, hopcount,
SPBC_ROUTE_CFG_DESTID, 0x80000000);
for (route_idx = 0; route_idx <= lut_size; route_idx++)
rio_mport_write_config_32(mport, destid, hopcount,
SPBC_ROUTE_CFG_PORT,
RIO_INVALID_ROUTE);
} else {
rio_mport_write_config_32(mport, destid, hopcount,
SPP_ROUTE_CFG_DESTID(table),
0x80000000);
for (route_idx = 0; route_idx <= lut_size; route_idx++)
rio_mport_write_config_32(mport, destid, hopcount,
SPP_ROUTE_CFG_PORT(table),
RIO_INVALID_ROUTE);
}
return 0;
}
static int
tsi568_em_init(struct rio_dev *rdev)
{
u32 regval;
int portnum;
pr_debug("TSI568 %s [%d:%d]\n", __func__, rdev->destid, rdev->hopcount);
/* Make sure that Port-Writes are disabled (for all ports) */
for (portnum = 0;
portnum < RIO_GET_TOTAL_PORTS(rdev->swpinfo); portnum++) {
rio_read_config_32(rdev, TSI568_SP_MODE(portnum), &regval);
rio_write_config_32(rdev, TSI568_SP_MODE(portnum),
regval | TSI568_SP_MODE_PW_DIS);
}
return 0;
}
static struct rio_switch_ops tsi568_switch_ops = {
.owner = THIS_MODULE,
.add_entry = tsi568_route_add_entry,
.get_entry = tsi568_route_get_entry,
.clr_table = tsi568_route_clr_table,
.set_domain = NULL,
.get_domain = NULL,
.em_init = tsi568_em_init,
.em_handle = NULL,
};
static int tsi568_probe(struct rio_dev *rdev, const struct rio_device_id *id)
{
pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev));
spin_lock(&rdev->rswitch->lock);
if (rdev->rswitch->ops) {
spin_unlock(&rdev->rswitch->lock);
return -EINVAL;
}
rdev->rswitch->ops = &tsi568_switch_ops;
spin_unlock(&rdev->rswitch->lock);
return 0;
}
static void tsi568_remove(struct rio_dev *rdev)
{
pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev));
spin_lock(&rdev->rswitch->lock);
if (rdev->rswitch->ops != &tsi568_switch_ops) {
spin_unlock(&rdev->rswitch->lock);
return;
}
rdev->rswitch->ops = NULL;
spin_unlock(&rdev->rswitch->lock);
}
static struct rio_device_id tsi568_id_table[] = {
{RIO_DEVICE(RIO_DID_TSI568, RIO_VID_TUNDRA)},
{ 0, } /* terminate list */
};
static struct rio_driver tsi568_driver = {
.name = "tsi568",
.id_table = tsi568_id_table,
.probe = tsi568_probe,
.remove = tsi568_remove,
};
static int __init tsi568_init(void)
{
return rio_register_driver(&tsi568_driver);
}
static void __exit tsi568_exit(void)
{
rio_unregister_driver(&tsi568_driver);
}
device_initcall(tsi568_init);
module_exit(tsi568_exit);
MODULE_DESCRIPTION("IDT Tsi568 Serial RapidIO switch driver");
MODULE_AUTHOR("Integrated Device Technology, Inc.");
MODULE_LICENSE("GPL");

View file

@ -0,0 +1,371 @@
/*
* RapidIO Tsi57x switch family support
*
* Copyright 2009-2010 Integrated Device Technology, Inc.
* Alexandre Bounine <alexandre.bounine@idt.com>
* - Added EM support
* - Modified switch operations initialization.
*
* Copyright 2005 MontaVista Software, Inc.
* Matt Porter <mporter@kernel.crashing.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/rio.h>
#include <linux/rio_drv.h>
#include <linux/rio_ids.h>
#include <linux/delay.h>
#include <linux/module.h>
#include "../rio.h"
/* Global (broadcast) route registers */
#define SPBC_ROUTE_CFG_DESTID 0x10070
#define SPBC_ROUTE_CFG_PORT 0x10074
/* Per port route registers */
#define SPP_ROUTE_CFG_DESTID(n) (0x11070 + 0x100*n)
#define SPP_ROUTE_CFG_PORT(n) (0x11074 + 0x100*n)
#define TSI578_SP_MODE(n) (0x11004 + n*0x100)
#define TSI578_SP_MODE_GLBL 0x10004
#define TSI578_SP_MODE_PW_DIS 0x08000000
#define TSI578_SP_MODE_LUT_512 0x01000000
#define TSI578_SP_CTL_INDEP(n) (0x13004 + n*0x100)
#define TSI578_SP_LUT_PEINF(n) (0x13010 + n*0x100)
#define TSI578_SP_CS_TX(n) (0x13014 + n*0x100)
#define TSI578_SP_INT_STATUS(n) (0x13018 + n*0x100)
#define TSI578_GLBL_ROUTE_BASE 0x10078
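/*
 * Register map note: the SPBC_* registers program the broadcast (all-port)
 * copy of the routing table, while the SPP_*(n) registers program the LUT
 * of an individual port n. The helpers below use the broadcast copy for
 * RIO_GLOBAL_TABLE updates and the per-port copy otherwise; global-table
 * lookups are redirected to the ingress port's LUT to avoid a race (see
 * the comment in tsi57x_route_get_entry()).
 */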
static int
tsi57x_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
u16 table, u16 route_destid, u8 route_port)
{
if (table == RIO_GLOBAL_TABLE) {
rio_mport_write_config_32(mport, destid, hopcount,
SPBC_ROUTE_CFG_DESTID, route_destid);
rio_mport_write_config_32(mport, destid, hopcount,
SPBC_ROUTE_CFG_PORT, route_port);
} else {
rio_mport_write_config_32(mport, destid, hopcount,
SPP_ROUTE_CFG_DESTID(table), route_destid);
rio_mport_write_config_32(mport, destid, hopcount,
SPP_ROUTE_CFG_PORT(table), route_port);
}
udelay(10);
return 0;
}
static int
tsi57x_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
u16 table, u16 route_destid, u8 *route_port)
{
int ret = 0;
u32 result;
if (table == RIO_GLOBAL_TABLE) {
/* Use local RT of the ingress port to avoid possible
race condition */
rio_mport_read_config_32(mport, destid, hopcount,
RIO_SWP_INFO_CAR, &result);
table = (result & RIO_SWP_INFO_PORT_NUM_MASK);
}
rio_mport_write_config_32(mport, destid, hopcount,
SPP_ROUTE_CFG_DESTID(table), route_destid);
rio_mport_read_config_32(mport, destid, hopcount,
SPP_ROUTE_CFG_PORT(table), &result);
*route_port = (u8)result;
if (*route_port > 15)
ret = -1;
return ret;
}
static int
tsi57x_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount,
u16 table)
{
u32 route_idx;
u32 lut_size;
lut_size = (mport->sys_size) ? 0x1ff : 0xff;
if (table == RIO_GLOBAL_TABLE) {
rio_mport_write_config_32(mport, destid, hopcount,
SPBC_ROUTE_CFG_DESTID, 0x80000000);
for (route_idx = 0; route_idx <= lut_size; route_idx++)
rio_mport_write_config_32(mport, destid, hopcount,
SPBC_ROUTE_CFG_PORT,
RIO_INVALID_ROUTE);
} else {
rio_mport_write_config_32(mport, destid, hopcount,
SPP_ROUTE_CFG_DESTID(table), 0x80000000);
for (route_idx = 0; route_idx <= lut_size; route_idx++)
rio_mport_write_config_32(mport, destid, hopcount,
SPP_ROUTE_CFG_PORT(table) , RIO_INVALID_ROUTE);
}
return 0;
}
static int
tsi57x_set_domain(struct rio_mport *mport, u16 destid, u8 hopcount,
u8 sw_domain)
{
u32 regval;
/*
* Switch domain configuration operates only at global level
*/
/* Turn off flat (LUT_512) mode */
rio_mport_read_config_32(mport, destid, hopcount,
TSI578_SP_MODE_GLBL, &regval);
rio_mport_write_config_32(mport, destid, hopcount, TSI578_SP_MODE_GLBL,
regval & ~TSI578_SP_MODE_LUT_512);
/* Set switch domain base */
rio_mport_write_config_32(mport, destid, hopcount,
TSI578_GLBL_ROUTE_BASE,
(u32)(sw_domain << 24));
return 0;
}
static int
tsi57x_get_domain(struct rio_mport *mport, u16 destid, u8 hopcount,
u8 *sw_domain)
{
u32 regval;
/*
* Switch domain configuration operates only at global level
*/
rio_mport_read_config_32(mport, destid, hopcount,
TSI578_GLBL_ROUTE_BASE, &regval);
*sw_domain = (u8)(regval >> 24);
return 0;
}
static int
tsi57x_em_init(struct rio_dev *rdev)
{
u32 regval;
int portnum;
pr_debug("TSI578 %s [%d:%d]\n", __func__, rdev->destid, rdev->hopcount);
for (portnum = 0;
portnum < RIO_GET_TOTAL_PORTS(rdev->swpinfo); portnum++) {
/* Make sure that Port-Writes are enabled (for all ports) */
rio_read_config_32(rdev,
TSI578_SP_MODE(portnum), &regval);
rio_write_config_32(rdev,
TSI578_SP_MODE(portnum),
regval & ~TSI578_SP_MODE_PW_DIS);
/* Clear all pending interrupts */
rio_read_config_32(rdev,
rdev->phys_efptr +
RIO_PORT_N_ERR_STS_CSR(portnum),
&regval);
rio_write_config_32(rdev,
rdev->phys_efptr +
RIO_PORT_N_ERR_STS_CSR(portnum),
regval & 0x07120214);
rio_read_config_32(rdev,
TSI578_SP_INT_STATUS(portnum), &regval);
rio_write_config_32(rdev,
TSI578_SP_INT_STATUS(portnum),
regval & 0x000700bd);
/* Enable all interrupts to allow ports to send a port-write */
rio_read_config_32(rdev,
TSI578_SP_CTL_INDEP(portnum), &regval);
rio_write_config_32(rdev,
TSI578_SP_CTL_INDEP(portnum),
regval | 0x000b0000);
/* Skip next (odd) port if the current port is in x4 mode */
rio_read_config_32(rdev,
rdev->phys_efptr + RIO_PORT_N_CTL_CSR(portnum),
&regval);
if ((regval & RIO_PORT_N_CTL_PWIDTH) == RIO_PORT_N_CTL_PWIDTH_4)
portnum++;
}
/* set TVAL = ~50us */
rio_write_config_32(rdev,
rdev->phys_efptr + RIO_PORT_LINKTO_CTL_CSR, 0x9a << 8);
return 0;
}
static int
tsi57x_em_handler(struct rio_dev *rdev, u8 portnum)
{
struct rio_mport *mport = rdev->net->hport;
u32 intstat, err_status;
int sendcount, checkcount;
u8 route_port;
u32 regval;
rio_read_config_32(rdev,
rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(portnum),
&err_status);
if ((err_status & RIO_PORT_N_ERR_STS_PORT_OK) &&
(err_status & (RIO_PORT_N_ERR_STS_PW_OUT_ES |
RIO_PORT_N_ERR_STS_PW_INP_ES))) {
/* Remove any queued packets by locking/unlocking port */
rio_read_config_32(rdev,
rdev->phys_efptr + RIO_PORT_N_CTL_CSR(portnum),
&regval);
if (!(regval & RIO_PORT_N_CTL_LOCKOUT)) {
rio_write_config_32(rdev,
rdev->phys_efptr + RIO_PORT_N_CTL_CSR(portnum),
regval | RIO_PORT_N_CTL_LOCKOUT);
udelay(50);
rio_write_config_32(rdev,
rdev->phys_efptr + RIO_PORT_N_CTL_CSR(portnum),
regval);
}
/* Read from link maintenance response register to clear
* valid bit
*/
rio_read_config_32(rdev,
rdev->phys_efptr + RIO_PORT_N_MNT_RSP_CSR(portnum),
&regval);
/* Send a Packet-Not-Accepted/Link-Request-Input-Status control
* symbol to recover from IES/OES
*/
sendcount = 3;
while (sendcount) {
rio_write_config_32(rdev,
TSI578_SP_CS_TX(portnum), 0x40fc8000);
checkcount = 3;
while (checkcount--) {
udelay(50);
rio_read_config_32(rdev,
rdev->phys_efptr +
RIO_PORT_N_MNT_RSP_CSR(portnum),
&regval);
if (regval & RIO_PORT_N_MNT_RSP_RVAL)
goto exit_es;
}
sendcount--;
}
}
exit_es:
/* Clear implementation specific error status bits */
rio_read_config_32(rdev, TSI578_SP_INT_STATUS(portnum), &intstat);
pr_debug("TSI578[%x:%x] SP%d_INT_STATUS=0x%08x\n",
rdev->destid, rdev->hopcount, portnum, intstat);
if (intstat & 0x10000) {
rio_read_config_32(rdev,
TSI578_SP_LUT_PEINF(portnum), &regval);
regval = (mport->sys_size) ? (regval >> 16) : (regval >> 24);
route_port = rdev->rswitch->route_table[regval];
pr_debug("RIO: TSI578[%s] P%d LUT Parity Error (destID=%d)\n",
rio_name(rdev), portnum, regval);
tsi57x_route_add_entry(mport, rdev->destid, rdev->hopcount,
RIO_GLOBAL_TABLE, regval, route_port);
}
rio_write_config_32(rdev, TSI578_SP_INT_STATUS(portnum),
intstat & 0x000700bd);
return 0;
}
static struct rio_switch_ops tsi57x_switch_ops = {
.owner = THIS_MODULE,
.add_entry = tsi57x_route_add_entry,
.get_entry = tsi57x_route_get_entry,
.clr_table = tsi57x_route_clr_table,
.set_domain = tsi57x_set_domain,
.get_domain = tsi57x_get_domain,
.em_init = tsi57x_em_init,
.em_handle = tsi57x_em_handler,
};
static int tsi57x_probe(struct rio_dev *rdev, const struct rio_device_id *id)
{
pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev));
spin_lock(&rdev->rswitch->lock);
if (rdev->rswitch->ops) {
spin_unlock(&rdev->rswitch->lock);
return -EINVAL;
}
rdev->rswitch->ops = &tsi57x_switch_ops;
if (rdev->do_enum) {
/* Ensure that default routing is disabled on startup */
rio_write_config_32(rdev, RIO_STD_RTE_DEFAULT_PORT,
RIO_INVALID_ROUTE);
}
spin_unlock(&rdev->rswitch->lock);
return 0;
}
static void tsi57x_remove(struct rio_dev *rdev)
{
pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev));
spin_lock(&rdev->rswitch->lock);
if (rdev->rswitch->ops != &tsi57x_switch_ops) {
spin_unlock(&rdev->rswitch->lock);
return;
}
rdev->rswitch->ops = NULL;
spin_unlock(&rdev->rswitch->lock);
}
static struct rio_device_id tsi57x_id_table[] = {
{RIO_DEVICE(RIO_DID_TSI572, RIO_VID_TUNDRA)},
{RIO_DEVICE(RIO_DID_TSI574, RIO_VID_TUNDRA)},
{RIO_DEVICE(RIO_DID_TSI577, RIO_VID_TUNDRA)},
{RIO_DEVICE(RIO_DID_TSI578, RIO_VID_TUNDRA)},
{ 0, } /* terminate list */
};
static struct rio_driver tsi57x_driver = {
.name = "tsi57x",
.id_table = tsi57x_id_table,
.probe = tsi57x_probe,
.remove = tsi57x_remove,
};
static int __init tsi57x_init(void)
{
return rio_register_driver(&tsi57x_driver);
}
static void __exit tsi57x_exit(void)
{
rio_unregister_driver(&tsi57x_driver);
}
device_initcall(tsi57x_init);
module_exit(tsi57x_exit);
MODULE_DESCRIPTION("IDT Tsi57x Serial RapidIO switch family driver");
MODULE_AUTHOR("Integrated Device Technology, Inc.");
MODULE_LICENSE("GPL");