Fixed MTP to work with TWRP

awab228 2018-06-19 23:16:04 +02:00
commit f6dfaef42e
50820 changed files with 20846062 additions and 0 deletions


@@ -0,0 +1,8 @@
if PLAT_PXA

config PXA_SSP
	tristate
	help
	  Enable support for PXA2xx SSP ports

endif


@@ -0,0 +1,10 @@
#
# Makefile for code common across different PXA processor families
#

obj-y				:= dma.o

obj-$(CONFIG_PXA3xx)		+= mfp.o
obj-$(CONFIG_ARCH_MMP)		+= mfp.o

obj-$(CONFIG_PXA_SSP)		+= ssp.o

arch/arm/plat-pxa/dma.c (new file, 391 lines)

@@ -0,0 +1,391 @@
/*
* linux/arch/arm/plat-pxa/dma.c
*
* PXA DMA registration and IRQ dispatching
*
* Author: Nicolas Pitre
* Created: Nov 15, 2001
* Copyright: MontaVista Software Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/dma-mapping.h>
#include <asm/irq.h>
#include <asm/memory.h>
#include <mach/hardware.h>
#include <mach/dma.h>
#define DMA_DEBUG_NAME "pxa_dma"
#define DMA_MAX_REQUESTERS 64
struct dma_channel {
char *name;
pxa_dma_prio prio;
void (*irq_handler)(int, void *);
void *data;
spinlock_t lock;
};
static struct dma_channel *dma_channels;
static int num_dma_channels;
/*
* Debug fs
*/
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
static struct dentry *dbgfs_root, *dbgfs_state, **dbgfs_chan;
static int dbg_show_requester_chan(struct seq_file *s, void *p)
{
int pos = 0;
int chan = (int)s->private;
int i;
u32 drcmr;
pos += seq_printf(s, "DMA channel %d requesters list :\n", chan);
for (i = 0; i < DMA_MAX_REQUESTERS; i++) {
drcmr = DRCMR(i);
if ((drcmr & DRCMR_CHLNUM) == chan)
pos += seq_printf(s, "\tRequester %d (MAPVLD=%d)\n", i,
!!(drcmr & DRCMR_MAPVLD));
}
return pos;
}
static inline int dbg_burst_from_dcmd(u32 dcmd)
{
int burst = (dcmd >> 16) & 0x3;
return burst ? 4 << burst : 0;
}
static int is_phys_valid(unsigned long addr)
{
return pfn_valid(__phys_to_pfn(addr));
}
#define DCSR_STR(flag) (dcsr & DCSR_##flag ? #flag" " : "")
#define DCMD_STR(flag) (dcmd & DCMD_##flag ? #flag" " : "")
static int dbg_show_descriptors(struct seq_file *s, void *p)
{
int pos = 0;
int chan = (int)s->private;
int i, max_show = 20, burst, width;
u32 dcmd;
unsigned long phys_desc;
struct pxa_dma_desc *desc;
unsigned long flags;
spin_lock_irqsave(&dma_channels[chan].lock, flags);
phys_desc = DDADR(chan);
pos += seq_printf(s, "DMA channel %d descriptors :\n", chan);
pos += seq_printf(s, "[%03d] First descriptor unknown\n", 0);
for (i = 1; i < max_show && is_phys_valid(phys_desc); i++) {
desc = phys_to_virt(phys_desc);
dcmd = desc->dcmd;
burst = dbg_burst_from_dcmd(dcmd);
width = (1 << ((dcmd >> 14) & 0x3)) >> 1;
pos += seq_printf(s, "[%03d] Desc at %08lx(virt %p)\n",
i, phys_desc, desc);
pos += seq_printf(s, "\tDDADR = %08x\n", desc->ddadr);
pos += seq_printf(s, "\tDSADR = %08x\n", desc->dsadr);
pos += seq_printf(s, "\tDTADR = %08x\n", desc->dtadr);
pos += seq_printf(s, "\tDCMD = %08x (%s%s%s%s%s%s%sburst=%d"
" width=%d len=%d)\n",
dcmd,
DCMD_STR(INCSRCADDR), DCMD_STR(INCTRGADDR),
DCMD_STR(FLOWSRC), DCMD_STR(FLOWTRG),
DCMD_STR(STARTIRQEN), DCMD_STR(ENDIRQEN),
DCMD_STR(ENDIAN), burst, width,
dcmd & DCMD_LENGTH);
phys_desc = desc->ddadr;
}
if (i == max_show)
pos += seq_printf(s, "[%03d] Desc at %08lx ... max display reached\n",
i, phys_desc);
else
pos += seq_printf(s, "[%03d] Desc at %08lx is %s\n",
i, phys_desc, phys_desc == DDADR_STOP ?
"DDADR_STOP" : "invalid");
spin_unlock_irqrestore(&dma_channels[chan].lock, flags);
return pos;
}
static int dbg_show_chan_state(struct seq_file *s, void *p)
{
int pos = 0;
int chan = (int)s->private;
u32 dcsr, dcmd;
int burst, width;
static char *str_prio[] = { "high", "normal", "low" };
dcsr = DCSR(chan);
dcmd = DCMD(chan);
burst = dbg_burst_from_dcmd(dcmd);
width = (1 << ((dcmd >> 14) & 0x3)) >> 1;
pos += seq_printf(s, "DMA channel %d\n", chan);
pos += seq_printf(s, "\tPriority : %s\n",
str_prio[dma_channels[chan].prio]);
pos += seq_printf(s, "\tUnaligned transfer bit: %s\n",
DALGN & (1 << chan) ? "yes" : "no");
pos += seq_printf(s, "\tDCSR = %08x (%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s)\n",
dcsr, DCSR_STR(RUN), DCSR_STR(NODESC),
DCSR_STR(STOPIRQEN), DCSR_STR(EORIRQEN),
DCSR_STR(EORJMPEN), DCSR_STR(EORSTOPEN),
DCSR_STR(SETCMPST), DCSR_STR(CLRCMPST),
DCSR_STR(CMPST), DCSR_STR(EORINTR), DCSR_STR(REQPEND),
DCSR_STR(STOPSTATE), DCSR_STR(ENDINTR),
DCSR_STR(STARTINTR), DCSR_STR(BUSERR));
pos += seq_printf(s, "\tDCMD = %08x (%s%s%s%s%s%s%sburst=%d width=%d"
" len=%d)\n",
dcmd,
DCMD_STR(INCSRCADDR), DCMD_STR(INCTRGADDR),
DCMD_STR(FLOWSRC), DCMD_STR(FLOWTRG),
DCMD_STR(STARTIRQEN), DCMD_STR(ENDIRQEN),
DCMD_STR(ENDIAN), burst, width, dcmd & DCMD_LENGTH);
pos += seq_printf(s, "\tDSADR = %08x\n", DSADR(chan));
pos += seq_printf(s, "\tDTADR = %08x\n", DTADR(chan));
pos += seq_printf(s, "\tDDADR = %08x\n", DDADR(chan));
return pos;
}
static int dbg_show_state(struct seq_file *s, void *p)
{
int pos = 0;
/* basic device status */
pos += seq_printf(s, "DMA engine status\n");
pos += seq_printf(s, "\tChannel number: %d\n", num_dma_channels);
return pos;
}
#define DBGFS_FUNC_DECL(name) \
static int dbg_open_##name(struct inode *inode, struct file *file) \
{ \
return single_open(file, dbg_show_##name, inode->i_private); \
} \
static const struct file_operations dbg_fops_##name = { \
.owner = THIS_MODULE, \
.open = dbg_open_##name, \
.llseek = seq_lseek, \
.read = seq_read, \
.release = single_release, \
}
DBGFS_FUNC_DECL(state);
DBGFS_FUNC_DECL(chan_state);
DBGFS_FUNC_DECL(descriptors);
DBGFS_FUNC_DECL(requester_chan);
static struct dentry *pxa_dma_dbg_alloc_chan(int ch, struct dentry *chandir)
{
char chan_name[11];
struct dentry *chan, *chan_state = NULL, *chan_descr = NULL;
struct dentry *chan_reqs = NULL;
void *dt;
scnprintf(chan_name, sizeof(chan_name), "%d", ch);
chan = debugfs_create_dir(chan_name, chandir);
dt = (void *)ch;
if (chan)
chan_state = debugfs_create_file("state", 0400, chan, dt,
&dbg_fops_chan_state);
if (chan_state)
chan_descr = debugfs_create_file("descriptors", 0400, chan, dt,
&dbg_fops_descriptors);
if (chan_descr)
chan_reqs = debugfs_create_file("requesters", 0400, chan, dt,
&dbg_fops_requester_chan);
if (!chan_reqs)
goto err_state;
return chan;
err_state:
debugfs_remove_recursive(chan);
return NULL;
}
static void pxa_dma_init_debugfs(void)
{
int i;
struct dentry *chandir;
dbgfs_root = debugfs_create_dir(DMA_DEBUG_NAME, NULL);
if (IS_ERR(dbgfs_root) || !dbgfs_root)
goto err_root;
dbgfs_state = debugfs_create_file("state", 0400, dbgfs_root, NULL,
&dbg_fops_state);
if (!dbgfs_state)
goto err_state;
dbgfs_chan = kmalloc(sizeof(*dbgfs_state) * num_dma_channels,
GFP_KERNEL);
if (!dbgfs_chan)
goto err_alloc;
chandir = debugfs_create_dir("channels", dbgfs_root);
if (!chandir)
goto err_chandir;
for (i = 0; i < num_dma_channels; i++) {
dbgfs_chan[i] = pxa_dma_dbg_alloc_chan(i, chandir);
if (!dbgfs_chan[i])
goto err_chans;
}
return;
err_chans:
err_chandir:
kfree(dbgfs_chan);
err_alloc:
err_state:
debugfs_remove_recursive(dbgfs_root);
err_root:
pr_err("pxa_dma: debugfs is not available\n");
}
static void __exit pxa_dma_cleanup_debugfs(void)
{
debugfs_remove_recursive(dbgfs_root);
}
#else
static inline void pxa_dma_init_debugfs(void) {}
static inline void pxa_dma_cleanup_debugfs(void) {}
#endif
int pxa_request_dma (char *name, pxa_dma_prio prio,
void (*irq_handler)(int, void *),
void *data)
{
unsigned long flags;
int i, found = 0;
/* basic sanity checks */
if (!name || !irq_handler)
return -EINVAL;
local_irq_save(flags);
do {
/* try grabbing a DMA channel with the requested priority */
for (i = 0; i < num_dma_channels; i++) {
if ((dma_channels[i].prio == prio) &&
!dma_channels[i].name) {
found = 1;
break;
}
}
/* if requested prio group is full, try a higher priority */
} while (!found && prio--);
if (found) {
DCSR(i) = DCSR_STARTINTR|DCSR_ENDINTR|DCSR_BUSERR;
dma_channels[i].name = name;
dma_channels[i].irq_handler = irq_handler;
dma_channels[i].data = data;
} else {
printk (KERN_WARNING "No more available DMA channels for %s\n", name);
i = -ENODEV;
}
local_irq_restore(flags);
return i;
}
EXPORT_SYMBOL(pxa_request_dma);
void pxa_free_dma (int dma_ch)
{
unsigned long flags;
if (!dma_channels[dma_ch].name) {
printk (KERN_CRIT
"%s: trying to free channel %d which is already freed\n",
__func__, dma_ch);
return;
}
local_irq_save(flags);
DCSR(dma_ch) = DCSR_STARTINTR|DCSR_ENDINTR|DCSR_BUSERR;
dma_channels[dma_ch].name = NULL;
local_irq_restore(flags);
}
EXPORT_SYMBOL(pxa_free_dma);
static irqreturn_t dma_irq_handler(int irq, void *dev_id)
{
int i, dint = DINT;
struct dma_channel *channel;
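	/*
	 * DINT has one bit set for every channel with a pending interrupt;
	 * dispatch them lowest-first, clearing each bit as it is handled.
	 */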
while (dint) {
i = __ffs(dint);
dint &= (dint - 1);
channel = &dma_channels[i];
if (channel->name && channel->irq_handler) {
channel->irq_handler(i, channel->data);
} else {
/*
* IRQ for an unregistered DMA channel:
* let's clear the interrupts and disable it.
*/
printk (KERN_WARNING "spurious IRQ for DMA channel %d\n", i);
DCSR(i) = DCSR_STARTINTR|DCSR_ENDINTR|DCSR_BUSERR;
}
}
return IRQ_HANDLED;
}
int __init pxa_init_dma(int irq, int num_ch)
{
int i, ret;
dma_channels = kzalloc(sizeof(struct dma_channel) * num_ch, GFP_KERNEL);
if (dma_channels == NULL)
return -ENOMEM;
/* dma channel priorities on pxa2xx processors:
* ch 0 - 3, 16 - 19 <--> (0) DMA_PRIO_HIGH
* ch 4 - 7, 20 - 23 <--> (1) DMA_PRIO_MEDIUM
* ch 8 - 15, 24 - 31 <--> (2) DMA_PRIO_LOW
*/
for (i = 0; i < num_ch; i++) {
DCSR(i) = 0;
dma_channels[i].prio = min((i & 0xf) >> 2, DMA_PRIO_LOW);
spin_lock_init(&dma_channels[i].lock);
}
ret = request_irq(irq, dma_irq_handler, 0, "DMA", NULL);
if (ret) {
printk (KERN_CRIT "Wow! Can't register IRQ for DMA\n");
kfree(dma_channels);
return ret;
}
num_dma_channels = num_ch;
pxa_dma_init_debugfs();
return 0;
}
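
For reference, a hypothetical client of this legacy API would claim a channel, map its device requester onto it via DRCMR, and release the channel when done. The sketch below only illustrates the call pattern of pxa_request_dma()/pxa_free_dma(); the callback name and requester number are made up and not part of this commit.

/* Hypothetical usage sketch, assuming the DRCMR/DCSR macros from <mach/dma.h>. */
static void example_dma_irq(int channel, void *data)
{
	/* acknowledge and clear this channel's interrupt sources */
	DCSR(channel) = DCSR_STARTINTR | DCSR_ENDINTR | DCSR_BUSERR;
}

static int example_setup(void)
{
	int ch = pxa_request_dma("example-dev", DMA_PRIO_LOW,
				 example_dma_irq, NULL);

	if (ch < 0)
		return ch;			/* no free channel at any priority */

	DRCMR(42) = ch | DRCMR_MAPVLD;		/* route requester 42 to the channel */
	/* ... program DDADR/DSADR/DTADR/DCMD and set DCSR_RUN ... */

	pxa_free_dma(ch);
	return 0;
}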


@@ -0,0 +1,85 @@
#ifndef __PLAT_DMA_H
#define __PLAT_DMA_H
#define DMAC_REG(x) (*((volatile u32 *)(DMAC_REGS_VIRT + (x))))
#define DCSR(n) DMAC_REG((n) << 2)
#define DALGN DMAC_REG(0x00a0) /* DMA Alignment Register */
#define DINT DMAC_REG(0x00f0) /* DMA Interrupt Register */
#define DDADR(n) DMAC_REG(0x0200 + ((n) << 4))
#define DSADR(n) DMAC_REG(0x0204 + ((n) << 4))
#define DTADR(n) DMAC_REG(0x0208 + ((n) << 4))
#define DCMD(n) DMAC_REG(0x020c + ((n) << 4))
#define DRCMR(n) DMAC_REG((((n) < 64) ? 0x0100 : 0x1100) + \
(((n) & 0x3f) << 2))
#define DCSR_RUN (1 << 31) /* Run Bit (read / write) */
#define DCSR_NODESC (1 << 30) /* No-Descriptor Fetch (read / write) */
#define DCSR_STOPIRQEN (1 << 29) /* Stop Interrupt Enable (read / write) */
#define DCSR_REQPEND (1 << 8) /* Request Pending (read-only) */
#define DCSR_STOPSTATE (1 << 3) /* Stop State (read-only) */
#define DCSR_ENDINTR (1 << 2) /* End Interrupt (read / write) */
#define DCSR_STARTINTR (1 << 1) /* Start Interrupt (read / write) */
#define DCSR_BUSERR (1 << 0) /* Bus Error Interrupt (read / write) */
#define DCSR_EORIRQEN (1 << 28) /* End of Receive Interrupt Enable (R/W) */
#define DCSR_EORJMPEN (1 << 27) /* Jump to next descriptor on EOR */
#define DCSR_EORSTOPEN (1 << 26) /* STOP on an EOR */
#define DCSR_SETCMPST (1 << 25) /* Set Descriptor Compare Status */
#define DCSR_CLRCMPST (1 << 24) /* Clear Descriptor Compare Status */
#define DCSR_CMPST (1 << 10) /* The Descriptor Compare Status */
#define DCSR_EORINTR (1 << 9) /* The end of Receive */
#define DRCMR_MAPVLD (1 << 7) /* Map Valid (read / write) */
#define DRCMR_CHLNUM 0x1f /* mask for Channel Number (read / write) */
#define DDADR_DESCADDR 0xfffffff0 /* Address of next descriptor (mask) */
#define DDADR_STOP (1 << 0) /* Stop (read / write) */
#define DCMD_INCSRCADDR (1 << 31) /* Source Address Increment Setting. */
#define DCMD_INCTRGADDR (1 << 30) /* Target Address Increment Setting. */
#define DCMD_FLOWSRC (1 << 29) /* Flow Control by the source. */
#define DCMD_FLOWTRG (1 << 28) /* Flow Control by the target. */
#define DCMD_STARTIRQEN (1 << 22) /* Start Interrupt Enable */
#define DCMD_ENDIRQEN (1 << 21) /* End Interrupt Enable */
#define DCMD_ENDIAN (1 << 18) /* Device Endian-ness. */
#define DCMD_BURST8 (1 << 16) /* 8 byte burst */
#define DCMD_BURST16 (2 << 16) /* 16 byte burst */
#define DCMD_BURST32 (3 << 16) /* 32 byte burst */
#define DCMD_WIDTH1 (1 << 14) /* 1 byte width */
#define DCMD_WIDTH2 (2 << 14) /* 2 byte width (HalfWord) */
#define DCMD_WIDTH4 (3 << 14) /* 4 byte width (Word) */
#define DCMD_LENGTH 0x01fff /* length mask (max = 8K - 1) */
/*
* Descriptor structure for PXA's DMA engine
* Note: this structure must always be aligned to a 16-byte boundary.
*/
typedef struct pxa_dma_desc {
volatile u32 ddadr; /* Points to the next descriptor + flags */
volatile u32 dsadr; /* DSADR value for the current transfer */
volatile u32 dtadr; /* DTADR value for the current transfer */
volatile u32 dcmd; /* DCMD value for the current transfer */
} pxa_dma_desc;
typedef enum {
DMA_PRIO_HIGH = 0,
DMA_PRIO_MEDIUM = 1,
DMA_PRIO_LOW = 2
} pxa_dma_prio;
/*
* DMA registration
*/
int __init pxa_init_dma(int irq, int num_ch);
int pxa_request_dma (char *name,
pxa_dma_prio prio,
void (*irq_handler)(int, void *),
void *data);
void pxa_free_dma (int dma_ch);
#endif /* __PLAT_DMA_H */
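
To make the DCMD_*/DDADR_* encoding above concrete, a single descriptor for a 2 KiB memory-to-device transfer (32-bit width, 32-byte bursts) could be assembled as in the sketch below; the function name and the buffer/FIFO addresses are placeholders, not values from this commit.

/* Illustrative sketch only; addresses are placeholders. */
static void example_fill_desc(pxa_dma_desc *desc, u32 buf_phys, u32 fifo_phys)
{
	desc->ddadr = DDADR_STOP;		/* last (and only) descriptor */
	desc->dsadr = buf_phys;			/* source: memory buffer */
	desc->dtadr = fifo_phys;		/* target: device FIFO */
	desc->dcmd  = DCMD_INCSRCADDR |		/* increment through the buffer */
		      DCMD_FLOWTRG |		/* target-paced flow control */
		      DCMD_BURST32 | DCMD_WIDTH4 |
		      DCMD_ENDIRQEN |		/* interrupt on completion */
		      (2048 & DCMD_LENGTH);	/* transfer length in bytes */
}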


@@ -0,0 +1,475 @@
/*
* arch/arm/plat-pxa/include/plat/mfp.h
*
* Common Multi-Function Pin Definitions
*
* Copyright (C) 2007 Marvell International Ltd.
*
* 2007-8-21: eric miao <eric.miao@marvell.com>
* initial version
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASM_PLAT_MFP_H
#define __ASM_PLAT_MFP_H
#define mfp_to_gpio(m) ((m) % 256)
/* list of all the configurable MFP pins */
enum {
MFP_PIN_INVALID = -1,
MFP_PIN_GPIO0 = 0,
MFP_PIN_GPIO1,
MFP_PIN_GPIO2,
MFP_PIN_GPIO3,
MFP_PIN_GPIO4,
MFP_PIN_GPIO5,
MFP_PIN_GPIO6,
MFP_PIN_GPIO7,
MFP_PIN_GPIO8,
MFP_PIN_GPIO9,
MFP_PIN_GPIO10,
MFP_PIN_GPIO11,
MFP_PIN_GPIO12,
MFP_PIN_GPIO13,
MFP_PIN_GPIO14,
MFP_PIN_GPIO15,
MFP_PIN_GPIO16,
MFP_PIN_GPIO17,
MFP_PIN_GPIO18,
MFP_PIN_GPIO19,
MFP_PIN_GPIO20,
MFP_PIN_GPIO21,
MFP_PIN_GPIO22,
MFP_PIN_GPIO23,
MFP_PIN_GPIO24,
MFP_PIN_GPIO25,
MFP_PIN_GPIO26,
MFP_PIN_GPIO27,
MFP_PIN_GPIO28,
MFP_PIN_GPIO29,
MFP_PIN_GPIO30,
MFP_PIN_GPIO31,
MFP_PIN_GPIO32,
MFP_PIN_GPIO33,
MFP_PIN_GPIO34,
MFP_PIN_GPIO35,
MFP_PIN_GPIO36,
MFP_PIN_GPIO37,
MFP_PIN_GPIO38,
MFP_PIN_GPIO39,
MFP_PIN_GPIO40,
MFP_PIN_GPIO41,
MFP_PIN_GPIO42,
MFP_PIN_GPIO43,
MFP_PIN_GPIO44,
MFP_PIN_GPIO45,
MFP_PIN_GPIO46,
MFP_PIN_GPIO47,
MFP_PIN_GPIO48,
MFP_PIN_GPIO49,
MFP_PIN_GPIO50,
MFP_PIN_GPIO51,
MFP_PIN_GPIO52,
MFP_PIN_GPIO53,
MFP_PIN_GPIO54,
MFP_PIN_GPIO55,
MFP_PIN_GPIO56,
MFP_PIN_GPIO57,
MFP_PIN_GPIO58,
MFP_PIN_GPIO59,
MFP_PIN_GPIO60,
MFP_PIN_GPIO61,
MFP_PIN_GPIO62,
MFP_PIN_GPIO63,
MFP_PIN_GPIO64,
MFP_PIN_GPIO65,
MFP_PIN_GPIO66,
MFP_PIN_GPIO67,
MFP_PIN_GPIO68,
MFP_PIN_GPIO69,
MFP_PIN_GPIO70,
MFP_PIN_GPIO71,
MFP_PIN_GPIO72,
MFP_PIN_GPIO73,
MFP_PIN_GPIO74,
MFP_PIN_GPIO75,
MFP_PIN_GPIO76,
MFP_PIN_GPIO77,
MFP_PIN_GPIO78,
MFP_PIN_GPIO79,
MFP_PIN_GPIO80,
MFP_PIN_GPIO81,
MFP_PIN_GPIO82,
MFP_PIN_GPIO83,
MFP_PIN_GPIO84,
MFP_PIN_GPIO85,
MFP_PIN_GPIO86,
MFP_PIN_GPIO87,
MFP_PIN_GPIO88,
MFP_PIN_GPIO89,
MFP_PIN_GPIO90,
MFP_PIN_GPIO91,
MFP_PIN_GPIO92,
MFP_PIN_GPIO93,
MFP_PIN_GPIO94,
MFP_PIN_GPIO95,
MFP_PIN_GPIO96,
MFP_PIN_GPIO97,
MFP_PIN_GPIO98,
MFP_PIN_GPIO99,
MFP_PIN_GPIO100,
MFP_PIN_GPIO101,
MFP_PIN_GPIO102,
MFP_PIN_GPIO103,
MFP_PIN_GPIO104,
MFP_PIN_GPIO105,
MFP_PIN_GPIO106,
MFP_PIN_GPIO107,
MFP_PIN_GPIO108,
MFP_PIN_GPIO109,
MFP_PIN_GPIO110,
MFP_PIN_GPIO111,
MFP_PIN_GPIO112,
MFP_PIN_GPIO113,
MFP_PIN_GPIO114,
MFP_PIN_GPIO115,
MFP_PIN_GPIO116,
MFP_PIN_GPIO117,
MFP_PIN_GPIO118,
MFP_PIN_GPIO119,
MFP_PIN_GPIO120,
MFP_PIN_GPIO121,
MFP_PIN_GPIO122,
MFP_PIN_GPIO123,
MFP_PIN_GPIO124,
MFP_PIN_GPIO125,
MFP_PIN_GPIO126,
MFP_PIN_GPIO127,
MFP_PIN_GPIO128,
MFP_PIN_GPIO129,
MFP_PIN_GPIO130,
MFP_PIN_GPIO131,
MFP_PIN_GPIO132,
MFP_PIN_GPIO133,
MFP_PIN_GPIO134,
MFP_PIN_GPIO135,
MFP_PIN_GPIO136,
MFP_PIN_GPIO137,
MFP_PIN_GPIO138,
MFP_PIN_GPIO139,
MFP_PIN_GPIO140,
MFP_PIN_GPIO141,
MFP_PIN_GPIO142,
MFP_PIN_GPIO143,
MFP_PIN_GPIO144,
MFP_PIN_GPIO145,
MFP_PIN_GPIO146,
MFP_PIN_GPIO147,
MFP_PIN_GPIO148,
MFP_PIN_GPIO149,
MFP_PIN_GPIO150,
MFP_PIN_GPIO151,
MFP_PIN_GPIO152,
MFP_PIN_GPIO153,
MFP_PIN_GPIO154,
MFP_PIN_GPIO155,
MFP_PIN_GPIO156,
MFP_PIN_GPIO157,
MFP_PIN_GPIO158,
MFP_PIN_GPIO159,
MFP_PIN_GPIO160,
MFP_PIN_GPIO161,
MFP_PIN_GPIO162,
MFP_PIN_GPIO163,
MFP_PIN_GPIO164,
MFP_PIN_GPIO165,
MFP_PIN_GPIO166,
MFP_PIN_GPIO167,
MFP_PIN_GPIO168,
MFP_PIN_GPIO169,
MFP_PIN_GPIO170,
MFP_PIN_GPIO171,
MFP_PIN_GPIO172,
MFP_PIN_GPIO173,
MFP_PIN_GPIO174,
MFP_PIN_GPIO175,
MFP_PIN_GPIO176,
MFP_PIN_GPIO177,
MFP_PIN_GPIO178,
MFP_PIN_GPIO179,
MFP_PIN_GPIO180,
MFP_PIN_GPIO181,
MFP_PIN_GPIO182,
MFP_PIN_GPIO183,
MFP_PIN_GPIO184,
MFP_PIN_GPIO185,
MFP_PIN_GPIO186,
MFP_PIN_GPIO187,
MFP_PIN_GPIO188,
MFP_PIN_GPIO189,
MFP_PIN_GPIO190,
MFP_PIN_GPIO191,
MFP_PIN_GPIO255 = 255,
MFP_PIN_GPIO0_2,
MFP_PIN_GPIO1_2,
MFP_PIN_GPIO2_2,
MFP_PIN_GPIO3_2,
MFP_PIN_GPIO4_2,
MFP_PIN_GPIO5_2,
MFP_PIN_GPIO6_2,
MFP_PIN_GPIO7_2,
MFP_PIN_GPIO8_2,
MFP_PIN_GPIO9_2,
MFP_PIN_GPIO10_2,
MFP_PIN_GPIO11_2,
MFP_PIN_GPIO12_2,
MFP_PIN_GPIO13_2,
MFP_PIN_GPIO14_2,
MFP_PIN_GPIO15_2,
MFP_PIN_GPIO16_2,
MFP_PIN_GPIO17_2,
MFP_PIN_ULPI_STP,
MFP_PIN_ULPI_NXT,
MFP_PIN_ULPI_DIR,
MFP_PIN_nXCVREN,
MFP_PIN_DF_CLE_nOE,
MFP_PIN_DF_nADV1_ALE,
MFP_PIN_DF_SCLK_E,
MFP_PIN_DF_SCLK_S,
MFP_PIN_nBE0,
MFP_PIN_nBE1,
MFP_PIN_DF_nADV2_ALE,
MFP_PIN_DF_INT_RnB,
MFP_PIN_DF_nCS0,
MFP_PIN_DF_nCS1,
MFP_PIN_nLUA,
MFP_PIN_nLLA,
MFP_PIN_DF_nWE,
MFP_PIN_DF_ALE_nWE,
MFP_PIN_DF_nRE_nOE,
MFP_PIN_DF_ADDR0,
MFP_PIN_DF_ADDR1,
MFP_PIN_DF_ADDR2,
MFP_PIN_DF_ADDR3,
MFP_PIN_DF_IO0,
MFP_PIN_DF_IO1,
MFP_PIN_DF_IO2,
MFP_PIN_DF_IO3,
MFP_PIN_DF_IO4,
MFP_PIN_DF_IO5,
MFP_PIN_DF_IO6,
MFP_PIN_DF_IO7,
MFP_PIN_DF_IO8,
MFP_PIN_DF_IO9,
MFP_PIN_DF_IO10,
MFP_PIN_DF_IO11,
MFP_PIN_DF_IO12,
MFP_PIN_DF_IO13,
MFP_PIN_DF_IO14,
MFP_PIN_DF_IO15,
MFP_PIN_DF_nCS0_SM_nCS2,
MFP_PIN_DF_nCS1_SM_nCS3,
MFP_PIN_SM_nCS0,
MFP_PIN_SM_nCS1,
MFP_PIN_DF_WEn,
MFP_PIN_DF_REn,
MFP_PIN_DF_CLE_SM_OEn,
MFP_PIN_DF_ALE_SM_WEn,
MFP_PIN_DF_RDY0,
MFP_PIN_DF_RDY1,
MFP_PIN_SM_SCLK,
MFP_PIN_SM_BE0,
MFP_PIN_SM_BE1,
MFP_PIN_SM_ADV,
MFP_PIN_SM_ADVMUX,
MFP_PIN_SM_RDY,
MFP_PIN_MMC1_DAT7,
MFP_PIN_MMC1_DAT6,
MFP_PIN_MMC1_DAT5,
MFP_PIN_MMC1_DAT4,
MFP_PIN_MMC1_DAT3,
MFP_PIN_MMC1_DAT2,
MFP_PIN_MMC1_DAT1,
MFP_PIN_MMC1_DAT0,
MFP_PIN_MMC1_CMD,
MFP_PIN_MMC1_CLK,
MFP_PIN_MMC1_CD,
MFP_PIN_MMC1_WP,
/* additional pins on PXA930 */
MFP_PIN_GSIM_UIO,
MFP_PIN_GSIM_UCLK,
MFP_PIN_GSIM_UDET,
MFP_PIN_GSIM_nURST,
MFP_PIN_PMIC_INT,
MFP_PIN_RDY,
/* additional pins on MMP2 */
MFP_PIN_TWSI1_SCL,
MFP_PIN_TWSI1_SDA,
MFP_PIN_TWSI4_SCL,
MFP_PIN_TWSI4_SDA,
MFP_PIN_CLK_REQ,
MFP_PIN_MAX,
};
/*
* a possible MFP configuration is represented by a 32-bit integer
*
* bit 0.. 9 - MFP Pin Number (1024 Pins Maximum)
* bit 10..12 - Alternate Function Selection
* bit 13..15 - Drive Strength
* bit 16..18 - Low Power Mode State
* bit 19..20 - Low Power Mode Edge Detection
* bit 21..22 - Run Mode Pull State
*
* to facilitate the definition, the following macros are provided
*
* MFP_CFG_DEFAULT - default MFP configuration value, with
* alternate function = 0,
* drive strength = fast 3mA (MFP_DS03X)
* low power mode = default
* edge detection = none
*
* MFP_CFG - default MFPR value with alternate function
* MFP_CFG_DRV - default MFPR value with alternate function and
* pin drive strength
* MFP_CFG_LPM - default MFPR value with alternate function and
* low power mode
* MFP_CFG_X - default MFPR value with alternate function,
* pin drive strength and low power mode
*/
typedef unsigned long mfp_cfg_t;
#define MFP_PIN(x) ((x) & 0x3ff)
#define MFP_AF0 (0x0 << 10)
#define MFP_AF1 (0x1 << 10)
#define MFP_AF2 (0x2 << 10)
#define MFP_AF3 (0x3 << 10)
#define MFP_AF4 (0x4 << 10)
#define MFP_AF5 (0x5 << 10)
#define MFP_AF6 (0x6 << 10)
#define MFP_AF7 (0x7 << 10)
#define MFP_AF_MASK (0x7 << 10)
#define MFP_AF(x) (((x) >> 10) & 0x7)
#define MFP_DS01X (0x0 << 13)
#define MFP_DS02X (0x1 << 13)
#define MFP_DS03X (0x2 << 13)
#define MFP_DS04X (0x3 << 13)
#define MFP_DS06X (0x4 << 13)
#define MFP_DS08X (0x5 << 13)
#define MFP_DS10X (0x6 << 13)
#define MFP_DS13X (0x7 << 13)
#define MFP_DS_MASK (0x7 << 13)
#define MFP_DS(x) (((x) >> 13) & 0x7)
#define MFP_LPM_DEFAULT (0x0 << 16)
#define MFP_LPM_DRIVE_LOW (0x1 << 16)
#define MFP_LPM_DRIVE_HIGH (0x2 << 16)
#define MFP_LPM_PULL_LOW (0x3 << 16)
#define MFP_LPM_PULL_HIGH (0x4 << 16)
#define MFP_LPM_FLOAT (0x5 << 16)
#define MFP_LPM_INPUT (0x6 << 16)
#define MFP_LPM_STATE_MASK (0x7 << 16)
#define MFP_LPM_STATE(x) (((x) >> 16) & 0x7)
#define MFP_LPM_EDGE_NONE (0x0 << 19)
#define MFP_LPM_EDGE_RISE (0x1 << 19)
#define MFP_LPM_EDGE_FALL (0x2 << 19)
#define MFP_LPM_EDGE_BOTH (0x3 << 19)
#define MFP_LPM_EDGE_MASK (0x3 << 19)
#define MFP_LPM_EDGE(x) (((x) >> 19) & 0x3)
#define MFP_PULL_NONE (0x0 << 21)
#define MFP_PULL_LOW (0x1 << 21)
#define MFP_PULL_HIGH (0x2 << 21)
#define MFP_PULL_BOTH (0x3 << 21)
#define MFP_PULL_FLOAT (0x4 << 21)
#define MFP_PULL_MASK (0x7 << 21)
#define MFP_PULL(x) (((x) >> 21) & 0x7)
#define MFP_CFG_DEFAULT (MFP_AF0 | MFP_DS03X | MFP_LPM_DEFAULT |\
MFP_LPM_EDGE_NONE | MFP_PULL_NONE)
#define MFP_CFG(pin, af) \
((MFP_CFG_DEFAULT & ~MFP_AF_MASK) |\
(MFP_PIN(MFP_PIN_##pin) | MFP_##af))
#define MFP_CFG_DRV(pin, af, drv) \
((MFP_CFG_DEFAULT & ~(MFP_AF_MASK | MFP_DS_MASK)) |\
(MFP_PIN(MFP_PIN_##pin) | MFP_##af | MFP_##drv))
#define MFP_CFG_LPM(pin, af, lpm) \
((MFP_CFG_DEFAULT & ~(MFP_AF_MASK | MFP_LPM_STATE_MASK)) |\
(MFP_PIN(MFP_PIN_##pin) | MFP_##af | MFP_LPM_##lpm))
#define MFP_CFG_X(pin, af, drv, lpm) \
((MFP_CFG_DEFAULT & ~(MFP_AF_MASK | MFP_DS_MASK | MFP_LPM_STATE_MASK)) |\
(MFP_PIN(MFP_PIN_##pin) | MFP_##af | MFP_##drv | MFP_LPM_##lpm))
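/*
 * Illustrative sketch (not part of the original header): a board file
 * typically collects mfp_cfg_t values built with the macros above and
 * passes them to mfp_config().  The pin/function choices below are
 * examples only:
 *
 *	static unsigned long example_pin_config[] __initdata = {
 *		MFP_CFG(GPIO18, AF0),
 *		MFP_CFG_DRV(GPIO19, AF2, DS08X),
 *		MFP_CFG_LPM(GPIO20, AF1, PULL_HIGH),
 *	};
 *
 *	mfp_config(example_pin_config, ARRAY_SIZE(example_pin_config));
 */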
#if defined(CONFIG_PXA3xx) || defined(CONFIG_ARCH_MMP)
/*
* each MFP pin has an MFPR register; since the offset of the
* register varies between processors, the processor specific code
* should initialize the pin offsets with mfp_init_base()/mfp_init_addr()
*
* mfp_init_base() - accepts a virtual base for all MFPR registers and
* initialize the MFP table to a default state
*
* mfp_init_addr() - accepts a table of "mfp_addr_map" structure, which
* represents a range of MFP pins from "start" to "end", with the offset
* beginning at "offset"; to define a single pin, let "end" = -1.
*
* use
*
* MFP_ADDR_X() to define a range of pins
* MFP_ADDR() to define a single pin
* MFP_ADDR_END to signal the end of pin offset definitions
*/
struct mfp_addr_map {
unsigned int start;
unsigned int end;
unsigned long offset;
};
#define MFP_ADDR_X(start, end, offset) \
{ MFP_PIN_##start, MFP_PIN_##end, offset }
#define MFP_ADDR(pin, offset) \
{ MFP_PIN_##pin, -1, offset }
#define MFP_ADDR_END { MFP_PIN_INVALID, 0 }
void __init mfp_init_base(void __iomem *mfpr_base);
void __init mfp_init_addr(struct mfp_addr_map *map);
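/*
 * Illustrative sketch (names are examples only, not from this commit): SoC
 * code would describe where each pin's MFPR register lives and register the
 * table at init time:
 *
 *	static struct mfp_addr_map example_mfp_addr_map[] __initdata = {
 *		MFP_ADDR_X(GPIO0, GPIO63, 0x0124),
 *		MFP_ADDR(nXCVREN, 0x0204),
 *		MFP_ADDR_END,
 *	};
 *
 *	mfp_init_base(mfpr_virt_base);
 *	mfp_init_addr(example_mfp_addr_map);
 */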
/*
* mfp_{read, write}() - for direct read/write access to the MFPR register
* mfp_config() - for configuring a group of MFPR registers
* mfp_config_lpm() - configuring all low power MFPR registers for suspend
* mfp_config_run() - configuring all run time MFPR registers after resume
*/
unsigned long mfp_read(int mfp);
void mfp_write(int mfp, unsigned long mfpr_val);
void mfp_config(unsigned long *mfp_cfgs, int num);
void mfp_config_run(void);
void mfp_config_lpm(void);
#endif /* CONFIG_PXA3xx || CONFIG_ARCH_MMP */
#endif /* __ASM_PLAT_MFP_H */

arch/arm/plat-pxa/mfp.c (new file, 285 lines)

@@ -0,0 +1,285 @@
/*
* linux/arch/arm/plat-pxa/mfp.c
*
* Multi-Function Pin Support
*
* Copyright (C) 2007 Marvell International Ltd.
*
* 2007-08-21: eric miao <eric.miao@marvell.com>
* initial version
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <plat/mfp.h>
#define MFPR_SIZE (PAGE_SIZE)
/* MFPR register bit definitions */
#define MFPR_PULL_SEL (0x1 << 15)
#define MFPR_PULLUP_EN (0x1 << 14)
#define MFPR_PULLDOWN_EN (0x1 << 13)
#define MFPR_SLEEP_SEL (0x1 << 9)
#define MFPR_SLEEP_OE_N (0x1 << 7)
#define MFPR_EDGE_CLEAR (0x1 << 6)
#define MFPR_EDGE_FALL_EN (0x1 << 5)
#define MFPR_EDGE_RISE_EN (0x1 << 4)
#define MFPR_SLEEP_DATA(x) ((x) << 8)
#define MFPR_DRIVE(x) (((x) & 0x7) << 10)
#define MFPR_AF_SEL(x) (((x) & 0x7) << 0)
#define MFPR_EDGE_NONE (0)
#define MFPR_EDGE_RISE (MFPR_EDGE_RISE_EN)
#define MFPR_EDGE_FALL (MFPR_EDGE_FALL_EN)
#define MFPR_EDGE_BOTH (MFPR_EDGE_RISE | MFPR_EDGE_FALL)
/*
* Table that determines the low power modes outputs, with actual settings
* used in parentheses for don't-care values. Except for the float output,
* the configured driven and pulled levels match, so if there is a need for
* non-LPM pulled output, the same configuration could probably be used.
*
* Output value sleep_oe_n sleep_data pullup_en pulldown_en pull_sel
* (bit 7) (bit 8) (bit 14) (bit 13) (bit 15)
*
* Input 0 X(0) X(0) X(0) 0
* Drive 0 0 0 0 X(1) 0
* Drive 1 0 1 X(1) 0 0
* Pull hi (1) 1 X(1) 1 0 0
* Pull lo (0) 1 X(0) 0 1 0
* Z (float) 1 X(0) 0 0 0
*/
#define MFPR_LPM_INPUT (0)
#define MFPR_LPM_DRIVE_LOW (MFPR_SLEEP_DATA(0) | MFPR_PULLDOWN_EN)
#define MFPR_LPM_DRIVE_HIGH (MFPR_SLEEP_DATA(1) | MFPR_PULLUP_EN)
#define MFPR_LPM_PULL_LOW (MFPR_LPM_DRIVE_LOW | MFPR_SLEEP_OE_N)
#define MFPR_LPM_PULL_HIGH (MFPR_LPM_DRIVE_HIGH | MFPR_SLEEP_OE_N)
#define MFPR_LPM_FLOAT (MFPR_SLEEP_OE_N)
#define MFPR_LPM_MASK (0xe080)
/*
* The pullup and pulldown state of the MFP pin at run mode is by default
* determined by the selected alternate function. In case that some buggy
* devices need to override this default behavior, the definitions below
* indicate the setting of the corresponding MFPR bits
*
* Definition pull_sel pullup_en pulldown_en
* MFPR_PULL_NONE 0 0 0
* MFPR_PULL_LOW 1 0 1
* MFPR_PULL_HIGH 1 1 0
* MFPR_PULL_BOTH 1 1 1
* MFPR_PULL_FLOAT 1 0 0
*/
#define MFPR_PULL_NONE (0)
#define MFPR_PULL_LOW (MFPR_PULL_SEL | MFPR_PULLDOWN_EN)
#define MFPR_PULL_BOTH (MFPR_PULL_LOW | MFPR_PULLUP_EN)
#define MFPR_PULL_HIGH (MFPR_PULL_SEL | MFPR_PULLUP_EN)
#define MFPR_PULL_FLOAT (MFPR_PULL_SEL)
/* mfp_spin_lock is used to ensure that MFP register configuration
* (most likely a read-modify-write operation) is atomic, and that
* mfp_table[] is consistent
*/
static DEFINE_SPINLOCK(mfp_spin_lock);
static void __iomem *mfpr_mmio_base;
struct mfp_pin {
unsigned long config; /* -1 for not configured */
unsigned long mfpr_off; /* MFPRxx Register offset */
unsigned long mfpr_run; /* Run-Mode Register Value */
unsigned long mfpr_lpm; /* Low Power Mode Register Value */
};
static struct mfp_pin mfp_table[MFP_PIN_MAX];
/* mapping of MFP_LPM_* definitions to MFPR_LPM_* register bits */
static const unsigned long mfpr_lpm[] = {
MFPR_LPM_INPUT,
MFPR_LPM_DRIVE_LOW,
MFPR_LPM_DRIVE_HIGH,
MFPR_LPM_PULL_LOW,
MFPR_LPM_PULL_HIGH,
MFPR_LPM_FLOAT,
MFPR_LPM_INPUT,
};
/* mapping of MFP_PULL_* definitions to MFPR_PULL_* register bits */
static const unsigned long mfpr_pull[] = {
MFPR_PULL_NONE,
MFPR_PULL_LOW,
MFPR_PULL_HIGH,
MFPR_PULL_BOTH,
MFPR_PULL_FLOAT,
};
/* mapping of MFP_LPM_EDGE_* definitions to MFPR_EDGE_* register bits */
static const unsigned long mfpr_edge[] = {
MFPR_EDGE_NONE,
MFPR_EDGE_RISE,
MFPR_EDGE_FALL,
MFPR_EDGE_BOTH,
};
#define mfpr_readl(off) \
__raw_readl(mfpr_mmio_base + (off))
#define mfpr_writel(off, val) \
__raw_writel(val, mfpr_mmio_base + (off))
#define mfp_configured(p) ((p)->config != -1)
/*
* perform a read-back of any valid MFPR register to make sure the
* previous writings are finished
*/
static unsigned long mfpr_off_readback;
#define mfpr_sync() (void)__raw_readl(mfpr_mmio_base + mfpr_off_readback)
static inline void __mfp_config_run(struct mfp_pin *p)
{
if (mfp_configured(p))
mfpr_writel(p->mfpr_off, p->mfpr_run);
}
static inline void __mfp_config_lpm(struct mfp_pin *p)
{
if (mfp_configured(p)) {
unsigned long mfpr_clr = (p->mfpr_run & ~MFPR_EDGE_BOTH) | MFPR_EDGE_CLEAR;
if (mfpr_clr != p->mfpr_run)
mfpr_writel(p->mfpr_off, mfpr_clr);
if (p->mfpr_lpm != mfpr_clr)
mfpr_writel(p->mfpr_off, p->mfpr_lpm);
}
}
void mfp_config(unsigned long *mfp_cfgs, int num)
{
unsigned long flags;
int i;
spin_lock_irqsave(&mfp_spin_lock, flags);
for (i = 0; i < num; i++, mfp_cfgs++) {
unsigned long tmp, c = *mfp_cfgs;
struct mfp_pin *p;
int pin, af, drv, lpm, edge, pull;
pin = MFP_PIN(c);
BUG_ON(pin >= MFP_PIN_MAX);
p = &mfp_table[pin];
af = MFP_AF(c);
drv = MFP_DS(c);
lpm = MFP_LPM_STATE(c);
edge = MFP_LPM_EDGE(c);
pull = MFP_PULL(c);
/* run-mode pull settings will conflict with MFPR bits of
* low power mode state, calculate mfpr_run and mfpr_lpm
* individually if pull != MFP_PULL_NONE
*/
tmp = MFPR_AF_SEL(af) | MFPR_DRIVE(drv);
if (likely(pull == MFP_PULL_NONE)) {
p->mfpr_run = tmp | mfpr_lpm[lpm] | mfpr_edge[edge];
p->mfpr_lpm = p->mfpr_run;
} else {
p->mfpr_lpm = tmp | mfpr_lpm[lpm] | mfpr_edge[edge];
p->mfpr_run = tmp | mfpr_pull[pull];
}
p->config = c; __mfp_config_run(p);
}
mfpr_sync();
spin_unlock_irqrestore(&mfp_spin_lock, flags);
}
unsigned long mfp_read(int mfp)
{
unsigned long val, flags;
BUG_ON(mfp < 0 || mfp >= MFP_PIN_MAX);
spin_lock_irqsave(&mfp_spin_lock, flags);
val = mfpr_readl(mfp_table[mfp].mfpr_off);
spin_unlock_irqrestore(&mfp_spin_lock, flags);
return val;
}
void mfp_write(int mfp, unsigned long val)
{
unsigned long flags;
BUG_ON(mfp < 0 || mfp >= MFP_PIN_MAX);
spin_lock_irqsave(&mfp_spin_lock, flags);
mfpr_writel(mfp_table[mfp].mfpr_off, val);
mfpr_sync();
spin_unlock_irqrestore(&mfp_spin_lock, flags);
}
void __init mfp_init_base(void __iomem *mfpr_base)
{
int i;
/* initialize the table with default - unconfigured */
for (i = 0; i < ARRAY_SIZE(mfp_table); i++)
mfp_table[i].config = -1;
mfpr_mmio_base = mfpr_base;
}
void __init mfp_init_addr(struct mfp_addr_map *map)
{
struct mfp_addr_map *p;
unsigned long offset, flags;
int i;
spin_lock_irqsave(&mfp_spin_lock, flags);
/* mfp offset for readback */
mfpr_off_readback = map[0].offset;
for (p = map; p->start != MFP_PIN_INVALID; p++) {
offset = p->offset;
i = p->start;
do {
mfp_table[i].mfpr_off = offset;
mfp_table[i].mfpr_run = 0;
mfp_table[i].mfpr_lpm = 0;
offset += 4; i++;
} while ((i <= p->end) && (p->end != -1));
}
spin_unlock_irqrestore(&mfp_spin_lock, flags);
}
void mfp_config_lpm(void)
{
struct mfp_pin *p = &mfp_table[0];
int pin;
for (pin = 0; pin < ARRAY_SIZE(mfp_table); pin++, p++)
__mfp_config_lpm(p);
}
void mfp_config_run(void)
{
struct mfp_pin *p = &mfp_table[0];
int pin;
for (pin = 0; pin < ARRAY_SIZE(mfp_table); pin++, p++)
__mfp_config_run(p);
}

arch/arm/plat-pxa/ssp.c (new file, 293 lines)

@@ -0,0 +1,293 @@
/*
* linux/arch/arm/mach-pxa/ssp.c
*
* based on linux/arch/arm/mach-sa1100/ssp.c by Russell King
*
* Copyright (C) 2003 Russell King.
* Copyright (C) 2003 Wolfson Microelectronics PLC
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* PXA2xx SSP driver. This provides the generic core for simple
* IO-based SSP applications and allows easy port setup for DMA access.
*
* Author: Liam Girdwood <liam.girdwood@wolfsonmicro.com>
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/spi/pxa2xx_spi.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <asm/irq.h>
#include <mach/hardware.h>
static DEFINE_MUTEX(ssp_lock);
static LIST_HEAD(ssp_list);
struct ssp_device *pxa_ssp_request(int port, const char *label)
{
struct ssp_device *ssp = NULL;
mutex_lock(&ssp_lock);
list_for_each_entry(ssp, &ssp_list, node) {
if (ssp->port_id == port && ssp->use_count == 0) {
ssp->use_count++;
ssp->label = label;
break;
}
}
mutex_unlock(&ssp_lock);
if (&ssp->node == &ssp_list)
return NULL;
return ssp;
}
EXPORT_SYMBOL(pxa_ssp_request);
struct ssp_device *pxa_ssp_request_of(const struct device_node *of_node,
const char *label)
{
struct ssp_device *ssp = NULL;
mutex_lock(&ssp_lock);
list_for_each_entry(ssp, &ssp_list, node) {
if (ssp->of_node == of_node && ssp->use_count == 0) {
ssp->use_count++;
ssp->label = label;
break;
}
}
mutex_unlock(&ssp_lock);
if (&ssp->node == &ssp_list)
return NULL;
return ssp;
}
EXPORT_SYMBOL(pxa_ssp_request_of);
void pxa_ssp_free(struct ssp_device *ssp)
{
mutex_lock(&ssp_lock);
if (ssp->use_count) {
ssp->use_count--;
ssp->label = NULL;
} else
dev_err(&ssp->pdev->dev, "device already free\n");
mutex_unlock(&ssp_lock);
}
EXPORT_SYMBOL(pxa_ssp_free);
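/*
 * Illustrative usage sketch (names are examples only): a client such as an
 * SPI or audio driver claims a port by number, uses ssp->mmio_base,
 * ssp->irq and ssp->clk, then releases it:
 *
 *	struct ssp_device *ssp = pxa_ssp_request(1, "example-client");
 *
 *	if (!ssp)
 *		return -EBUSY;	(port missing or already in use)
 *	...
 *	pxa_ssp_free(ssp);
 */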
#ifdef CONFIG_OF
static const struct of_device_id pxa_ssp_of_ids[] = {
{ .compatible = "mrvl,pxa25x-ssp", .data = (void *) PXA25x_SSP },
{ .compatible = "mvrl,pxa25x-nssp", .data = (void *) PXA25x_NSSP },
{ .compatible = "mrvl,pxa27x-ssp", .data = (void *) PXA27x_SSP },
{ .compatible = "mrvl,pxa3xx-ssp", .data = (void *) PXA3xx_SSP },
{ .compatible = "mvrl,pxa168-ssp", .data = (void *) PXA168_SSP },
{ .compatible = "mrvl,pxa910-ssp", .data = (void *) PXA910_SSP },
{ .compatible = "mrvl,ce4100-ssp", .data = (void *) CE4100_SSP },
{ .compatible = "mrvl,lpss-ssp", .data = (void *) LPSS_SSP },
{ },
};
MODULE_DEVICE_TABLE(of, pxa_ssp_of_ids);
#endif
static int pxa_ssp_probe(struct platform_device *pdev)
{
struct resource *res;
struct ssp_device *ssp;
struct device *dev = &pdev->dev;
ssp = devm_kzalloc(dev, sizeof(struct ssp_device), GFP_KERNEL);
if (ssp == NULL)
return -ENOMEM;
ssp->pdev = pdev;
ssp->clk = devm_clk_get(dev, NULL);
if (IS_ERR(ssp->clk))
return PTR_ERR(ssp->clk);
if (dev->of_node) {
struct of_phandle_args dma_spec;
struct device_node *np = dev->of_node;
int ret;
/*
* FIXME: we should allocate the DMA channel from this
* context and pass the channel down to the ssp users.
* For now, we look up the rx and tx indices manually
*/
/* rx */
ret = of_parse_phandle_with_args(np, "dmas", "#dma-cells",
0, &dma_spec);
if (ret) {
dev_err(dev, "Can't parse dmas property\n");
return -ENODEV;
}
ssp->drcmr_rx = dma_spec.args[0];
of_node_put(dma_spec.np);
/* tx */
ret = of_parse_phandle_with_args(np, "dmas", "#dma-cells",
1, &dma_spec);
if (ret) {
dev_err(dev, "Can't parse dmas property\n");
return -ENODEV;
}
ssp->drcmr_tx = dma_spec.args[0];
of_node_put(dma_spec.np);
} else {
res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
if (res == NULL) {
dev_err(dev, "no SSP RX DRCMR defined\n");
return -ENODEV;
}
ssp->drcmr_rx = res->start;
res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
if (res == NULL) {
dev_err(dev, "no SSP TX DRCMR defined\n");
return -ENODEV;
}
ssp->drcmr_tx = res->start;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (res == NULL) {
dev_err(dev, "no memory resource defined\n");
return -ENODEV;
}
res = devm_request_mem_region(dev, res->start, resource_size(res),
pdev->name);
if (res == NULL) {
dev_err(dev, "failed to request memory resource\n");
return -EBUSY;
}
ssp->phys_base = res->start;
ssp->mmio_base = devm_ioremap(dev, res->start, resource_size(res));
if (ssp->mmio_base == NULL) {
dev_err(dev, "failed to ioremap() registers\n");
return -ENODEV;
}
ssp->irq = platform_get_irq(pdev, 0);
if (ssp->irq < 0) {
dev_err(dev, "no IRQ resource defined\n");
return -ENODEV;
}
if (dev->of_node) {
const struct of_device_id *id =
of_match_device(of_match_ptr(pxa_ssp_of_ids), dev);
ssp->type = (int) id->data;
} else {
const struct platform_device_id *id =
platform_get_device_id(pdev);
ssp->type = (int) id->driver_data;
/* PXA2xx/3xx SSP ports start from 1 and the internal pdev->id
* starts from 0, do a translation here
*/
ssp->port_id = pdev->id + 1;
}
ssp->use_count = 0;
ssp->of_node = dev->of_node;
mutex_lock(&ssp_lock);
list_add(&ssp->node, &ssp_list);
mutex_unlock(&ssp_lock);
platform_set_drvdata(pdev, ssp);
return 0;
}
static int pxa_ssp_remove(struct platform_device *pdev)
{
struct resource *res;
struct ssp_device *ssp;
ssp = platform_get_drvdata(pdev);
if (ssp == NULL)
return -ENODEV;
iounmap(ssp->mmio_base);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(res->start, resource_size(res));
clk_put(ssp->clk);
mutex_lock(&ssp_lock);
list_del(&ssp->node);
mutex_unlock(&ssp_lock);
kfree(ssp);
return 0;
}
static const struct platform_device_id ssp_id_table[] = {
{ "pxa25x-ssp", PXA25x_SSP },
{ "pxa25x-nssp", PXA25x_NSSP },
{ "pxa27x-ssp", PXA27x_SSP },
{ "pxa3xx-ssp", PXA3xx_SSP },
{ "pxa168-ssp", PXA168_SSP },
{ "pxa910-ssp", PXA910_SSP },
{ },
};
static struct platform_driver pxa_ssp_driver = {
.probe = pxa_ssp_probe,
.remove = pxa_ssp_remove,
.driver = {
.owner = THIS_MODULE,
.name = "pxa2xx-ssp",
.of_match_table = of_match_ptr(pxa_ssp_of_ids),
},
.id_table = ssp_id_table,
};
static int __init pxa_ssp_init(void)
{
return platform_driver_register(&pxa_ssp_driver);
}
static void __exit pxa_ssp_exit(void)
{
platform_driver_unregister(&pxa_ssp_driver);
}
arch_initcall(pxa_ssp_init);
module_exit(pxa_ssp_exit);
MODULE_DESCRIPTION("PXA SSP driver");
MODULE_AUTHOR("Liam Girdwood");
MODULE_LICENSE("GPL");