mirror of
https://github.com/AetherDroid/android_kernel_samsung_on5xelte.git
synced 2025-09-08 17:18:05 -04:00
Fixed MTP to work with TWRP
This commit is contained in:
commit
f6dfaef42e
50820 changed files with 20846062 additions and 0 deletions
54
drivers/dma/sh/Kconfig
Normal file
54
drivers/dma/sh/Kconfig
Normal file
|
@ -0,0 +1,54 @@
|
|||
#
|
||||
# DMA engine configuration for sh
|
||||
#
|
||||
|
||||
#
|
||||
# DMA Engine Helpers
|
||||
#
|
||||
|
||||
config SH_DMAE_BASE
|
||||
bool "Renesas SuperH DMA Engine support"
|
||||
depends on SUPERH || ARCH_SHMOBILE || COMPILE_TEST
|
||||
depends on !SUPERH || SH_DMA
|
||||
depends on !SH_DMA_API
|
||||
default y
|
||||
select DMA_ENGINE
|
||||
help
|
||||
Enable support for the Renesas SuperH DMA controllers.
|
||||
|
||||
#
|
||||
# DMA Controllers
|
||||
#
|
||||
|
||||
config SH_DMAE
|
||||
tristate "Renesas SuperH DMAC support"
|
||||
depends on SH_DMAE_BASE
|
||||
help
|
||||
Enable support for the Renesas SuperH DMA controllers.
|
||||
|
||||
if SH_DMAE
|
||||
|
||||
config SH_DMAE_R8A73A4
|
||||
def_bool y
|
||||
depends on ARCH_R8A73A4
|
||||
depends on OF
|
||||
|
||||
endif
|
||||
|
||||
config SUDMAC
|
||||
tristate "Renesas SUDMAC support"
|
||||
depends on SH_DMAE_BASE
|
||||
help
|
||||
Enable support for the Renesas SUDMAC controllers.
|
||||
|
||||
config RCAR_HPB_DMAE
|
||||
tristate "Renesas R-Car HPB DMAC support"
|
||||
depends on SH_DMAE_BASE
|
||||
help
|
||||
Enable support for the Renesas R-Car series DMA controllers.
|
||||
|
||||
config RCAR_AUDMAC_PP
|
||||
tristate "Renesas R-Car Audio DMAC Peripheral Peripheral support"
|
||||
depends on SH_DMAE_BASE
|
||||
help
|
||||
Enable support for the Renesas R-Car Audio DMAC Peripheral Peripheral controllers.
|
18
drivers/dma/sh/Makefile
Normal file
18
drivers/dma/sh/Makefile
Normal file
|
@ -0,0 +1,18 @@
|
|||
#
|
||||
# DMA Engine Helpers
|
||||
#
|
||||
|
||||
obj-$(CONFIG_SH_DMAE_BASE) += shdma-base.o shdma-of.o
|
||||
|
||||
#
|
||||
# DMA Controllers
|
||||
#
|
||||
|
||||
shdma-y := shdmac.o
|
||||
shdma-$(CONFIG_SH_DMAE_R8A73A4) += shdma-r8a73a4.o
|
||||
shdma-objs := $(shdma-y)
|
||||
obj-$(CONFIG_SH_DMAE) += shdma.o
|
||||
|
||||
obj-$(CONFIG_SUDMAC) += sudmac.o
|
||||
obj-$(CONFIG_RCAR_HPB_DMAE) += rcar-hpbdma.o
|
||||
obj-$(CONFIG_RCAR_AUDMAC_PP) += rcar-audmapp.o
|
379
drivers/dma/sh/rcar-audmapp.c
Normal file
379
drivers/dma/sh/rcar-audmapp.c
Normal file
|
@ -0,0 +1,379 @@
|
|||
/*
|
||||
* This is for Renesas R-Car Audio-DMAC-peri-peri.
|
||||
*
|
||||
* Copyright (C) 2014 Renesas Electronics Corporation
|
||||
* Copyright (C) 2014 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
|
||||
*
|
||||
* based on the drivers/dma/sh/shdma.c
|
||||
*
|
||||
* Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
|
||||
* Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
|
||||
* Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
|
||||
* Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
|
||||
*
|
||||
* This is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
*/
|
||||
#include <linux/delay.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/dmaengine.h>
|
||||
#include <linux/of_dma.h>
|
||||
#include <linux/platform_data/dma-rcar-audmapp.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/shdma-base.h>
|
||||
|
||||
/*
|
||||
* DMA register
|
||||
*/
|
||||
#define PDMASAR 0x00
|
||||
#define PDMADAR 0x04
|
||||
#define PDMACHCR 0x0c
|
||||
|
||||
/* PDMACHCR */
|
||||
#define PDMACHCR_DE (1 << 0)
|
||||
|
||||
#define AUDMAPP_MAX_CHANNELS 29
|
||||
|
||||
/* Default MEMCPY transfer size = 2^2 = 4 bytes */
|
||||
#define LOG2_DEFAULT_XFER_SIZE 2
|
||||
#define AUDMAPP_SLAVE_NUMBER 256
|
||||
#define AUDMAPP_LEN_MAX (16 * 1024 * 1024)
|
||||
|
||||
struct audmapp_chan {
|
||||
struct shdma_chan shdma_chan;
|
||||
void __iomem *base;
|
||||
dma_addr_t slave_addr;
|
||||
u32 chcr;
|
||||
};
|
||||
|
||||
struct audmapp_device {
|
||||
struct shdma_dev shdma_dev;
|
||||
struct audmapp_pdata *pdata;
|
||||
struct device *dev;
|
||||
void __iomem *chan_reg;
|
||||
};
|
||||
|
||||
struct audmapp_desc {
|
||||
struct shdma_desc shdma_desc;
|
||||
dma_addr_t src;
|
||||
dma_addr_t dst;
|
||||
};
|
||||
|
||||
#define to_shdma_chan(c) container_of(c, struct shdma_chan, dma_chan)
|
||||
|
||||
#define to_chan(chan) container_of(chan, struct audmapp_chan, shdma_chan)
|
||||
#define to_desc(sdesc) container_of(sdesc, struct audmapp_desc, shdma_desc)
|
||||
#define to_dev(chan) container_of(chan->shdma_chan.dma_chan.device, \
|
||||
struct audmapp_device, shdma_dev.dma_dev)
|
||||
|
||||
static void audmapp_write(struct audmapp_chan *auchan, u32 data, u32 reg)
|
||||
{
|
||||
struct audmapp_device *audev = to_dev(auchan);
|
||||
struct device *dev = audev->dev;
|
||||
|
||||
dev_dbg(dev, "w %p : %08x\n", auchan->base + reg, data);
|
||||
|
||||
iowrite32(data, auchan->base + reg);
|
||||
}
|
||||
|
||||
static u32 audmapp_read(struct audmapp_chan *auchan, u32 reg)
|
||||
{
|
||||
return ioread32(auchan->base + reg);
|
||||
}
|
||||
|
||||
static void audmapp_halt(struct shdma_chan *schan)
|
||||
{
|
||||
struct audmapp_chan *auchan = to_chan(schan);
|
||||
int i;
|
||||
|
||||
audmapp_write(auchan, 0, PDMACHCR);
|
||||
|
||||
for (i = 0; i < 1024; i++) {
|
||||
if (0 == audmapp_read(auchan, PDMACHCR))
|
||||
return;
|
||||
udelay(1);
|
||||
}
|
||||
}
|
||||
|
||||
static void audmapp_start_xfer(struct shdma_chan *schan,
|
||||
struct shdma_desc *sdesc)
|
||||
{
|
||||
struct audmapp_chan *auchan = to_chan(schan);
|
||||
struct audmapp_device *audev = to_dev(auchan);
|
||||
struct audmapp_desc *desc = to_desc(sdesc);
|
||||
struct device *dev = audev->dev;
|
||||
u32 chcr = auchan->chcr | PDMACHCR_DE;
|
||||
|
||||
dev_dbg(dev, "src/dst/chcr = %pad/%pad/%08x\n",
|
||||
&desc->src, &desc->dst, chcr);
|
||||
|
||||
audmapp_write(auchan, desc->src, PDMASAR);
|
||||
audmapp_write(auchan, desc->dst, PDMADAR);
|
||||
audmapp_write(auchan, chcr, PDMACHCR);
|
||||
}
|
||||
|
||||
static int audmapp_get_config(struct audmapp_chan *auchan, int slave_id,
|
||||
u32 *chcr, dma_addr_t *dst)
|
||||
{
|
||||
struct audmapp_device *audev = to_dev(auchan);
|
||||
struct audmapp_pdata *pdata = audev->pdata;
|
||||
struct audmapp_slave_config *cfg;
|
||||
int i;
|
||||
|
||||
*chcr = 0;
|
||||
*dst = 0;
|
||||
|
||||
if (!pdata) { /* DT */
|
||||
*chcr = ((u32)slave_id) << 16;
|
||||
auchan->shdma_chan.slave_id = (slave_id) >> 8;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* non-DT */
|
||||
|
||||
if (slave_id >= AUDMAPP_SLAVE_NUMBER)
|
||||
return -ENXIO;
|
||||
|
||||
for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++)
|
||||
if (cfg->slave_id == slave_id) {
|
||||
*chcr = cfg->chcr;
|
||||
*dst = cfg->dst;
|
||||
return 0;
|
||||
}
|
||||
|
||||
return -ENXIO;
|
||||
}
|
||||
|
||||
static int audmapp_set_slave(struct shdma_chan *schan, int slave_id,
|
||||
dma_addr_t slave_addr, bool try)
|
||||
{
|
||||
struct audmapp_chan *auchan = to_chan(schan);
|
||||
u32 chcr;
|
||||
dma_addr_t dst;
|
||||
int ret;
|
||||
|
||||
ret = audmapp_get_config(auchan, slave_id, &chcr, &dst);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
if (try)
|
||||
return 0;
|
||||
|
||||
auchan->chcr = chcr;
|
||||
auchan->slave_addr = slave_addr ? : dst;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int audmapp_desc_setup(struct shdma_chan *schan,
|
||||
struct shdma_desc *sdesc,
|
||||
dma_addr_t src, dma_addr_t dst, size_t *len)
|
||||
{
|
||||
struct audmapp_desc *desc = to_desc(sdesc);
|
||||
|
||||
if (*len > (size_t)AUDMAPP_LEN_MAX)
|
||||
*len = (size_t)AUDMAPP_LEN_MAX;
|
||||
|
||||
desc->src = src;
|
||||
desc->dst = dst;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void audmapp_setup_xfer(struct shdma_chan *schan,
|
||||
int slave_id)
|
||||
{
|
||||
}
|
||||
|
||||
static dma_addr_t audmapp_slave_addr(struct shdma_chan *schan)
|
||||
{
|
||||
struct audmapp_chan *auchan = to_chan(schan);
|
||||
|
||||
return auchan->slave_addr;
|
||||
}
|
||||
|
||||
static bool audmapp_channel_busy(struct shdma_chan *schan)
|
||||
{
|
||||
struct audmapp_chan *auchan = to_chan(schan);
|
||||
u32 chcr = audmapp_read(auchan, PDMACHCR);
|
||||
|
||||
return chcr & ~PDMACHCR_DE;
|
||||
}
|
||||
|
||||
static bool audmapp_desc_completed(struct shdma_chan *schan,
|
||||
struct shdma_desc *sdesc)
|
||||
{
|
||||
return true;
|
||||
}
|
||||
|
||||
static struct shdma_desc *audmapp_embedded_desc(void *buf, int i)
|
||||
{
|
||||
return &((struct audmapp_desc *)buf)[i].shdma_desc;
|
||||
}
|
||||
|
||||
static const struct shdma_ops audmapp_shdma_ops = {
|
||||
.halt_channel = audmapp_halt,
|
||||
.desc_setup = audmapp_desc_setup,
|
||||
.set_slave = audmapp_set_slave,
|
||||
.start_xfer = audmapp_start_xfer,
|
||||
.embedded_desc = audmapp_embedded_desc,
|
||||
.setup_xfer = audmapp_setup_xfer,
|
||||
.slave_addr = audmapp_slave_addr,
|
||||
.channel_busy = audmapp_channel_busy,
|
||||
.desc_completed = audmapp_desc_completed,
|
||||
};
|
||||
|
||||
static int audmapp_chan_probe(struct platform_device *pdev,
|
||||
struct audmapp_device *audev, int id)
|
||||
{
|
||||
struct shdma_dev *sdev = &audev->shdma_dev;
|
||||
struct audmapp_chan *auchan;
|
||||
struct shdma_chan *schan;
|
||||
struct device *dev = audev->dev;
|
||||
|
||||
auchan = devm_kzalloc(dev, sizeof(*auchan), GFP_KERNEL);
|
||||
if (!auchan)
|
||||
return -ENOMEM;
|
||||
|
||||
schan = &auchan->shdma_chan;
|
||||
schan->max_xfer_len = AUDMAPP_LEN_MAX;
|
||||
|
||||
shdma_chan_probe(sdev, schan, id);
|
||||
|
||||
auchan->base = audev->chan_reg + 0x20 + (0x10 * id);
|
||||
dev_dbg(dev, "%02d : %p / %p", id, auchan->base, audev->chan_reg);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void audmapp_chan_remove(struct audmapp_device *audev)
|
||||
{
|
||||
struct dma_device *dma_dev = &audev->shdma_dev.dma_dev;
|
||||
struct shdma_chan *schan;
|
||||
int i;
|
||||
|
||||
shdma_for_each_chan(schan, &audev->shdma_dev, i) {
|
||||
BUG_ON(!schan);
|
||||
shdma_chan_remove(schan);
|
||||
}
|
||||
dma_dev->chancnt = 0;
|
||||
}
|
||||
|
||||
static struct dma_chan *audmapp_of_xlate(struct of_phandle_args *dma_spec,
|
||||
struct of_dma *ofdma)
|
||||
{
|
||||
dma_cap_mask_t mask;
|
||||
struct dma_chan *chan;
|
||||
u32 chcr = dma_spec->args[0];
|
||||
|
||||
if (dma_spec->args_count != 1)
|
||||
return NULL;
|
||||
|
||||
dma_cap_zero(mask);
|
||||
dma_cap_set(DMA_SLAVE, mask);
|
||||
|
||||
chan = dma_request_channel(mask, shdma_chan_filter, NULL);
|
||||
if (chan)
|
||||
to_shdma_chan(chan)->hw_req = chcr;
|
||||
|
||||
return chan;
|
||||
}
|
||||
|
||||
static int audmapp_probe(struct platform_device *pdev)
|
||||
{
|
||||
struct audmapp_pdata *pdata = pdev->dev.platform_data;
|
||||
struct device_node *np = pdev->dev.of_node;
|
||||
struct audmapp_device *audev;
|
||||
struct shdma_dev *sdev;
|
||||
struct dma_device *dma_dev;
|
||||
struct resource *res;
|
||||
int err, i;
|
||||
|
||||
if (np)
|
||||
of_dma_controller_register(np, audmapp_of_xlate, pdev);
|
||||
else if (!pdata)
|
||||
return -ENODEV;
|
||||
|
||||
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
||||
|
||||
audev = devm_kzalloc(&pdev->dev, sizeof(*audev), GFP_KERNEL);
|
||||
if (!audev)
|
||||
return -ENOMEM;
|
||||
|
||||
audev->dev = &pdev->dev;
|
||||
audev->pdata = pdata;
|
||||
audev->chan_reg = devm_ioremap_resource(&pdev->dev, res);
|
||||
if (IS_ERR(audev->chan_reg))
|
||||
return PTR_ERR(audev->chan_reg);
|
||||
|
||||
sdev = &audev->shdma_dev;
|
||||
sdev->ops = &audmapp_shdma_ops;
|
||||
sdev->desc_size = sizeof(struct audmapp_desc);
|
||||
|
||||
dma_dev = &sdev->dma_dev;
|
||||
dma_dev->copy_align = LOG2_DEFAULT_XFER_SIZE;
|
||||
dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
|
||||
|
||||
err = shdma_init(&pdev->dev, sdev, AUDMAPP_MAX_CHANNELS);
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
||||
platform_set_drvdata(pdev, audev);
|
||||
|
||||
/* Create DMA Channel */
|
||||
for (i = 0; i < AUDMAPP_MAX_CHANNELS; i++) {
|
||||
err = audmapp_chan_probe(pdev, audev, i);
|
||||
if (err)
|
||||
goto chan_probe_err;
|
||||
}
|
||||
|
||||
err = dma_async_device_register(dma_dev);
|
||||
if (err < 0)
|
||||
goto chan_probe_err;
|
||||
|
||||
return err;
|
||||
|
||||
chan_probe_err:
|
||||
audmapp_chan_remove(audev);
|
||||
shdma_cleanup(sdev);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static int audmapp_remove(struct platform_device *pdev)
|
||||
{
|
||||
struct audmapp_device *audev = platform_get_drvdata(pdev);
|
||||
struct dma_device *dma_dev = &audev->shdma_dev.dma_dev;
|
||||
|
||||
dma_async_device_unregister(dma_dev);
|
||||
|
||||
audmapp_chan_remove(audev);
|
||||
shdma_cleanup(&audev->shdma_dev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct of_device_id audmapp_of_match[] = {
|
||||
{ .compatible = "renesas,rcar-audmapp", },
|
||||
{},
|
||||
};
|
||||
|
||||
static struct platform_driver audmapp_driver = {
|
||||
.probe = audmapp_probe,
|
||||
.remove = audmapp_remove,
|
||||
.driver = {
|
||||
.owner = THIS_MODULE,
|
||||
.name = "rcar-audmapp-engine",
|
||||
.of_match_table = audmapp_of_match,
|
||||
},
|
||||
};
|
||||
module_platform_driver(audmapp_driver);
|
||||
|
||||
MODULE_AUTHOR("Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>");
|
||||
MODULE_DESCRIPTION("Renesas R-Car Audio DMAC peri-peri driver");
|
||||
MODULE_LICENSE("GPL");
|
666
drivers/dma/sh/rcar-hpbdma.c
Normal file
666
drivers/dma/sh/rcar-hpbdma.c
Normal file
|
@ -0,0 +1,666 @@
|
|||
/*
|
||||
* Copyright (C) 2011-2013 Renesas Electronics Corporation
|
||||
* Copyright (C) 2013 Cogent Embedded, Inc.
|
||||
*
|
||||
* This file is based on the drivers/dma/sh/shdma.c
|
||||
*
|
||||
* Renesas SuperH DMA Engine support
|
||||
*
|
||||
* This is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* - DMA of SuperH does not have Hardware DMA chain mode.
|
||||
* - max DMA size is 16MB.
|
||||
*
|
||||
*/
|
||||
|
||||
#include <linux/dmaengine.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/platform_data/dma-rcar-hpbdma.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/pm_runtime.h>
|
||||
#include <linux/shdma-base.h>
|
||||
#include <linux/slab.h>
|
||||
|
||||
/* DMA channel registers */
|
||||
#define HPB_DMAE_DSAR0 0x00
|
||||
#define HPB_DMAE_DDAR0 0x04
|
||||
#define HPB_DMAE_DTCR0 0x08
|
||||
#define HPB_DMAE_DSAR1 0x0C
|
||||
#define HPB_DMAE_DDAR1 0x10
|
||||
#define HPB_DMAE_DTCR1 0x14
|
||||
#define HPB_DMAE_DSASR 0x18
|
||||
#define HPB_DMAE_DDASR 0x1C
|
||||
#define HPB_DMAE_DTCSR 0x20
|
||||
#define HPB_DMAE_DPTR 0x24
|
||||
#define HPB_DMAE_DCR 0x28
|
||||
#define HPB_DMAE_DCMDR 0x2C
|
||||
#define HPB_DMAE_DSTPR 0x30
|
||||
#define HPB_DMAE_DSTSR 0x34
|
||||
#define HPB_DMAE_DDBGR 0x38
|
||||
#define HPB_DMAE_DDBGR2 0x3C
|
||||
#define HPB_DMAE_CHAN(n) (0x40 * (n))
|
||||
|
||||
/* DMA command register (DCMDR) bits */
|
||||
#define HPB_DMAE_DCMDR_BDOUT BIT(7)
|
||||
#define HPB_DMAE_DCMDR_DQSPD BIT(6)
|
||||
#define HPB_DMAE_DCMDR_DQSPC BIT(5)
|
||||
#define HPB_DMAE_DCMDR_DMSPD BIT(4)
|
||||
#define HPB_DMAE_DCMDR_DMSPC BIT(3)
|
||||
#define HPB_DMAE_DCMDR_DQEND BIT(2)
|
||||
#define HPB_DMAE_DCMDR_DNXT BIT(1)
|
||||
#define HPB_DMAE_DCMDR_DMEN BIT(0)
|
||||
|
||||
/* DMA forced stop register (DSTPR) bits */
|
||||
#define HPB_DMAE_DSTPR_DMSTP BIT(0)
|
||||
|
||||
/* DMA status register (DSTSR) bits */
|
||||
#define HPB_DMAE_DSTSR_DQSTS BIT(2)
|
||||
#define HPB_DMAE_DSTSR_DMSTS BIT(0)
|
||||
|
||||
/* DMA common registers */
|
||||
#define HPB_DMAE_DTIMR 0x00
|
||||
#define HPB_DMAE_DINTSR0 0x0C
|
||||
#define HPB_DMAE_DINTSR1 0x10
|
||||
#define HPB_DMAE_DINTCR0 0x14
|
||||
#define HPB_DMAE_DINTCR1 0x18
|
||||
#define HPB_DMAE_DINTMR0 0x1C
|
||||
#define HPB_DMAE_DINTMR1 0x20
|
||||
#define HPB_DMAE_DACTSR0 0x24
|
||||
#define HPB_DMAE_DACTSR1 0x28
|
||||
#define HPB_DMAE_HSRSTR(n) (0x40 + (n) * 4)
|
||||
#define HPB_DMAE_HPB_DMASPR(n) (0x140 + (n) * 4)
|
||||
#define HPB_DMAE_HPB_DMLVLR0 0x160
|
||||
#define HPB_DMAE_HPB_DMLVLR1 0x164
|
||||
#define HPB_DMAE_HPB_DMSHPT0 0x168
|
||||
#define HPB_DMAE_HPB_DMSHPT1 0x16C
|
||||
|
||||
#define HPB_DMA_SLAVE_NUMBER 256
|
||||
#define HPB_DMA_TCR_MAX 0x01000000 /* 16 MiB */
|
||||
|
||||
struct hpb_dmae_chan {
|
||||
struct shdma_chan shdma_chan;
|
||||
int xfer_mode; /* DMA transfer mode */
|
||||
#define XFER_SINGLE 1
|
||||
#define XFER_DOUBLE 2
|
||||
unsigned plane_idx; /* current DMA information set */
|
||||
bool first_desc; /* first/next transfer */
|
||||
int xmit_shift; /* log_2(bytes_per_xfer) */
|
||||
void __iomem *base;
|
||||
const struct hpb_dmae_slave_config *cfg;
|
||||
char dev_id[16]; /* unique name per DMAC of channel */
|
||||
dma_addr_t slave_addr;
|
||||
};
|
||||
|
||||
struct hpb_dmae_device {
|
||||
struct shdma_dev shdma_dev;
|
||||
spinlock_t reg_lock; /* comm_reg operation lock */
|
||||
struct hpb_dmae_pdata *pdata;
|
||||
void __iomem *chan_reg;
|
||||
void __iomem *comm_reg;
|
||||
void __iomem *reset_reg;
|
||||
void __iomem *mode_reg;
|
||||
};
|
||||
|
||||
struct hpb_dmae_regs {
|
||||
u32 sar; /* SAR / source address */
|
||||
u32 dar; /* DAR / destination address */
|
||||
u32 tcr; /* TCR / transfer count */
|
||||
};
|
||||
|
||||
struct hpb_desc {
|
||||
struct shdma_desc shdma_desc;
|
||||
struct hpb_dmae_regs hw;
|
||||
unsigned plane_idx;
|
||||
};
|
||||
|
||||
#define to_chan(schan) container_of(schan, struct hpb_dmae_chan, shdma_chan)
|
||||
#define to_desc(sdesc) container_of(sdesc, struct hpb_desc, shdma_desc)
|
||||
#define to_dev(sc) container_of(sc->shdma_chan.dma_chan.device, \
|
||||
struct hpb_dmae_device, shdma_dev.dma_dev)
|
||||
|
||||
static void ch_reg_write(struct hpb_dmae_chan *hpb_dc, u32 data, u32 reg)
|
||||
{
|
||||
iowrite32(data, hpb_dc->base + reg);
|
||||
}
|
||||
|
||||
static u32 ch_reg_read(struct hpb_dmae_chan *hpb_dc, u32 reg)
|
||||
{
|
||||
return ioread32(hpb_dc->base + reg);
|
||||
}
|
||||
|
||||
static void dcmdr_write(struct hpb_dmae_device *hpbdev, u32 data)
|
||||
{
|
||||
iowrite32(data, hpbdev->chan_reg + HPB_DMAE_DCMDR);
|
||||
}
|
||||
|
||||
static void hsrstr_write(struct hpb_dmae_device *hpbdev, u32 ch)
|
||||
{
|
||||
iowrite32(0x1, hpbdev->comm_reg + HPB_DMAE_HSRSTR(ch));
|
||||
}
|
||||
|
||||
static u32 dintsr_read(struct hpb_dmae_device *hpbdev, u32 ch)
|
||||
{
|
||||
u32 v;
|
||||
|
||||
if (ch < 32)
|
||||
v = ioread32(hpbdev->comm_reg + HPB_DMAE_DINTSR0) >> ch;
|
||||
else
|
||||
v = ioread32(hpbdev->comm_reg + HPB_DMAE_DINTSR1) >> (ch - 32);
|
||||
return v & 0x1;
|
||||
}
|
||||
|
||||
static void dintcr_write(struct hpb_dmae_device *hpbdev, u32 ch)
|
||||
{
|
||||
if (ch < 32)
|
||||
iowrite32((0x1 << ch), hpbdev->comm_reg + HPB_DMAE_DINTCR0);
|
||||
else
|
||||
iowrite32((0x1 << (ch - 32)),
|
||||
hpbdev->comm_reg + HPB_DMAE_DINTCR1);
|
||||
}
|
||||
|
||||
static void asyncmdr_write(struct hpb_dmae_device *hpbdev, u32 data)
|
||||
{
|
||||
iowrite32(data, hpbdev->mode_reg);
|
||||
}
|
||||
|
||||
static u32 asyncmdr_read(struct hpb_dmae_device *hpbdev)
|
||||
{
|
||||
return ioread32(hpbdev->mode_reg);
|
||||
}
|
||||
|
||||
static void hpb_dmae_enable_int(struct hpb_dmae_device *hpbdev, u32 ch)
|
||||
{
|
||||
u32 intreg;
|
||||
|
||||
spin_lock_irq(&hpbdev->reg_lock);
|
||||
if (ch < 32) {
|
||||
intreg = ioread32(hpbdev->comm_reg + HPB_DMAE_DINTMR0);
|
||||
iowrite32(BIT(ch) | intreg,
|
||||
hpbdev->comm_reg + HPB_DMAE_DINTMR0);
|
||||
} else {
|
||||
intreg = ioread32(hpbdev->comm_reg + HPB_DMAE_DINTMR1);
|
||||
iowrite32(BIT(ch - 32) | intreg,
|
||||
hpbdev->comm_reg + HPB_DMAE_DINTMR1);
|
||||
}
|
||||
spin_unlock_irq(&hpbdev->reg_lock);
|
||||
}
|
||||
|
||||
static void hpb_dmae_async_reset(struct hpb_dmae_device *hpbdev, u32 data)
|
||||
{
|
||||
u32 rstr;
|
||||
int timeout = 10000; /* 100 ms */
|
||||
|
||||
spin_lock(&hpbdev->reg_lock);
|
||||
rstr = ioread32(hpbdev->reset_reg);
|
||||
rstr |= data;
|
||||
iowrite32(rstr, hpbdev->reset_reg);
|
||||
do {
|
||||
rstr = ioread32(hpbdev->reset_reg);
|
||||
if ((rstr & data) == data)
|
||||
break;
|
||||
udelay(10);
|
||||
} while (timeout--);
|
||||
|
||||
if (timeout < 0)
|
||||
dev_err(hpbdev->shdma_dev.dma_dev.dev,
|
||||
"%s timeout\n", __func__);
|
||||
|
||||
rstr &= ~data;
|
||||
iowrite32(rstr, hpbdev->reset_reg);
|
||||
spin_unlock(&hpbdev->reg_lock);
|
||||
}
|
||||
|
||||
static void hpb_dmae_set_async_mode(struct hpb_dmae_device *hpbdev,
|
||||
u32 mask, u32 data)
|
||||
{
|
||||
u32 mode;
|
||||
|
||||
spin_lock_irq(&hpbdev->reg_lock);
|
||||
mode = asyncmdr_read(hpbdev);
|
||||
mode &= ~mask;
|
||||
mode |= data;
|
||||
asyncmdr_write(hpbdev, mode);
|
||||
spin_unlock_irq(&hpbdev->reg_lock);
|
||||
}
|
||||
|
||||
static void hpb_dmae_ctl_stop(struct hpb_dmae_device *hpbdev)
|
||||
{
|
||||
dcmdr_write(hpbdev, HPB_DMAE_DCMDR_DQSPD);
|
||||
}
|
||||
|
||||
static void hpb_dmae_reset(struct hpb_dmae_device *hpbdev)
|
||||
{
|
||||
u32 ch;
|
||||
|
||||
for (ch = 0; ch < hpbdev->pdata->num_hw_channels; ch++)
|
||||
hsrstr_write(hpbdev, ch);
|
||||
}
|
||||
|
||||
static unsigned int calc_xmit_shift(struct hpb_dmae_chan *hpb_chan)
|
||||
{
|
||||
struct hpb_dmae_device *hpbdev = to_dev(hpb_chan);
|
||||
struct hpb_dmae_pdata *pdata = hpbdev->pdata;
|
||||
int width = ch_reg_read(hpb_chan, HPB_DMAE_DCR);
|
||||
int i;
|
||||
|
||||
switch (width & (HPB_DMAE_DCR_SPDS_MASK | HPB_DMAE_DCR_DPDS_MASK)) {
|
||||
case HPB_DMAE_DCR_SPDS_8BIT | HPB_DMAE_DCR_DPDS_8BIT:
|
||||
default:
|
||||
i = XMIT_SZ_8BIT;
|
||||
break;
|
||||
case HPB_DMAE_DCR_SPDS_16BIT | HPB_DMAE_DCR_DPDS_16BIT:
|
||||
i = XMIT_SZ_16BIT;
|
||||
break;
|
||||
case HPB_DMAE_DCR_SPDS_32BIT | HPB_DMAE_DCR_DPDS_32BIT:
|
||||
i = XMIT_SZ_32BIT;
|
||||
break;
|
||||
}
|
||||
return pdata->ts_shift[i];
|
||||
}
|
||||
|
||||
static void hpb_dmae_set_reg(struct hpb_dmae_chan *hpb_chan,
|
||||
struct hpb_dmae_regs *hw, unsigned plane)
|
||||
{
|
||||
ch_reg_write(hpb_chan, hw->sar,
|
||||
plane ? HPB_DMAE_DSAR1 : HPB_DMAE_DSAR0);
|
||||
ch_reg_write(hpb_chan, hw->dar,
|
||||
plane ? HPB_DMAE_DDAR1 : HPB_DMAE_DDAR0);
|
||||
ch_reg_write(hpb_chan, hw->tcr >> hpb_chan->xmit_shift,
|
||||
plane ? HPB_DMAE_DTCR1 : HPB_DMAE_DTCR0);
|
||||
}
|
||||
|
||||
static void hpb_dmae_start(struct hpb_dmae_chan *hpb_chan, bool next)
|
||||
{
|
||||
ch_reg_write(hpb_chan, (next ? HPB_DMAE_DCMDR_DNXT : 0) |
|
||||
HPB_DMAE_DCMDR_DMEN, HPB_DMAE_DCMDR);
|
||||
}
|
||||
|
||||
static void hpb_dmae_halt(struct shdma_chan *schan)
|
||||
{
|
||||
struct hpb_dmae_chan *chan = to_chan(schan);
|
||||
|
||||
ch_reg_write(chan, HPB_DMAE_DCMDR_DQEND, HPB_DMAE_DCMDR);
|
||||
ch_reg_write(chan, HPB_DMAE_DSTPR_DMSTP, HPB_DMAE_DSTPR);
|
||||
|
||||
chan->plane_idx = 0;
|
||||
chan->first_desc = true;
|
||||
}
|
||||
|
||||
static const struct hpb_dmae_slave_config *
|
||||
hpb_dmae_find_slave(struct hpb_dmae_chan *hpb_chan, int slave_id)
|
||||
{
|
||||
struct hpb_dmae_device *hpbdev = to_dev(hpb_chan);
|
||||
struct hpb_dmae_pdata *pdata = hpbdev->pdata;
|
||||
int i;
|
||||
|
||||
if (slave_id >= HPB_DMA_SLAVE_NUMBER)
|
||||
return NULL;
|
||||
|
||||
for (i = 0; i < pdata->num_slaves; i++)
|
||||
if (pdata->slaves[i].id == slave_id)
|
||||
return pdata->slaves + i;
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static void hpb_dmae_start_xfer(struct shdma_chan *schan,
|
||||
struct shdma_desc *sdesc)
|
||||
{
|
||||
struct hpb_dmae_chan *chan = to_chan(schan);
|
||||
struct hpb_dmae_device *hpbdev = to_dev(chan);
|
||||
struct hpb_desc *desc = to_desc(sdesc);
|
||||
|
||||
if (chan->cfg->flags & HPB_DMAE_SET_ASYNC_RESET)
|
||||
hpb_dmae_async_reset(hpbdev, chan->cfg->rstr);
|
||||
|
||||
desc->plane_idx = chan->plane_idx;
|
||||
hpb_dmae_set_reg(chan, &desc->hw, chan->plane_idx);
|
||||
hpb_dmae_start(chan, !chan->first_desc);
|
||||
|
||||
if (chan->xfer_mode == XFER_DOUBLE) {
|
||||
chan->plane_idx ^= 1;
|
||||
chan->first_desc = false;
|
||||
}
|
||||
}
|
||||
|
||||
static bool hpb_dmae_desc_completed(struct shdma_chan *schan,
|
||||
struct shdma_desc *sdesc)
|
||||
{
|
||||
/*
|
||||
* This is correct since we always have at most single
|
||||
* outstanding DMA transfer per channel, and by the time
|
||||
* we get completion interrupt the transfer is completed.
|
||||
* This will change if we ever use alternating DMA
|
||||
* information sets and submit two descriptors at once.
|
||||
*/
|
||||
return true;
|
||||
}
|
||||
|
||||
static bool hpb_dmae_chan_irq(struct shdma_chan *schan, int irq)
|
||||
{
|
||||
struct hpb_dmae_chan *chan = to_chan(schan);
|
||||
struct hpb_dmae_device *hpbdev = to_dev(chan);
|
||||
int ch = chan->cfg->dma_ch;
|
||||
|
||||
/* Check Complete DMA Transfer */
|
||||
if (dintsr_read(hpbdev, ch)) {
|
||||
/* Clear Interrupt status */
|
||||
dintcr_write(hpbdev, ch);
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
static int hpb_dmae_desc_setup(struct shdma_chan *schan,
|
||||
struct shdma_desc *sdesc,
|
||||
dma_addr_t src, dma_addr_t dst, size_t *len)
|
||||
{
|
||||
struct hpb_desc *desc = to_desc(sdesc);
|
||||
|
||||
if (*len > (size_t)HPB_DMA_TCR_MAX)
|
||||
*len = (size_t)HPB_DMA_TCR_MAX;
|
||||
|
||||
desc->hw.sar = src;
|
||||
desc->hw.dar = dst;
|
||||
desc->hw.tcr = *len;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static size_t hpb_dmae_get_partial(struct shdma_chan *schan,
|
||||
struct shdma_desc *sdesc)
|
||||
{
|
||||
struct hpb_desc *desc = to_desc(sdesc);
|
||||
struct hpb_dmae_chan *chan = to_chan(schan);
|
||||
u32 tcr = ch_reg_read(chan, desc->plane_idx ?
|
||||
HPB_DMAE_DTCR1 : HPB_DMAE_DTCR0);
|
||||
|
||||
return (desc->hw.tcr - tcr) << chan->xmit_shift;
|
||||
}
|
||||
|
||||
static bool hpb_dmae_channel_busy(struct shdma_chan *schan)
|
||||
{
|
||||
struct hpb_dmae_chan *chan = to_chan(schan);
|
||||
u32 dstsr = ch_reg_read(chan, HPB_DMAE_DSTSR);
|
||||
|
||||
if (chan->xfer_mode == XFER_DOUBLE)
|
||||
return dstsr & HPB_DMAE_DSTSR_DQSTS;
|
||||
else
|
||||
return dstsr & HPB_DMAE_DSTSR_DMSTS;
|
||||
}
|
||||
|
||||
static int
|
||||
hpb_dmae_alloc_chan_resources(struct hpb_dmae_chan *hpb_chan,
|
||||
const struct hpb_dmae_slave_config *cfg)
|
||||
{
|
||||
struct hpb_dmae_device *hpbdev = to_dev(hpb_chan);
|
||||
struct hpb_dmae_pdata *pdata = hpbdev->pdata;
|
||||
const struct hpb_dmae_channel *channel = pdata->channels;
|
||||
int slave_id = cfg->id;
|
||||
int i, err;
|
||||
|
||||
for (i = 0; i < pdata->num_channels; i++, channel++) {
|
||||
if (channel->s_id == slave_id) {
|
||||
struct device *dev = hpb_chan->shdma_chan.dev;
|
||||
|
||||
hpb_chan->base = hpbdev->chan_reg +
|
||||
HPB_DMAE_CHAN(cfg->dma_ch);
|
||||
|
||||
dev_dbg(dev, "Detected Slave device\n");
|
||||
dev_dbg(dev, " -- slave_id : 0x%x\n", slave_id);
|
||||
dev_dbg(dev, " -- cfg->dma_ch : %d\n", cfg->dma_ch);
|
||||
dev_dbg(dev, " -- channel->ch_irq: %d\n",
|
||||
channel->ch_irq);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
err = shdma_request_irq(&hpb_chan->shdma_chan, channel->ch_irq,
|
||||
IRQF_SHARED, hpb_chan->dev_id);
|
||||
if (err) {
|
||||
dev_err(hpb_chan->shdma_chan.dev,
|
||||
"DMA channel request_irq %d failed with error %d\n",
|
||||
channel->ch_irq, err);
|
||||
return err;
|
||||
}
|
||||
|
||||
hpb_chan->plane_idx = 0;
|
||||
hpb_chan->first_desc = true;
|
||||
|
||||
if ((cfg->dcr & (HPB_DMAE_DCR_CT | HPB_DMAE_DCR_DIP)) == 0) {
|
||||
hpb_chan->xfer_mode = XFER_SINGLE;
|
||||
} else if ((cfg->dcr & (HPB_DMAE_DCR_CT | HPB_DMAE_DCR_DIP)) ==
|
||||
(HPB_DMAE_DCR_CT | HPB_DMAE_DCR_DIP)) {
|
||||
hpb_chan->xfer_mode = XFER_DOUBLE;
|
||||
} else {
|
||||
dev_err(hpb_chan->shdma_chan.dev, "DCR setting error");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (cfg->flags & HPB_DMAE_SET_ASYNC_MODE)
|
||||
hpb_dmae_set_async_mode(hpbdev, cfg->mdm, cfg->mdr);
|
||||
ch_reg_write(hpb_chan, cfg->dcr, HPB_DMAE_DCR);
|
||||
ch_reg_write(hpb_chan, cfg->port, HPB_DMAE_DPTR);
|
||||
hpb_chan->xmit_shift = calc_xmit_shift(hpb_chan);
|
||||
hpb_dmae_enable_int(hpbdev, cfg->dma_ch);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int hpb_dmae_set_slave(struct shdma_chan *schan, int slave_id,
|
||||
dma_addr_t slave_addr, bool try)
|
||||
{
|
||||
struct hpb_dmae_chan *chan = to_chan(schan);
|
||||
const struct hpb_dmae_slave_config *sc =
|
||||
hpb_dmae_find_slave(chan, slave_id);
|
||||
|
||||
if (!sc)
|
||||
return -ENODEV;
|
||||
if (try)
|
||||
return 0;
|
||||
chan->cfg = sc;
|
||||
chan->slave_addr = slave_addr ? : sc->addr;
|
||||
return hpb_dmae_alloc_chan_resources(chan, sc);
|
||||
}
|
||||
|
||||
static void hpb_dmae_setup_xfer(struct shdma_chan *schan, int slave_id)
|
||||
{
|
||||
}
|
||||
|
||||
static dma_addr_t hpb_dmae_slave_addr(struct shdma_chan *schan)
|
||||
{
|
||||
struct hpb_dmae_chan *chan = to_chan(schan);
|
||||
|
||||
return chan->slave_addr;
|
||||
}
|
||||
|
||||
static struct shdma_desc *hpb_dmae_embedded_desc(void *buf, int i)
|
||||
{
|
||||
return &((struct hpb_desc *)buf)[i].shdma_desc;
|
||||
}
|
||||
|
||||
static const struct shdma_ops hpb_dmae_ops = {
|
||||
.desc_completed = hpb_dmae_desc_completed,
|
||||
.halt_channel = hpb_dmae_halt,
|
||||
.channel_busy = hpb_dmae_channel_busy,
|
||||
.slave_addr = hpb_dmae_slave_addr,
|
||||
.desc_setup = hpb_dmae_desc_setup,
|
||||
.set_slave = hpb_dmae_set_slave,
|
||||
.setup_xfer = hpb_dmae_setup_xfer,
|
||||
.start_xfer = hpb_dmae_start_xfer,
|
||||
.embedded_desc = hpb_dmae_embedded_desc,
|
||||
.chan_irq = hpb_dmae_chan_irq,
|
||||
.get_partial = hpb_dmae_get_partial,
|
||||
};
|
||||
|
||||
static int hpb_dmae_chan_probe(struct hpb_dmae_device *hpbdev, int id)
|
||||
{
|
||||
struct shdma_dev *sdev = &hpbdev->shdma_dev;
|
||||
struct platform_device *pdev =
|
||||
to_platform_device(hpbdev->shdma_dev.dma_dev.dev);
|
||||
struct hpb_dmae_chan *new_hpb_chan;
|
||||
struct shdma_chan *schan;
|
||||
|
||||
/* Alloc channel */
|
||||
new_hpb_chan = devm_kzalloc(&pdev->dev,
|
||||
sizeof(struct hpb_dmae_chan), GFP_KERNEL);
|
||||
if (!new_hpb_chan) {
|
||||
dev_err(hpbdev->shdma_dev.dma_dev.dev,
|
||||
"No free memory for allocating DMA channels!\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
schan = &new_hpb_chan->shdma_chan;
|
||||
schan->max_xfer_len = HPB_DMA_TCR_MAX;
|
||||
|
||||
shdma_chan_probe(sdev, schan, id);
|
||||
|
||||
if (pdev->id >= 0)
|
||||
snprintf(new_hpb_chan->dev_id, sizeof(new_hpb_chan->dev_id),
|
||||
"hpb-dmae%d.%d", pdev->id, id);
|
||||
else
|
||||
snprintf(new_hpb_chan->dev_id, sizeof(new_hpb_chan->dev_id),
|
||||
"hpb-dma.%d", id);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int hpb_dmae_probe(struct platform_device *pdev)
|
||||
{
|
||||
struct hpb_dmae_pdata *pdata = pdev->dev.platform_data;
|
||||
struct hpb_dmae_device *hpbdev;
|
||||
struct dma_device *dma_dev;
|
||||
struct resource *chan, *comm, *rest, *mode, *irq_res;
|
||||
int err, i;
|
||||
|
||||
/* Get platform data */
|
||||
if (!pdata || !pdata->num_channels)
|
||||
return -ENODEV;
|
||||
|
||||
chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
||||
comm = platform_get_resource(pdev, IORESOURCE_MEM, 1);
|
||||
rest = platform_get_resource(pdev, IORESOURCE_MEM, 2);
|
||||
mode = platform_get_resource(pdev, IORESOURCE_MEM, 3);
|
||||
|
||||
irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
|
||||
if (!irq_res)
|
||||
return -ENODEV;
|
||||
|
||||
hpbdev = devm_kzalloc(&pdev->dev, sizeof(struct hpb_dmae_device),
|
||||
GFP_KERNEL);
|
||||
if (!hpbdev) {
|
||||
dev_err(&pdev->dev, "Not enough memory\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
hpbdev->chan_reg = devm_ioremap_resource(&pdev->dev, chan);
|
||||
if (IS_ERR(hpbdev->chan_reg))
|
||||
return PTR_ERR(hpbdev->chan_reg);
|
||||
|
||||
hpbdev->comm_reg = devm_ioremap_resource(&pdev->dev, comm);
|
||||
if (IS_ERR(hpbdev->comm_reg))
|
||||
return PTR_ERR(hpbdev->comm_reg);
|
||||
|
||||
hpbdev->reset_reg = devm_ioremap_resource(&pdev->dev, rest);
|
||||
if (IS_ERR(hpbdev->reset_reg))
|
||||
return PTR_ERR(hpbdev->reset_reg);
|
||||
|
||||
hpbdev->mode_reg = devm_ioremap_resource(&pdev->dev, mode);
|
||||
if (IS_ERR(hpbdev->mode_reg))
|
||||
return PTR_ERR(hpbdev->mode_reg);
|
||||
|
||||
dma_dev = &hpbdev->shdma_dev.dma_dev;
|
||||
|
||||
spin_lock_init(&hpbdev->reg_lock);
|
||||
|
||||
/* Platform data */
|
||||
hpbdev->pdata = pdata;
|
||||
|
||||
pm_runtime_enable(&pdev->dev);
|
||||
err = pm_runtime_get_sync(&pdev->dev);
|
||||
if (err < 0)
|
||||
dev_err(&pdev->dev, "%s(): GET = %d\n", __func__, err);
|
||||
|
||||
/* Reset DMA controller */
|
||||
hpb_dmae_reset(hpbdev);
|
||||
|
||||
pm_runtime_put(&pdev->dev);
|
||||
|
||||
dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
|
||||
dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
|
||||
|
||||
hpbdev->shdma_dev.ops = &hpb_dmae_ops;
|
||||
hpbdev->shdma_dev.desc_size = sizeof(struct hpb_desc);
|
||||
err = shdma_init(&pdev->dev, &hpbdev->shdma_dev, pdata->num_channels);
|
||||
if (err < 0)
|
||||
goto error;
|
||||
|
||||
/* Create DMA channels */
|
||||
for (i = 0; i < pdata->num_channels; i++)
|
||||
hpb_dmae_chan_probe(hpbdev, i);
|
||||
|
||||
platform_set_drvdata(pdev, hpbdev);
|
||||
err = dma_async_device_register(dma_dev);
|
||||
if (!err)
|
||||
return 0;
|
||||
|
||||
shdma_cleanup(&hpbdev->shdma_dev);
|
||||
error:
|
||||
pm_runtime_disable(&pdev->dev);
|
||||
return err;
|
||||
}
|
||||
|
||||
static void hpb_dmae_chan_remove(struct hpb_dmae_device *hpbdev)
|
||||
{
|
||||
struct dma_device *dma_dev = &hpbdev->shdma_dev.dma_dev;
|
||||
struct shdma_chan *schan;
|
||||
int i;
|
||||
|
||||
shdma_for_each_chan(schan, &hpbdev->shdma_dev, i) {
|
||||
BUG_ON(!schan);
|
||||
|
||||
shdma_chan_remove(schan);
|
||||
}
|
||||
dma_dev->chancnt = 0;
|
||||
}
|
||||
|
||||
static int hpb_dmae_remove(struct platform_device *pdev)
|
||||
{
|
||||
struct hpb_dmae_device *hpbdev = platform_get_drvdata(pdev);
|
||||
|
||||
dma_async_device_unregister(&hpbdev->shdma_dev.dma_dev);
|
||||
|
||||
pm_runtime_disable(&pdev->dev);
|
||||
|
||||
hpb_dmae_chan_remove(hpbdev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void hpb_dmae_shutdown(struct platform_device *pdev)
|
||||
{
|
||||
struct hpb_dmae_device *hpbdev = platform_get_drvdata(pdev);
|
||||
hpb_dmae_ctl_stop(hpbdev);
|
||||
}
|
||||
|
||||
static struct platform_driver hpb_dmae_driver = {
|
||||
.probe = hpb_dmae_probe,
|
||||
.remove = hpb_dmae_remove,
|
||||
.shutdown = hpb_dmae_shutdown,
|
||||
.driver = {
|
||||
.owner = THIS_MODULE,
|
||||
.name = "hpb-dma-engine",
|
||||
},
|
||||
};
|
||||
module_platform_driver(hpb_dmae_driver);
|
||||
|
||||
MODULE_AUTHOR("Max Filippov <max.filippov@cogentembedded.com>");
|
||||
MODULE_DESCRIPTION("Renesas HPB DMA Engine driver");
|
||||
MODULE_LICENSE("GPL");
|
51
drivers/dma/sh/shdma-arm.h
Normal file
51
drivers/dma/sh/shdma-arm.h
Normal file
|
@ -0,0 +1,51 @@
|
|||
/*
|
||||
* Renesas SuperH DMA Engine support
|
||||
*
|
||||
* Copyright (C) 2013 Renesas Electronics, Inc.
|
||||
*
|
||||
* This is free software; you can redistribute it and/or modify it under the
|
||||
* terms of version 2 the GNU General Public License as published by the Free
|
||||
* Software Foundation.
|
||||
*/
|
||||
|
||||
#ifndef SHDMA_ARM_H
#define SHDMA_ARM_H

#include "shdma.h"

/* Transmit sizes and respective CHCR register values */
enum {
	XMIT_SZ_8BIT		= 0,
	XMIT_SZ_16BIT		= 1,
	XMIT_SZ_32BIT		= 2,
	XMIT_SZ_64BIT		= 7,
	XMIT_SZ_128BIT		= 3,
	XMIT_SZ_256BIT		= 4,
	XMIT_SZ_512BIT		= 5,
};

/* log2(size / 8) - used to calculate number of transfers */
#define SH_DMAE_TS_SHIFT {		\
	[XMIT_SZ_8BIT]		= 0,	\
	[XMIT_SZ_16BIT]		= 1,	\
	[XMIT_SZ_32BIT]		= 2,	\
	[XMIT_SZ_64BIT]		= 3,	\
	[XMIT_SZ_128BIT]	= 4,	\
	[XMIT_SZ_256BIT]	= 5,	\
	[XMIT_SZ_512BIT]	= 6,	\
}

/*
 * The 4-bit transfer-size index is split across two CHCR bit fields:
 * the low two bits and the high two bits live at different offsets.
 */
#define TS_LOW_BIT	0x3 /* --xx */
#define TS_HI_BIT	0xc /* xx-- */

#define TS_LOW_SHIFT	(3)
#define TS_HI_SHIFT	(20 - 2)	/* 2 bits for shifted low TS */

/* Recombine a transfer-size index into its in-register CHCR encoding. */
#define TS_INDEX2VAL(i) \
	((((i) & TS_LOW_BIT) << TS_LOW_SHIFT) |\
	 (((i) & TS_HI_BIT)  << TS_HI_SHIFT))

/* Ready-made CHCR values for slave TX (mem -> dev) and RX (dev -> mem). */
#define CHCR_TX(xmit_sz) (DM_FIX | SM_INC | RS_ERS | TS_INDEX2VAL((xmit_sz)))
#define CHCR_RX(xmit_sz) (DM_INC | SM_FIX | RS_ERS | TS_INDEX2VAL((xmit_sz)))

#endif
|
1035
drivers/dma/sh/shdma-base.c
Normal file
1035
drivers/dma/sh/shdma-base.c
Normal file
File diff suppressed because it is too large
Load diff
80
drivers/dma/sh/shdma-of.c
Normal file
80
drivers/dma/sh/shdma-of.c
Normal file
|
@ -0,0 +1,80 @@
|
|||
/*
|
||||
* SHDMA Device Tree glue
|
||||
*
|
||||
* Copyright (C) 2013 Renesas Electronics Inc.
|
||||
* Author: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
|
||||
*
|
||||
* This is free software; you can redistribute it and/or modify
|
||||
* it under the terms of version 2 of the GNU General Public License as
|
||||
* published by the Free Software Foundation.
|
||||
*/
|
||||
|
||||
#include <linux/dmaengine.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/of_dma.h>
|
||||
#include <linux/of_platform.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/shdma-base.h>
|
||||
|
||||
#define to_shdma_chan(c) container_of(c, struct shdma_chan, dma_chan)
|
||||
|
||||
/*
 * DT xlate callback: translate a one-cell DMA specifier (the hardware
 * request ID) into a slave channel obtained via the generic filter.
 * Returns NULL on a malformed specifier or when no channel is available.
 */
static struct dma_chan *shdma_of_xlate(struct of_phandle_args *dma_spec,
				       struct of_dma *ofdma)
{
	u32 id = dma_spec->args[0];
	dma_cap_mask_t mask;
	struct dma_chan *chan;

	if (dma_spec->args_count != 1)
		return NULL;

	dma_cap_zero(mask);
	/* Only slave DMA channels can be allocated via DT */
	dma_cap_set(DMA_SLAVE, mask);

	chan = dma_request_channel(mask, shdma_chan_filter,
				   (void *)(uintptr_t)id);
	if (chan)
		/* Remember the hardware request line the DT asked for. */
		to_shdma_chan(chan)->hw_req = id;

	return chan;
}
|
||||
|
||||
/*
 * Register this node as a DMA provider, then instantiate the child DMAC
 * platform devices beneath it (auxdata may be passed via platform data).
 * The provider registration is rolled back if child population fails.
 */
static int shdma_of_probe(struct platform_device *pdev)
{
	const struct of_dev_auxdata *lookup = dev_get_platdata(&pdev->dev);
	int ret;

	ret = of_dma_controller_register(pdev->dev.of_node,
					 shdma_of_xlate, pdev);
	if (ret < 0)
		return ret;

	ret = of_platform_populate(pdev->dev.of_node, NULL, lookup, &pdev->dev);
	if (ret < 0)
		of_dma_controller_free(pdev->dev.of_node);

	return ret;
}
|
||||
|
||||
/* DT match table for the shdma multiplexer node. */
static const struct of_device_id shdma_of_match[] = {
	{ .compatible = "renesas,shdma-mux", },
	{ }
};
/*
 * Fix: this previously read MODULE_DEVICE_TABLE(of, sh_dmae_of_match),
 * referencing a symbol that does not exist in this file. The macro aliases
 * the named table, so a modular build fails to link and module autoloading
 * is broken; it must name the actual table, shdma_of_match.
 */
MODULE_DEVICE_TABLE(of, shdma_of_match);
|
||||
|
||||
/* Platform driver for the DT "renesas,shdma-mux" glue node. */
static struct platform_driver shdma_of = {
	.driver		= {
		.owner	= THIS_MODULE,
		.name	= "shdma-of",
		.of_match_table = shdma_of_match,
	},
	.probe		= shdma_of_probe,
};

module_platform_driver(shdma_of);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("SH-DMA driver DT glue");
MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");
|
77
drivers/dma/sh/shdma-r8a73a4.c
Normal file
77
drivers/dma/sh/shdma-r8a73a4.c
Normal file
|
@ -0,0 +1,77 @@
|
|||
/*
|
||||
* Renesas SuperH DMA Engine support for r8a73a4 (APE6) SoCs
|
||||
*
|
||||
* Copyright (C) 2013 Renesas Electronics, Inc.
|
||||
*
|
||||
* This is free software; you can redistribute it and/or modify it under the
|
||||
* terms of version 2 the GNU General Public License as published by the Free
|
||||
* Software Foundation.
|
||||
*/
|
||||
#include <linux/sh_dma.h>
|
||||
|
||||
#include "shdma-arm.h"
|
||||
|
||||
/* Per transfer-size-index log2(bytes) table; see SH_DMAE_TS_SHIFT. */
const unsigned int dma_ts_shift[] = SH_DMAE_TS_SHIFT;

/* Slave (MID/RID) configurations for the r8a73a4 MMC controllers. */
static const struct sh_dmae_slave_config dma_slaves[] = {
	{
		.chcr		= CHCR_TX(XMIT_SZ_32BIT),
		.mid_rid	= 0xd1,		/* MMC0 Tx */
	}, {
		.chcr		= CHCR_RX(XMIT_SZ_32BIT),
		.mid_rid	= 0xd2,		/* MMC0 Rx */
	}, {
		.chcr		= CHCR_TX(XMIT_SZ_32BIT),
		.mid_rid	= 0xe1,		/* MMC1 Tx */
	}, {
		.chcr		= CHCR_RX(XMIT_SZ_32BIT),
		.mid_rid	= 0xe2,		/* MMC1 Rx */
	},
};

/*
 * Channel register layout: @a is the raw channel base, biased by -0x20
 * because the common block occupies the first 0x20 bytes; DMARS sits
 * 0x40 beyond the channel base, and one shared bitwise CHCLR at 0x80.
 */
#define DMAE_CHANNEL(a, b)				\
	{						\
		.offset		= (a) - 0x20,		\
		.dmars		= (a) - 0x20 + 0x40,	\
		.chclr_bit	= (b),			\
		.chclr_offset	= 0x80 - 0x20,		\
	}

/* 20 channels, 0x80 bytes apart starting at 0x8000. */
static const struct sh_dmae_channel dma_channels[] = {
	DMAE_CHANNEL(0x8000, 0),
	DMAE_CHANNEL(0x8080, 1),
	DMAE_CHANNEL(0x8100, 2),
	DMAE_CHANNEL(0x8180, 3),
	DMAE_CHANNEL(0x8200, 4),
	DMAE_CHANNEL(0x8280, 5),
	DMAE_CHANNEL(0x8300, 6),
	DMAE_CHANNEL(0x8380, 7),
	DMAE_CHANNEL(0x8400, 8),
	DMAE_CHANNEL(0x8480, 9),
	DMAE_CHANNEL(0x8500, 10),
	DMAE_CHANNEL(0x8580, 11),
	DMAE_CHANNEL(0x8600, 12),
	DMAE_CHANNEL(0x8680, 13),
	DMAE_CHANNEL(0x8700, 14),
	DMAE_CHANNEL(0x8780, 15),
	DMAE_CHANNEL(0x8800, 16),
	DMAE_CHANNEL(0x8880, 17),
	DMAE_CHANNEL(0x8900, 18),
	DMAE_CHANNEL(0x8980, 19),
};

/* Exported to shdmac.c via r8a73a4_shdma_devid (see shdma.h). */
const struct sh_dmae_pdata r8a73a4_dma_pdata = {
	.slave		= dma_slaves,
	.slave_num	= ARRAY_SIZE(dma_slaves),
	.channel	= dma_channels,
	.channel_num	= ARRAY_SIZE(dma_channels),
	.ts_low_shift	= TS_LOW_SHIFT,
	.ts_low_mask	= TS_LOW_BIT << TS_LOW_SHIFT,
	.ts_high_shift	= TS_HI_SHIFT,
	.ts_high_mask	= TS_HI_BIT << TS_HI_SHIFT,
	.ts_shift	= dma_ts_shift,
	.ts_shift_num	= ARRAY_SIZE(dma_ts_shift),
	.dmaor_init	= DMAOR_DME,
	.chclr_present	= 1,
	.chclr_bitwise	= 1,
};
|
72
drivers/dma/sh/shdma.h
Normal file
72
drivers/dma/sh/shdma.h
Normal file
|
@ -0,0 +1,72 @@
|
|||
/*
|
||||
* Renesas SuperH DMA Engine support
|
||||
*
|
||||
* Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
|
||||
* Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
|
||||
*
|
||||
* This is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
*/
|
||||
#ifndef __DMA_SHDMA_H
#define __DMA_SHDMA_H

#include <linux/sh_dma.h>
#include <linux/shdma-base.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/list.h>

#define SH_DMAE_MAX_CHANNELS 20
#define SH_DMAE_TCR_MAX 0x00FFFFFF	/* 16MB */

struct device;

/* One SuperH DMAC channel, wrapping the generic shdma channel. */
struct sh_dmae_chan {
	struct shdma_chan shdma_chan;
	const struct sh_dmae_slave_config *config; /* Slave DMA configuration */
	int xmit_shift;			/* log_2(bytes_per_xfer) */
	void __iomem *base;		/* channel register window */
	char dev_id[16];		/* unique name per DMAC of channel */
	int pm_error;
	dma_addr_t slave_addr;		/* slave FIFO address for this channel */
};

/* One SuperH DMAC instance with its channels and register windows. */
struct sh_dmae_device {
	struct shdma_dev shdma_dev;
	struct sh_dmae_chan *chan[SH_DMAE_MAX_CHANNELS];
	const struct sh_dmae_pdata *pdata;
	struct list_head node;		/* entry in the global device list */
	void __iomem *chan_reg;		/* base of all channel registers */
	void __iomem *dmars;		/* DMARS window, may be NULL */
	unsigned int chcr_offset;	/* CHCR offset within a channel */
	u32 chcr_ie_bit;		/* interrupt-enable bit in CHCR */
};

/* Shadow of the three per-transfer hardware registers. */
struct sh_dmae_regs {
	u32 sar; /* SAR / source address */
	u32 dar; /* DAR / destination address */
	u32 tcr; /* TCR / transfer count */
};

/* Hardware descriptor embedding the generic shdma descriptor. */
struct sh_dmae_desc {
	struct sh_dmae_regs hw;
	struct shdma_desc shdma_desc;
};

#define to_sh_chan(chan) container_of(chan, struct sh_dmae_chan, shdma_chan)
#define to_sh_desc(lh) container_of(lh, struct sh_desc, node)
#define tx_to_sh_desc(tx) container_of(tx, struct sh_desc, async_tx)
#define to_sh_dev(chan) container_of(chan->shdma_chan.dma_chan.device,\
				     struct sh_dmae_device, shdma_dev.dma_dev)

/* Platform data hook for the r8a73a4 variant; NULL when not configured. */
#ifdef CONFIG_SH_DMAE_R8A73A4
extern const struct sh_dmae_pdata r8a73a4_dma_pdata;
#define r8a73a4_shdma_devid (&r8a73a4_dma_pdata)
#else
#define r8a73a4_shdma_devid NULL
#endif

#endif	/* __DMA_SHDMA_H */
|
959
drivers/dma/sh/shdmac.c
Normal file
959
drivers/dma/sh/shdmac.c
Normal file
|
@ -0,0 +1,959 @@
|
|||
/*
|
||||
* Renesas SuperH DMA Engine support
|
||||
*
|
||||
* base is drivers/dma/flsdma.c
|
||||
*
|
||||
* Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
|
||||
* Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
|
||||
* Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
|
||||
* Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
|
||||
*
|
||||
* This is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* - DMA of SuperH does not have Hardware DMA chain mode.
|
||||
* - MAX DMA size is 16MB.
|
||||
*
|
||||
*/
|
||||
|
||||
#include <linux/delay.h>
|
||||
#include <linux/dmaengine.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/kdebug.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/notifier.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/of_device.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/pm_runtime.h>
|
||||
#include <linux/rculist.h>
|
||||
#include <linux/sh_dma.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/spinlock.h>
|
||||
|
||||
#include "../dmaengine.h"
|
||||
#include "shdma.h"
|
||||
|
||||
/* DMA registers */
|
||||
#define SAR 0x00 /* Source Address Register */
|
||||
#define DAR 0x04 /* Destination Address Register */
|
||||
#define TCR 0x08 /* Transfer Count Register */
|
||||
#define CHCR 0x0C /* Channel Control Register */
|
||||
#define DMAOR 0x40 /* DMA Operation Register */
|
||||
|
||||
#define TEND 0x18 /* USB-DMAC */
|
||||
|
||||
#define SH_DMAE_DRV_NAME "sh-dma-engine"
|
||||
|
||||
/* Default MEMCPY transfer size = 2^2 = 4 bytes */
|
||||
#define LOG2_DEFAULT_XFER_SIZE 2
|
||||
#define SH_DMA_SLAVE_NUMBER 256
|
||||
#define SH_DMA_TCR_MAX (16 * 1024 * 1024 - 1)
|
||||
|
||||
/*
|
||||
* Used for write-side mutual exclusion for the global device list,
|
||||
* read-side synchronization by way of RCU, and per-controller data.
|
||||
*/
|
||||
static DEFINE_SPINLOCK(sh_dmae_lock);
|
||||
static LIST_HEAD(sh_dmae_devices);
|
||||
|
||||
/*
|
||||
* Different DMAC implementations provide different ways to clear DMA channels:
|
||||
* (1) none - no CHCLR registers are available
|
||||
* (2) one CHCLR register per channel - 0 has to be written to it to clear
|
||||
* channel buffers
|
||||
* (3) one CHCLR per several channels - 1 has to be written to the bit,
|
||||
* corresponding to the specific channel to reset it
|
||||
*/
|
||||
/* Clear one channel's buffers via its CHCLR register (variants 2 and 3). */
static void channel_clear(struct sh_dmae_chan *sh_dc)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
	const struct sh_dmae_channel *chan_pdata = shdev->pdata->channel +
		sh_dc->shdma_chan.id;
	/* Bitwise CHCLR: set this channel's bit; per-channel CHCLR: write 0. */
	u32 val = shdev->pdata->chclr_bitwise ? 1 << chan_pdata->chclr_bit : 0;

	__raw_writel(val, shdev->chan_reg + chan_pdata->chclr_offset);
}

/* Write a 32-bit channel register at offset @reg. */
static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
{
	__raw_writel(data, sh_dc->base + reg);
}

/* Read a 32-bit channel register at offset @reg. */
static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
{
	return __raw_readl(sh_dc->base + reg);
}

/* Read DMAOR, which is 16 or 32 bits wide depending on the SoC. */
static u16 dmaor_read(struct sh_dmae_device *shdev)
{
	void __iomem *addr = shdev->chan_reg + DMAOR;

	if (shdev->pdata->dmaor_is_32bit)
		return __raw_readl(addr);
	else
		return __raw_readw(addr);
}

/* Write DMAOR with the access width matching the SoC. */
static void dmaor_write(struct sh_dmae_device *shdev, u16 data)
{
	void __iomem *addr = shdev->chan_reg + DMAOR;

	if (shdev->pdata->dmaor_is_32bit)
		__raw_writel(data, addr);
	else
		__raw_writew(data, addr);
}

/* Write CHCR at the SoC-specific per-device offset. */
static void chcr_write(struct sh_dmae_chan *sh_dc, u32 data)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_dc);

	__raw_writel(data, sh_dc->base + shdev->chcr_offset);
}

/* Read CHCR at the SoC-specific per-device offset. */
static u32 chcr_read(struct sh_dmae_chan *sh_dc)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_dc);

	return __raw_readl(sh_dc->base + shdev->chcr_offset);
}
|
||||
|
||||
/*
|
||||
* Reset DMA controller
|
||||
*
|
||||
* SH7780 has two DMAOR register
|
||||
*/
|
||||
/*
 * Globally disable the controller: clear master-enable and the sticky
 * NMI/address-error flags in DMAOR, under the global register lock.
 */
static void sh_dmae_ctl_stop(struct sh_dmae_device *shdev)
{
	unsigned short dmaor;
	unsigned long flags;

	spin_lock_irqsave(&sh_dmae_lock, flags);

	dmaor = dmaor_read(shdev);
	dmaor_write(shdev, dmaor & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME));

	spin_unlock_irqrestore(&sh_dmae_lock, flags);
}

/*
 * Re-initialize the controller: clear channel buffers where the hardware
 * provides CHCLR, then program DMAOR with the platform's init value and
 * read it back to verify the error flags cleared and the value latched.
 * Returns 0 on success, -EIO if DMAOR still reports AE/NMIF.
 */
static int sh_dmae_rst(struct sh_dmae_device *shdev)
{
	unsigned short dmaor;
	unsigned long flags;

	spin_lock_irqsave(&sh_dmae_lock, flags);

	dmaor = dmaor_read(shdev) & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME);

	if (shdev->pdata->chclr_present) {
		int i;
		for (i = 0; i < shdev->pdata->channel_num; i++) {
			struct sh_dmae_chan *sh_chan = shdev->chan[i];
			if (sh_chan)
				channel_clear(sh_chan);
		}
	}

	dmaor_write(shdev, dmaor | shdev->pdata->dmaor_init);

	/* Read back to verify the write actually latched. */
	dmaor = dmaor_read(shdev);

	spin_unlock_irqrestore(&sh_dmae_lock, flags);

	if (dmaor & (DMAOR_AE | DMAOR_NMIF)) {
		dev_warn(shdev->shdma_dev.dma_dev.dev, "Can't initialize DMAOR.\n");
		return -EIO;
	}
	if (shdev->pdata->dmaor_init & ~dmaor)
		dev_warn(shdev->shdma_dev.dma_dev.dev,
			 "DMAOR=0x%x hasn't latched the initial value 0x%x.\n",
			 dmaor, shdev->pdata->dmaor_init);
	return 0;
}
|
||||
|
||||
/* A channel is busy while DE (enabled) is set and TE (done) is not. */
static bool dmae_is_busy(struct sh_dmae_chan *sh_chan)
{
	u32 chcr = chcr_read(sh_chan);

	if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE)
		return true; /* working */

	return false; /* waiting */
}

/*
 * Extract the transfer-size index from the two split CHCR bit fields and
 * look up log2(bytes per beat); falls back to index 0 when out of range.
 */
static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	const struct sh_dmae_pdata *pdata = shdev->pdata;
	int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) |
		((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift);

	if (cnt >= pdata->ts_shift_num)
		cnt = 0;

	return pdata->ts_shift[cnt];
}

/*
 * Inverse of calc_xmit_shift(): encode log2(bytes per beat) back into the
 * split CHCR transfer-size fields; index 0 when no match is found.
 */
static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	const struct sh_dmae_pdata *pdata = shdev->pdata;
	int i;

	for (i = 0; i < pdata->ts_shift_num; i++)
		if (pdata->ts_shift[i] == l2size)
			break;

	if (i == pdata->ts_shift_num)
		i = 0;

	return ((i << pdata->ts_low_shift) & pdata->ts_low_mask) |
		((i << pdata->ts_high_shift) & pdata->ts_high_mask);
}
|
||||
|
||||
/* Program SAR/DAR/TCR for one transfer; TCR counts beats, not bytes. */
static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw)
{
	sh_dmae_writel(sh_chan, hw->sar, SAR);
	sh_dmae_writel(sh_chan, hw->dar, DAR);
	sh_dmae_writel(sh_chan, hw->tcr >> sh_chan->xmit_shift, TCR);
}

/* Kick off the programmed transfer: set DE + IRQ enable, clear TE. */
static void dmae_start(struct sh_dmae_chan *sh_chan)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	u32 chcr = chcr_read(sh_chan);

	if (shdev->pdata->needs_tend_set)
		sh_dmae_writel(sh_chan, 0xFFFFFFFF, TEND);

	chcr |= CHCR_DE | shdev->chcr_ie_bit;
	chcr_write(sh_chan, chcr & ~CHCR_TE);
}

static void dmae_init(struct sh_dmae_chan *sh_chan)
{
	/*
	 * Default configuration for dual address memory-memory transfer.
	 */
	u32 chcr = DM_INC | SM_INC | RS_AUTO | log2size_to_chcr(sh_chan,
						   LOG2_DEFAULT_XFER_SIZE);
	sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr);
	chcr_write(sh_chan, chcr);
}

/*
 * Program CHCR and cache the resulting beat-size shift.
 * Returns -EBUSY while a transfer is in flight.
 */
static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
{
	/* If DMA is active, cannot set CHCR. TODO: remove this superfluous check */
	if (dmae_is_busy(sh_chan))
		return -EBUSY;

	sh_chan->xmit_shift = calc_xmit_shift(sh_chan, val);
	chcr_write(sh_chan, val);

	return 0;
}

/*
 * Route a slave request to this channel by programming its MID/RID into
 * the shared 16-bit DMARS register (two channels per register, selected
 * by dmars_bit). No-op when the platform has no DMARS.
 */
static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	const struct sh_dmae_pdata *pdata = shdev->pdata;
	const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->shdma_chan.id];
	void __iomem *addr = shdev->dmars;
	unsigned int shift = chan_pdata->dmars_bit;

	if (dmae_is_busy(sh_chan))
		return -EBUSY;

	if (pdata->no_dmars)
		return 0;

	/* in the case of a missing DMARS resource use first memory window */
	if (!addr)
		addr = shdev->chan_reg;
	addr += chan_pdata->dmars;

	/* Read-modify-write: preserve the other channel's half. */
	__raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift),
		     addr);

	return 0;
}
|
||||
|
||||
/* shdma-base callback: program the descriptor's registers and start it. */
static void sh_dmae_start_xfer(struct shdma_chan *schan,
			       struct shdma_desc *sdesc)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);
	struct sh_dmae_desc *sh_desc = container_of(sdesc,
					struct sh_dmae_desc, shdma_desc);
	dev_dbg(sh_chan->shdma_chan.dev, "Queue #%d to %d: %u@%x -> %x\n",
		sdesc->async_tx.cookie, sh_chan->shdma_chan.id,
		sh_desc->hw.tcr, sh_desc->hw.sar, sh_desc->hw.dar);
	/* Get the ld start address from ld_queue */
	dmae_set_reg(sh_chan, &sh_desc->hw);
	dmae_start(sh_chan);
}

/* shdma-base callback: report whether a transfer is in flight. */
static bool sh_dmae_channel_busy(struct shdma_chan *schan)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);
	return dmae_is_busy(sh_chan);
}

/*
 * shdma-base callback: configure the channel for a slave transfer
 * (DMARS routing + CHCR from the cached slave config) when slave_id is
 * valid, or for default memory-to-memory otherwise.
 */
static void sh_dmae_setup_xfer(struct shdma_chan *schan,
			       int slave_id)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);

	if (slave_id >= 0) {
		const struct sh_dmae_slave_config *cfg =
			sh_chan->config;

		dmae_set_dmars(sh_chan, cfg->mid_rid);
		dmae_set_chcr(sh_chan, cfg->chcr);
	} else {
		dmae_init(sh_chan);
	}
}
|
||||
|
||||
/*
|
||||
* Find a slave channel configuration from the contoller list by either a slave
|
||||
* ID in the non-DT case, or by a MID/RID value in the DT case
|
||||
*/
|
||||
/*
 * Look up a slave configuration: @match is a slave ID in the non-DT case,
 * or a MID/RID value in the DT case (where the table index then becomes
 * the channel's slave_id). Returns NULL if nothing matches.
 */
static const struct sh_dmae_slave_config *dmae_find_slave(
	struct sh_dmae_chan *sh_chan, int match)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	const struct sh_dmae_pdata *pdata = shdev->pdata;
	const struct sh_dmae_slave_config *cfg;
	int i;

	if (!sh_chan->shdma_chan.dev->of_node) {
		if (match >= SH_DMA_SLAVE_NUMBER)
			return NULL;

		for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++)
			if (cfg->slave_id == match)
				return cfg;
	} else {
		for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++)
			if (cfg->mid_rid == match) {
				/* DT case: index into the table is the ID. */
				sh_chan->shdma_chan.slave_id = i;
				return cfg;
			}
	}

	return NULL;
}

/*
 * shdma-base callback: bind a slave config to the channel. When @try is
 * set, only validate that a config exists without committing it.
 * Returns -ENXIO when no matching slave configuration is found.
 */
static int sh_dmae_set_slave(struct shdma_chan *schan,
			     int slave_id, dma_addr_t slave_addr, bool try)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);
	const struct sh_dmae_slave_config *cfg = dmae_find_slave(sh_chan, slave_id);
	if (!cfg)
		return -ENXIO;

	if (!try) {
		sh_chan->config = cfg;
		/* Caller-supplied address wins over the config default. */
		sh_chan->slave_addr = slave_addr ? : cfg->addr;
	}

	return 0;
}
|
||||
|
||||
/* Stop the channel: clear enable, done, and interrupt-enable in CHCR. */
static void dmae_halt(struct sh_dmae_chan *sh_chan)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	u32 chcr = chcr_read(sh_chan);

	chcr &= ~(CHCR_DE | CHCR_TE | shdev->chcr_ie_bit);
	chcr_write(sh_chan, chcr);
}

/*
 * shdma-base callback: fill the hardware descriptor for one chunk,
 * clamping *len to the channel's maximum transfer length (the core
 * splits the remainder into further descriptors).
 */
static int sh_dmae_desc_setup(struct shdma_chan *schan,
			      struct shdma_desc *sdesc,
			      dma_addr_t src, dma_addr_t dst, size_t *len)
{
	struct sh_dmae_desc *sh_desc = container_of(sdesc,
					struct sh_dmae_desc, shdma_desc);

	if (*len > schan->max_xfer_len)
		*len = schan->max_xfer_len;

	sh_desc->hw.sar = src;
	sh_desc->hw.dar = dst;
	sh_desc->hw.tcr = *len;

	return 0;
}

/* shdma-base callback: halt the channel. */
static void sh_dmae_halt(struct shdma_chan *schan)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);
	dmae_halt(sh_chan);
}

/*
 * shdma-base callback: IRQ check. Claims the interrupt only when the
 * transfer-end (TE) bit is set, and stops the channel before returning.
 */
static bool sh_dmae_chan_irq(struct shdma_chan *schan, int irq)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);

	if (!(chcr_read(sh_chan) & CHCR_TE))
		return false;

	/* DMA stop */
	dmae_halt(sh_chan);

	return true;
}

/*
 * shdma-base callback: bytes already transferred for a partially
 * completed descriptor = programmed length minus what TCR still counts
 * (TCR is in beats, hence the shift back to bytes).
 */
static size_t sh_dmae_get_partial(struct shdma_chan *schan,
				  struct shdma_desc *sdesc)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);
	struct sh_dmae_desc *sh_desc = container_of(sdesc,
					struct sh_dmae_desc, shdma_desc);
	return sh_desc->hw.tcr -
		(sh_dmae_readl(sh_chan, TCR) << sh_chan->xmit_shift);
}
|
||||
|
||||
/* Called from error IRQ or NMI */
|
||||
/*
 * Full recovery path used by the error IRQ and NMI notifier: stop the
 * controller, reset every channel through the core (the faulting channel
 * cannot be identified), then re-initialize DMAOR.
 */
static bool sh_dmae_reset(struct sh_dmae_device *shdev)
{
	bool ret;

	/* halt the dma controller */
	sh_dmae_ctl_stop(shdev);

	/* We cannot detect, which channel caused the error, have to reset all */
	ret = shdma_reset(&shdev->shdma_dev);

	sh_dmae_rst(shdev);

	return ret;
}

#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARM)
/* Shared error IRQ: claimed only when DMAOR reports an address error. */
static irqreturn_t sh_dmae_err(int irq, void *data)
{
	struct sh_dmae_device *shdev = data;

	if (!(dmaor_read(shdev) & DMAOR_AE))
		return IRQ_NONE;

	sh_dmae_reset(shdev);
	return IRQ_HANDLED;
}
#endif
|
||||
|
||||
/*
 * shdma-base callback: a descriptor is complete once the hardware's
 * current address (DAR for dev-to-mem, SAR otherwise) has advanced past
 * the descriptor's end address.
 */
static bool sh_dmae_desc_completed(struct shdma_chan *schan,
				   struct shdma_desc *sdesc)
{
	struct sh_dmae_chan *sh_chan = container_of(schan,
					struct sh_dmae_chan, shdma_chan);
	struct sh_dmae_desc *sh_desc = container_of(sdesc,
					struct sh_dmae_desc, shdma_desc);
	u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
	u32 dar_buf = sh_dmae_readl(sh_chan, DAR);

	return	(sdesc->direction == DMA_DEV_TO_MEM &&
		 (sh_desc->hw.dar + sh_desc->hw.tcr) == dar_buf) ||
		(sdesc->direction != DMA_DEV_TO_MEM &&
		 (sh_desc->hw.sar + sh_desc->hw.tcr) == sar_buf);
}

/* Reset this controller if (and only if) its NMIF flag is raised. */
static bool sh_dmae_nmi_notify(struct sh_dmae_device *shdev)
{
	/* Fast path out if NMIF is not asserted for this controller */
	if ((dmaor_read(shdev) & DMAOR_NMIF) == 0)
		return false;

	return sh_dmae_reset(shdev);
}
|
||||
|
||||
/*
 * Die-chain notifier: on an NMI, walk all registered controllers (RCU
 * read side) and reset any whose NMIF flag is set. Returns NOTIFY_OK if
 * at least one controller was handled, NOTIFY_DONE otherwise.
 */
static int sh_dmae_nmi_handler(struct notifier_block *self,
			       unsigned long cmd, void *data)
{
	struct sh_dmae_device *shdev;
	int ret = NOTIFY_DONE;
	bool triggered;

	/*
	 * Only concern ourselves with NMI events.
	 *
	 * Normally we would check the die chain value, but as this needs
	 * to be architecture independent, check for NMI context instead.
	 */
	if (!in_nmi())
		return NOTIFY_DONE;

	rcu_read_lock();
	list_for_each_entry_rcu(shdev, &sh_dmae_devices, node) {
		/*
		 * Only stop if one of the controllers has NMIF asserted,
		 * we do not want to interfere with regular address error
		 * handling or NMI events that don't concern the DMACs.
		 */
		triggered = sh_dmae_nmi_notify(shdev);
		if (triggered == true)
			ret = NOTIFY_OK;
	}
	rcu_read_unlock();

	return ret;
}

static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
	.notifier_call	= sh_dmae_nmi_handler,

	/* Run before NMI debug handler and KGDB */
	.priority	= 1,
};
|
||||
|
||||
/*
 * Allocate channel @id, register it with shdma-base, map its register
 * window, and request its IRQ. On IRQ failure the channel is removed
 * again; the devm allocation needs no explicit free.
 */
static int sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
			      int irq, unsigned long flags)
{
	const struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id];
	struct shdma_dev *sdev = &shdev->shdma_dev;
	struct platform_device *pdev = to_platform_device(sdev->dma_dev.dev);
	struct sh_dmae_chan *sh_chan;
	struct shdma_chan *schan;
	int err;

	sh_chan = devm_kzalloc(sdev->dma_dev.dev, sizeof(struct sh_dmae_chan),
			       GFP_KERNEL);
	if (!sh_chan) {
		dev_err(sdev->dma_dev.dev,
			"No free memory for allocating dma channels!\n");
		return -ENOMEM;
	}

	schan = &sh_chan->shdma_chan;
	/* TCR counts up to SH_DMA_TCR_MAX, i.e. TCR_MAX + 1 bytes maximum. */
	schan->max_xfer_len = SH_DMA_TCR_MAX + 1;

	shdma_chan_probe(sdev, schan, id);

	sh_chan->base = shdev->chan_reg + chan_pdata->offset;

	/* set up channel irq */
	if (pdev->id >= 0)
		snprintf(sh_chan->dev_id, sizeof(sh_chan->dev_id),
			 "sh-dmae%d.%d", pdev->id, id);
	else
		snprintf(sh_chan->dev_id, sizeof(sh_chan->dev_id),
			 "sh-dma%d", id);

	err = shdma_request_irq(schan, irq, flags, sh_chan->dev_id);
	if (err) {
		dev_err(sdev->dma_dev.dev,
			"DMA channel %d request_irq error %d\n",
			id, err);
		goto err_no_irq;
	}

	shdev->chan[id] = sh_chan;
	return 0;

err_no_irq:
	/* remove from dmaengine device node */
	shdma_chan_remove(schan);
	return err;
}

/* Unregister all channels from shdma-base and zero the channel count. */
static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
{
	struct dma_device *dma_dev = &shdev->shdma_dev.dma_dev;
	struct shdma_chan *schan;
	int i;

	shdma_for_each_chan(schan, &shdev->shdma_dev, i) {
		BUG_ON(!schan);

		shdma_chan_remove(schan);
	}
	dma_dev->chancnt = 0;
}
|
||||
|
||||
/* Platform-device shutdown hook: stop the DMA controller on halt/reboot. */
static void sh_dmae_shutdown(struct platform_device *pdev)
{
	struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
	sh_dmae_ctl_stop(shdev);
}
|
||||
|
||||
/* Runtime PM suspend: nothing to save, clock gating is enough. */
static int sh_dmae_runtime_suspend(struct device *dev)
{
	return 0;
}
|
||||
|
||||
/* Runtime PM resume: re-initialise the controller with a reset. */
static int sh_dmae_runtime_resume(struct device *dev)
{
	struct sh_dmae_device *shdev = dev_get_drvdata(dev);

	return sh_dmae_rst(shdev);
}
|
||||
|
||||
#ifdef CONFIG_PM
/* System suspend: no controller state needs saving. */
static int sh_dmae_suspend(struct device *dev)
{
	return 0;
}

/*
 * System resume: reset the controller and re-program every channel that
 * was in use before suspend (DMARS mid/rid and CHCR for slave channels,
 * default init otherwise).
 */
static int sh_dmae_resume(struct device *dev)
{
	struct sh_dmae_device *shdev = dev_get_drvdata(dev);
	int i, ret;

	ret = sh_dmae_rst(shdev);
	if (ret < 0)
		dev_err(dev, "Failed to reset!\n");

	for (i = 0; i < shdev->pdata->channel_num; i++) {
		struct sh_dmae_chan *sh_chan = shdev->chan[i];

		/*
		 * Probe may have registered fewer channels than
		 * pdata->channel_num (irq_cap case), leaving trailing
		 * shdev->chan[] slots NULL - skip those as well as
		 * channels that were never opened.
		 */
		if (!sh_chan || !sh_chan->shdma_chan.desc_num)
			continue;

		if (sh_chan->shdma_chan.slave_id >= 0) {
			const struct sh_dmae_slave_config *cfg = sh_chan->config;
			dmae_set_dmars(sh_chan, cfg->mid_rid);
			dmae_set_chcr(sh_chan, cfg->chcr);
		} else {
			dmae_init(sh_chan);
		}
	}

	return 0;
}
#else
#define sh_dmae_suspend NULL
#define sh_dmae_resume NULL
#endif
|
||||
|
||||
/*
 * Power-management operations: system suspend/resume (NULL when !CONFIG_PM)
 * plus runtime PM callbacks.
 */
static const struct dev_pm_ops sh_dmae_pm = {
	.suspend		= sh_dmae_suspend,
	.resume			= sh_dmae_resume,
	.runtime_suspend	= sh_dmae_runtime_suspend,
	.runtime_resume		= sh_dmae_runtime_resume,
};
|
||||
|
||||
/*
 * shdma_ops .slave_addr callback: return the device-side address cached by
 * a prior slave configuration.
 */
static dma_addr_t sh_dmae_slave_addr(struct shdma_chan *schan)
{
	struct sh_dmae_chan *sh_chan = container_of(schan,
					struct sh_dmae_chan, shdma_chan);

	/*
	 * Implicit BUG_ON(!sh_chan->config)
	 * This is an exclusive slave DMA operation, may only be called after a
	 * successful slave configuration.
	 */
	return sh_chan->slave_addr;
}
|
||||
|
||||
static struct shdma_desc *sh_dmae_embedded_desc(void *buf, int i)
|
||||
{
|
||||
return &((struct sh_dmae_desc *)buf)[i].shdma_desc;
|
||||
}
|
||||
|
||||
/* Hardware-specific callbacks plugged into the shdma-base framework. */
static const struct shdma_ops sh_dmae_shdma_ops = {
	.desc_completed = sh_dmae_desc_completed,
	.halt_channel = sh_dmae_halt,
	.channel_busy = sh_dmae_channel_busy,
	.slave_addr = sh_dmae_slave_addr,
	.desc_setup = sh_dmae_desc_setup,
	.set_slave = sh_dmae_set_slave,
	.setup_xfer = sh_dmae_setup_xfer,
	.start_xfer = sh_dmae_start_xfer,
	.embedded_desc = sh_dmae_embedded_desc,
	.chan_irq = sh_dmae_chan_irq,
	.get_partial = sh_dmae_get_partial,
};
|
||||
|
||||
/* Device-tree match table; .data carries the SoC-specific platform data. */
static const struct of_device_id sh_dmae_of_match[] = {
	{.compatible = "renesas,shdma-r8a73a4", .data = r8a73a4_shdma_devid,},
	{}
};
MODULE_DEVICE_TABLE(of, sh_dmae_of_match);
|
||||
|
||||
/*
 * sh_dmae_probe - probe one SH DMA controller
 *
 * Gathers platform data (from DT or board files), maps the register windows,
 * registers with the shdma-base core, resets the controller, distributes the
 * platform IRQ resources over the channels, probes each channel and finally
 * registers the dmaengine device.  Returns 0 on success, negative errno on
 * failure; error paths unwind in reverse order via the labels at the bottom.
 */
static int sh_dmae_probe(struct platform_device *pdev)
{
	const struct sh_dmae_pdata *pdata;
	unsigned long chan_flag[SH_DMAE_MAX_CHANNELS] = {};
	int chan_irq[SH_DMAE_MAX_CHANNELS];
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARM)
	unsigned long irqflags = 0;
	int errirq;
#endif
	int err, i, irq_cnt = 0, irqres = 0, irq_cap = 0;
	struct sh_dmae_device *shdev;
	struct dma_device *dma_dev;
	struct resource *chan, *dmars, *errirq_res, *chanirq_res;

	if (pdev->dev.of_node)
		pdata = of_match_device(sh_dmae_of_match, &pdev->dev)->data;
	else
		pdata = dev_get_platdata(&pdev->dev);

	/* get platform data */
	if (!pdata || !pdata->channel_num)
		return -ENODEV;

	chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	/* DMARS area is optional */
	dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	/*
	 * IRQ resources:
	 * 1. there always must be at least one IRQ IO-resource. On SH4 it is
	 *    the error IRQ, in which case it is the only IRQ in this resource:
	 *    start == end. If it is the only IRQ resource, all channels also
	 *    use the same IRQ.
	 * 2. DMA channel IRQ resources can be specified one per resource or in
	 *    ranges (start != end)
	 * 3. iff all events (channels and, optionally, error) on this
	 *    controller use the same IRQ, only one IRQ resource can be
	 *    specified, otherwise there must be one IRQ per channel, even if
	 *    some of them are equal
	 * 4. if all IRQs on this controller are equal or if some specific IRQs
	 *    specify IORESOURCE_IRQ_SHAREABLE in their resources, they will be
	 *    requested with the IRQF_SHARED flag
	 */
	errirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!chan || !errirq_res)
		return -ENODEV;

	shdev = devm_kzalloc(&pdev->dev, sizeof(struct sh_dmae_device),
			     GFP_KERNEL);
	if (!shdev) {
		dev_err(&pdev->dev, "Not enough memory\n");
		return -ENOMEM;
	}

	dma_dev = &shdev->shdma_dev.dma_dev;

	shdev->chan_reg = devm_ioremap_resource(&pdev->dev, chan);
	if (IS_ERR(shdev->chan_reg))
		return PTR_ERR(shdev->chan_reg);
	if (dmars) {
		shdev->dmars = devm_ioremap_resource(&pdev->dev, dmars);
		if (IS_ERR(shdev->dmars))
			return PTR_ERR(shdev->dmars);
	}

	/* Advertise capabilities: MEMCPY unless slave-only, SLAVE if configured */
	if (!pdata->slave_only)
		dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
	if (pdata->slave && pdata->slave_num)
		dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);

	/* Default transfer size of 32 bytes requires 32-byte alignment */
	dma_dev->copy_align = LOG2_DEFAULT_XFER_SIZE;

	shdev->shdma_dev.ops = &sh_dmae_shdma_ops;
	shdev->shdma_dev.desc_size = sizeof(struct sh_dmae_desc);
	err = shdma_init(&pdev->dev, &shdev->shdma_dev,
			      pdata->channel_num);
	if (err < 0)
		goto eshdma;

	/* platform data */
	shdev->pdata = pdata;

	/* Fall back to default CHCR register layout if pdata doesn't override */
	if (pdata->chcr_offset)
		shdev->chcr_offset = pdata->chcr_offset;
	else
		shdev->chcr_offset = CHCR;

	if (pdata->chcr_ie_bit)
		shdev->chcr_ie_bit = pdata->chcr_ie_bit;
	else
		shdev->chcr_ie_bit = CHCR_IE;

	platform_set_drvdata(pdev, shdev);

	pm_runtime_enable(&pdev->dev);
	err = pm_runtime_get_sync(&pdev->dev);
	if (err < 0)
		dev_err(&pdev->dev, "%s(): GET = %d\n", __func__, err);

	/* Make the controller visible to the NMI handler list */
	spin_lock_irq(&sh_dmae_lock);
	list_add_tail_rcu(&shdev->node, &sh_dmae_devices);
	spin_unlock_irq(&sh_dmae_lock);

	/* reset dma controller - only needed as a test */
	err = sh_dmae_rst(shdev);
	if (err)
		goto rst_err;

#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
	chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);

	if (!chanirq_res)
		chanirq_res = errirq_res;
	else
		irqres++;

	if (chanirq_res == errirq_res ||
	    (errirq_res->flags & IORESOURCE_BITS) == IORESOURCE_IRQ_SHAREABLE)
		irqflags = IRQF_SHARED;

	errirq = errirq_res->start;

	err = devm_request_irq(&pdev->dev, errirq, sh_dmae_err, irqflags,
			       "DMAC Address Error", shdev);
	if (err) {
		dev_err(&pdev->dev,
			"DMA failed requesting irq #%d, error %d\n",
			errirq, err);
		goto eirq_err;
	}

#else
	chanirq_res = errirq_res;
#endif /* CONFIG_CPU_SH4 || CONFIG_ARCH_SHMOBILE */

	if (chanirq_res->start == chanirq_res->end &&
	    !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) {
		/* Special case - all multiplexed */
		for (; irq_cnt < pdata->channel_num; irq_cnt++) {
			if (irq_cnt < SH_DMAE_MAX_CHANNELS) {
				chan_irq[irq_cnt] = chanirq_res->start;
				chan_flag[irq_cnt] = IRQF_SHARED;
			} else {
				irq_cap = 1;
				break;
			}
		}
	} else {
		/* Walk all IRQ resources, expanding start..end ranges */
		do {
			for (i = chanirq_res->start; i <= chanirq_res->end; i++) {
				if (irq_cnt >= SH_DMAE_MAX_CHANNELS) {
					irq_cap = 1;
					break;
				}

				if ((errirq_res->flags & IORESOURCE_BITS) ==
				    IORESOURCE_IRQ_SHAREABLE)
					chan_flag[irq_cnt] = IRQF_SHARED;
				else
					chan_flag[irq_cnt] = 0;
				dev_dbg(&pdev->dev,
					"Found IRQ %d for channel %d\n",
					i, irq_cnt);
				chan_irq[irq_cnt++] = i;
			}

			if (irq_cnt >= SH_DMAE_MAX_CHANNELS)
				break;

			chanirq_res = platform_get_resource(pdev,
						IORESOURCE_IRQ, ++irqres);
		} while (irq_cnt < pdata->channel_num && chanirq_res);
	}

	/* Create DMA Channel */
	for (i = 0; i < irq_cnt; i++) {
		err = sh_dmae_chan_probe(shdev, i, chan_irq[i], chan_flag[i]);
		if (err)
			goto chan_probe_err;
	}

	if (irq_cap)
		dev_notice(&pdev->dev, "Attempting to register %d DMA "
			   "channels when a maximum of %d are supported.\n",
			   pdata->channel_num, SH_DMAE_MAX_CHANNELS);

	pm_runtime_put(&pdev->dev);

	err = dma_async_device_register(&shdev->shdma_dev.dma_dev);
	if (err < 0)
		goto edmadevreg;

	return err;

edmadevreg:
	pm_runtime_get(&pdev->dev);

chan_probe_err:
	sh_dmae_chan_remove(shdev);

#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
eirq_err:
#endif
rst_err:
	spin_lock_irq(&sh_dmae_lock);
	list_del_rcu(&shdev->node);
	spin_unlock_irq(&sh_dmae_lock);

	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	shdma_cleanup(&shdev->shdma_dev);
eshdma:
	/* Wait for RCU readers (NMI handler) before freeing shdev */
	synchronize_rcu();

	return err;
}
|
||||
|
||||
/*
 * sh_dmae_remove - tear down a controller: unregister from dmaengine,
 * remove it from the NMI-visible device list, disable runtime PM, remove
 * the channels and clean up the shdma-base state.
 */
static int sh_dmae_remove(struct platform_device *pdev)
{
	struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
	struct dma_device *dma_dev = &shdev->shdma_dev.dma_dev;

	dma_async_device_unregister(dma_dev);

	spin_lock_irq(&sh_dmae_lock);
	list_del_rcu(&shdev->node);
	spin_unlock_irq(&sh_dmae_lock);

	pm_runtime_disable(&pdev->dev);

	sh_dmae_chan_remove(shdev);
	shdma_cleanup(&shdev->shdma_dev);

	/* Wait for RCU readers (NMI handler) before shdev memory is released */
	synchronize_rcu();

	return 0;
}
|
||||
|
||||
/*
 * No .probe here: the driver is registered with platform_driver_probe()
 * in sh_dmae_init(), which passes sh_dmae_probe() directly.
 */
static struct platform_driver sh_dmae_driver = {
	.driver 	= {
		.owner	= THIS_MODULE,
		.pm	= &sh_dmae_pm,
		.name	= SH_DMAE_DRV_NAME,
		.of_match_table = sh_dmae_of_match,
	},
	.remove		= sh_dmae_remove,
	.shutdown	= sh_dmae_shutdown,
};
|
||||
|
||||
/*
 * Module init: hook the NMI die notifier first, then register the
 * platform driver (non-hotpluggable, hence platform_driver_probe()).
 */
static int __init sh_dmae_init(void)
{
	/* Wire up NMI handling */
	int err = register_die_notifier(&sh_dmae_nmi_notifier);
	if (err)
		return err;

	return platform_driver_probe(&sh_dmae_driver, sh_dmae_probe);
}
module_init(sh_dmae_init);
|
||||
|
||||
/* Module exit: unregister the driver, then drop the NMI die notifier. */
static void __exit sh_dmae_exit(void)
{
	platform_driver_unregister(&sh_dmae_driver);

	unregister_die_notifier(&sh_dmae_nmi_notifier);
}
module_exit(sh_dmae_exit);
|
||||
|
||||
MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>");
|
||||
MODULE_DESCRIPTION("Renesas SH DMA Engine driver");
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_ALIAS("platform:" SH_DMAE_DRV_NAME);
|
425
drivers/dma/sh/sudmac.c
Normal file
425
drivers/dma/sh/sudmac.c
Normal file
|
@ -0,0 +1,425 @@
|
|||
/*
|
||||
* Renesas SUDMAC support
|
||||
*
|
||||
* Copyright (C) 2013 Renesas Solutions Corp.
|
||||
*
|
||||
* based on drivers/dma/sh/shdma.c:
|
||||
* Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
|
||||
* Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
|
||||
* Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
|
||||
* Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
|
||||
*
|
||||
* This is free software; you can redistribute it and/or modify
|
||||
* it under the terms of version 2 of the GNU General Public License as
|
||||
* published by the Free Software Foundation.
|
||||
*/
|
||||
|
||||
#include <linux/dmaengine.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/sudmac.h>
|
||||
|
||||
/* Per-channel state; embeds the generic shdma channel. */
struct sudmac_chan {
	struct shdma_chan shdma_chan;	/* generic shdma-base channel (must be first for container_of) */
	void __iomem *base;		/* controller register base (shared by all channels) */
	char dev_id[16];		/* unique name per DMAC of channel */

	u32 offset;			/* for CFG, BA, BBC, CA, CBC, DEN */
	u32 cfg;			/* cached CH0CFG value, built in sudmac_chan_probe() */
	u32 dint_end_bit;		/* DINTCTRL/DINTSTS end-interrupt bit(s) for this channel */
};
|
||||
|
||||
/* Per-controller state; embeds the generic shdma device. */
struct sudmac_device {
	struct shdma_dev shdma_dev;	/* generic shdma-base device */
	struct sudmac_pdata *pdata;	/* board-supplied configuration */
	void __iomem *chan_reg;		/* ioremapped register window */
};
|
||||
|
||||
/* Hardware register values describing one transfer (BA / BBC). */
struct sudmac_regs {
	u32 base_addr;		/* buffer base address (CH0BA) */
	u32 base_byte_count;	/* transfer length in bytes (CH0BBC) */
};
|
||||
|
||||
/* Hardware descriptor wrapping the generic shdma descriptor. */
struct sudmac_desc {
	struct sudmac_regs hw;		/* register values for this transfer */
	struct shdma_desc shdma_desc;	/* generic part, see sudmac_embedded_desc() */
};
|
||||
|
||||
/* Downcast helpers from generic shdma objects to their SUDMAC containers */
#define to_chan(schan) container_of(schan, struct sudmac_chan, shdma_chan)
#define to_desc(sdesc) container_of(sdesc, struct sudmac_desc, shdma_desc)
#define to_sdev(sc) container_of(sc->shdma_chan.dma_chan.device, \
				 struct sudmac_device, shdma_dev.dma_dev)
|
||||
|
||||
/* SUDMAC register offsets (channel registers are at offset + sc->offset) */
#define SUDMAC_CH0CFG		0x00
#define SUDMAC_CH0BA		0x10
#define SUDMAC_CH0BBC		0x18
#define SUDMAC_CH0CA		0x20
#define SUDMAC_CH0CBC		0x28
#define SUDMAC_CH0DEN		0x30
#define SUDMAC_DSTSCLR		0x38
#define SUDMAC_DBUFCTRL		0x3C
#define SUDMAC_DINTCTRL		0x40
#define SUDMAC_DINTSTS		0x44
#define SUDMAC_DINTSTSCLR	0x48
#define SUDMAC_CH0SHCTRL	0x50

/* Definitions for the sudmac_channel.config */
#define SUDMAC_SENDBUFM	0x1000 /* b12: Transmit Buffer Mode */
#define SUDMAC_RCVENDM	0x0100 /* b8: Receive Data Transfer End Mode */
#define SUDMAC_LBA_WAIT	0x0030 /* b5-4: Local Bus Access Wait */

/* Definitions for the sudmac_channel.dint_end_bit */
#define SUDMAC_CH1ENDE	0x0002 /* b1: Ch1 DMA Transfer End Int Enable */
#define SUDMAC_CH0ENDE	0x0001 /* b0: Ch0 DMA Transfer End Int Enable */

#define SUDMAC_DRV_NAME "sudmac"
|
||||
|
||||
/* 32-bit MMIO write relative to the controller register base. */
static void sudmac_writel(struct sudmac_chan *sc, u32 data, u32 reg)
{
	iowrite32(data, sc->base + reg);
}
|
||||
|
||||
/* 32-bit MMIO read relative to the controller register base. */
static u32 sudmac_readl(struct sudmac_chan *sc, u32 reg)
{
	return ioread32(sc->base + reg);
}
|
||||
|
||||
static bool sudmac_is_busy(struct sudmac_chan *sc)
|
||||
{
|
||||
u32 den = sudmac_readl(sc, SUDMAC_CH0DEN + sc->offset);
|
||||
|
||||
if (den)
|
||||
return true; /* working */
|
||||
|
||||
return false; /* waiting */
|
||||
}
|
||||
|
||||
/*
 * Program a transfer into the channel registers: configuration, buffer
 * base address and byte count.  Does not start the transfer.
 */
static void sudmac_set_reg(struct sudmac_chan *sc, struct sudmac_regs *hw,
			   struct shdma_desc *sdesc)
{
	sudmac_writel(sc, sc->cfg, SUDMAC_CH0CFG + sc->offset);
	sudmac_writel(sc, hw->base_addr, SUDMAC_CH0BA + sc->offset);
	sudmac_writel(sc, hw->base_byte_count, SUDMAC_CH0BBC + sc->offset);
}
|
||||
|
||||
/*
 * Kick off a programmed transfer: enable the channel's end interrupt in
 * DINTCTRL (read-modify-write, preserving other channels' bits), then
 * set DEN to start the DMA.
 */
static void sudmac_start(struct sudmac_chan *sc)
{
	u32 dintctrl = sudmac_readl(sc, SUDMAC_DINTCTRL);

	sudmac_writel(sc, dintctrl | sc->dint_end_bit, SUDMAC_DINTCTRL);
	sudmac_writel(sc, 1, SUDMAC_CH0DEN + sc->offset);
}
|
||||
|
||||
/* shdma_ops .start_xfer callback: program the descriptor and start DMA. */
static void sudmac_start_xfer(struct shdma_chan *schan,
			      struct shdma_desc *sdesc)
{
	struct sudmac_chan *sc = to_chan(schan);
	struct sudmac_desc *sd = to_desc(sdesc);

	sudmac_set_reg(sc, &sd->hw, sdesc);
	sudmac_start(sc);
}
|
||||
|
||||
/* shdma_ops .channel_busy callback: query the hardware busy state. */
static bool sudmac_channel_busy(struct shdma_chan *schan)
{
	return sudmac_is_busy(to_chan(schan));
}
|
||||
|
||||
/* shdma_ops .setup_xfer callback: SUDMAC needs no per-transfer setup. */
static void sudmac_setup_xfer(struct shdma_chan *schan, int slave_id)
{
}
|
||||
|
||||
/*
 * Look up the platform-data slave configuration matching @slave_id.
 * Returns NULL if no entry matches.
 */
static const struct sudmac_slave_config *sudmac_find_slave(
	struct sudmac_chan *sc, int slave_id)
{
	struct sudmac_device *sdev = to_sdev(sc);
	struct sudmac_pdata *pdata = sdev->pdata;
	const struct sudmac_slave_config *cfg;
	int i;

	for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++)
		if (cfg->slave_id == slave_id)
			return cfg;

	return NULL;
}
|
||||
|
||||
static int sudmac_set_slave(struct shdma_chan *schan, int slave_id,
|
||||
dma_addr_t slave_addr, bool try)
|
||||
{
|
||||
struct sudmac_chan *sc = to_chan(schan);
|
||||
const struct sudmac_slave_config *cfg = sudmac_find_slave(sc, slave_id);
|
||||
|
||||
if (!cfg)
|
||||
return -ENODEV;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
 * Stop the channel: clear DEN, mask this channel's end interrupt in
 * DINTCTRL (preserving other channels' bits), then clear any pending
 * end-interrupt status.
 */
static inline void sudmac_dma_halt(struct sudmac_chan *sc)
{
	u32 dintctrl = sudmac_readl(sc, SUDMAC_DINTCTRL);

	sudmac_writel(sc, 0, SUDMAC_CH0DEN + sc->offset);
	sudmac_writel(sc, dintctrl & ~sc->dint_end_bit, SUDMAC_DINTCTRL);
	sudmac_writel(sc, sc->dint_end_bit, SUDMAC_DINTSTSCLR);
}
|
||||
|
||||
/*
 * shdma_ops .desc_setup callback: fill in the hardware descriptor for one
 * chunk.  *len is clamped to the channel maximum and reported back to the
 * core.  Only one side of the transfer carries an address (the other is
 * the SUDMAC FIFO), so whichever of dst/src is non-zero becomes base_addr.
 */
static int sudmac_desc_setup(struct shdma_chan *schan,
			     struct shdma_desc *sdesc,
			     dma_addr_t src, dma_addr_t dst, size_t *len)
{
	struct sudmac_chan *sc = to_chan(schan);
	struct sudmac_desc *sd = to_desc(sdesc);

	dev_dbg(sc->shdma_chan.dev, "%s: src=%pad, dst=%pad, len=%zu\n",
		__func__, &src, &dst, *len);

	if (*len > schan->max_xfer_len)
		*len = schan->max_xfer_len;

	if (dst)
		sd->hw.base_addr = dst;
	else if (src)
		sd->hw.base_addr = src;
	sd->hw.base_byte_count = *len;

	return 0;
}
|
||||
|
||||
/* shdma_ops .halt_channel callback: stop the channel immediately. */
static void sudmac_halt(struct shdma_chan *schan)
{
	sudmac_dma_halt(to_chan(schan));
}
|
||||
|
||||
/*
 * shdma_ops .chan_irq callback: returns true if this channel raised the
 * interrupt (its end bit is set in DINTSTS), in which case the DMA is
 * stopped; false tells the core the IRQ belongs to another channel.
 */
static bool sudmac_chan_irq(struct shdma_chan *schan, int irq)
{
	struct sudmac_chan *sc = to_chan(schan);
	u32 dintsts = sudmac_readl(sc, SUDMAC_DINTSTS);

	if (!(dintsts & sc->dint_end_bit))
		return false;

	/* DMA stop */
	sudmac_dma_halt(sc);

	return true;
}
|
||||
|
||||
/*
 * shdma_ops .get_partial callback: bytes already transferred for @sdesc,
 * computed as the programmed byte count minus the current (remaining)
 * byte count register.
 */
static size_t sudmac_get_partial(struct shdma_chan *schan,
				 struct shdma_desc *sdesc)
{
	struct sudmac_chan *sc = to_chan(schan);
	struct sudmac_desc *sd = to_desc(sdesc);
	u32 current_byte_count = sudmac_readl(sc, SUDMAC_CH0CBC + sc->offset);

	return sd->hw.base_byte_count - current_byte_count;
}
|
||||
|
||||
/*
 * shdma_ops .desc_completed callback: a descriptor is done when the
 * hardware's current address has advanced to base + byte count.
 */
static bool sudmac_desc_completed(struct shdma_chan *schan,
				  struct shdma_desc *sdesc)
{
	struct sudmac_chan *sc = to_chan(schan);
	struct sudmac_desc *sd = to_desc(sdesc);
	u32 current_addr = sudmac_readl(sc, SUDMAC_CH0CA + sc->offset);

	return sd->hw.base_addr + sd->hw.base_byte_count == current_addr;
}
|
||||
|
||||
/*
 * sudmac_chan_probe - set up one SUDMAC channel
 * @su_dev:	owning controller
 * @id:		channel index
 * @irq:	IRQ to request
 * @flags:	request_irq() flags
 *
 * Allocates the channel (devm-managed), registers it with shdma-base,
 * derives the cached CFG value and interrupt bits from platform data,
 * and requests the channel IRQ.  On IRQ failure the channel is removed
 * again and a negative errno is returned.
 */
static int sudmac_chan_probe(struct sudmac_device *su_dev, int id, int irq,
			     unsigned long flags)
{
	struct shdma_dev *sdev = &su_dev->shdma_dev;
	struct platform_device *pdev = to_platform_device(sdev->dma_dev.dev);
	struct sudmac_chan *sc;
	struct shdma_chan *schan;
	int err;

	sc = devm_kzalloc(&pdev->dev, sizeof(struct sudmac_chan), GFP_KERNEL);
	if (!sc) {
		dev_err(sdev->dma_dev.dev,
			"No free memory for allocating dma channels!\n");
		return -ENOMEM;
	}

	schan = &sc->shdma_chan;
	schan->max_xfer_len = 64 * 1024 * 1024 - 1;

	shdma_chan_probe(sdev, schan, id);

	sc->base = su_dev->chan_reg;

	/* get platform_data */
	sc->offset = su_dev->pdata->channel->offset;
	if (su_dev->pdata->channel->config & SUDMAC_TX_BUFFER_MODE)
		sc->cfg |= SUDMAC_SENDBUFM;
	if (su_dev->pdata->channel->config & SUDMAC_RX_END_MODE)
		sc->cfg |= SUDMAC_RCVENDM;
	/* wait field maps onto the LBA_WAIT bits (b5-4) of CFG */
	sc->cfg |= (su_dev->pdata->channel->wait << 4) & SUDMAC_LBA_WAIT;

	if (su_dev->pdata->channel->dint_end_bit & SUDMAC_DMA_BIT_CH0)
		sc->dint_end_bit |= SUDMAC_CH0ENDE;
	if (su_dev->pdata->channel->dint_end_bit & SUDMAC_DMA_BIT_CH1)
		sc->dint_end_bit |= SUDMAC_CH1ENDE;

	/* set up channel irq */
	if (pdev->id >= 0)
		snprintf(sc->dev_id, sizeof(sc->dev_id), "sudmac%d.%d",
			 pdev->id, id);
	else
		snprintf(sc->dev_id, sizeof(sc->dev_id), "sudmac%d", id);

	err = shdma_request_irq(schan, irq, flags, sc->dev_id);
	if (err) {
		dev_err(sdev->dma_dev.dev,
			"DMA channel %d request_irq failed %d\n", id, err);
		goto err_no_irq;
	}

	return 0;

err_no_irq:
	/* remove from dmaengine device node */
	shdma_chan_remove(schan);
	return err;
}
|
||||
|
||||
/*
 * sudmac_chan_remove - unregister all channels of a controller and reset
 * the dmaengine channel count.  Counterpart of sudmac_chan_probe() calls.
 */
static void sudmac_chan_remove(struct sudmac_device *su_dev)
{
	struct dma_device *dma_dev = &su_dev->shdma_dev.dma_dev;
	struct shdma_chan *schan;
	int i;

	shdma_for_each_chan(schan, &su_dev->shdma_dev, i) {
		BUG_ON(!schan);

		shdma_chan_remove(schan);
	}
	dma_dev->chancnt = 0;
}
|
||||
|
||||
/* shdma_ops .slave_addr callback. */
static dma_addr_t sudmac_slave_addr(struct shdma_chan *schan)
{
	/* SUDMAC doesn't need the address */
	return 0;
}
|
||||
|
||||
static struct shdma_desc *sudmac_embedded_desc(void *buf, int i)
|
||||
{
|
||||
return &((struct sudmac_desc *)buf)[i].shdma_desc;
|
||||
}
|
||||
|
||||
/* Hardware-specific callbacks plugged into the shdma-base framework. */
static const struct shdma_ops sudmac_shdma_ops = {
	.desc_completed = sudmac_desc_completed,
	.halt_channel = sudmac_halt,
	.channel_busy = sudmac_channel_busy,
	.slave_addr = sudmac_slave_addr,
	.desc_setup = sudmac_desc_setup,
	.set_slave = sudmac_set_slave,
	.setup_xfer = sudmac_setup_xfer,
	.start_xfer = sudmac_start_xfer,
	.embedded_desc = sudmac_embedded_desc,
	.chan_irq = sudmac_chan_irq,
	.get_partial = sudmac_get_partial,
};
|
||||
|
||||
/*
 * sudmac_probe - probe one SUDMAC controller
 *
 * Requires platform data and an IRQ resource.  Maps the register window,
 * registers with the shdma-base core (DMA_SLAVE only), probes every channel
 * with the shared IRQ and finally registers the dmaengine device.  Error
 * paths unwind channels and shdma state via chan_probe_err.
 */
static int sudmac_probe(struct platform_device *pdev)
{
	struct sudmac_pdata *pdata = dev_get_platdata(&pdev->dev);
	int err, i;
	struct sudmac_device *su_dev;
	struct dma_device *dma_dev;
	struct resource *chan, *irq_res;

	/* get platform data */
	if (!pdata)
		return -ENODEV;

	irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!irq_res)
		return -ENODEV;

	err = -ENOMEM;
	su_dev = devm_kzalloc(&pdev->dev, sizeof(struct sudmac_device),
			      GFP_KERNEL);
	if (!su_dev) {
		dev_err(&pdev->dev, "Not enough memory\n");
		return err;
	}

	dma_dev = &su_dev->shdma_dev.dma_dev;

	chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	su_dev->chan_reg = devm_ioremap_resource(&pdev->dev, chan);
	if (IS_ERR(su_dev->chan_reg))
		return PTR_ERR(su_dev->chan_reg);

	/* SUDMAC only does slave transfers (no MEMCPY) */
	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);

	su_dev->shdma_dev.ops = &sudmac_shdma_ops;
	su_dev->shdma_dev.desc_size = sizeof(struct sudmac_desc);
	err = shdma_init(&pdev->dev, &su_dev->shdma_dev, pdata->channel_num);
	if (err < 0)
		return err;

	/* platform data */
	su_dev->pdata = dev_get_platdata(&pdev->dev);

	platform_set_drvdata(pdev, su_dev);

	/* Create DMA Channel - all channels share the single IRQ */
	for (i = 0; i < pdata->channel_num; i++) {
		err = sudmac_chan_probe(su_dev, i, irq_res->start, IRQF_SHARED);
		if (err)
			goto chan_probe_err;
	}

	err = dma_async_device_register(&su_dev->shdma_dev.dma_dev);
	if (err < 0)
		goto chan_probe_err;

	return err;

chan_probe_err:
	sudmac_chan_remove(su_dev);

	shdma_cleanup(&su_dev->shdma_dev);

	return err;
}
|
||||
|
||||
/*
 * sudmac_remove - tear down a controller: unregister from dmaengine,
 * remove the channels and clean up shdma-base state.
 */
static int sudmac_remove(struct platform_device *pdev)
{
	struct sudmac_device *su_dev = platform_get_drvdata(pdev);
	struct dma_device *dma_dev = &su_dev->shdma_dev.dma_dev;

	dma_async_device_unregister(dma_dev);
	sudmac_chan_remove(su_dev);
	shdma_cleanup(&su_dev->shdma_dev);

	return 0;
}
|
||||
|
||||
/* Platform driver registration (no DT support, platform data only). */
static struct platform_driver sudmac_driver = {
	.driver 	= {
		.owner	= THIS_MODULE,
		.name	= SUDMAC_DRV_NAME,
	},
	.probe		= sudmac_probe,
	.remove		= sudmac_remove,
};
module_platform_driver(sudmac_driver);
|
||||
|
||||
MODULE_AUTHOR("Yoshihiro Shimoda");
|
||||
MODULE_DESCRIPTION("Renesas SUDMAC driver");
|
||||
MODULE_LICENSE("GPL v2");
|
||||
MODULE_ALIAS("platform:" SUDMAC_DRV_NAME);
|
Loading…
Add table
Add a link
Reference in a new issue