mirror of https://github.com/AetherDroid/android_kernel_samsung_on5xelte.git
synced 2025-09-08 01:08:03 -04:00
Fixed MTP to work with TWRP
This commit is contained in:
commit f6dfaef42e
50820 changed files with 20846062 additions and 0 deletions
31	drivers/soc/ti/Kconfig	Normal file
@@ -0,0 +1,31 @@
#
# TI SOC drivers
#
menuconfig SOC_TI
	bool "TI SOC drivers support"

if SOC_TI

config KEYSTONE_NAVIGATOR_QMSS
	tristate "Keystone Queue Manager Sub System"
	depends on ARCH_KEYSTONE
	help
	  Say y here to support the Keystone multicore Navigator Queue
	  Manager. The Queue Manager is a hardware module that is
	  responsible for accelerating management of the packet queues.
	  Packets are queued/de-queued by writing/reading descriptor
	  addresses to a particular memory mapped location in the Queue
	  Manager module.

	  If unsure, say N.

config KEYSTONE_NAVIGATOR_DMA
	tristate "TI Keystone Navigator Packet DMA support"
	depends on ARCH_KEYSTONE
	help
	  Say y to enable support for the Keystone Navigator Packet DMA on
	  the Keystone family of devices. It sets up the DMA channels for
	  the Queue Manager Sub System.

	  If unsure, say N.

endif # SOC_TI
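For reference, a minimal config fragment that selects both drivers (illustrative; the option names are the ones defined above — SOC_TI is a bool, while the two navigator options are tristate, so =m also works for them):

CONFIG_SOC_TI=y
CONFIG_KEYSTONE_NAVIGATOR_QMSS=y
CONFIG_KEYSTONE_NAVIGATOR_DMA=y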
5	drivers/soc/ti/Makefile	Normal file
@@ -0,0 +1,5 @@
#
# TI Keystone SOC drivers
#
obj-$(CONFIG_KEYSTONE_NAVIGATOR_QMSS)	+= knav_qmss_queue.o knav_qmss_acc.o
obj-$(CONFIG_KEYSTONE_NAVIGATOR_DMA)	+= knav_dma.o
815	drivers/soc/ti/knav_dma.c	Normal file
@@ -0,0 +1,815 @@
/*
 * Copyright (C) 2014 Texas Instruments Incorporated
 * Authors:	Santosh Shilimkar <santosh.shilimkar@ti.com>
 *		Sandeep Nair <sandeep_n@ti.com>
 *		Cyril Chemparathy <cyril@ti.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/io.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/dma-direction.h>
#include <linux/interrupt.h>
#include <linux/pm_runtime.h>
#include <linux/of_dma.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/soc/ti/knav_dma.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>

#define REG_MASK		0xffffffff

#define DMA_LOOPBACK		BIT(31)
#define DMA_ENABLE		BIT(31)
#define DMA_TEARDOWN		BIT(30)

#define DMA_TX_FILT_PSWORDS	BIT(29)
#define DMA_TX_FILT_EINFO	BIT(30)
#define DMA_TX_PRIO_SHIFT	0
#define DMA_RX_PRIO_SHIFT	16
#define DMA_PRIO_MASK		GENMASK(3, 0)
#define DMA_PRIO_DEFAULT	0
#define DMA_RX_TIMEOUT_DEFAULT	17500 /* cycles */
#define DMA_RX_TIMEOUT_MASK	GENMASK(16, 0)
#define DMA_RX_TIMEOUT_SHIFT	0

#define CHAN_HAS_EPIB		BIT(30)
#define CHAN_HAS_PSINFO		BIT(29)
#define CHAN_ERR_RETRY		BIT(28)
#define CHAN_PSINFO_AT_SOP	BIT(25)
#define CHAN_SOP_OFF_SHIFT	16
#define CHAN_SOP_OFF_MASK	GENMASK(9, 0)
#define DESC_TYPE_SHIFT		26
#define DESC_TYPE_MASK		GENMASK(2, 0)

/*
 * QMGR & QNUM together make up 14 bits with QMGR as the 2 MSb's in the logical
 * navigator cloud mapping scheme.
 * Using the 14-bit physical queue numbers directly maps into this scheme.
 */
#define CHAN_QNUM_MASK		GENMASK(14, 0)
#define DMA_MAX_QMS		4
#define DMA_TIMEOUT		1	/* msecs */
#define DMA_INVALID_ID		0xffff

struct reg_global {
	u32	revision;
	u32	perf_control;
	u32	emulation_control;
	u32	priority_control;
	u32	qm_base_address[DMA_MAX_QMS];
};

struct reg_chan {
	u32	control;
	u32	mode;
	u32	__rsvd[6];
};

struct reg_tx_sched {
	u32	prio;
};

struct reg_rx_flow {
	u32	control;
	u32	tags;
	u32	tag_sel;
	u32	fdq_sel[2];
	u32	thresh[3];
};

struct knav_dma_pool_device {
	struct device			*dev;
	struct list_head		list;
};

struct knav_dma_device {
	bool				loopback, enable_all;
	unsigned			tx_priority, rx_priority, rx_timeout;
	unsigned			logical_queue_managers;
	unsigned			qm_base_address[DMA_MAX_QMS];
	struct reg_global __iomem	*reg_global;
	struct reg_chan __iomem		*reg_tx_chan;
	struct reg_rx_flow __iomem	*reg_rx_flow;
	struct reg_chan __iomem		*reg_rx_chan;
	struct reg_tx_sched __iomem	*reg_tx_sched;
	unsigned			max_rx_chan, max_tx_chan;
	unsigned			max_rx_flow;
	char				name[32];
	atomic_t			ref_count;
	struct list_head		list;
	struct list_head		chan_list;
	spinlock_t			lock;
};

struct knav_dma_chan {
	enum dma_transfer_direction	direction;
	struct knav_dma_device		*dma;
	atomic_t			ref_count;

	/* registers */
	struct reg_chan __iomem		*reg_chan;
	struct reg_tx_sched __iomem	*reg_tx_sched;
	struct reg_rx_flow __iomem	*reg_rx_flow;

	/* configuration stuff */
	unsigned			channel, flow;
	struct knav_dma_cfg		cfg;
	struct list_head		list;
	spinlock_t			lock;
};

#define chan_number(ch)	((ch->direction == DMA_MEM_TO_DEV) ? \
			ch->channel : ch->flow)

static struct knav_dma_pool_device *kdev;

static bool check_config(struct knav_dma_chan *chan, struct knav_dma_cfg *cfg)
{
	if (!memcmp(&chan->cfg, cfg, sizeof(*cfg)))
		return true;
	else
		return false;
}

static int chan_start(struct knav_dma_chan *chan,
			struct knav_dma_cfg *cfg)
{
	u32 v = 0;

	spin_lock(&chan->lock);
	if ((chan->direction == DMA_MEM_TO_DEV) && chan->reg_chan) {
		if (cfg->u.tx.filt_pswords)
			v |= DMA_TX_FILT_PSWORDS;
		if (cfg->u.tx.filt_einfo)
			v |= DMA_TX_FILT_EINFO;
		writel_relaxed(v, &chan->reg_chan->mode);
		writel_relaxed(DMA_ENABLE, &chan->reg_chan->control);
	}

	if (chan->reg_tx_sched)
		writel_relaxed(cfg->u.tx.priority, &chan->reg_tx_sched->prio);

	if (chan->reg_rx_flow) {
		v = 0;

		if (cfg->u.rx.einfo_present)
			v |= CHAN_HAS_EPIB;
		if (cfg->u.rx.psinfo_present)
			v |= CHAN_HAS_PSINFO;
		if (cfg->u.rx.err_mode == DMA_RETRY)
			v |= CHAN_ERR_RETRY;
		v |= (cfg->u.rx.desc_type & DESC_TYPE_MASK) << DESC_TYPE_SHIFT;
		if (cfg->u.rx.psinfo_at_sop)
			v |= CHAN_PSINFO_AT_SOP;
		v |= (cfg->u.rx.sop_offset & CHAN_SOP_OFF_MASK)
			<< CHAN_SOP_OFF_SHIFT;
		v |= cfg->u.rx.dst_q & CHAN_QNUM_MASK;

		writel_relaxed(v, &chan->reg_rx_flow->control);
		writel_relaxed(0, &chan->reg_rx_flow->tags);
		writel_relaxed(0, &chan->reg_rx_flow->tag_sel);

		v =  cfg->u.rx.fdq[0] << 16;
		v |= cfg->u.rx.fdq[1] & CHAN_QNUM_MASK;
		writel_relaxed(v, &chan->reg_rx_flow->fdq_sel[0]);

		v =  cfg->u.rx.fdq[2] << 16;
		v |= cfg->u.rx.fdq[3] & CHAN_QNUM_MASK;
		writel_relaxed(v, &chan->reg_rx_flow->fdq_sel[1]);

		writel_relaxed(0, &chan->reg_rx_flow->thresh[0]);
		writel_relaxed(0, &chan->reg_rx_flow->thresh[1]);
		writel_relaxed(0, &chan->reg_rx_flow->thresh[2]);
	}

	/* Keep a copy of the cfg */
	memcpy(&chan->cfg, cfg, sizeof(*cfg));
	spin_unlock(&chan->lock);

	return 0;
}

static int chan_teardown(struct knav_dma_chan *chan)
{
	unsigned long end, value;

	if (!chan->reg_chan)
		return 0;

	/* indicate teardown */
	writel_relaxed(DMA_TEARDOWN, &chan->reg_chan->control);

	/* wait for the dma to shut itself down */
	end = jiffies + msecs_to_jiffies(DMA_TIMEOUT);
	do {
		value = readl_relaxed(&chan->reg_chan->control);
		if ((value & DMA_ENABLE) == 0)
			break;
	} while (time_after(end, jiffies));

	if (readl_relaxed(&chan->reg_chan->control) & DMA_ENABLE) {
		dev_err(kdev->dev, "timeout waiting for teardown\n");
		return -ETIMEDOUT;
	}

	return 0;
}

static void chan_stop(struct knav_dma_chan *chan)
{
	spin_lock(&chan->lock);
	if (chan->reg_rx_flow) {
		/* first detach fdqs, starve out the flow */
		writel_relaxed(0, &chan->reg_rx_flow->fdq_sel[0]);
		writel_relaxed(0, &chan->reg_rx_flow->fdq_sel[1]);
		writel_relaxed(0, &chan->reg_rx_flow->thresh[0]);
		writel_relaxed(0, &chan->reg_rx_flow->thresh[1]);
		writel_relaxed(0, &chan->reg_rx_flow->thresh[2]);
	}

	/* teardown the dma channel */
	chan_teardown(chan);

	/* then disconnect the completion side */
	if (chan->reg_rx_flow) {
		writel_relaxed(0, &chan->reg_rx_flow->control);
		writel_relaxed(0, &chan->reg_rx_flow->tags);
		writel_relaxed(0, &chan->reg_rx_flow->tag_sel);
	}

	memset(&chan->cfg, 0, sizeof(struct knav_dma_cfg));
	spin_unlock(&chan->lock);

	dev_dbg(kdev->dev, "channel stopped\n");
}

static void dma_hw_enable_all(struct knav_dma_device *dma)
{
	int i;

	for (i = 0; i < dma->max_tx_chan; i++) {
		writel_relaxed(0, &dma->reg_tx_chan[i].mode);
		writel_relaxed(DMA_ENABLE, &dma->reg_tx_chan[i].control);
	}
}


static void knav_dma_hw_init(struct knav_dma_device *dma)
{
	unsigned v;
	int i;

	spin_lock(&dma->lock);
	v = dma->loopback ? DMA_LOOPBACK : 0;
	writel_relaxed(v, &dma->reg_global->emulation_control);

	v = readl_relaxed(&dma->reg_global->perf_control);
	v |= ((dma->rx_timeout & DMA_RX_TIMEOUT_MASK) << DMA_RX_TIMEOUT_SHIFT);
	writel_relaxed(v, &dma->reg_global->perf_control);

	v = ((dma->tx_priority << DMA_TX_PRIO_SHIFT) |
	     (dma->rx_priority << DMA_RX_PRIO_SHIFT));

	writel_relaxed(v, &dma->reg_global->priority_control);

	/* Always enable all Rx channels. Rx paths are managed using flows */
	for (i = 0; i < dma->max_rx_chan; i++)
		writel_relaxed(DMA_ENABLE, &dma->reg_rx_chan[i].control);

	for (i = 0; i < dma->logical_queue_managers; i++)
		writel_relaxed(dma->qm_base_address[i],
			       &dma->reg_global->qm_base_address[i]);
	spin_unlock(&dma->lock);
}

static void knav_dma_hw_destroy(struct knav_dma_device *dma)
{
	int i;
	unsigned v;

	spin_lock(&dma->lock);
	v = ~DMA_ENABLE & REG_MASK;

	for (i = 0; i < dma->max_rx_chan; i++)
		writel_relaxed(v, &dma->reg_rx_chan[i].control);

	for (i = 0; i < dma->max_tx_chan; i++)
		writel_relaxed(v, &dma->reg_tx_chan[i].control);
	spin_unlock(&dma->lock);
}

static void dma_debug_show_channels(struct seq_file *s,
				    struct knav_dma_chan *chan)
{
	int i;

	seq_printf(s, "\t%s %d:\t",
		((chan->direction == DMA_MEM_TO_DEV) ? "tx chan" : "rx flow"),
		chan_number(chan));

	if (chan->direction == DMA_MEM_TO_DEV) {
		seq_printf(s, "einfo - %d, pswords - %d, priority - %d\n",
			chan->cfg.u.tx.filt_einfo,
			chan->cfg.u.tx.filt_pswords,
			chan->cfg.u.tx.priority);
	} else {
		seq_printf(s, "einfo - %d, psinfo - %d, desc_type - %d\n",
			chan->cfg.u.rx.einfo_present,
			chan->cfg.u.rx.psinfo_present,
			chan->cfg.u.rx.desc_type);
		seq_printf(s, "\t\t\tdst_q: [%d], thresh: %d fdq: ",
			chan->cfg.u.rx.dst_q,
			chan->cfg.u.rx.thresh);
		for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN; i++)
			seq_printf(s, "[%d]", chan->cfg.u.rx.fdq[i]);
		seq_printf(s, "\n");
	}
}

static void dma_debug_show_devices(struct seq_file *s,
				   struct knav_dma_device *dma)
{
	struct knav_dma_chan *chan;

	list_for_each_entry(chan, &dma->chan_list, list) {
		if (atomic_read(&chan->ref_count))
			dma_debug_show_channels(s, chan);
	}
}

static int dma_debug_show(struct seq_file *s, void *v)
{
	struct knav_dma_device *dma;

	list_for_each_entry(dma, &kdev->list, list) {
		if (atomic_read(&dma->ref_count)) {
			seq_printf(s, "%s : max_tx_chan: (%d), max_rx_flows: (%d)\n",
				   dma->name, dma->max_tx_chan, dma->max_rx_flow);
			dma_debug_show_devices(s, dma);
		}
	}

	return 0;
}

static int knav_dma_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, dma_debug_show, NULL);
}

static const struct file_operations knav_dma_debug_ops = {
	.open		= knav_dma_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int of_channel_match_helper(struct device_node *np, const char *name,
				   const char **dma_instance)
{
	struct of_phandle_args args;
	struct device_node *dma_node;
	int index;

	dma_node = of_parse_phandle(np, "ti,navigator-dmas", 0);
	if (!dma_node)
		return -ENODEV;

	*dma_instance = dma_node->name;
	index = of_property_match_string(np, "ti,navigator-dma-names", name);
	if (index < 0) {
		dev_err(kdev->dev, "No 'ti,navigator-dma-names' property\n");
		return -ENODEV;
	}

	if (of_parse_phandle_with_fixed_args(np, "ti,navigator-dmas",
					     1, index, &args)) {
		dev_err(kdev->dev, "Missing the phandle args name %s\n", name);
		return -ENODEV;
	}

	if (args.args[0] < 0) {
		dev_err(kdev->dev, "Missing args for %s\n", name);
		return -ENODEV;
	}

	return args.args[0];
}

/**
 * knav_dma_open_channel() - try to setup an exclusive slave channel
 * @dev:	pointer to client device structure
 * @name:	slave channel name
 * @config:	dma configuration parameters
 *
 * Returns pointer to appropriate DMA channel on success or an error
 * pointer on failure.
 */
void *knav_dma_open_channel(struct device *dev, const char *name,
			    struct knav_dma_cfg *config)
{
	struct knav_dma_chan *chan;
	struct knav_dma_device *dma;
	bool found = false;
	int chan_num = -1;
	const char *instance;

	if (!kdev) {
		pr_err("keystone-navigator-dma driver not registered\n");
		return (void *)-EINVAL;
	}

	chan_num = of_channel_match_helper(dev->of_node, name, &instance);
	if (chan_num < 0) {
		dev_err(kdev->dev, "No DMA instance with name %s\n", name);
		return (void *)-EINVAL;
	}

	dev_dbg(kdev->dev, "initializing %s channel %d from DMA %s\n",
		config->direction == DMA_MEM_TO_DEV ? "transmit" :
		config->direction == DMA_DEV_TO_MEM ? "receive"  :
		"unknown", chan_num, instance);

	if (config->direction != DMA_MEM_TO_DEV &&
	    config->direction != DMA_DEV_TO_MEM) {
		dev_err(kdev->dev, "bad direction\n");
		return (void *)-EINVAL;
	}

	/* Look for correct dma instance */
	list_for_each_entry(dma, &kdev->list, list) {
		if (!strcmp(dma->name, instance)) {
			found = true;
			break;
		}
	}
	if (!found) {
		dev_err(kdev->dev, "No DMA instance with name %s\n", instance);
		return (void *)-EINVAL;
	}

	/* Look for correct dma channel from dma instance */
	found = false;
	list_for_each_entry(chan, &dma->chan_list, list) {
		if (config->direction == DMA_MEM_TO_DEV) {
			if (chan->channel == chan_num) {
				found = true;
				break;
			}
		} else {
			if (chan->flow == chan_num) {
				found = true;
				break;
			}
		}
	}
	if (!found) {
		dev_err(kdev->dev, "channel %d is not in DMA %s\n",
			chan_num, instance);
		return (void *)-EINVAL;
	}

	if (atomic_read(&chan->ref_count) >= 1) {
		if (!check_config(chan, config)) {
			dev_err(kdev->dev, "channel %d config mismatch\n",
				chan_num);
			return (void *)-EINVAL;
		}
	}

	if (atomic_inc_return(&chan->dma->ref_count) <= 1)
		knav_dma_hw_init(chan->dma);

	if (atomic_inc_return(&chan->ref_count) <= 1)
		chan_start(chan, config);

	dev_dbg(kdev->dev, "channel %d opened from DMA %s\n",
		chan_num, instance);

	return chan;
}
EXPORT_SYMBOL_GPL(knav_dma_open_channel);
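As a usage sketch: a client driver might open a transmit channel roughly as below. The device, channel name, and config values are hypothetical; the name must match an entry in the client's "ti,navigator-dma-names" property, and the cfg fields are the ones chan_start() programs above.

/* Hypothetical client-side usage of knav_dma_open_channel(). */
#include <linux/err.h>
#include <linux/soc/ti/knav_dma.h>

static void *example_open_tx_chan(struct device *dev)
{
	struct knav_dma_cfg config;
	void *chan;

	memset(&config, 0, sizeof(config));
	config.direction	 = DMA_MEM_TO_DEV;
	config.u.tx.filt_einfo	 = false;	/* don't filter extended info words */
	config.u.tx.filt_pswords = false;	/* don't filter protocol-specific words */
	config.u.tx.priority	 = 0;		/* written to the tx scheduler prio reg */

	/* "nettx" is invented; it must appear in ti,navigator-dma-names */
	chan = knav_dma_open_channel(dev, "nettx", &config);
	if (IS_ERR(chan))	/* errors come back as e.g. (void *)-EINVAL */
		return NULL;
	return chan;
}

The handle would later be released with knav_dma_close_channel(chan), defined next.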

/**
 * knav_dma_close_channel() - Destroy a dma channel
 *
 * @channel:	dma channel handle
 *
 */
void knav_dma_close_channel(void *channel)
{
	struct knav_dma_chan *chan = channel;

	if (!kdev) {
		pr_err("keystone-navigator-dma driver not registered\n");
		return;
	}

	if (atomic_dec_return(&chan->ref_count) <= 0)
		chan_stop(chan);

	if (atomic_dec_return(&chan->dma->ref_count) <= 0)
		knav_dma_hw_destroy(chan->dma);

	dev_dbg(kdev->dev, "channel %d or flow %d closed from DMA %s\n",
		chan->channel, chan->flow, chan->dma->name);
}
EXPORT_SYMBOL_GPL(knav_dma_close_channel);

static void __iomem *pktdma_get_regs(struct knav_dma_device *dma,
				     struct device_node *node,
				     unsigned index, resource_size_t *_size)
{
	struct device *dev = kdev->dev;
	struct resource res;
	void __iomem *regs;
	int ret;

	ret = of_address_to_resource(node, index, &res);
	if (ret) {
		dev_err(dev, "Can't translate of node(%s) address for index(%d)\n",
			node->name, index);
		return ERR_PTR(ret);
	}

	regs = devm_ioremap_resource(kdev->dev, &res);
	if (IS_ERR(regs))
		dev_err(dev, "Failed to map register base for index(%d) node(%s)\n",
			index, node->name);
	if (_size)
		*_size = resource_size(&res);

	return regs;
}

static int pktdma_init_rx_chan(struct knav_dma_chan *chan, u32 flow)
{
	struct knav_dma_device *dma = chan->dma;

	chan->flow = flow;
	chan->reg_rx_flow = dma->reg_rx_flow + flow;
	chan->channel = DMA_INVALID_ID;
	dev_dbg(kdev->dev, "rx flow(%d) (%p)\n", chan->flow, chan->reg_rx_flow);

	return 0;
}

static int pktdma_init_tx_chan(struct knav_dma_chan *chan, u32 channel)
{
	struct knav_dma_device *dma = chan->dma;

	chan->channel = channel;
	chan->reg_chan = dma->reg_tx_chan + channel;
	chan->reg_tx_sched = dma->reg_tx_sched + channel;
	chan->flow = DMA_INVALID_ID;
	dev_dbg(kdev->dev, "tx channel(%d) (%p)\n", chan->channel, chan->reg_chan);

	return 0;
}

static int pktdma_init_chan(struct knav_dma_device *dma,
			    enum dma_transfer_direction dir,
			    unsigned chan_num)
{
	struct device *dev = kdev->dev;
	struct knav_dma_chan *chan;
	int ret = -EINVAL;

	chan = devm_kzalloc(dev, sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return -ENOMEM;

	INIT_LIST_HEAD(&chan->list);
	chan->dma	= dma;
	chan->direction	= DMA_NONE;
	atomic_set(&chan->ref_count, 0);
	spin_lock_init(&chan->lock);

	if (dir == DMA_MEM_TO_DEV) {
		chan->direction = dir;
		ret = pktdma_init_tx_chan(chan, chan_num);
	} else if (dir == DMA_DEV_TO_MEM) {
		chan->direction = dir;
		ret = pktdma_init_rx_chan(chan, chan_num);
	} else {
		dev_err(dev, "channel(%d) direction unknown\n", chan_num);
	}

	list_add_tail(&chan->list, &dma->chan_list);

	return ret;
}

static int dma_init(struct device_node *cloud, struct device_node *dma_node)
{
	unsigned max_tx_chan, max_rx_chan, max_rx_flow, max_tx_sched;
	struct device_node *node = dma_node;
	struct knav_dma_device *dma;
	int ret, len, num_chan = 0;
	resource_size_t size;
	u32 timeout;
	u32 i;

	dma = devm_kzalloc(kdev->dev, sizeof(*dma), GFP_KERNEL);
	if (!dma) {
		dev_err(kdev->dev, "could not allocate driver mem\n");
		return -ENOMEM;
	}
	INIT_LIST_HEAD(&dma->list);
	INIT_LIST_HEAD(&dma->chan_list);

	if (!of_find_property(cloud, "ti,navigator-cloud-address", &len)) {
		dev_err(kdev->dev, "unspecified navigator cloud addresses\n");
		return -ENODEV;
	}

	dma->logical_queue_managers = len / sizeof(u32);
	if (dma->logical_queue_managers > DMA_MAX_QMS) {
		dev_warn(kdev->dev, "too many queue mgrs(>%d) rest ignored\n",
			 dma->logical_queue_managers);
		dma->logical_queue_managers = DMA_MAX_QMS;
	}

	ret = of_property_read_u32_array(cloud, "ti,navigator-cloud-address",
					 dma->qm_base_address,
					 dma->logical_queue_managers);
	if (ret) {
		dev_err(kdev->dev, "invalid navigator cloud addresses\n");
		return -ENODEV;
	}

	dma->reg_global = pktdma_get_regs(dma, node, 0, &size);
	if (!dma->reg_global)
		return -ENODEV;
	if (size < sizeof(struct reg_global)) {
		dev_err(kdev->dev, "bad size %pa for global regs\n", &size);
		return -ENODEV;
	}

	dma->reg_tx_chan = pktdma_get_regs(dma, node, 1, &size);
	if (!dma->reg_tx_chan)
		return -ENODEV;

	max_tx_chan = size / sizeof(struct reg_chan);
	dma->reg_rx_chan = pktdma_get_regs(dma, node, 2, &size);
	if (!dma->reg_rx_chan)
		return -ENODEV;

	max_rx_chan = size / sizeof(struct reg_chan);
	dma->reg_tx_sched = pktdma_get_regs(dma, node, 3, &size);
	if (!dma->reg_tx_sched)
		return -ENODEV;

	max_tx_sched = size / sizeof(struct reg_tx_sched);
	dma->reg_rx_flow = pktdma_get_regs(dma, node, 4, &size);
	if (!dma->reg_rx_flow)
		return -ENODEV;

	max_rx_flow = size / sizeof(struct reg_rx_flow);
	dma->rx_priority = DMA_PRIO_DEFAULT;
	dma->tx_priority = DMA_PRIO_DEFAULT;

	dma->enable_all	= (of_get_property(node, "ti,enable-all", NULL) != NULL);
	dma->loopback	= (of_get_property(node, "ti,loop-back", NULL) != NULL);

	ret = of_property_read_u32(node, "ti,rx-retry-timeout", &timeout);
	if (ret < 0) {
		dev_dbg(kdev->dev, "unspecified rx timeout, using value %d\n",
			DMA_RX_TIMEOUT_DEFAULT);
		timeout = DMA_RX_TIMEOUT_DEFAULT;
	}

	dma->rx_timeout = timeout;
	dma->max_rx_chan = max_rx_chan;
	dma->max_rx_flow = max_rx_flow;
	dma->max_tx_chan = min(max_tx_chan, max_tx_sched);
	atomic_set(&dma->ref_count, 0);
	strcpy(dma->name, node->name);
	spin_lock_init(&dma->lock);

	for (i = 0; i < dma->max_tx_chan; i++) {
		if (pktdma_init_chan(dma, DMA_MEM_TO_DEV, i) >= 0)
			num_chan++;
	}

	for (i = 0; i < dma->max_rx_flow; i++) {
		if (pktdma_init_chan(dma, DMA_DEV_TO_MEM, i) >= 0)
			num_chan++;
	}

	list_add_tail(&dma->list, &kdev->list);

	/*
	 * For DSP software use cases or userspace transport software, setup
	 * all the DMA hardware resources.
	 */
	if (dma->enable_all) {
		atomic_inc(&dma->ref_count);
		knav_dma_hw_init(dma);
		dma_hw_enable_all(dma);
	}

	dev_info(kdev->dev, "DMA %s registered %d logical channels, flows %d, tx chans: %d, rx chans: %d%s\n",
		 dma->name, num_chan, dma->max_rx_flow,
		 dma->max_tx_chan, dma->max_rx_chan,
		 dma->loopback ? ", loopback" : "");

	return 0;
}
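To make the probe path above concrete, here is a hedged sketch of the device tree shape it parses. The compatible string and property names are taken from this file; all node names, addresses, sizes, and channel numbers are invented for illustration:

/* Hypothetical DT fragment (addresses and numbers invented). */
knav_dmas: knav_dmas@0 {
	compatible = "ti,keystone-navigator-dma";
	/* one cell per logical queue manager, capped at DMA_MAX_QMS (4) */
	ti,navigator-cloud-address = <0x23a80000 0x23a90000>;

	dma_gbe: dma_gbe@0 {
		/* five reg regions, in the order dma_init() reads them:
		 * 0 global, 1 tx chan, 2 rx chan, 3 tx sched, 4 rx flow */
		reg = <0x2004000 0x100>, <0x2004400 0x120>,
		      <0x2004800 0x300>, <0x2004c00 0x120>,
		      <0x2005000 0x400>;
		ti,loop-back;                  /* optional */
		ti,rx-retry-timeout = <17500>; /* optional, cycles */
	};
};

/* A client node then names its channels by instance and number: */
ethernet@2090000 {
	ti,navigator-dmas = <&dma_gbe 22>, <&dma_gbe 23>;
	ti,navigator-dma-names = "netrx", "nettx";
};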

static int knav_dma_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *node = pdev->dev.of_node;
	struct device_node *child;
	int ret = 0;

	if (!node) {
		dev_err(&pdev->dev, "could not find device info\n");
		return -EINVAL;
	}

	kdev = devm_kzalloc(dev,
			    sizeof(struct knav_dma_pool_device), GFP_KERNEL);
	if (!kdev) {
		dev_err(dev, "could not allocate driver mem\n");
		return -ENOMEM;
	}

	kdev->dev = dev;
	INIT_LIST_HEAD(&kdev->list);

	pm_runtime_enable(kdev->dev);
	ret = pm_runtime_get_sync(kdev->dev);
	if (ret < 0) {
		dev_err(kdev->dev, "unable to enable pktdma, err %d\n", ret);
		return ret;
	}

	/* Initialise all packet dmas */
	for_each_child_of_node(node, child) {
		ret = dma_init(node, child);
		if (ret) {
			dev_err(&pdev->dev, "init failed with %d\n", ret);
			break;
		}
	}

	if (list_empty(&kdev->list)) {
		dev_err(dev, "no valid dma instance\n");
		return -ENODEV;
	}

	debugfs_create_file("knav_dma", S_IFREG | S_IRUGO, NULL, NULL,
			    &knav_dma_debug_ops);

	return ret;
}

static int knav_dma_remove(struct platform_device *pdev)
{
	struct knav_dma_device *dma;

	list_for_each_entry(dma, &kdev->list, list) {
		if (atomic_dec_return(&dma->ref_count) == 0)
			knav_dma_hw_destroy(dma);
	}

	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	return 0;
}

static struct of_device_id of_match[] = {
	{ .compatible = "ti,keystone-navigator-dma", },
	{},
};

MODULE_DEVICE_TABLE(of, of_match);

static struct platform_driver knav_dma_driver = {
	.probe	= knav_dma_probe,
	.remove	= knav_dma_remove,
	.driver = {
		.name		= "keystone-navigator-dma",
		.owner		= THIS_MODULE,
		.of_match_table	= of_match,
	},
};
module_platform_driver(knav_dma_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TI Keystone Navigator Packet DMA driver");
MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com>");
MODULE_AUTHOR("Santosh Shilimkar <santosh.shilimkar@ti.com>");
386	drivers/soc/ti/knav_qmss.h	Normal file
@@ -0,0 +1,386 @@
/*
 * Keystone Navigator QMSS driver internal header
 *
 * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com
 * Author:	Sandeep Nair <sandeep_n@ti.com>
 *		Cyril Chemparathy <cyril@ti.com>
 *		Santosh Shilimkar <santosh.shilimkar@ti.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

#ifndef __KNAV_QMSS_H__
#define __KNAV_QMSS_H__

#define THRESH_GTE	BIT(7)
#define THRESH_LT	0

#define PDSP_CTRL_PC_MASK	0xffff0000
#define PDSP_CTRL_SOFT_RESET	BIT(0)
#define PDSP_CTRL_ENABLE	BIT(1)
#define PDSP_CTRL_RUNNING	BIT(15)

#define ACC_MAX_CHANNEL		48
#define ACC_DEFAULT_PERIOD	25 /* usecs */

#define ACC_CHANNEL_INT_BASE	2

#define ACC_LIST_ENTRY_TYPE		1
#define ACC_LIST_ENTRY_WORDS		(1 << ACC_LIST_ENTRY_TYPE)
#define ACC_LIST_ENTRY_QUEUE_IDX	0
#define ACC_LIST_ENTRY_DESC_IDX		(ACC_LIST_ENTRY_WORDS - 1)
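With ACC_LIST_ENTRY_TYPE = 1, these work out to two-word list entries: word 0 carries the source queue number in its upper 16 bits and the last word carries the descriptor pointer, with 0 terminating the list. A small illustrative sketch of the decode, mirroring what knav_acc_int_handler() in knav_qmss_acc.c does:

/* Illustrative decode of one accumulator list entry. */
static inline u32 acc_entry_desc(const u32 *entry, u32 *queue)
{
	*queue = entry[ACC_LIST_ENTRY_QUEUE_IDX] >> 16;	/* absolute queue number */
	return entry[ACC_LIST_ENTRY_DESC_IDX];		/* descriptor; 0 = end of list */
}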

#define ACC_CMD_DISABLE_CHANNEL	0x80
#define ACC_CMD_ENABLE_CHANNEL	0x81
#define ACC_CFG_MULTI_QUEUE	BIT(21)

#define ACC_INTD_OFFSET_EOI		(0x0010)
#define ACC_INTD_OFFSET_COUNT(ch)	(0x0300 + 4 * (ch))
#define ACC_INTD_OFFSET_STATUS(ch)	(0x0200 + 4 * ((ch) / 32))

#define RANGE_MAX_IRQS	64

#define ACC_DESCS_MAX		SZ_1K
#define ACC_DESCS_MASK		(ACC_DESCS_MAX - 1)
#define DESC_SIZE_MASK		0xful
#define DESC_PTR_MASK		(~DESC_SIZE_MASK)

#define KNAV_NAME_SIZE	32

enum knav_acc_result {
	ACC_RET_IDLE,
	ACC_RET_SUCCESS,
	ACC_RET_INVALID_COMMAND,
	ACC_RET_INVALID_CHANNEL,
	ACC_RET_INACTIVE_CHANNEL,
	ACC_RET_ACTIVE_CHANNEL,
	ACC_RET_INVALID_QUEUE,
	ACC_RET_INVALID_RET,
};

struct knav_reg_config {
	u32	revision;
	u32	__pad1;
	u32	divert;
	u32	link_ram_base0;
	u32	link_ram_size0;
	u32	link_ram_base1;
	u32	__pad2[2];
	u32	starvation[0];
};

struct knav_reg_region {
	u32	base;
	u32	start_index;
	u32	size_count;
	u32	__pad;
};

struct knav_reg_pdsp_regs {
	u32	control;
	u32	status;
	u32	cycle_count;
	u32	stall_count;
};

struct knav_reg_acc_command {
	u32	command;
	u32	queue_mask;
	u32	list_phys;
	u32	queue_num;
	u32	timer_config;
};

struct knav_link_ram_block {
	dma_addr_t	 phys;
	void		*virt;
	size_t		 size;
};

struct knav_acc_info {
	u32			pdsp_id;
	u32			start_channel;
	u32			list_entries;
	u32			pacing_mode;
	u32			timer_count;
	int			mem_size;
	int			list_size;
	struct knav_pdsp_info	*pdsp;
};

struct knav_acc_channel {
	u32			channel;
	u32			list_index;
	u32			open_mask;
	u32			*list_cpu[2];
	dma_addr_t		list_dma[2];
	char			name[KNAV_NAME_SIZE];
	atomic_t		retrigger_count;
};

struct knav_pdsp_info {
	const char				*name;
	struct knav_reg_pdsp_regs __iomem	*regs;
	union {
		void __iomem				*command;
		struct knav_reg_acc_command __iomem	*acc_command;
		u32 __iomem				*qos_command;
	};
	void __iomem				*intd;
	u32 __iomem				*iram;
	const char				*firmware;
	u32					id;
	struct list_head			list;
};

struct knav_qmgr_info {
	unsigned			start_queue;
	unsigned			num_queues;
	struct knav_reg_config __iomem	*reg_config;
	struct knav_reg_region __iomem	*reg_region;
	struct knav_reg_queue __iomem	*reg_push, *reg_pop, *reg_peek;
	void __iomem			*reg_status;
	struct list_head		list;
};

#define KNAV_NUM_LINKRAM	2

/**
 * struct knav_queue_stats:	queue statistics
 * pushes:			number of push operations
 * pops:			number of pop operations
 * push_errors:			number of push errors
 * pop_errors:			number of pop errors
 * notifies:			notifier counts
 */
struct knav_queue_stats {
	atomic_t	pushes;
	atomic_t	pops;
	atomic_t	push_errors;
	atomic_t	pop_errors;
	atomic_t	notifies;
};

/**
 * struct knav_reg_queue:	queue registers
 * @entry_count:		valid entries in the queue
 * @byte_count:			total byte count in the queue
 * @packet_size:		packet size for the queue
 * @ptr_size_thresh:		packet pointer size threshold
 */
struct knav_reg_queue {
	u32	entry_count;
	u32	byte_count;
	u32	packet_size;
	u32	ptr_size_thresh;
};

/**
 * struct knav_region:		qmss region info
 * @dma_start, dma_end:		start and end dma address
 * @virt_start, virt_end:	start and end virtual address
 * @desc_size:			descriptor size
 * @used_desc:			consumed descriptors
 * @id:				region number
 * @num_desc:			total descriptors
 * @link_index:			index of the first descriptor
 * @name:			region name
 * @list:			instance in the device's region list
 * @pools:			list of descriptor pools in the region
 */
struct knav_region {
	dma_addr_t		dma_start, dma_end;
	void			*virt_start, *virt_end;
	unsigned		desc_size;
	unsigned		used_desc;
	unsigned		id;
	unsigned		num_desc;
	unsigned		link_index;
	const char		*name;
	struct list_head	list;
	struct list_head	pools;
};

/**
 * struct knav_pool:		qmss pools
 * @dev:			device pointer
 * @region:			qmss region info
 * @queue:			queue registers
 * @kdev:			qmss device pointer
 * @region_offset:		offset from the base
 * @num_desc:			total descriptors
 * @desc_size:			descriptor size
 * @region_id:			region number
 * @name:			pool name
 * @list:			list head
 * @region_inst:		instance in the region's pool list
 */
struct knav_pool {
	struct device		*dev;
	struct knav_region	*region;
	struct knav_queue	*queue;
	struct knav_device	*kdev;
	int			region_offset;
	int			num_desc;
	int			desc_size;
	int			region_id;
	const char		*name;
	struct list_head	list;
	struct list_head	region_inst;
};

/**
 * struct knav_queue_inst:	qmss queue instance properties
 * @descs:			descriptor pointer
 * @desc_head, desc_tail, desc_count: descriptor counters
 * @acc:			accumulator channel pointer
 * @kdev:			qmss device pointer
 * @range:			range info
 * @qmgr:			queue manager info
 * @id:				queue instance id
 * @irq_num:			irq line number
 * @notify_needed:		notifier needed based on queue type
 * @num_notifiers:		total notifiers
 * @handles:			list head
 * @name:			queue instance name
 * @irq_name:			irq line name
 */
struct knav_queue_inst {
	u32			*descs;
	atomic_t		desc_head, desc_tail, desc_count;
	struct knav_acc_channel	*acc;
	struct knav_device	*kdev;
	struct knav_range_info	*range;
	struct knav_qmgr_info	*qmgr;
	u32			id;
	int			irq_num;
	int			notify_needed;
	atomic_t		num_notifiers;
	struct list_head	handles;
	const char		*name;
	const char		*irq_name;
};

/**
 * struct knav_queue:		qmss queue properties
 * @reg_push, reg_pop, reg_peek:	push, pop queue registers
 * @inst:			qmss queue instance properties
 * @notifier_fn:		notifier function
 * @notifier_fn_arg:		notifier function argument
 * @notifier_enabled:		notifier enabled for a given queue
 * @rcu:			rcu head
 * @flags:			queue flags
 * @list:			list head
 */
struct knav_queue {
	struct knav_reg_queue __iomem	*reg_push, *reg_pop, *reg_peek;
	struct knav_queue_inst		*inst;
	struct knav_queue_stats		stats;
	knav_queue_notify_fn		notifier_fn;
	void				*notifier_fn_arg;
	atomic_t			notifier_enabled;
	struct rcu_head			rcu;
	unsigned			flags;
	struct list_head		list;
};

struct knav_device {
	struct device			*dev;
	unsigned			base_id;
	unsigned			num_queues;
	unsigned			num_queues_in_use;
	unsigned			inst_shift;
	struct knav_link_ram_block	link_rams[KNAV_NUM_LINKRAM];
	void				*instances;
	struct list_head		regions;
	struct list_head		queue_ranges;
	struct list_head		pools;
	struct list_head		pdsps;
	struct list_head		qmgrs;
};

struct knav_range_ops {
	int	(*init_range)(struct knav_range_info *range);
	int	(*free_range)(struct knav_range_info *range);
	int	(*init_queue)(struct knav_range_info *range,
			      struct knav_queue_inst *inst);
	int	(*open_queue)(struct knav_range_info *range,
			      struct knav_queue_inst *inst, unsigned flags);
	int	(*close_queue)(struct knav_range_info *range,
			       struct knav_queue_inst *inst);
	int	(*set_notify)(struct knav_range_info *range,
			      struct knav_queue_inst *inst, bool enabled);
};

struct knav_irq_info {
	int	irq;
	u32	cpu_map;
};

struct knav_range_info {
	const char		*name;
	struct knav_device	*kdev;
	unsigned		queue_base;
	unsigned		num_queues;
	void			*queue_base_inst;
	unsigned		flags;
	struct list_head	list;
	struct knav_range_ops	*ops;
	struct knav_acc_info	acc_info;
	struct knav_acc_channel	*acc;
	unsigned		num_irqs;
	struct knav_irq_info	irqs[RANGE_MAX_IRQS];
};

#define RANGE_RESERVED		BIT(0)
#define RANGE_HAS_IRQ		BIT(1)
#define RANGE_HAS_ACCUMULATOR	BIT(2)
#define RANGE_MULTI_QUEUE	BIT(3)

#define for_each_region(kdev, region)				\
	list_for_each_entry(region, &kdev->regions, list)

#define first_region(kdev)					\
	list_first_entry(&kdev->regions,			\
			 struct knav_region, list)

#define for_each_queue_range(kdev, range)			\
	list_for_each_entry(range, &kdev->queue_ranges, list)

#define first_queue_range(kdev)					\
	list_first_entry(&kdev->queue_ranges,			\
			 struct knav_range_info, list)

#define for_each_pool(kdev, pool)				\
	list_for_each_entry(pool, &kdev->pools, list)

#define for_each_pdsp(kdev, pdsp)				\
	list_for_each_entry(pdsp, &kdev->pdsps, list)

#define for_each_qmgr(kdev, qmgr)				\
	list_for_each_entry(qmgr, &kdev->qmgrs, list)

static inline struct knav_pdsp_info *
knav_find_pdsp(struct knav_device *kdev, unsigned pdsp_id)
{
	struct knav_pdsp_info *pdsp;

	for_each_pdsp(kdev, pdsp)
		if (pdsp_id == pdsp->id)
			return pdsp;
	return NULL;
}

extern int knav_init_acc_range(struct knav_device *kdev,
			       struct device_node *node,
			       struct knav_range_info *range);
extern void knav_queue_notify(struct knav_queue_inst *inst);

#endif /* __KNAV_QMSS_H__ */
591	drivers/soc/ti/knav_qmss_acc.c	Normal file
@@ -0,0 +1,591 @@
/*
 * Keystone accumulator queue manager
 *
 * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com
 * Author:	Sandeep Nair <sandeep_n@ti.com>
 *		Cyril Chemparathy <cyril@ti.com>
 *		Santosh Shilimkar <santosh.shilimkar@ti.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/soc/ti/knav_qmss.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/firmware.h>

#include "knav_qmss.h"

#define knav_range_offset_to_inst(kdev, range, q)	\
	(range->queue_base_inst + (q << kdev->inst_shift))

static void __knav_acc_notify(struct knav_range_info *range,
			      struct knav_acc_channel *acc)
{
	struct knav_device *kdev = range->kdev;
	struct knav_queue_inst *inst;
	int range_base, queue;

	range_base = kdev->base_id + range->queue_base;

	if (range->flags & RANGE_MULTI_QUEUE) {
		for (queue = 0; queue < range->num_queues; queue++) {
			inst = knav_range_offset_to_inst(kdev, range,
							 queue);
			if (inst->notify_needed) {
				inst->notify_needed = 0;
				dev_dbg(kdev->dev, "acc-irq: notifying %d\n",
					range_base + queue);
				knav_queue_notify(inst);
			}
		}
	} else {
		queue = acc->channel - range->acc_info.start_channel;
		inst = knav_range_offset_to_inst(kdev, range, queue);
		dev_dbg(kdev->dev, "acc-irq: notifying %d\n",
			range_base + queue);
		knav_queue_notify(inst);
	}
}

static int knav_acc_set_notify(struct knav_range_info *range,
			       struct knav_queue_inst *kq,
			       bool enabled)
{
	struct knav_pdsp_info *pdsp = range->acc_info.pdsp;
	struct knav_device *kdev = range->kdev;
	u32 mask, offset;

	/*
	 * when enabling, we need to re-trigger an interrupt if we
	 * have descriptors pending
	 */
	if (!enabled || atomic_read(&kq->desc_count) <= 0)
		return 0;

	kq->notify_needed = 1;
	atomic_inc(&kq->acc->retrigger_count);
	mask = BIT(kq->acc->channel % 32);
	offset = ACC_INTD_OFFSET_STATUS(kq->acc->channel);
	dev_dbg(kdev->dev, "setup-notify: re-triggering irq for %s\n",
		kq->acc->name);
	writel_relaxed(mask, pdsp->intd + offset);
	return 0;
}

static irqreturn_t knav_acc_int_handler(int irq, void *_instdata)
{
	struct knav_acc_channel *acc;
	struct knav_queue_inst *kq = NULL;
	struct knav_range_info *range;
	struct knav_pdsp_info *pdsp;
	struct knav_acc_info *info;
	struct knav_device *kdev;

	u32 *list, *list_cpu, val, idx, notifies;
	int range_base, channel, queue = 0;
	dma_addr_t list_dma;

	range = _instdata;
	info  = &range->acc_info;
	kdev  = range->kdev;
	pdsp  = range->acc_info.pdsp;
	acc   = range->acc;

	range_base = kdev->base_id + range->queue_base;
	if ((range->flags & RANGE_MULTI_QUEUE) == 0) {
		for (queue = 0; queue < range->num_irqs; queue++)
			if (range->irqs[queue].irq == irq)
				break;
		kq = knav_range_offset_to_inst(kdev, range, queue);
		acc += queue;
	}

	channel = acc->channel;
	list_dma = acc->list_dma[acc->list_index];
	list_cpu = acc->list_cpu[acc->list_index];
	dev_dbg(kdev->dev, "acc-irq: channel %d, list %d, virt %p, phys %x\n",
		channel, acc->list_index, list_cpu, list_dma);
	if (atomic_read(&acc->retrigger_count)) {
		atomic_dec(&acc->retrigger_count);
		__knav_acc_notify(range, acc);
		writel_relaxed(1, pdsp->intd + ACC_INTD_OFFSET_COUNT(channel));
		/* ack the interrupt */
		writel_relaxed(ACC_CHANNEL_INT_BASE + channel,
			       pdsp->intd + ACC_INTD_OFFSET_EOI);

		return IRQ_HANDLED;
	}

	notifies = readl_relaxed(pdsp->intd + ACC_INTD_OFFSET_COUNT(channel));
	WARN_ON(!notifies);
	dma_sync_single_for_cpu(kdev->dev, list_dma, info->list_size,
				DMA_FROM_DEVICE);

	for (list = list_cpu; list < list_cpu + (info->list_size / sizeof(u32));
	     list += ACC_LIST_ENTRY_WORDS) {
		if (ACC_LIST_ENTRY_WORDS == 1) {
			dev_dbg(kdev->dev,
				"acc-irq: list %d, entry @%p, %08x\n",
				acc->list_index, list, list[0]);
		} else if (ACC_LIST_ENTRY_WORDS == 2) {
			dev_dbg(kdev->dev,
				"acc-irq: list %d, entry @%p, %08x %08x\n",
				acc->list_index, list, list[0], list[1]);
		} else if (ACC_LIST_ENTRY_WORDS == 4) {
			dev_dbg(kdev->dev,
				"acc-irq: list %d, entry @%p, %08x %08x %08x %08x\n",
				acc->list_index, list, list[0], list[1],
				list[2], list[3]);
		}

		val = list[ACC_LIST_ENTRY_DESC_IDX];
		if (!val)
			break;

		if (range->flags & RANGE_MULTI_QUEUE) {
			queue = list[ACC_LIST_ENTRY_QUEUE_IDX] >> 16;
			if (queue < range_base ||
			    queue >= range_base + range->num_queues) {
				dev_err(kdev->dev,
					"bad queue %d, expecting %d-%d\n",
					queue, range_base,
					range_base + range->num_queues);
				break;
			}
			queue -= range_base;
			kq = knav_range_offset_to_inst(kdev, range,
						       queue);
		}

		if (atomic_inc_return(&kq->desc_count) >= ACC_DESCS_MAX) {
			atomic_dec(&kq->desc_count);
			dev_err(kdev->dev,
				"acc-irq: queue %d full, entry dropped\n",
				queue + range_base);
			continue;
		}

		idx = atomic_inc_return(&kq->desc_tail) & ACC_DESCS_MASK;
		kq->descs[idx] = val;
		kq->notify_needed = 1;
		dev_dbg(kdev->dev, "acc-irq: enqueue %08x at %d, queue %d\n",
			val, idx, queue + range_base);
	}

	__knav_acc_notify(range, acc);
	memset(list_cpu, 0, info->list_size);
	dma_sync_single_for_device(kdev->dev, list_dma, info->list_size,
				   DMA_TO_DEVICE);

	/* flip to the other list */
	acc->list_index ^= 1;

	/* reset the interrupt counter */
	writel_relaxed(1, pdsp->intd + ACC_INTD_OFFSET_COUNT(channel));

	/* ack the interrupt */
	writel_relaxed(ACC_CHANNEL_INT_BASE + channel,
		       pdsp->intd + ACC_INTD_OFFSET_EOI);

	return IRQ_HANDLED;
}

int knav_range_setup_acc_irq(struct knav_range_info *range,
			     int queue, bool enabled)
{
	struct knav_device *kdev = range->kdev;
	struct knav_acc_channel *acc;
	unsigned long cpu_map;
	int ret = 0, irq;
	u32 old, new;

	if (range->flags & RANGE_MULTI_QUEUE) {
		acc = range->acc;
		irq = range->irqs[0].irq;
		cpu_map = range->irqs[0].cpu_map;
	} else {
		acc = range->acc + queue;
		irq = range->irqs[queue].irq;
		cpu_map = range->irqs[queue].cpu_map;
	}

	old = acc->open_mask;
	if (enabled)
		new = old | BIT(queue);
	else
		new = old & ~BIT(queue);
	acc->open_mask = new;

	dev_dbg(kdev->dev,
		"setup-acc-irq: open mask old %08x, new %08x, channel %s\n",
		old, new, acc->name);

	if (likely(new == old))
		return 0;

	if (new && !old) {
		dev_dbg(kdev->dev,
			"setup-acc-irq: requesting %s for channel %s\n",
			acc->name, acc->name);
		ret = request_irq(irq, knav_acc_int_handler, 0, acc->name,
				  range);
		if (!ret && cpu_map) {
			ret = irq_set_affinity_hint(irq, to_cpumask(&cpu_map));
			if (ret) {
				dev_warn(range->kdev->dev,
					 "Failed to set IRQ affinity\n");
				return ret;
			}
		}
	}

	if (old && !new) {
		dev_dbg(kdev->dev, "setup-acc-irq: freeing %s for channel %s\n",
			acc->name, acc->name);
		free_irq(irq, range);
	}

	return ret;
}

static const char *knav_acc_result_str(enum knav_acc_result result)
{
	static const char * const result_str[] = {
		[ACC_RET_IDLE]			= "idle",
		[ACC_RET_SUCCESS]		= "success",
		[ACC_RET_INVALID_COMMAND]	= "invalid command",
		[ACC_RET_INVALID_CHANNEL]	= "invalid channel",
		[ACC_RET_INACTIVE_CHANNEL]	= "inactive channel",
		[ACC_RET_ACTIVE_CHANNEL]	= "active channel",
		[ACC_RET_INVALID_QUEUE]		= "invalid queue",
		[ACC_RET_INVALID_RET]		= "invalid return code",
	};

	if (result >= ARRAY_SIZE(result_str))
		return result_str[ACC_RET_INVALID_RET];
	else
		return result_str[result];
}

static enum knav_acc_result
knav_acc_write(struct knav_device *kdev, struct knav_pdsp_info *pdsp,
	       struct knav_reg_acc_command *cmd)
{
	u32 result;

	dev_dbg(kdev->dev, "acc command %08x %08x %08x %08x %08x\n",
		cmd->command, cmd->queue_mask, cmd->list_phys,
		cmd->queue_num, cmd->timer_config);

	writel_relaxed(cmd->timer_config, &pdsp->acc_command->timer_config);
	writel_relaxed(cmd->queue_num, &pdsp->acc_command->queue_num);
	writel_relaxed(cmd->list_phys, &pdsp->acc_command->list_phys);
	writel_relaxed(cmd->queue_mask, &pdsp->acc_command->queue_mask);
	writel_relaxed(cmd->command, &pdsp->acc_command->command);

	/* wait for the command to clear */
	do {
		result = readl_relaxed(&pdsp->acc_command->command);
	} while ((result >> 8) & 0xff);

	return (result >> 24) & 0xff;
}

static void knav_acc_setup_cmd(struct knav_device *kdev,
			       struct knav_range_info *range,
			       struct knav_reg_acc_command *cmd,
			       int queue)
{
	struct knav_acc_info *info = &range->acc_info;
	struct knav_acc_channel *acc;
	int queue_base;
	u32 queue_mask;

	if (range->flags & RANGE_MULTI_QUEUE) {
		acc = range->acc;
		queue_base = range->queue_base;
		queue_mask = BIT(range->num_queues) - 1;
	} else {
		acc = range->acc + queue;
		queue_base = range->queue_base + queue;
		queue_mask = 0;
	}

	memset(cmd, 0, sizeof(*cmd));
	cmd->command    = acc->channel;
	cmd->queue_mask = queue_mask;
	cmd->list_phys  = acc->list_dma[0];
	cmd->queue_num  = info->list_entries << 16;
	cmd->queue_num |= queue_base;

	cmd->timer_config = ACC_LIST_ENTRY_TYPE << 18;
	if (range->flags & RANGE_MULTI_QUEUE)
		cmd->timer_config |= ACC_CFG_MULTI_QUEUE;
	cmd->timer_config |= info->pacing_mode << 16;
	cmd->timer_config |= info->timer_count;
}

static void knav_acc_stop(struct knav_device *kdev,
			  struct knav_range_info *range,
			  int queue)
{
	struct knav_reg_acc_command cmd;
	struct knav_acc_channel *acc;
	enum knav_acc_result result;

	acc = range->acc + queue;

	knav_acc_setup_cmd(kdev, range, &cmd, queue);
	cmd.command |= ACC_CMD_DISABLE_CHANNEL << 8;
	result = knav_acc_write(kdev, range->acc_info.pdsp, &cmd);

	dev_dbg(kdev->dev, "stopped acc channel %s, result %s\n",
		acc->name, knav_acc_result_str(result));
}

static enum knav_acc_result knav_acc_start(struct knav_device *kdev,
					   struct knav_range_info *range,
					   int queue)
{
	struct knav_reg_acc_command cmd;
	struct knav_acc_channel *acc;
	enum knav_acc_result result;

	acc = range->acc + queue;

	knav_acc_setup_cmd(kdev, range, &cmd, queue);
	cmd.command |= ACC_CMD_ENABLE_CHANNEL << 8;
	result = knav_acc_write(kdev, range->acc_info.pdsp, &cmd);

	dev_dbg(kdev->dev, "started acc channel %s, result %s\n",
		acc->name, knav_acc_result_str(result));

	return result;
}

static int knav_acc_init_range(struct knav_range_info *range)
{
	struct knav_device *kdev = range->kdev;
	struct knav_acc_channel *acc;
	enum knav_acc_result result;
	int queue;

	for (queue = 0; queue < range->num_queues; queue++) {
		acc = range->acc + queue;

		knav_acc_stop(kdev, range, queue);
		acc->list_index = 0;
		result = knav_acc_start(kdev, range, queue);

		if (result != ACC_RET_SUCCESS)
			return -EIO;

		if (range->flags & RANGE_MULTI_QUEUE)
			return 0;
	}
	return 0;
}

static int knav_acc_init_queue(struct knav_range_info *range,
			       struct knav_queue_inst *kq)
{
	unsigned id = kq->id - range->queue_base;

	kq->descs = devm_kzalloc(range->kdev->dev,
				 ACC_DESCS_MAX * sizeof(u32), GFP_KERNEL);
	if (!kq->descs)
		return -ENOMEM;

	kq->acc = range->acc;
	if ((range->flags & RANGE_MULTI_QUEUE) == 0)
		kq->acc += id;
	return 0;
}

static int knav_acc_open_queue(struct knav_range_info *range,
			       struct knav_queue_inst *inst, unsigned flags)
{
	unsigned id = inst->id - range->queue_base;

	return knav_range_setup_acc_irq(range, id, true);
}

static int knav_acc_close_queue(struct knav_range_info *range,
				struct knav_queue_inst *inst)
{
	unsigned id = inst->id - range->queue_base;

	return knav_range_setup_acc_irq(range, id, false);
}

static int knav_acc_free_range(struct knav_range_info *range)
{
	struct knav_device *kdev = range->kdev;
	struct knav_acc_channel *acc;
	struct knav_acc_info *info;
	int channel, channels;

	info = &range->acc_info;

	if (range->flags & RANGE_MULTI_QUEUE)
		channels = 1;
	else
		channels = range->num_queues;

	for (channel = 0; channel < channels; channel++) {
		acc = range->acc + channel;
		if (!acc->list_cpu[0])
			continue;
		dma_unmap_single(kdev->dev, acc->list_dma[0],
				 info->mem_size, DMA_BIDIRECTIONAL);
		free_pages_exact(acc->list_cpu[0], info->mem_size);
	}
	devm_kfree(range->kdev->dev, range->acc);
	return 0;
}

struct knav_range_ops knav_acc_range_ops = {
	.set_notify	= knav_acc_set_notify,
	.init_queue	= knav_acc_init_queue,
	.open_queue	= knav_acc_open_queue,
	.close_queue	= knav_acc_close_queue,
	.init_range	= knav_acc_init_range,
	.free_range	= knav_acc_free_range,
};

/**
 * knav_init_acc_range: Initialise accumulator ranges
 *
 * @kdev:	qmss device
 * @node:	device node
 * @range:	qmss range information
 *
 * Return 0 on success or error
 */
int knav_init_acc_range(struct knav_device *kdev,
			struct device_node *node,
			struct knav_range_info *range)
{
	struct knav_acc_channel *acc;
	struct knav_pdsp_info *pdsp;
	struct knav_acc_info *info;
	int ret, channel, channels;
	int list_size, mem_size;
	dma_addr_t list_dma;
	void *list_mem;
	u32 config[5];

	range->flags |= RANGE_HAS_ACCUMULATOR;
	info = &range->acc_info;

	ret = of_property_read_u32_array(node, "accumulator", config, 5);
	if (ret)
		return ret;

	info->pdsp_id		= config[0];
	info->start_channel	= config[1];
	info->list_entries	= config[2];
	info->pacing_mode	= config[3];
	info->timer_count	= config[4] / ACC_DEFAULT_PERIOD;

	if (info->start_channel > ACC_MAX_CHANNEL) {
		dev_err(kdev->dev, "channel %d invalid for range %s\n",
			info->start_channel, range->name);
		return -EINVAL;
	}

	if (info->pacing_mode > 3) {
		dev_err(kdev->dev, "pacing mode %d invalid for range %s\n",
			info->pacing_mode, range->name);
		return -EINVAL;
	}

	pdsp = knav_find_pdsp(kdev, info->pdsp_id);
	if (!pdsp) {
		dev_err(kdev->dev, "pdsp id %d not found for range %s\n",
			info->pdsp_id, range->name);
		return -EINVAL;
	}

	info->pdsp = pdsp;
	channels = range->num_queues;
	if (of_get_property(node, "multi-queue", NULL)) {
		range->flags |= RANGE_MULTI_QUEUE;
		channels = 1;
		if (range->queue_base & (32 - 1)) {
			dev_err(kdev->dev,
				"misaligned multi-queue accumulator range %s\n",
				range->name);
			return -EINVAL;
		}
		if (range->num_queues > 32) {
			dev_err(kdev->dev,
				"too many queues in accumulator range %s\n",
				range->name);
			return -EINVAL;
		}
	}

	/* figure out list size */
	list_size  = info->list_entries;
	list_size *= ACC_LIST_ENTRY_WORDS * sizeof(u32);
	info->list_size = list_size;
	mem_size   = PAGE_ALIGN(list_size * 2);
	info->mem_size  = mem_size;
	range->acc = devm_kzalloc(kdev->dev, channels * sizeof(*range->acc),
				  GFP_KERNEL);
	if (!range->acc)
		return -ENOMEM;

	for (channel = 0; channel < channels; channel++) {
		acc = range->acc + channel;
		acc->channel = info->start_channel + channel;

		/* allocate memory for the two lists */
		list_mem = alloc_pages_exact(mem_size, GFP_KERNEL | GFP_DMA);
		if (!list_mem)
			return -ENOMEM;

		list_dma = dma_map_single(kdev->dev, list_mem, mem_size,
					  DMA_BIDIRECTIONAL);
		if (dma_mapping_error(kdev->dev, list_dma)) {
			free_pages_exact(list_mem, mem_size);
			return -ENOMEM;
		}

		memset(list_mem, 0, mem_size);
		dma_sync_single_for_device(kdev->dev, list_dma, mem_size,
					   DMA_TO_DEVICE);
		scnprintf(acc->name, sizeof(acc->name), "hwqueue-acc-%d",
			  acc->channel);
		acc->list_cpu[0] = list_mem;
		acc->list_cpu[1] = list_mem + list_size;
		acc->list_dma[0] = list_dma;
		acc->list_dma[1] = list_dma + list_size;
		dev_dbg(kdev->dev, "%s: channel %d, phys %08x, virt %8p\n",
			acc->name, acc->channel, list_dma, list_mem);
	}

	range->ops = &knav_acc_range_ops;
	return 0;
}
EXPORT_SYMBOL_GPL(knav_init_acc_range);
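For reference, the five-cell "accumulator" property parsed above reads, in order: pdsp id, start channel, list entries, pacing mode, and a timeout in microseconds that is divided by ACC_DEFAULT_PERIOD (25 usecs) to give timer_count. A hypothetical node (values invented):

accumulator = <0 36 16 2 50>;	/* pdsp 0, channel 36, 16 list entries,
				 * pacing mode 2, 50 usecs -> timer_count 2 */
multi-queue;			/* optional: one channel serving up to 32 queues */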
1816	drivers/soc/ti/knav_qmss_queue.c	Normal file
File diff suppressed because it is too large