Fixed MTP to work with TWRP

This commit is contained in:
awab228 2018-06-19 23:16:04 +02:00
commit f6dfaef42e
50820 changed files with 20846062 additions and 0 deletions

48
drivers/mailbox/Kconfig Normal file
View file

@ -0,0 +1,48 @@
# Hardware mailbox framework and controller-driver selection.
menuconfig MAILBOX
	bool "Mailbox Hardware Support"
	help
	  Mailbox is a framework to control hardware communication between
	  on-chip processors through queued messages and interrupt driven
	  signals. Say Y if your platform supports hardware mailboxes.

if MAILBOX

config PL320_MBOX
	bool "ARM PL320 Mailbox"
	depends on ARM_AMBA
	help
	  An implementation of the ARM PL320 Interprocessor Communication
	  Mailbox (IPCM), tailored for the Calxeda Highbank. It is used to
	  send short messages between Highbank's A9 cores and the EnergyCore
	  Management Engine, primarily for cpufreq. Say Y here if you want
	  to use the PL320 IPCM support.

config OMAP2PLUS_MBOX
	tristate "OMAP2+ Mailbox framework support"
	depends on ARCH_OMAP2PLUS
	help
	  Mailbox implementation for OMAP family chips with hardware for
	  interprocessor communication involving DSP, IVA1.0 and IVA2 in
	  OMAP2/3; or IPU, IVA HD and DSP in OMAP4/5. Say Y here if you
	  want to use OMAP2+ Mailbox framework support.

config OMAP_MBOX_KFIFO_SIZE
	int "Mailbox kfifo default buffer size (bytes)"
	depends on OMAP2PLUS_MBOX
	default 256
	help
	  Specify the default size of mailbox's kfifo buffers (bytes).
	  This can also be changed at runtime (via the mbox_kfifo_size
	  module parameter).

config PCC
	bool "Platform Communication Channel Driver"
	depends on ACPI
	help
	  ACPI 5.0+ spec defines a generic mode of communication
	  between the OS and a platform such as the BMC. This medium
	  (PCC) is typically used by CPPC (ACPI CPU Performance management),
	  RAS (ACPI reliability protocol) and MPST (ACPI Memory power
	  states). Select this driver if your platform implements the
	  PCC clients mentioned above.

endif

11
drivers/mailbox/Makefile Normal file
View file

@ -0,0 +1,11 @@
# Generic MAILBOX API
obj-$(CONFIG_MAILBOX)		+= mailbox.o

# Controller-specific drivers
obj-$(CONFIG_PL320_MBOX)	+= pl320-ipc.o

obj-$(CONFIG_OMAP2PLUS_MBOX)	+= omap-mailbox.o

obj-$(CONFIG_PCC)		+= pcc.o

# Samsung mailbox drivers are kept in their own subdirectory
obj-y				+= samsung/

485
drivers/mailbox/mailbox.c Normal file
View file

@ -0,0 +1,485 @@
/*
* Mailbox: Common code for Mailbox controllers and users
*
* Copyright (C) 2013-2014 Linaro Ltd.
* Author: Jassi Brar <jassisinghbrar@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/bitops.h>
#include <linux/mailbox_client.h>
#include <linux/mailbox_controller.h>
#include "mailbox.h"
static LIST_HEAD(mbox_cons);
static DEFINE_MUTEX(con_mutex);
static int poll_txdone(unsigned long data);
/* Queue a message into the channel's TX ring buffer.
 * Returns the slot index used, or -ENOBUFS if the ring is full.
 */
static int add_to_rbuf(struct mbox_chan *chan, void *mssg)
{
	unsigned long flags;
	int slot;

	spin_lock_irqsave(&chan->lock, flags);

	/* Reject the message if the ring buffer is already full */
	if (chan->msg_count == MBOX_TX_QUEUE_LEN) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -ENOBUFS;
	}

	/* Store at the next free slot, then advance with wrap-around */
	slot = chan->msg_free;
	chan->msg_data[slot] = mssg;
	chan->msg_count++;
	chan->msg_free = (slot == MBOX_TX_QUEUE_LEN - 1) ? 0 : slot + 1;

	spin_unlock_irqrestore(&chan->lock, flags);

	return slot;
}
/* Try to push the oldest queued message of @chan to the controller.
 * Returns 0 on success, -ENOENT if there is nothing to send (or a
 * request is already in flight), -EIO on a controller send failure,
 * or the result of the TX-done poll when polling is in use.
 *
 * BUGFIX: the original send-failure path dropped chan->lock inside the
 * else-branch and then fell through to the exit label, unlocking the
 * same spinlock a second time. The lock is now released exactly once.
 */
static int msg_submit(struct mbox_chan *chan)
{
	unsigned count, idx;
	unsigned long flags;
	void *data;
	int err = -EBUSY;
	int ret = 0;

	spin_lock_irqsave(&chan->lock, flags);

	if (!chan->msg_count || chan->active_req) {
		ret = -ENOENT;
		goto exit;
	}

	/* Index of the oldest queued message (ring arithmetic) */
	count = chan->msg_count;
	idx = chan->msg_free;
	if (idx >= count)
		idx -= count;
	else
		idx += MBOX_TX_QUEUE_LEN - count;

	data = chan->msg_data[idx];

	/* Give the client a last chance to touch the payload */
	if (chan->cl->tx_prepare)
		chan->cl->tx_prepare(chan->cl, data);
	/* Try to submit a message to the MBOX controller */
	err = chan->mbox->ops->send_data(chan, data);
	if (!err) {
		chan->active_req = data;
		chan->msg_count--;
	} else {
		pr_err("mailbox: cm3 send fail\n");
		ret = -EIO;
	}
exit:
	spin_unlock_irqrestore(&chan->lock, flags);

	/* For polled TX-done controllers, check completion right away */
	if (!err && (chan->txdone_method & TXDONE_BY_POLL)) {
		ret = poll_txdone((unsigned long)chan->mbox);
		if (ret < 0)
			pr_err("%s Do not check polling data\n", __func__);
	}

	return ret;
}
/* Complete the in-flight TX request of @chan with status @r and
 * notify the client (callback and/or blocked sender).
 */
static void tx_tick(struct mbox_chan *chan, int r)
{
	unsigned long flags;
	void *req;

	/* Atomically take ownership of the in-flight request, if any */
	spin_lock_irqsave(&chan->lock, flags);
	req = chan->active_req;
	chan->active_req = NULL;
	spin_unlock_irqrestore(&chan->lock, flags);

	/* Notify the client outside the lock */
	if (req && chan->cl->tx_done)
		chan->cl->tx_done(chan->cl, req, r);

	/* Wake a sender blocked in mbox_send_message() */
	if (chan->cl->tx_block)
		complete(&chan->tx_complete);
}
/* Poll every channel of the controller and tick the TX state machine
 * for each one that has a request in flight.
 *
 * NOTE(review): the convention of ops->last_tx_done() here appears to
 * be 0 == "done OK" and -EIO == "failed"; any other value leaves the
 * request pending. Confirm against the controller implementations.
 */
static int poll_txdone(unsigned long data)
{
	struct mbox_controller *mbox = (struct mbox_controller *)data;
	int txdone;
	int i;
	int ret = 0;

	for (i = 0; i < mbox->num_chans; i++) {
		struct mbox_chan *chan = &mbox->chans[i];

		/* Only channels with an owner and an active request matter */
		if (chan->active_req && chan->cl) {
			txdone = chan->mbox->ops->last_tx_done(chan);
			if (!txdone) {
				tx_tick(chan, MBOX_OK);
				ret = 0;
			} else if (txdone == -EIO) {
				tx_tick(chan, MBOX_ERR);
				ret = -EIO;
			}
		}
	}

	return ret;
}
/**
 * mbox_chan_received_data - A way for controller driver to push data
 *				received from remote to the upper layer.
 * @chan: Pointer to the mailbox channel on which RX happened.
 * @mssg: Client specific message typecasted as void *
 *
 * After startup and before shutdown any data received on the chan
 * is passed on to the API via atomic mbox_chan_received_data().
 * The controller should ACK the RX only after this call returns.
 */
void mbox_chan_received_data(struct mbox_chan *chan, void *mssg)
{
	/* Nothing is buffered: hand the message straight to the client */
	if (!chan->cl->rx_callback)
		return;

	chan->cl->rx_callback(chan->cl, mssg);
}
EXPORT_SYMBOL_GPL(mbox_chan_received_data);
/**
 * mbox_chan_txdone - A way for controller driver to notify the
 *			framework that the last TX has completed.
 * @chan: Pointer to the mailbox chan on which TX happened.
 * @r: Status of last TX - OK or ERROR
 *
 * The controller that has IRQ for TX ACK calls this atomic API
 * to tick the TX state machine. It works only if txdone_irq
 * is set by the controller.
 */
void mbox_chan_txdone(struct mbox_chan *chan, int r)
{
	/* Only valid for controllers that signal TX-done via IRQ */
	if (likely(chan->txdone_method & TXDONE_BY_IRQ)) {
		tx_tick(chan, r);
		return;
	}

	dev_err(chan->mbox->dev,
		"Controller can't run the TX ticker\n");
}
EXPORT_SYMBOL_GPL(mbox_chan_txdone);
/**
 * mbox_client_txdone - The way for a client to run the TX state machine.
 * @chan: Mailbox channel assigned to this client.
 * @r: Success status of last transmission.
 *
 * The client/protocol had received some 'ACK' packet and it notifies
 * the API that the last packet was sent successfully. This only works
 * if the controller can't sense TX-Done.
 */
void mbox_client_txdone(struct mbox_chan *chan, int r)
{
	/* Only valid when the client senses TX-done via protocol ACKs */
	if (likely(chan->txdone_method & TXDONE_BY_ACK)) {
		tx_tick(chan, r);
		return;
	}

	dev_err(chan->mbox->dev, "Client can't run the TX ticker\n");
}
EXPORT_SYMBOL_GPL(mbox_client_txdone);
/**
 * mbox_client_peek_data - A way for client driver to pull data
 *			received from remote by the controller.
 * @chan: Mailbox channel assigned to this client.
 *
 * A poke to controller driver for any received data.
 * The data is actually passed onto client via the
 * mbox_chan_received_data()
 * The call can be made from atomic context, so the controller's
 * implementation of peek_data() must not sleep.
 *
 * Return: True, if controller has, and is going to push after this,
 *         some data.
 *         False, if controller doesn't have any data to be read.
 */
bool mbox_client_peek_data(struct mbox_chan *chan)
{
	/* peek_data is optional; absence means "nothing to read" */
	if (!chan->mbox->ops->peek_data)
		return false;

	return chan->mbox->ops->peek_data(chan);
}
EXPORT_SYMBOL_GPL(mbox_client_peek_data);
/**
 * mbox_send_message -	For client to submit a message to be
 *				sent to the remote.
 * @chan: Mailbox channel assigned to this client.
 * @mssg: Client specific message typecasted.
 *
 * For client to submit data to the controller destined for a remote
 * processor. If the client had set 'tx_block', the call will return
 * either when the remote receives the data or when 'tx_tout' millisecs
 * run out.
 * In non-blocking mode, the requests are buffered by the API and a
 * non-negative token is returned for each queued request. If the request
 * is not queued, a negative token is returned. Upon failure or successful
 * TX, the API calls 'tx_done' from atomic context, from which the client
 * could submit yet another request.
 * The pointer to message should be preserved until it is sent
 * over the chan, i.e, tx_done() is made.
 * This function could be called from atomic context as it simply
 * queues the data and returns a token against the request.
 *
 * Return: Non-negative integer for successful submission (non-blocking mode)
 *	or transmission over chan (blocking mode).
 *	Negative value denotes failure.
 *
 * CLEANUP: the inner shadowed 'int ret' was removed, and the result of
 * wait_for_completion_timeout() (unsigned long) is no longer stored in
 * a signed int.
 */
int mbox_send_message(struct mbox_chan *chan, void *mssg)
{
	int t, ret;

	if (!chan || !chan->cl)
		return -EINVAL;

	t = add_to_rbuf(chan, mssg);
	if (t < 0) {
		dev_err(chan->mbox->dev, "Try increasing MBOX_TX_QUEUE_LEN\n");
		return t;
	}

	ret = msg_submit(chan);
	if (ret)
		return -EIO;

	/* In blocking mode, wait for the controller to finish the TX */
	if (chan->cl->tx_block && chan->active_req) {
		unsigned long wait;

		if (!chan->cl->tx_tout) /* wait forever */
			wait = msecs_to_jiffies(3600000);
		else
			wait = msecs_to_jiffies(chan->cl->tx_tout);

		/* Zero return means the timeout elapsed: fail the request */
		if (wait_for_completion_timeout(&chan->tx_complete, wait) == 0) {
			t = -EIO;
			tx_tick(chan, -EIO);
		}
	}

	return t;
}
EXPORT_SYMBOL_GPL(mbox_send_message);
/**
 * mbox_request_channel - Request a mailbox channel.
 * @cl: Identity of the client requesting the channel.
 * @index: Index of mailbox specifier in 'mboxes' property.
 *
 * The Client specifies its requirements and capabilities while asking for
 * a mailbox channel. It can't be called from atomic context.
 * The channel is exclusively allocated and can't be used by another
 * client before the owner calls mbox_free_channel.
 * After assignment, any packet received on this channel will be
 * handed over to the client via the 'rx_callback'.
 * The framework holds reference to the client, so the mbox_client
 * structure shouldn't be modified until the mbox_free_channel returns.
 *
 * Return: Pointer to the channel assigned to the client if successful.
 *		ERR_PTR for request failure.
 */
struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index)
{
	struct device *dev = cl->dev;
	struct mbox_controller *mbox;
	struct of_phandle_args spec;
	struct mbox_chan *chan;
	unsigned long flags;
	int ret;

	/* Lookup is DT-based: a client without an of_node cannot ask */
	if (!dev || !dev->of_node) {
		pr_debug("%s: No owner device node\n", __func__);
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&con_mutex);

	if (of_parse_phandle_with_args(dev->of_node, "mboxes",
				       "#mbox-cells", index, &spec)) {
		dev_dbg(dev, "%s: can't parse \"mboxes\" property\n", __func__);
		mutex_unlock(&con_mutex);
		return ERR_PTR(-ENODEV);
	}

	/* Default: controller not registered yet, ask caller to retry */
	chan = ERR_PTR(-EPROBE_DEFER);
	list_for_each_entry(mbox, &mbox_cons, node)
		if (mbox->dev->of_node == spec.np) {
			chan = mbox->of_xlate(mbox, &spec);
			break;
		}

	of_node_put(spec.np);

	if (IS_ERR(chan)) {
		mutex_unlock(&con_mutex);
		return chan;
	}

	/* Channels are exclusive: busy if another client already owns it */
	if (chan->cl || !try_module_get(mbox->dev->driver->owner)) {
		dev_dbg(dev, "%s: mailbox not free\n", __func__);
		mutex_unlock(&con_mutex);
		return ERR_PTR(-EBUSY);
	}

	/* Take ownership: reset queue state and bind the client */
	spin_lock_irqsave(&chan->lock, flags);
	chan->msg_free = 0;
	chan->msg_count = 0;
	chan->active_req = NULL;
	chan->cl = cl;
	init_completion(&chan->tx_complete);

	/* A client that senses ACKs itself may tick the machine by ACK */
	if (chan->txdone_method == TXDONE_BY_POLL && cl->knows_txdone)
		chan->txdone_method |= TXDONE_BY_ACK;

	spin_unlock_irqrestore(&chan->lock, flags);

	ret = chan->mbox->ops->startup(chan);
	if (ret) {
		dev_err(dev, "Unable to startup the chan (%d)\n", ret);
		mbox_free_channel(chan);
		chan = ERR_PTR(ret);
	}

	mutex_unlock(&con_mutex);
	return chan;
}
EXPORT_SYMBOL_GPL(mbox_request_channel);
/**
 * mbox_free_channel - The client relinquishes control of a mailbox
 *			channel by this call.
 * @chan: The mailbox channel to be freed.
 */
void mbox_free_channel(struct mbox_chan *chan)
{
	unsigned long flags;

	if (!chan || !chan->cl)
		return;

	/* Let the controller quiesce the channel before unbinding */
	chan->mbox->ops->shutdown(chan);

	/* The queued TX requests are simply aborted, no callbacks are made */
	spin_lock_irqsave(&chan->lock, flags);
	chan->cl = NULL;
	chan->active_req = NULL;
	/* Undo the ACK capability granted for a knows_txdone client */
	if (chan->txdone_method == (TXDONE_BY_POLL | TXDONE_BY_ACK))
		chan->txdone_method = TXDONE_BY_POLL;

	/* NOTE(review): module_put() is called while holding chan->lock;
	 * it does not obviously need the lock — confirm before moving. */
	module_put(chan->mbox->dev->driver->owner);
	spin_unlock_irqrestore(&chan->lock, flags);
}
EXPORT_SYMBOL_GPL(mbox_free_channel);
/* Default #mbox-cells translator: the first cell is a plain channel
 * index into the controller's chans[] array.
 *
 * FIX: sp->args[0] is a u32 from the device tree; after conversion to
 * int a huge value becomes negative, so guard against ind < 0 as well
 * as ind >= num_chans before indexing the array.
 */
static struct mbox_chan *
of_mbox_index_xlate(struct mbox_controller *mbox,
		    const struct of_phandle_args *sp)
{
	int ind = sp->args[0];

	if (ind < 0 || ind >= mbox->num_chans)
		return ERR_PTR(-EINVAL);

	return &mbox->chans[ind];
}
/**
 * mbox_controller_register - Register the mailbox controller
 * @mbox:	Pointer to the mailbox controller.
 *
 * The controller driver registers its communication channels
 */
int mbox_controller_register(struct mbox_controller *mbox)
{
	int i, txdone;

	/* Sanity check */
	if (!mbox || !mbox->dev || !mbox->ops || !mbox->num_chans)
		return -EINVAL;

	/* Decide how TX-done is going to be sensed for every channel */
	if (mbox->txdone_irq)
		txdone = TXDONE_BY_IRQ;
	else if (mbox->txdone_poll)
		txdone = TXDONE_BY_POLL;
	else /* It has to be ACK then */
		txdone = TXDONE_BY_ACK;

	/* NOTE(review): only the timer's .data is set here — no timer
	 * function or expiry is installed, and msg_submit() calls
	 * poll_txdone() directly. Confirm mbox->poll is initialised
	 * elsewhere before mbox_controller_unregister()'s
	 * del_timer_sync() runs. */
	if (txdone == TXDONE_BY_POLL) {
		mbox->poll.data = (unsigned long)mbox;
	}

	for (i = 0; i < mbox->num_chans; i++) {
		struct mbox_chan *chan = &mbox->chans[i];

		chan->cl = NULL;
		chan->mbox = mbox;
		chan->txdone_method = txdone;
		spin_lock_init(&chan->lock);
	}

	/* Fall back to index-based channel lookup if none provided */
	if (!mbox->of_xlate)
		mbox->of_xlate = of_mbox_index_xlate;

	mutex_lock(&con_mutex);
	list_add_tail(&mbox->node, &mbox_cons);
	mutex_unlock(&con_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(mbox_controller_register);
/**
 * mbox_controller_unregister - Unregister the mailbox controller
 * @mbox:	Pointer to the mailbox controller.
 */
void mbox_controller_unregister(struct mbox_controller *mbox)
{
	int i;

	if (!mbox)
		return;

	mutex_lock(&con_mutex);

	/* Unlink first so no new clients can find this controller */
	list_del(&mbox->node);

	/* Force-release every channel still held by a client */
	for (i = 0; i < mbox->num_chans; i++)
		mbox_free_channel(&mbox->chans[i]);

	/* NOTE(review): mbox->poll is never fully initialised in
	 * mbox_controller_register() (only .data is set) — confirm the
	 * timer is set up elsewhere before relying on this sync-delete. */
	if (mbox->txdone_poll)
		del_timer_sync(&mbox->poll);

	mutex_unlock(&con_mutex);
}
EXPORT_SYMBOL_GPL(mbox_controller_unregister);

19
drivers/mailbox/mailbox.h Normal file
View file

@ -0,0 +1,19 @@
/*
 * Mailbox framework internal definitions, shared between mailbox.c
 * and controller drivers.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef __MAILBOX_H
#define __MAILBOX_H

/* Bits of chan->txdone_method: how TX completion is sensed */
#define TXDONE_BY_IRQ BIT(0) /* controller has remote RTR irq */
#define TXDONE_BY_POLL BIT(1) /* controller can read status of last TX */
#define TXDONE_BY_ACK BIT(2) /* S/W ACK received by Client ticks the TX */

/* Status reported to tx_tick() by the polling path */
enum mbox_result {
	MBOX_OK = 0,
	MBOX_ERR,
};

#endif /* __MAILBOX_H */

View file

@ -0,0 +1,849 @@
/*
* OMAP mailbox driver
*
* Copyright (C) 2006-2009 Nokia Corporation. All rights reserved.
* Copyright (C) 2013-2014 Texas Instruments Inc.
*
* Contact: Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
* Suman Anna <s-anna@ti.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA
*
*/
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/kfifo.h>
#include <linux/err.h>
#include <linux/notifier.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/platform_data/mailbox-omap.h>
#include <linux/omap-mailbox.h>
#define MAILBOX_REVISION 0x000
#define MAILBOX_MESSAGE(m) (0x040 + 4 * (m))
#define MAILBOX_FIFOSTATUS(m) (0x080 + 4 * (m))
#define MAILBOX_MSGSTATUS(m) (0x0c0 + 4 * (m))
#define OMAP2_MAILBOX_IRQSTATUS(u) (0x100 + 8 * (u))
#define OMAP2_MAILBOX_IRQENABLE(u) (0x104 + 8 * (u))
#define OMAP4_MAILBOX_IRQSTATUS(u) (0x104 + 0x10 * (u))
#define OMAP4_MAILBOX_IRQENABLE(u) (0x108 + 0x10 * (u))
#define OMAP4_MAILBOX_IRQENABLE_CLR(u) (0x10c + 0x10 * (u))
#define MAILBOX_IRQSTATUS(type, u) (type ? OMAP4_MAILBOX_IRQSTATUS(u) : \
OMAP2_MAILBOX_IRQSTATUS(u))
#define MAILBOX_IRQENABLE(type, u) (type ? OMAP4_MAILBOX_IRQENABLE(u) : \
OMAP2_MAILBOX_IRQENABLE(u))
#define MAILBOX_IRQDISABLE(type, u) (type ? OMAP4_MAILBOX_IRQENABLE_CLR(u) \
: OMAP2_MAILBOX_IRQENABLE(u))
#define MAILBOX_IRQ_NEWMSG(m) (1 << (2 * (m)))
#define MAILBOX_IRQ_NOTFULL(m) (1 << (2 * (m) + 1))
#define MBOX_REG_SIZE 0x120
#define OMAP4_MBOX_REG_SIZE 0x130
#define MBOX_NR_REGS (MBOX_REG_SIZE / sizeof(u32))
#define OMAP4_MBOX_NR_REGS (OMAP4_MBOX_REG_SIZE / sizeof(u32))
/* Register offsets and interrupt bit for one direction of a mailbox */
struct omap_mbox_fifo {
	unsigned long msg;		/* message register offset */
	unsigned long fifo_stat;	/* TX: fifo-full status offset */
	unsigned long msg_stat;		/* RX: message-count status offset */
	unsigned long irqenable;	/* irq enable register offset */
	unsigned long irqstatus;	/* irq status register offset */
	unsigned long irqdisable;	/* irq disable register offset */
	u32 intr_bit;			/* this fifo's bit in the irq regs */
};

/* Software queue buffering messages between IRQ context and consumers */
struct omap_mbox_queue {
	spinlock_t		lock;	/* protects fifo and 'full' */
	struct kfifo		fifo;	/* backing message buffer */
	struct work_struct	work;	/* RX delivery (process context) */
	struct tasklet_struct	tasklet; /* TX drain (softirq context) */
	struct omap_mbox	*mbox;
	bool full;			/* RX throttled: fifo was full */
};

/* One mailbox IP instance, owning several logical mailboxes */
struct omap_mbox_device {
	struct device *dev;
	struct mutex cfg_lock;		/* serializes startup/fini */
	void __iomem *mbox_base;	/* mapped register space */
	u32 num_users;
	u32 num_fifos;
	struct omap_mbox **mboxes;	/* NULL-terminated array */
	struct list_head elem;		/* on omap_mbox_devices list */
};

/* Per-mailbox fifo/irq wiring parsed from DT or platform data */
struct omap_mbox_fifo_info {
	int tx_id;
	int tx_usr;
	int tx_irq;

	int rx_id;
	int rx_usr;
	int rx_irq;

	const char *name;
};

/* One logical mailbox (a TX fifo plus an RX fifo) */
struct omap_mbox {
	const char		*name;
	int			irq;
	struct omap_mbox_queue	*txq, *rxq;
	struct device		*dev;
	struct omap_mbox_device *parent;
	struct omap_mbox_fifo	tx_fifo;
	struct omap_mbox_fifo	rx_fifo;
	u32			ctx[OMAP4_MBOX_NR_REGS]; /* save/restore area */
	u32			intr_type;	/* MBOX_INTR_CFG_TYPE1/2 */
	int			use_count;	/* clients holding this mbox */
	struct blocking_notifier_head	notifier; /* RX message consumers */
};
/* global variables for the mailbox devices */
static DEFINE_MUTEX(omap_mbox_devices_lock);
static LIST_HEAD(omap_mbox_devices);
static unsigned int mbox_kfifo_size = CONFIG_OMAP_MBOX_KFIFO_SIZE;
module_param(mbox_kfifo_size, uint, S_IRUGO);
MODULE_PARM_DESC(mbox_kfifo_size, "Size of omap's mailbox kfifo (bytes)");
static inline
unsigned int mbox_read_reg(struct omap_mbox_device *mdev, size_t ofs)
{
	/* Raw 32-bit MMIO read at byte offset @ofs from the mailbox base */
	void __iomem *reg = mdev->mbox_base + ofs;

	return __raw_readl(reg);
}
static inline
void mbox_write_reg(struct omap_mbox_device *mdev, u32 val, size_t ofs)
{
	/* Raw 32-bit MMIO write at byte offset @ofs from the mailbox base */
	void __iomem *reg = mdev->mbox_base + ofs;

	__raw_writel(val, reg);
}
/* Mailbox FIFO handle functions */
static mbox_msg_t mbox_fifo_read(struct omap_mbox *mbox)
{
	/* Pop one message word from this mailbox's receive FIFO */
	return (mbox_msg_t) mbox_read_reg(mbox->parent, mbox->rx_fifo.msg);
}
static void mbox_fifo_write(struct omap_mbox *mbox, mbox_msg_t msg)
{
	/* Push one message word into this mailbox's transmit FIFO */
	mbox_write_reg(mbox->parent, msg, mbox->tx_fifo.msg);
}
static int mbox_fifo_empty(struct omap_mbox *mbox)
{
	/* RX FIFO is empty when its message-status count reads zero */
	return !mbox_read_reg(mbox->parent, mbox->rx_fifo.msg_stat);
}
static int mbox_fifo_full(struct omap_mbox *mbox)
{
	/* Non-zero fifo-status means the TX FIFO cannot take more */
	return mbox_read_reg(mbox->parent, mbox->tx_fifo.fifo_stat);
}
/* Mailbox IRQ handle functions */
static void ack_mbox_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
{
	struct omap_mbox_fifo *fifo = (irq == IRQ_TX) ?
					&mbox->tx_fifo : &mbox->rx_fifo;

	/* Write the status bit back to clear the pending interrupt */
	mbox_write_reg(mbox->parent, fifo->intr_bit, fifo->irqstatus);

	/* Flush posted write for irq status to avoid spurious interrupts */
	mbox_read_reg(mbox->parent, fifo->irqstatus);
}
static int is_mbox_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
{
	struct omap_mbox_fifo *fifo = (irq == IRQ_TX) ?
					&mbox->tx_fifo : &mbox->rx_fifo;
	u32 enable = mbox_read_reg(mbox->parent, fifo->irqenable);
	u32 status = mbox_read_reg(mbox->parent, fifo->irqstatus);

	/* The interrupt fired iff it is both enabled and pending */
	return (int)(enable & status & fifo->intr_bit);
}
/*
 * message sender
 */
int omap_mbox_msg_send(struct omap_mbox *mbox, mbox_msg_t msg)
{
	struct omap_mbox_queue *mq = mbox->txq;
	int ret = 0, len;

	spin_lock_bh(&mq->lock);

	/* Software queue full: caller must retry later */
	if (kfifo_avail(&mq->fifo) < sizeof(msg)) {
		ret = -ENOMEM;
		goto out;
	}

	/* Fast path: nothing buffered and hardware has room — send now */
	if (kfifo_is_empty(&mq->fifo) && !mbox_fifo_full(mbox)) {
		mbox_fifo_write(mbox, msg);
		goto out;
	}

	/* Slow path: buffer the message; the TX tasklet drains it later */
	len = kfifo_in(&mq->fifo, (unsigned char *)&msg, sizeof(msg));
	WARN_ON(len != sizeof(msg));

	tasklet_schedule(&mbox->txq->tasklet);

out:
	spin_unlock_bh(&mq->lock);
	return ret;
}
EXPORT_SYMBOL(omap_mbox_msg_send);
/* Snapshot all mailbox registers into mbox->ctx for later restore */
void omap_mbox_save_ctx(struct omap_mbox *mbox)
{
	int nr_regs = mbox->intr_type ? OMAP4_MBOX_NR_REGS : MBOX_NR_REGS;
	int i;

	for (i = 0; i < nr_regs; i++) {
		mbox->ctx[i] = mbox_read_reg(mbox->parent, i * sizeof(u32));

		dev_dbg(mbox->dev, "%s: [%02x] %08x\n", __func__,
			i, mbox->ctx[i]);
	}
}
EXPORT_SYMBOL(omap_mbox_save_ctx);
/* Write the register snapshot in mbox->ctx back to the hardware */
void omap_mbox_restore_ctx(struct omap_mbox *mbox)
{
	int nr_regs = mbox->intr_type ? OMAP4_MBOX_NR_REGS : MBOX_NR_REGS;
	int i;

	for (i = 0; i < nr_regs; i++) {
		mbox_write_reg(mbox->parent, mbox->ctx[i], i * sizeof(u32));

		dev_dbg(mbox->dev, "%s: [%02x] %08x\n", __func__,
			i, mbox->ctx[i]);
	}
}
EXPORT_SYMBOL(omap_mbox_restore_ctx);
void omap_mbox_enable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
{
	struct omap_mbox_fifo *fifo = (irq == IRQ_TX) ?
					&mbox->tx_fifo : &mbox->rx_fifo;
	u32 val;

	/* Read-modify-write: set this fifo's bit in the enable register */
	val = mbox_read_reg(mbox->parent, fifo->irqenable);
	val |= fifo->intr_bit;
	mbox_write_reg(mbox->parent, val, fifo->irqenable);
}
EXPORT_SYMBOL(omap_mbox_enable_irq);
void omap_mbox_disable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
{
	struct omap_mbox_fifo *fifo = (irq == IRQ_TX) ?
					&mbox->tx_fifo : &mbox->rx_fifo;
	u32 bit = fifo->intr_bit;
	u32 irqdisable = fifo->irqdisable;

	/*
	 * Read and update the interrupt configuration register for pre-OMAP4.
	 * OMAP4 and later SoCs have a dedicated interrupt disabling register.
	 */
	if (!mbox->intr_type)
		bit = mbox_read_reg(mbox->parent, irqdisable) & ~bit;

	/* On OMAP4+ 'irqdisable' is IRQENABLE_CLR: writing the bit clears it */
	mbox_write_reg(mbox->parent, bit, irqdisable);
}
EXPORT_SYMBOL(omap_mbox_disable_irq);
/* Softirq context: drain the software TX queue into the hardware FIFO */
static void mbox_tx_tasklet(unsigned long tx_data)
{
	struct omap_mbox *mbox = (struct omap_mbox *)tx_data;
	struct omap_mbox_queue *mq = mbox->txq;
	mbox_msg_t msg;
	int ret;

	while (kfifo_len(&mq->fifo)) {
		/* Hardware full: re-arm the not-full irq and stop for now */
		if (mbox_fifo_full(mbox)) {
			omap_mbox_enable_irq(mbox, IRQ_TX);
			break;
		}

		ret = kfifo_out(&mq->fifo, (unsigned char *)&msg,
				sizeof(msg));
		WARN_ON(ret != sizeof(msg));

		mbox_fifo_write(mbox, msg);
	}
}
/*
 * Message receiver(workqueue)
 */
static void mbox_rx_work(struct work_struct *work)
{
	struct omap_mbox_queue *mq =
			container_of(work, struct omap_mbox_queue, work);
	mbox_msg_t msg;
	int len;

	/* Deliver each buffered message to the registered notifiers */
	while (kfifo_len(&mq->fifo) >= sizeof(msg)) {
		len = kfifo_out(&mq->fifo, (unsigned char *)&msg, sizeof(msg));
		WARN_ON(len != sizeof(msg));

		blocking_notifier_call_chain(&mq->mbox->notifier, len,
					     (void *)msg);

		/* Space was freed: un-throttle RX if the irq was disabled */
		spin_lock_irq(&mq->lock);
		if (mq->full) {
			mq->full = false;
			omap_mbox_enable_irq(mq->mbox, IRQ_RX);
		}
		spin_unlock_irq(&mq->lock);
	}
}
/*
 * Mailbox interrupt handler
 */
static void __mbox_tx_interrupt(struct omap_mbox *mbox)
{
	/* TX FIFO has room again: quiesce the irq and resume draining */
	omap_mbox_disable_irq(mbox, IRQ_TX);
	ack_mbox_irq(mbox, IRQ_TX);
	tasklet_schedule(&mbox->txq->tasklet);
}
/* Pull everything out of the hardware RX FIFO into the software queue */
static void __mbox_rx_interrupt(struct omap_mbox *mbox)
{
	struct omap_mbox_queue *mq = mbox->rxq;
	mbox_msg_t msg;
	int len;

	while (!mbox_fifo_empty(mbox)) {
		/* Queue full: throttle RX irq until the worker drains it.
		 * Note the irq is deliberately NOT acked in this case. */
		if (unlikely(kfifo_avail(&mq->fifo) < sizeof(msg))) {
			omap_mbox_disable_irq(mbox, IRQ_RX);
			mq->full = true;
			goto nomem;
		}

		msg = mbox_fifo_read(mbox);

		len = kfifo_in(&mq->fifo, (unsigned char *)&msg, sizeof(msg));
		WARN_ON(len != sizeof(msg));
	}

	/* no more messages in the fifo. clear IRQ source. */
	ack_mbox_irq(mbox, IRQ_RX);
nomem:
	schedule_work(&mbox->rxq->work);
}
/* Shared irq entry point: one line may signal both directions */
static irqreturn_t mbox_interrupt(int irq, void *p)
{
	struct omap_mbox *mbox = p;

	if (is_mbox_irq(mbox, IRQ_TX))
		__mbox_tx_interrupt(mbox);

	if (is_mbox_irq(mbox, IRQ_RX))
		__mbox_rx_interrupt(mbox);

	return IRQ_HANDLED;
}
/* Allocate a software queue with optional RX work and/or TX tasklet.
 * Returns NULL on allocation failure.
 */
static struct omap_mbox_queue *mbox_queue_alloc(struct omap_mbox *mbox,
					void (*work) (struct work_struct *),
					void (*tasklet)(unsigned long))
{
	struct omap_mbox_queue *mq;

	mq = kzalloc(sizeof(*mq), GFP_KERNEL);
	if (!mq)
		return NULL;

	spin_lock_init(&mq->lock);

	/* The backing kfifo is the only other allocation that can fail */
	if (kfifo_alloc(&mq->fifo, mbox_kfifo_size, GFP_KERNEL)) {
		kfree(mq);
		return NULL;
	}

	if (work)
		INIT_WORK(&mq->work, work);

	if (tasklet)
		tasklet_init(&mq->tasklet, tasklet, (unsigned long)mbox);

	return mq;
}
/* Release a queue allocated by mbox_queue_alloc(): kfifo first, then q */
static void mbox_queue_free(struct omap_mbox_queue *q)
{
	kfifo_free(&q->fifo);
	kfree(q);
}
/* Bring a mailbox up for a new user. Queues and the irq are created only
 * for the first user; later users just bump use_count.
 * Returns 0 on success or a negative errno (goto-based unwind).
 */
static int omap_mbox_startup(struct omap_mbox *mbox)
{
	int ret = 0;
	struct omap_mbox_queue *mq;
	struct omap_mbox_device *mdev = mbox->parent;

	mutex_lock(&mdev->cfg_lock);

	/* NOTE(review): on failure pm_runtime_put_noidle() is not called
	 * to balance the usage count taken by get_sync — confirm. */
	ret = pm_runtime_get_sync(mdev->dev);
	if (unlikely(ret < 0))
		goto fail_startup;

	/* First user: create queues and install the irq handler */
	if (!mbox->use_count++) {
		mq = mbox_queue_alloc(mbox, NULL, mbox_tx_tasklet);
		if (!mq) {
			ret = -ENOMEM;
			goto fail_alloc_txq;
		}
		mbox->txq = mq;

		mq = mbox_queue_alloc(mbox, mbox_rx_work, NULL);
		if (!mq) {
			ret = -ENOMEM;
			goto fail_alloc_rxq;
		}
		mbox->rxq = mq;
		mq->mbox = mbox;

		ret = request_irq(mbox->irq, mbox_interrupt, IRQF_SHARED,
				  mbox->name, mbox);
		if (unlikely(ret)) {
			pr_err("failed to register mailbox interrupt:%d\n",
			       ret);
			goto fail_request_irq;
		}

		omap_mbox_enable_irq(mbox, IRQ_RX);
	}
	mutex_unlock(&mdev->cfg_lock);
	return 0;

fail_request_irq:
	mbox_queue_free(mbox->rxq);
fail_alloc_rxq:
	mbox_queue_free(mbox->txq);
fail_alloc_txq:
	pm_runtime_put_sync(mdev->dev);
	mbox->use_count--;
fail_startup:
	mutex_unlock(&mdev->cfg_lock);
	return ret;
}
/* Drop one user of the mailbox; the last user tears down irq and queues */
static void omap_mbox_fini(struct omap_mbox *mbox)
{
	struct omap_mbox_device *mdev = mbox->parent;

	mutex_lock(&mdev->cfg_lock);

	if (!--mbox->use_count) {
		/* Last user: stop RX, then flush deferred work before free */
		omap_mbox_disable_irq(mbox, IRQ_RX);
		free_irq(mbox->irq, mbox);
		tasklet_kill(&mbox->txq->tasklet);
		flush_work(&mbox->rxq->work);
		mbox_queue_free(mbox->txq);
		mbox_queue_free(mbox->rxq);
	}

	pm_runtime_put_sync(mdev->dev);

	mutex_unlock(&mdev->cfg_lock);
}
/* Look up a mailbox by name within one device's NULL-terminated list.
 * Returns the mailbox, or NULL if the name is unknown.
 */
static struct omap_mbox *omap_mbox_device_find(struct omap_mbox_device *mdev,
					       const char *mbox_name)
{
	struct omap_mbox **mboxes = mdev->mboxes;
	int i;

	if (!mboxes)
		return NULL;

	for (i = 0; mboxes[i]; i++) {
		if (!strcmp(mboxes[i]->name, mbox_name))
			return mboxes[i];
	}

	return NULL;
}
/* Acquire a mailbox by name, optionally registering an RX notifier.
 * Returns the mailbox or ERR_PTR(-ENOENT/-ENODEV).
 */
struct omap_mbox *omap_mbox_get(const char *name, struct notifier_block *nb)
{
	struct omap_mbox *mbox = NULL;
	struct omap_mbox_device *mdev;
	int ret;

	/* Search every registered mailbox device for a name match */
	mutex_lock(&omap_mbox_devices_lock);
	list_for_each_entry(mdev, &omap_mbox_devices, elem) {
		mbox = omap_mbox_device_find(mdev, name);
		if (mbox)
			break;
	}
	mutex_unlock(&omap_mbox_devices_lock);

	if (!mbox)
		return ERR_PTR(-ENOENT);

	if (nb)
		blocking_notifier_chain_register(&mbox->notifier, nb);

	ret = omap_mbox_startup(mbox);
	if (ret) {
		/* NOTE(review): called even when nb is NULL; with a NULL
		 * block unregister just fails to find a match — confirm
		 * this is intentionally harmless. */
		blocking_notifier_chain_unregister(&mbox->notifier, nb);
		return ERR_PTR(-ENODEV);
	}

	return mbox;
}
EXPORT_SYMBOL(omap_mbox_get);
/* Release a mailbox taken with omap_mbox_get(): notifier first, then ref */
void omap_mbox_put(struct omap_mbox *mbox, struct notifier_block *nb)
{
	blocking_notifier_chain_unregister(&mbox->notifier, nb);
	omap_mbox_fini(mbox);
}
EXPORT_SYMBOL(omap_mbox_put);
static struct class omap_mbox_class = { .name = "mbox", };
/* Create a class device per mailbox and publish the device on the
 * global list. On mid-loop failure, already-created devices are
 * unregistered before returning the error.
 */
static int omap_mbox_register(struct omap_mbox_device *mdev)
{
	int ret;
	int i;
	struct omap_mbox **mboxes;

	if (!mdev || !mdev->mboxes)
		return -EINVAL;

	mboxes = mdev->mboxes;
	for (i = 0; mboxes[i]; i++) {
		struct omap_mbox *mbox = mboxes[i];
		mbox->dev = device_create(&omap_mbox_class,
				mdev->dev, 0, mbox, "%s", mbox->name);
		if (IS_ERR(mbox->dev)) {
			ret = PTR_ERR(mbox->dev);
			goto err_out;
		}

		BLOCKING_INIT_NOTIFIER_HEAD(&mbox->notifier);
	}

	mutex_lock(&omap_mbox_devices_lock);
	list_add(&mdev->elem, &omap_mbox_devices);
	mutex_unlock(&omap_mbox_devices_lock);

	return 0;

err_out:
	/* Unwind: remove the devices created before the failure */
	while (i--)
		device_unregister(mboxes[i]->dev);
	return ret;
}
/* Reverse of omap_mbox_register(): unlink the device, then drop every
 * class device it created.
 */
static int omap_mbox_unregister(struct omap_mbox_device *mdev)
{
	struct omap_mbox **mboxes;
	int i;

	if (!mdev || !mdev->mboxes)
		return -EINVAL;

	/* Unlink first so no new lookups can find this device */
	mutex_lock(&omap_mbox_devices_lock);
	list_del(&mdev->elem);
	mutex_unlock(&omap_mbox_devices_lock);

	for (mboxes = mdev->mboxes, i = 0; mboxes[i]; i++)
		device_unregister(mboxes[i]->dev);

	return 0;
}
/* DT match table; .data carries the interrupt register layout type
 * (TYPE1 = OMAP2/3 register map, TYPE2 = OMAP4+ register map). */
static const struct of_device_id omap_mailbox_of_match[] = {
	{
		.compatible	= "ti,omap2-mailbox",
		.data		= (void *)MBOX_INTR_CFG_TYPE1,
	},
	{
		.compatible	= "ti,omap3-mailbox",
		.data		= (void *)MBOX_INTR_CFG_TYPE1,
	},
	{
		.compatible	= "ti,omap4-mailbox",
		.data		= (void *)MBOX_INTR_CFG_TYPE2,
	},
	{
		/* end */
	},
};
MODULE_DEVICE_TABLE(of, omap_mailbox_of_match);
static int omap_mbox_probe(struct platform_device *pdev)
{
struct resource *mem;
int ret;
struct omap_mbox **list, *mbox, *mboxblk;
struct omap_mbox_pdata *pdata = pdev->dev.platform_data;
struct omap_mbox_dev_info *info = NULL;
struct omap_mbox_fifo_info *finfo, *finfoblk;
struct omap_mbox_device *mdev;
struct omap_mbox_fifo *fifo;
struct device_node *node = pdev->dev.of_node;
struct device_node *child;
const struct of_device_id *match;
u32 intr_type, info_count;
u32 num_users, num_fifos;
u32 tmp[3];
u32 l;
int i;
if (!node && (!pdata || !pdata->info_cnt || !pdata->info)) {
pr_err("%s: platform not supported\n", __func__);
return -ENODEV;
}
if (node) {
match = of_match_device(omap_mailbox_of_match, &pdev->dev);
if (!match)
return -ENODEV;
intr_type = (u32)match->data;
if (of_property_read_u32(node, "ti,mbox-num-users",
&num_users))
return -ENODEV;
if (of_property_read_u32(node, "ti,mbox-num-fifos",
&num_fifos))
return -ENODEV;
info_count = of_get_available_child_count(node);
if (!info_count) {
dev_err(&pdev->dev, "no available mbox devices found\n");
return -ENODEV;
}
} else { /* non-DT device creation */
info_count = pdata->info_cnt;
info = pdata->info;
intr_type = pdata->intr_type;
num_users = pdata->num_users;
num_fifos = pdata->num_fifos;
}
finfoblk = devm_kzalloc(&pdev->dev, info_count * sizeof(*finfoblk),
GFP_KERNEL);
if (!finfoblk)
return -ENOMEM;
finfo = finfoblk;
child = NULL;
for (i = 0; i < info_count; i++, finfo++) {
if (node) {
child = of_get_next_available_child(node, child);
ret = of_property_read_u32_array(child, "ti,mbox-tx",
tmp, ARRAY_SIZE(tmp));
if (ret)
return ret;
finfo->tx_id = tmp[0];
finfo->tx_irq = tmp[1];
finfo->tx_usr = tmp[2];
ret = of_property_read_u32_array(child, "ti,mbox-rx",
tmp, ARRAY_SIZE(tmp));
if (ret)
return ret;
finfo->rx_id = tmp[0];
finfo->rx_irq = tmp[1];
finfo->rx_usr = tmp[2];
finfo->name = child->name;
} else {
finfo->tx_id = info->tx_id;
finfo->rx_id = info->rx_id;
finfo->tx_usr = info->usr_id;
finfo->tx_irq = info->irq_id;
finfo->rx_usr = info->usr_id;
finfo->rx_irq = info->irq_id;
finfo->name = info->name;
info++;
}
if (finfo->tx_id >= num_fifos || finfo->rx_id >= num_fifos ||
finfo->tx_usr >= num_users || finfo->rx_usr >= num_users)
return -EINVAL;
}
mdev = devm_kzalloc(&pdev->dev, sizeof(*mdev), GFP_KERNEL);
if (!mdev)
return -ENOMEM;
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
mdev->mbox_base = devm_ioremap_resource(&pdev->dev, mem);
if (IS_ERR(mdev->mbox_base))
return PTR_ERR(mdev->mbox_base);
/* allocate one extra for marking end of list */
list = devm_kzalloc(&pdev->dev, (info_count + 1) * sizeof(*list),
GFP_KERNEL);
if (!list)
return -ENOMEM;
mboxblk = devm_kzalloc(&pdev->dev, info_count * sizeof(*mbox),
GFP_KERNEL);
if (!mboxblk)
return -ENOMEM;
mbox = mboxblk;
finfo = finfoblk;
for (i = 0; i < info_count; i++, finfo++) {
fifo = &mbox->tx_fifo;
fifo->msg = MAILBOX_MESSAGE(finfo->tx_id);
fifo->fifo_stat = MAILBOX_FIFOSTATUS(finfo->tx_id);
fifo->intr_bit = MAILBOX_IRQ_NOTFULL(finfo->tx_id);
fifo->irqenable = MAILBOX_IRQENABLE(intr_type, finfo->tx_usr);
fifo->irqstatus = MAILBOX_IRQSTATUS(intr_type, finfo->tx_usr);
fifo->irqdisable = MAILBOX_IRQDISABLE(intr_type, finfo->tx_usr);
fifo = &mbox->rx_fifo;
fifo->msg = MAILBOX_MESSAGE(finfo->rx_id);
fifo->msg_stat = MAILBOX_MSGSTATUS(finfo->rx_id);
fifo->intr_bit = MAILBOX_IRQ_NEWMSG(finfo->rx_id);
fifo->irqenable = MAILBOX_IRQENABLE(intr_type, finfo->rx_usr);
fifo->irqstatus = MAILBOX_IRQSTATUS(intr_type, finfo->rx_usr);
fifo->irqdisable = MAILBOX_IRQDISABLE(intr_type, finfo->rx_usr);
mbox->intr_type = intr_type;
mbox->parent = mdev;
mbox->name = finfo->name;
mbox->irq = platform_get_irq(pdev, finfo->tx_irq);
if (mbox->irq < 0)
return mbox->irq;
list[i] = mbox++;
}
mutex_init(&mdev->cfg_lock);
mdev->dev = &pdev->dev;
mdev->num_users = num_users;
mdev->num_fifos = num_fifos;
mdev->mboxes = list;
ret = omap_mbox_register(mdev);
if (ret)
return ret;
platform_set_drvdata(pdev, mdev);
pm_runtime_enable(mdev->dev);
ret = pm_runtime_get_sync(mdev->dev);
if (ret < 0) {
pm_runtime_put_noidle(mdev->dev);
goto unregister;
}
/*
* just print the raw revision register, the format is not
* uniform across all SoCs
*/
l = mbox_read_reg(mdev, MAILBOX_REVISION);
dev_info(mdev->dev, "omap mailbox rev 0x%x\n", l);
ret = pm_runtime_put_sync(mdev->dev);
if (ret < 0)
goto unregister;
devm_kfree(&pdev->dev, finfoblk);
return 0;
unregister:
pm_runtime_disable(mdev->dev);
omap_mbox_unregister(mdev);
return ret;
}
/*
 * omap_mbox_remove - driver removal hook.
 *
 * Undoes probe: disables runtime PM for the device and unregisters all
 * sub-mailboxes from the omap mailbox class/framework.
 */
static int omap_mbox_remove(struct platform_device *pdev)
{
	struct omap_mbox_device *mdev = platform_get_drvdata(pdev);

	pm_runtime_disable(mdev->dev);
	omap_mbox_unregister(mdev);

	return 0;
}
/* Platform driver glue; bound either by name or by DT compatible match. */
static struct platform_driver omap_mbox_driver = {
	.probe	= omap_mbox_probe,
	.remove	= omap_mbox_remove,
	.driver	= {
		.name = "omap-mailbox",
		.owner = THIS_MODULE,
		.of_match_table = of_match_ptr(omap_mailbox_of_match),
	},
};
/*
 * omap_mbox_init - subsystem init: register the mailbox class and driver.
 *
 * Fix: the original left omap_mbox_class registered when
 * platform_driver_register() failed; unregister it on that error path
 * so a failed init leaves no stale class behind.
 */
static int __init omap_mbox_init(void)
{
	int err;

	err = class_register(&omap_mbox_class);
	if (err)
		return err;

	/* kfifo size sanity check: alignment and minimal size */
	mbox_kfifo_size = ALIGN(mbox_kfifo_size, sizeof(mbox_msg_t));
	mbox_kfifo_size = max_t(unsigned int, mbox_kfifo_size,
				sizeof(mbox_msg_t));

	err = platform_driver_register(&omap_mbox_driver);
	if (err)
		class_unregister(&omap_mbox_class);

	return err;
}
subsys_initcall(omap_mbox_init);
/* Module teardown: mirror of omap_mbox_init() in reverse order. */
static void __exit omap_mbox_exit(void)
{
	platform_driver_unregister(&omap_mbox_driver);
	class_unregister(&omap_mbox_class);
}
module_exit(omap_mbox_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("omap mailbox: interrupt driven messaging");
MODULE_AUTHOR("Toshihiro Kobayashi");
MODULE_AUTHOR("Hiroshi DOYU");

403
drivers/mailbox/pcc.c Normal file
View file

@ -0,0 +1,403 @@
/*
* Copyright (C) 2014 Linaro Ltd.
* Author: Ashwin Chaugule <ashwin.chaugule@linaro.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* PCC (Platform Communication Channel) is defined in the ACPI 5.0+
* specification. It is a mailbox like mechanism to allow clients
* such as CPPC (Collaborative Processor Performance Control), RAS
* (Reliability, Availability and Serviceability) and MPST (Memory
* Node Power State Table) to talk to the platform (e.g. BMC) through
* shared memory regions as defined in the PCC table entries. The PCC
* specification supports a Doorbell mechanism for the PCC clients
* to notify the platform about new data. This Doorbell information
* is also specified in each PCC table entry. See pcc_send_data()
* and pcc_tx_done() for basic mode of operation.
*
* For more details about PCC, please see the ACPI specification from
* http://www.uefi.org/ACPIv5.1 Section 14.
*
* This file implements PCC as a Mailbox controller and allows for PCC
* clients to be implemented as its Mailbox Client Channels.
*/
#include <linux/acpi.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/platform_device.h>
#include <linux/mailbox_controller.h>
#include <linux/mailbox_client.h>
#include "mailbox.h"
#define MAX_PCC_SUBSPACES 256
#define PCCS_SS_SIG_MAGIC 0x50434300
#define PCC_CMD_COMPLETE 0x1
static struct mbox_chan *pcc_mbox_channels;
static struct mbox_controller pcc_mbox_ctrl = {};
/**
* get_pcc_channel - Given a PCC subspace idx, get
* the respective mbox_channel.
* @id: PCC subspace index.
*
* Return: ERR_PTR(errno) if error, else pointer
* to mbox channel.
*/
/**
 * get_pcc_channel - Given a PCC subspace idx, get
 *	the respective mbox_channel.
 * @id: PCC subspace index.
 *
 * Return: ERR_PTR(errno) if error, else pointer
 *	to mbox channel.
 */
static struct mbox_chan *get_pcc_channel(int id)
{
	/*
	 * Valid indices are 0..num_chans-1; the old '>' test let an
	 * out-of-bounds id == num_chans through.
	 */
	if (id < 0 || id >= pcc_mbox_ctrl.num_chans)
		return ERR_PTR(-ENOENT);

	/*
	 * Plain array indexing. The previous open-coded arithmetic cast
	 * the base to 'struct mbox_chan *' *before* adding
	 * id * sizeof(*pcc_chan), so the offset was scaled twice by
	 * pointer arithmetic and every id > 0 pointed past the intended
	 * element.
	 */
	return &pcc_mbox_channels[id];
}
/**
* get_subspace_id - Given a Mailbox channel, find out the
* PCC subspace id.
* @chan: Pointer to Mailbox Channel from which we want
* the index.
* Return: Errno if not found, else positive index number.
*/
static int get_subspace_id(struct mbox_chan *chan)
{
unsigned int id = chan - pcc_mbox_channels;
if (id < 0 || id > pcc_mbox_ctrl.num_chans)
return -ENOENT;
return id;
}
/**
* pcc_mbox_request_channel - PCC clients call this function to
* request a pointer to their PCC subspace, from which they
* can get the details of communicating with the remote.
* @cl: Pointer to Mailbox client, so we know where to bind the
* Channel.
* @subspace_id: The PCC Subspace index as parsed in the PCC client
* ACPI package. This is used to lookup the array of PCC
* subspaces as parsed by the PCC Mailbox controller.
*
* Return: Pointer to the Mailbox Channel if successful or
* ERR_PTR.
*/
struct mbox_chan *pcc_mbox_request_channel(struct mbox_client *cl,
		int subspace_id)
{
	struct device *dev = pcc_mbox_ctrl.dev;
	struct mbox_chan *chan;
	unsigned long flags;

	/*
	 * Each PCC Subspace is a Mailbox Channel.
	 * The PCC Clients get their PCC Subspace ID
	 * from their own tables and pass it here.
	 * This returns a pointer to the PCC subspace
	 * for the Client to operate on.
	 */
	chan = get_pcc_channel(subspace_id);

	/*
	 * get_pcc_channel() signals a bad subspace_id with ERR_PTR(),
	 * never NULL; the old '!chan' test went on to dereference the
	 * error pointer via 'chan->cl'. Check IS_ERR() instead.
	 */
	if (IS_ERR(chan) || chan->cl) {
		dev_err(dev, "%s: PCC mailbox not free\n", __func__);
		return ERR_PTR(-EBUSY);
	}

	spin_lock_irqsave(&chan->lock, flags);
	chan->msg_free = 0;
	chan->msg_count = 0;
	chan->active_req = NULL;
	chan->cl = cl;
	init_completion(&chan->tx_complete);

	/* Client acks tx itself: upgrade polled tx-done with ACK. */
	if (chan->txdone_method == TXDONE_BY_POLL && cl->knows_txdone)
		chan->txdone_method |= TXDONE_BY_ACK;

	spin_unlock_irqrestore(&chan->lock, flags);

	return chan;
}
EXPORT_SYMBOL_GPL(pcc_mbox_request_channel);
/**
* pcc_mbox_free_channel - Clients call this to free their Channel.
*
* @chan: Pointer to the mailbox channel as returned by
* pcc_mbox_request_channel()
*/
void pcc_mbox_free_channel(struct mbox_chan *chan)
{
	unsigned long flags;

	/* Nothing to do for an unbound or invalid channel. */
	if (!chan || !chan->cl)
		return;

	spin_lock_irqsave(&chan->lock, flags);
	chan->cl = NULL;
	chan->active_req = NULL;

	/* Undo the TXDONE_BY_ACK upgrade done in pcc_mbox_request_channel(). */
	if (chan->txdone_method == (TXDONE_BY_POLL | TXDONE_BY_ACK))
		chan->txdone_method = TXDONE_BY_POLL;

	spin_unlock_irqrestore(&chan->lock, flags);
}
EXPORT_SYMBOL_GPL(pcc_mbox_free_channel);
/**
* pcc_tx_done - Callback from Mailbox controller code to
* check if PCC message transmission completed.
* @chan: Pointer to Mailbox channel on which previous
* transmission occurred.
*
* Return: TRUE if succeeded.
*/
static bool pcc_tx_done(struct mbox_chan *chan)
{
	struct acpi_pcct_hw_reduced *pcct_ss = chan->con_priv;
	/*
	 * NOTE(review): base_address from the PCCT entry is a platform
	 * address used here directly as a CPU pointer — this assumes it is
	 * already CPU-addressable; confirm, or ioremap it once at probe.
	 */
	struct acpi_pcct_shared_memory *generic_comm_base =
		(struct acpi_pcct_shared_memory *) pcct_ss->base_address;
	u16 cmd_delay = pcct_ss->latency;	/* per-entry nominal latency */
	unsigned int retries = 0;

	/* Try a few times while waiting for platform to consume */
	while (!(readw_relaxed(&generic_comm_base->status)
		    & PCC_CMD_COMPLETE)) {

		if (retries++ < 5)
			udelay(cmd_delay);
		else {
			/*
			 * If the remote is dead, this will cause the Mbox
			 * controller to timeout after mbox client.tx_tout
			 * msecs.
			 */
			pr_err("PCC platform did not respond.\n");
			return false;
		}
	}
	return true;
}
/**
* pcc_send_data - Called from Mailbox Controller code to finally
* transmit data over channel.
* @chan: Pointer to Mailbox channel over which to send data.
* @data: Actual data to be written over channel.
*
* Return: Err if something failed else 0 for success.
*/
static int pcc_send_data(struct mbox_chan *chan, void *data)
{
	struct acpi_pcct_hw_reduced *pcct_ss = chan->con_priv;
	struct acpi_pcct_shared_memory *generic_comm_base =
		(struct acpi_pcct_shared_memory *) pcct_ss->base_address;
	struct acpi_generic_address doorbell;
	u64 doorbell_preserve;
	u64 doorbell_val;
	u64 doorbell_write;
	u16 cmd = *(u16 *) data;
	/*
	 * Must be a signed int: get_subspace_id() returns -ENOENT on
	 * failure, and the old 'u16 ss_idx' could never be < 0, making
	 * the error check below dead code.
	 */
	int ss_idx;

	ss_idx = get_subspace_id(chan);

	if (ss_idx < 0) {
		pr_err("Invalid Subspace ID from PCC client\n");
		return -EINVAL;
	}

	doorbell = pcct_ss->doorbell_register;
	doorbell_preserve = pcct_ss->preserve_mask;
	doorbell_write = pcct_ss->write_mask;

	/* Write to the shared comm region. */
	writew(cmd, &generic_comm_base->command);

	/* Write Subspace MAGIC value so platform can identify destination. */
	writel((PCCS_SS_SIG_MAGIC | ss_idx), &generic_comm_base->signature);

	/* Flip CMD COMPLETE bit */
	writew(0, &generic_comm_base->status);

	/* Sync notification from OSPM to Platform. */
	acpi_read(&doorbell_val, &doorbell);
	acpi_write((doorbell_val & doorbell_preserve) | doorbell_write,
			&doorbell);

	return 0;
}
/* Controller callbacks; PCC uses polled tx-done (set up in pcc_mbox_probe). */
static struct mbox_chan_ops pcc_chan_ops = {
	.send_data = pcc_send_data,
	.last_tx_done = pcc_tx_done,
};
/**
* parse_pcc_subspace - Parse the PCC table and verify PCC subspace
* entries. There should be one entry per PCC client.
* @header: Pointer to the ACPI subtable header under the PCCT.
* @end: End of subtable entry.
*
* Return: 0 for Success, else errno.
*
* This gets called for each entry in the PCC table.
*/
static int parse_pcc_subspace(struct acpi_subtable_header *header,
		const unsigned long end)
{
	struct acpi_pcct_hw_reduced *pcct_ss;

	/* Entries beyond the supported subspace count are simply ignored. */
	if (pcc_mbox_ctrl.num_chans > MAX_PCC_SUBSPACES)
		return 0;

	pcct_ss = (struct acpi_pcct_hw_reduced *) header;

	/* Only HW-reduced communication subspaces are supported. */
	if (pcct_ss->header.type != ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE) {
		pr_err("Incorrect PCC Subspace type detected\n");
		return -EINVAL;
	}

	return 0;
}
/**
* acpi_pcc_probe - Parse the ACPI tree for the PCCT.
*
* Return: 0 for Success, else errno.
*/
/**
 * acpi_pcc_probe - Parse the ACPI tree for the PCCT.
 *
 * Allocates one mbox_chan per PCCT subspace entry and records each
 * entry pointer in the channel's con_priv for later use by the
 * send/tx-done callbacks.
 *
 * Return: 0 for Success, else errno.
 */
static int __init acpi_pcc_probe(void)
{
	acpi_size pcct_tbl_header_size;
	struct acpi_table_header *pcct_tbl;
	struct acpi_subtable_header *pcct_entry;
	int count, i;
	acpi_status status = AE_OK;

	/* Search for PCCT */
	status = acpi_get_table_with_size(ACPI_SIG_PCCT, 0,
			&pcct_tbl,
			&pcct_tbl_header_size);

	if (ACPI_FAILURE(status) || !pcct_tbl) {
		pr_warn("PCCT header not found.\n");
		return -ENODEV;
	}

	count = acpi_table_parse_entries(ACPI_SIG_PCCT,
			sizeof(struct acpi_table_pcct),
			ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE,
			parse_pcc_subspace, MAX_PCC_SUBSPACES);

	if (count <= 0) {
		pr_err("Error parsing PCC subspaces from PCCT\n");
		return -EINVAL;
	}

	/*
	 * kcalloc instead of open-coded kzalloc(size * count): it
	 * zero-initializes and checks the multiplication for overflow.
	 */
	pcc_mbox_channels = kcalloc(count, sizeof(struct mbox_chan),
				    GFP_KERNEL);
	if (!pcc_mbox_channels) {
		pr_err("Could not allocate space for PCC mbox channels\n");
		return -ENOMEM;
	}

	/* Point to the first PCC subspace entry */
	pcct_entry = (struct acpi_subtable_header *) (
		(unsigned long) pcct_tbl + sizeof(struct acpi_table_pcct));

	/* Each channel remembers its own PCCT subspace entry. */
	for (i = 0; i < count; i++) {
		pcc_mbox_channels[i].con_priv = pcct_entry;
		pcct_entry = (struct acpi_subtable_header *)
			((unsigned long) pcct_entry + pcct_entry->length);
	}

	pcc_mbox_ctrl.num_chans = count;

	pr_info("Detected %d PCC Subspaces\n", pcc_mbox_ctrl.num_chans);

	return 0;
}
/**
* pcc_mbox_probe - Called when we find a match for the
* PCCT platform device. This is purely used to represent
* the PCCT as a virtual device for registering with the
* generic Mailbox framework.
*
* @pdev: Pointer to platform device returned when a match
* is found.
*
* Return: 0 for Success, else errno.
*/
/*
 * pcc_mbox_probe - bind the virtual PCCT platform device and register
 * the parsed PCC subspaces with the generic mailbox core.
 */
static int pcc_mbox_probe(struct platform_device *pdev)
{
	int rc;

	pcc_mbox_ctrl.chans = pcc_mbox_channels;
	pcc_mbox_ctrl.ops = &pcc_chan_ops;
	pcc_mbox_ctrl.txdone_poll = true;
	pcc_mbox_ctrl.txpoll_period = 10;
	pcc_mbox_ctrl.dev = &pdev->dev;

	pr_info("Registering PCC driver as Mailbox controller\n");
	rc = mbox_controller_register(&pcc_mbox_ctrl);
	if (!rc)
		return 0;

	pr_err("Err registering PCC as Mailbox controller: %d\n", rc);
	return -ENODEV;
}
/* Bound to the virtual "PCCT" platform device created in pcc_init(). */
struct platform_driver pcc_mbox_driver = {
	.probe = pcc_mbox_probe,
	.driver = {
		.name = "PCCT",
		.owner = THIS_MODULE,
	},
};
/*
 * pcc_init - parse the PCCT and create/bind the virtual PCCT platform
 * device.
 *
 * Fixes: platform_create_bundle() returns ERR_PTR() on failure, never
 * NULL, so the old '!pcc_pdev' check could not fire; and the channel
 * array allocated by acpi_pcc_probe() leaked on that failure path.
 */
static int __init pcc_init(void)
{
	int ret;
	struct platform_device *pcc_pdev;

	if (acpi_disabled)
		return -ENODEV;

	/* Check if PCC support is available. */
	ret = acpi_pcc_probe();

	if (ret) {
		pr_err("ACPI PCC probe failed.\n");
		return -ENODEV;
	}

	pcc_pdev = platform_create_bundle(&pcc_mbox_driver,
			pcc_mbox_probe, NULL, 0, NULL, 0);

	if (IS_ERR(pcc_pdev)) {
		pr_err("Err creating PCC platform bundle\n");
		kfree(pcc_mbox_channels);
		pcc_mbox_channels = NULL;
		return -ENODEV;
	}

	return 0;
}
device_initcall(pcc_init);

198
drivers/mailbox/pl320-ipc.c Normal file
View file

@ -0,0 +1,198 @@
/*
* Copyright 2012 Calxeda, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/types.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/amba/bus.h>
#include <linux/pl320-ipc.h>
#define IPCMxSOURCE(m) ((m) * 0x40)
#define IPCMxDSET(m) (((m) * 0x40) + 0x004)
#define IPCMxDCLEAR(m) (((m) * 0x40) + 0x008)
#define IPCMxDSTATUS(m) (((m) * 0x40) + 0x00C)
#define IPCMxMODE(m) (((m) * 0x40) + 0x010)
#define IPCMxMSET(m) (((m) * 0x40) + 0x014)
#define IPCMxMCLEAR(m) (((m) * 0x40) + 0x018)
#define IPCMxMSTATUS(m) (((m) * 0x40) + 0x01C)
#define IPCMxSEND(m) (((m) * 0x40) + 0x020)
#define IPCMxDR(m, dr) (((m) * 0x40) + ((dr) * 4) + 0x024)
#define IPCMMIS(irq) (((irq) * 8) + 0x800)
#define IPCMRIS(irq) (((irq) * 8) + 0x804)
#define MBOX_MASK(n) (1 << (n))
#define IPC_TX_MBOX 1
#define IPC_RX_MBOX 2
#define CHAN_MASK(n) (1 << (n))
#define A9_SOURCE 1
#define M3_SOURCE 0
static void __iomem *ipc_base;
static int ipc_irq;
static DEFINE_MUTEX(ipc_m1_lock);
static DECLARE_COMPLETION(ipc_completion);
static ATOMIC_NOTIFIER_HEAD(ipc_notifier);
/* Route mailbox 'mbox' to 'source': set the destination bit and unmask it. */
static inline void set_destination(int source, int mbox)
{
	__raw_writel(CHAN_MASK(source), ipc_base + IPCMxDSET(mbox));
	__raw_writel(CHAN_MASK(source), ipc_base + IPCMxMSET(mbox));
}

/* Reverse of set_destination(): clear the destination bit and mask it. */
static inline void clear_destination(int source, int mbox)
{
	__raw_writel(CHAN_MASK(source), ipc_base + IPCMxDCLEAR(mbox));
	__raw_writel(CHAN_MASK(source), ipc_base + IPCMxMCLEAR(mbox));
}
/* Load the 7 data registers of 'mbox', then trigger the SEND register. */
static void __ipc_send(int mbox, u32 *data)
{
	int word;

	for (word = 0; word < 7; word++)
		__raw_writel(data[word], ipc_base + IPCMxDR(mbox, word));

	__raw_writel(0x1, ipc_base + IPCMxSEND(mbox));
}
/* Drain the 7 data registers of 'mbox' into 'data'; word 1 is the status. */
static u32 __ipc_rcv(int mbox, u32 *data)
{
	int word;

	for (word = 0; word < 7; word++)
		data[word] = __raw_readl(ipc_base + IPCMxDR(mbox, word));

	return data[1];
}
/* Blocking implementation from the A9 side, not usable in interrupts! */
int pl320_ipc_transmit(u32 *data)
{
	int ret;

	mutex_lock(&ipc_m1_lock);

	init_completion(&ipc_completion);
	__ipc_send(IPC_TX_MBOX, data);
	/* Woken from ipc_handler() when the remote clears the SEND register. */
	ret = wait_for_completion_timeout(&ipc_completion,
					msecs_to_jiffies(1000));
	if (ret == 0) {
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Reply is written back in place; data[1] carries the status word. */
	ret = __ipc_rcv(IPC_TX_MBOX, data);
out:
	mutex_unlock(&ipc_m1_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(pl320_ipc_transmit);
/* IRQ handler: acknowledges TX completion and dispatches RX notifications. */
static irqreturn_t ipc_handler(int irq, void *dev)
{
	u32 irq_stat;
	u32 data[7];

	irq_stat = __raw_readl(ipc_base + IPCMMIS(1));
	if (irq_stat & MBOX_MASK(IPC_TX_MBOX)) {
		/* Remote consumed our message: clear SEND, wake the sender. */
		__raw_writel(0, ipc_base + IPCMxSEND(IPC_TX_MBOX));
		complete(&ipc_completion);
	}
	if (irq_stat & MBOX_MASK(IPC_RX_MBOX)) {
		__ipc_rcv(IPC_RX_MBOX, data);
		/* data[0] selects the event, data[1..6] is the payload. */
		atomic_notifier_call_chain(&ipc_notifier, data[0], data + 1);
		/* Writing 2 acknowledges receipt back to the remote. */
		__raw_writel(2, ipc_base + IPCMxSEND(IPC_RX_MBOX));
	}
	return IRQ_HANDLED;
}
/* Subscribe to messages received on the RX mailbox (see ipc_handler()). */
int pl320_ipc_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&ipc_notifier, nb);
}
EXPORT_SYMBOL_GPL(pl320_ipc_register_notifier);

int pl320_ipc_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&ipc_notifier, nb);
}
EXPORT_SYMBOL_GPL(pl320_ipc_unregister_notifier);
/*
 * pl320_probe - map the IPCM registers, hook the IRQ and configure the
 * A9<->M3 TX/RX mailbox pair.
 */
static int pl320_probe(struct amba_device *adev, const struct amba_id *id)
{
	int ret;

	ipc_base = ioremap(adev->res.start, resource_size(&adev->res));
	if (ipc_base == NULL)
		return -ENOMEM;

	/* Make sure no stale send is pending before enabling the IRQ. */
	__raw_writel(0, ipc_base + IPCMxSEND(IPC_TX_MBOX));

	ipc_irq = adev->irq[0];
	ret = request_irq(ipc_irq, ipc_handler, 0, dev_name(&adev->dev), NULL);
	if (ret < 0)
		goto err;

	/* Init slow mailbox */
	__raw_writel(CHAN_MASK(A9_SOURCE),
			ipc_base + IPCMxSOURCE(IPC_TX_MBOX));
	__raw_writel(CHAN_MASK(M3_SOURCE),
			ipc_base + IPCMxDSET(IPC_TX_MBOX));
	__raw_writel(CHAN_MASK(M3_SOURCE) | CHAN_MASK(A9_SOURCE),
		     ipc_base + IPCMxMSET(IPC_TX_MBOX));

	/* Init receive mailbox */
	__raw_writel(CHAN_MASK(M3_SOURCE),
			ipc_base + IPCMxSOURCE(IPC_RX_MBOX));
	__raw_writel(CHAN_MASK(A9_SOURCE),
			ipc_base + IPCMxDSET(IPC_RX_MBOX));
	__raw_writel(CHAN_MASK(M3_SOURCE) | CHAN_MASK(A9_SOURCE),
		     ipc_base + IPCMxMSET(IPC_RX_MBOX));

	return 0;
err:
	iounmap(ipc_base);
	return ret;
}
/* AMBA peripheral ID for the ARM PL320 IPCM. */
static struct amba_id pl320_ids[] = {
	{
		.id	= 0x00041320,
		.mask	= 0x000fffff,
	},
	{ 0, 0 },
};

static struct amba_driver pl320_driver = {
	.drv = {
		.name	= "pl320",
	},
	.id_table	= pl320_ids,
	.probe		= pl320_probe,
};
/* No remove hook: the IPC mapping lives for the lifetime of the kernel. */
static int __init ipc_init(void)
{
	return amba_driver_register(&pl320_driver);
}
module_init(ipc_init);

View file

@ -0,0 +1,66 @@
config EXYNOS_APM
bool "APM Driver"
select MAILBOX
select EXYNOS_MBOX
if EXYNOS_APM
config EXYNOS_MBOX
tristate "Exynos Mailbox Controller"
help
An implementation of the Samsung Interprocessor Communication
Mailbox (IPCM). It is used to send short messages between CortexM3 cores
and the Power Control Processor or Power Coprocessor firmware.
Say Y here if you want to use the Exynos mailbox support.
config EXYNOS8890_APM
bool "EXYNOS8890 APM Driver"
default y
depends on SOC_EXYNOS8890
menu "EXYNOS_CL_DVFS"
config EXYNOS_CL_DVFS_CPU
bool "EXYNOS_CL_DVFS_CPU"
help
CPU closed loop dvfs feature
config EXYNOS_CL_DVFS_G3D
bool "EXYNOS_CL_DVFS_G3D"
help
G3D closed loop dvfs feature
config EXYNOS_CL_DVFS_MIF
bool "EXYNOS_CL_DVFS_MIF"
help
MIF closed loop dvfs feature
endmenu
choice
prompt "Default communication mode"
depends on EXYNOS_MBOX
default EXYNOS_MBOX_DEFAULT_POLLING
help
config EXYNOS_MBOX_DEFAULT_INTERRUPT
bool "Interrupt"
select EXYNOS_MBOX_INTERRUPT
help
This selects interrupt mode.
config EXYNOS_MBOX_DEFAULT_POLLING
bool "Polling"
select EXYNOS_MBOX_POLLING
help
This selects polling mode.
endchoice
config EXYNOS_MBOX_INTERRUPT
depends on EXYNOS_MBOX
tristate "Interrupt mode"
config EXYNOS_MBOX_POLLING
depends on EXYNOS_MBOX
tristate "Polling mode"
config EXYNOS_APM_VOLTAGE_DEBUG
bool "APM Voltage Debug"
default y
endif

View file

@ -0,0 +1,2 @@
# SAMSUNG MAILBOX API
obj-$(CONFIG_EXYNOS8890_APM) += mailbox-exynos8.o apm-exynos.o apm-exynos8890.o

View file

@ -0,0 +1,339 @@
/* linux/arch/arm/mach-exynos/apm-exynos.c
*
* Copyright (c) 2015 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
*
* EXYNOS - APM driver
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/err.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/mailbox_client.h>
#include <linux/suspend.h>
#include <linux/platform_device.h>
#include <linux/mfd/samsung/core.h>
#include <linux/mailbox-exynos.h>
#include <linux/apm-exynos.h>
#include <asm/io.h>
#include <soc/samsung/asv-exynos.h>
/* Add OPS */
struct cl_ops *cl_ops;
int apm_wfi_prepare = APM_ON;
static int cm3_status;
static unsigned int cl_mode_status = CL_ON;
extern struct cl_ops exynos_cl_function_ops;
static DEFINE_MUTEX(cl_lock);
/* Serialize all CL-DVFS mailbox transactions behind one mutex. */
void cl_dvfs_lock(void)
{
	mutex_lock(&cl_lock);
}
EXPORT_SYMBOL_GPL(cl_dvfs_lock);

void cl_dvfs_unlock(void)
{
	mutex_unlock(&cl_lock);
}
EXPORT_SYMBOL_GPL(cl_dvfs_unlock);
/* Routines for PM-transition notifications */
static BLOCKING_NOTIFIER_HEAD(apm_chain_head);

/* Subscribe to APM mode-change events (APM_READY/APM_SLEEP/APM_TIMEOUT...). */
int register_apm_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&apm_chain_head, nb);
}
EXPORT_SYMBOL_GPL(register_apm_notifier);

int unregister_apm_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&apm_chain_head, nb);
}
EXPORT_SYMBOL_GPL(unregister_apm_notifier);
/* Broadcast an APM event and translate the notifier result to an errno. */
int apm_notifier_call_chain(unsigned long val)
{
	return notifier_to_errno(
		blocking_notifier_call_chain(&apm_chain_head, val, NULL));
}
/* exynos_apm_reset_release()
 * exynos_apm_reset_release set PMU register.
 * NOTE(review): cl_ops is only assigned in exynos_apm_probe() (and only
 * when CONFIG_EXYNOS_MBOX is set); calling any of these wrappers earlier
 * dereferences a NULL pointer — confirm probe ordering.
 */
void exynos_apm_reset_release(void)
{
	cl_ops->apm_reset();
}

/* exynos_apm_power_up()
 * exynos_apm_power_up set PMU register.
 */
void exynos_apm_power_up(void)
{
	cl_ops->apm_power_up();
}

/* exynos_apm_power_down()
 * exynos_apm_power_down set PMU register.
 */
void exynos_apm_power_down(void)
{
	cl_ops->apm_power_down();
}
/* exynos_cl_dvfs_setup()
 * Program the CL-DVFS voltage-margin limits and sampling period on the CM3.
 * Lock order is sec_core_lock() -> cl_dvfs_lock(); keep this order
 * everywhere to avoid deadlock.
 */
int exynos_cl_dvfs_setup(unsigned int atlas_cl_limit, unsigned int apollo_cl_limit,
			unsigned int g3d_cl_limit, unsigned int mif_cl_limit, unsigned int cl_period)
{
	int ret = 0;

	sec_core_lock();
	cl_dvfs_lock();
	ret = cl_ops->cl_dvfs_setup(atlas_cl_limit, apollo_cl_limit,
			g3d_cl_limit, mif_cl_limit, cl_period);
	cl_dvfs_unlock();
	sec_core_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(exynos_cl_dvfs_setup);

/* exynos_cl_dvfs_start()
 * cl_dvfs_start means os send cl_dvfs start command.
 * We change voltage and frequency, after that start cl-dvfs.
 * Skipped while CL mode is globally disabled (cl_mode_status == CL_OFF).
 */
int exynos_cl_dvfs_start(unsigned int cl_domain)
{
	int ret = 0;

	sec_core_lock();
	cl_dvfs_lock();
	if (cl_mode_status)
		ret = cl_ops->cl_dvfs_start(cl_domain);
	cl_dvfs_unlock();
	sec_core_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(exynos_cl_dvfs_start);

/* exynos_cl_dvfs_stop()
 * cl_dvfs_stop means os send cl_dvfs stop command to CM3.
 * We need to change voltage and frequency: first, stop cl-dvfs.
 * Skipped while CL mode is globally disabled.
 */
int exynos_cl_dvfs_stop(unsigned int cl_domain, unsigned int level)
{
	int ret = 0;

	sec_core_lock();
	cl_dvfs_lock();
	if (cl_mode_status)
		ret = cl_ops->cl_dvfs_stop(cl_domain, level);
	cl_dvfs_unlock();
	sec_core_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(exynos_cl_dvfs_stop);
/* exynos_cl_dvfs_mode_enable()
 * Globally (re-)enable closed-loop DVFS on the CM3.
 */
int exynos_cl_dvfs_mode_enable(void)
{
	int ret = 0;

	sec_core_lock();
	cl_dvfs_lock();
	ret = cl_ops->cl_dvfs_enable();
	cl_dvfs_unlock();
	sec_core_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(exynos_cl_dvfs_mode_enable);

/* exynos_cl_dvfs_mode_disable()
 * Globally disable closed-loop DVFS on the CM3.
 */
int exynos_cl_dvfs_mode_disable(void)
{
	int ret = 0;

	sec_core_lock();
	cl_dvfs_lock();
	ret = cl_ops->cl_dvfs_disable();
	cl_dvfs_unlock();
	sec_core_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(exynos_cl_dvfs_mode_disable);
/* exynos_g3d_power_on_noti_apm()
 * APM driver notice g3d power on status to CM3.
 */
int exynos_g3d_power_on_noti_apm(void)
{
	int ret = 0;

	sec_core_lock();
	cl_dvfs_lock();
	ret = cl_ops->g3d_power_on();
	cl_dvfs_unlock();
	sec_core_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(exynos_g3d_power_on_noti_apm);

/* exynos_g3d_power_down_noti_apm()
 * APM driver notice g3d power off status to CM3.
 */
int exynos_g3d_power_down_noti_apm(void)
{
	int ret = 0;

	sec_core_lock();
	cl_dvfs_lock();
	ret = cl_ops->g3d_power_down();
	cl_dvfs_unlock();
	sec_core_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(exynos_g3d_power_down_noti_apm);
/* exynos_apm_enter_wfi();
 * This function send CM3 go to WFI message to CM3.
 */
int exynos_apm_enter_wfi(void)
{
	cl_ops->enter_wfi();
	return 0;
}
EXPORT_SYMBOL_GPL(exynos_apm_enter_wfi);
/* seq_file show hook: report the current PMIC communication path. */
static int exynos_cm3_status_show(struct seq_file *buf, void *d)
{
	switch (cm3_status) {
	case HSI2C_MODE:
		seq_printf(buf, "mode : HSI2C \n");
		break;
	case APM_MODE:
		seq_printf(buf, "mode : APM \n");
		break;
	case APM_TIMOUT:
		seq_printf(buf, "mode : HSI2C (CM3 timeout) \n");
		break;
	}

	return 0;
}
/* open() hook for the cm3 status file; wires up exynos_cm3_status_show. */
int cm3_status_open(struct inode *inode, struct file *file)
{
	return single_open(file, exynos_cm3_status_show, inode->i_private);
}
#ifdef CONFIG_EXYNOS_MBOX
/*
 * exynos_apm_function_notifier - track the current PMIC communication mode.
 *
 * Driven by apm_notifier_call_chain(); flips the module-wide
 * cm3_status / apm_wfi_prepare / cl_mode_status state per event.
 */
static int exynos_apm_function_notifier(struct notifier_block *notifier,
						unsigned long pm_event, void *v)
{
	switch (pm_event) {
	case APM_READY:
		/* CM3 is up: switch PMIC traffic from HSI2C to APM. */
		cm3_status = APM_MODE;
		apm_wfi_prepare = APM_OFF;
#ifdef CONFIG_EXYNOS_MBOX_INTERRUPT
		samsung_mbox_enable_irq();
#endif
		pr_info("mailbox: hsi2c -> apm mode \n");
		break;
	case APM_SLEEP:
#ifdef CONFIG_EXYNOS_MBOX_INTERRUPT
		/* IRQ is already disabled if we previously timed out. */
		if (cm3_status != APM_TIMOUT)
			samsung_mbox_disable_irq();
#endif
		if (cm3_status == APM_MODE)
			pr_info("mailbox: apm -> hsi2c mode \n");
		cm3_status = HSI2C_MODE;
		apm_wfi_prepare = APM_ON;
		break;
	case APM_TIMEOUT:
		/* CM3 stopped responding: fall back to HSI2C. */
		cm3_status = APM_TIMOUT;
		apm_wfi_prepare = APM_WFI_TIMEOUT;
#ifdef CONFIG_EXYNOS_MBOX_INTERRUPT
		samsung_mbox_disable_irq();
#endif
		pr_info("mailbox: apm -> hsi2c mode(timeout) \n");
		break;
	case CL_ENABLE:
		cl_mode_status = CL_ON;
		break;
	case CL_DISABLE:
		cl_mode_status = CL_OFF;
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block exynos_apm_notifier = {
	.notifier_call = exynos_apm_function_notifier,
};
#endif
static int exynos_apm_probe(struct platform_device *pdev)
{
#ifdef CONFIG_EXYNOS_MBOX
	/* Hook mode-change notifications and select the SoC callback table. */
	register_apm_notifier(&exynos_apm_notifier);
	cl_ops = &exynos_cl_function_ops;
	exynos_mbox_client_init(&pdev->dev);
#endif
	return 0;
}

static int exynos_apm_remove(struct platform_device *pdev)
{
#ifdef CONFIG_EXYNOS_MBOX
	unregister_apm_notifier(&exynos_apm_notifier);
#endif
	return 0;
}
/* DT match table for the APM node. */
static const struct of_device_id apm_smc_match[] = {
	{ .compatible = "samsung,exynos-apm" },
	{},
};

static struct platform_driver exynos_apm_driver = {
	.probe	= exynos_apm_probe,
	.remove	= exynos_apm_remove,
	.driver	= {
		.name = "exynos-apm-driver",
		.owner = THIS_MODULE,
		.of_match_table = apm_smc_match,
	},
};
/* Registered via fs_initcall, i.e. early in boot. */
static int __init exynos_apm_init(void)
{
	return platform_driver_register(&exynos_apm_driver);
}
fs_initcall(exynos_apm_init);

static void __exit exynos_apm_exit(void)
{
	platform_driver_unregister(&exynos_apm_driver);
}
module_exit(exynos_apm_exit);

View file

@ -0,0 +1,852 @@
/* drivers/mailbox/samsung/apm-exynos8890.c
*
* Copyright (c) 2014 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
*
* EXYNOS8890 - APM driver
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/err.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/mailbox_client.h>
#include <linux/suspend.h>
#include <linux/platform_device.h>
#include <linux/mfd/samsung/core.h>
#include <linux/apm-exynos.h>
#include <linux/mailbox-exynos.h>
#include <asm/io.h>
#include <soc/samsung/asv-exynos.h>
#define PMIC_MIF_OUT (0x1B)
#define PMIC_ATL_OUT (0x1D)
#define PMIC_APO_OUT (0x1F)
#define PMIC_G3D_OUT (0x26)
extern int apm_wfi_prepare;
static DEFINE_MUTEX(cl_mutex);
char* protocol_name;
#ifdef CONFIG_EXYNOS_APM_VOLTAGE_DEBUG
u32 mif_in_voltage;
u32 atl_in_voltage;
u32 apo_in_voltage;
u32 g3d_in_voltage;
#endif
struct mbox_client cl;
/* Bring the CM3 APM core out of reset: mask the config field, set RUN. */
void exynos8890_apm_power_up(void)
{
	u32 tmp;

	tmp = exynos_cortexm3_pmu_read(EXYNOS_PMU_CORTEXM3_APM_CONFIGURATION);
	tmp &= APM_LOCAL_PWR_CFG_RESET;
	tmp |= APM_LOCAL_PWR_CFG_RUN;
	exynos_cortexm3_pmu_write(tmp, EXYNOS_PMU_CORTEXM3_APM_CONFIGURATION);
}

/* Hold the CM3 APM core in reset (RUN bit not set). */
void exynos8890_apm_power_down(void)
{
	u32 tmp;

	/* Reset CORTEX M3 */
	tmp = exynos_cortexm3_pmu_read(EXYNOS_PMU_CORTEXM3_APM_CONFIGURATION);
	tmp &= APM_LOCAL_PWR_CFG_RESET;
	exynos_cortexm3_pmu_write(tmp, EXYNOS_PMU_CORTEXM3_APM_CONFIGURATION);
}
/* exynos8890_apm_reset_release
 * Reset signal release to PMU setting.
 */
void exynos8890_apm_reset_release(void)
{
	unsigned int tmp;

	/* Cortex M3 Interrupt bit clear */
	exynos_mailbox_reg_write(0x0, EXYNOS_MAILBOX_TX_INT);
	exynos_mailbox_reg_write(0x0, EXYNOS_MAILBOX_RX_INT);

	/* Set APM device enable. (The clear-then-set of ENABLE_APM is a
	 * redundant pair; only the set has an effect.) */
	tmp = exynos_cortexm3_pmu_read(EXYNOS_PMU_CORTEXM3_APM_OPTION);
	tmp &= ~ENABLE_APM;
	tmp |= ENABLE_APM;
	exynos_cortexm3_pmu_write(tmp, EXYNOS_PMU_CORTEXM3_APM_OPTION);

	/* Same redundant pattern for the RX semaphore enable bit. */
	tmp = exynos_mailbox_reg_read(EXYNOS_MAILBOX_MRX_SEM);
	tmp &= ~MRX_SEM_ENABLE;
	tmp |= MRX_SEM_ENABLE;
	exynos_mailbox_reg_write(tmp, EXYNOS_MAILBOX_MRX_SEM);
}
/*
 * check_rx_data - validate the reply the CM3 wrote into the RX mailbox.
 *
 * Reads the MBOX_LEN reply words plus the echoed TX command word, then:
 *  - passes APM_GPIO_ERR / PMIC_NO_ACK_ERR straight back to the caller;
 *  - treats multi-byte transfers as success (0);
 *  - for writes, requires buf[1] == APM_RET_SUCESS, else logs and
 *    returns -1;
 *  - for reads (READ_MODE), returns the data word buf[1] itself.
 */
static int check_rx_data(void *msg)
{
	u8 i;
	u32 buf[5] = {0, 0, 0, 0, 0};

	for (i = 0; i < MBOX_LEN; i++)
		buf[i] = exynos_mailbox_reg_read(EXYNOS_MAILBOX_RX(i));

	/* Check return command */
	buf[4] = exynos_mailbox_reg_read(EXYNOS_MAILBOX_TX(0));

	/* Check apm device return value */
	if (buf[1] == APM_GPIO_ERR)
		return APM_GPIO_ERR;

	/* PMIC No ACK return value */
	if (buf[1] == PMIC_NO_ACK_ERR)
		return PMIC_NO_ACK_ERR;

	/* Multi byte condition */
	if ((buf[4] >> MULTI_BYTE_SHIFT) & MULTI_BYTE_MASK) {
		return 0;
	}

	/* Normal condition */
	if (((buf[4] >> COMMAND_SHIFT) & COMMAND_MASK) != READ_MODE) {
		if (buf[1] == APM_RET_SUCESS) {
			return 0;
		} else {
			pr_err("mailbox err : return incorrect\n");
			data_history();
			return -1;
		}
	} else if (((buf[4] >> COMMAND_SHIFT) & COMMAND_MASK) == READ_MODE) {
		return buf[1];
	}

	return 0;
}
EXPORT_SYMBOL_GPL(check_rx_data);
/* Setting channel ack_mode condition */
static void channel_ack_mode(struct mbox_client *client)
{
	client->rx_callback = NULL;
	client->tx_done = NULL;
#ifdef CONFIG_EXYNOS_MBOX_INTERRUPT
	/* Interrupt mode: block the sender until tx completion. */
	client->tx_block = true;
#endif
#ifdef CONFIG_EXYNOS_MBOX_POLLING
	/*
	 * tx_block is a bool; the original assigned NULL here, which
	 * relied on pointer-to-bool conversion. Use false explicitly.
	 */
	client->tx_block = false;
#endif
	client->tx_tout = TIMEOUT;
	client->knows_txdone = false;
}
EXPORT_SYMBOL_GPL(channel_ack_mode);
/*
 * exynos_send_message - request a channel, send one message to the CM3
 * and decode the reply via check_rx_data().
 *
 * Returns 0 on success, ERR_TIMEOUT when the CM3 does not answer (or the
 * GPIO is not routed), ERR_OUT on a bad reply. On the timeout paths the
 * driver falls back to HSI2C by setting apm_wfi_prepare = APM_ON.
 */
static int exynos_send_message(struct mbox_client *mbox_cl, void *msg)
{
	struct mbox_chan *chan;
	int ret;

	chan = mbox_request_channel(mbox_cl, 0);
	if (IS_ERR(chan)) {
		pr_err("mailbox : Did not make a mailbox channel\n");
		return PTR_ERR(chan);
	}

	/* mbox_send_message() returns 0 when the transfer was accepted. */
	if (!mbox_send_message(chan, (void *)msg)) {
		ret = check_rx_data((void *)msg);
		if (ret == APM_GPIO_ERR) {
			pr_err("mailbox : gpio not set to gpio-i2c \n");
			apm_wfi_prepare = APM_ON;
			mbox_free_channel(chan);
			return ERR_TIMEOUT;
		} else if (ret < 0) {
			pr_err("[%s] mailbox send error \n", __func__);
			mbox_free_channel(chan);
			return ERR_OUT;
		}
	} else {
		pr_err("%s : Mailbox timeout\n", __func__);
		pr_err("POLLING status: 0x%x\n", exynos_mailbox_reg_read(EXYNOS_MAILBOX_RX_INT));
		apm_wfi_prepare = APM_ON;
		mbox_free_channel(chan);
		return ERR_TIMEOUT;
	}
	mbox_free_channel(chan);

	return 0;
}
/*
 * exynos_send_message_bulk_read - like exynos_send_message(), used on
 * the multi-byte (bulk read) path; returns ERR_RETRY on a bad reply.
 *
 * Fix: the ERR_RETRY path returned without mbox_free_channel(), leaking
 * the channel (every other exit in this file frees it).
 */
static int exynos_send_message_bulk_read(struct mbox_client *mbox_cl, void *msg)
{
	struct mbox_chan *chan;
	int ret;

	chan = mbox_request_channel(mbox_cl, 0);
	if (IS_ERR(chan)) {
		pr_err("mailbox : Did not make a mailbox channel\n");
		return PTR_ERR(chan);
	}

	if (!mbox_send_message(chan, (void *)msg)) {
		ret = check_rx_data((void *)msg);
		if (ret < 0) {
			pr_err("[%s] mailbox send error \n", __func__);
			mbox_free_channel(chan);	/* was leaked here */
			return ERR_RETRY;
		} else if (ret == APM_GPIO_ERR) {
			/*
			 * NOTE(review): this branch is unreachable if
			 * APM_GPIO_ERR is negative (the ret < 0 test above
			 * catches it first) — confirm the constant's sign.
			 */
			apm_wfi_prepare = APM_ON;
			mbox_free_channel(chan);
			return ERR_TIMEOUT;
		}
	} else {
		pr_err("%s : Mailbox timeout error \n", __func__);
		apm_wfi_prepare = APM_ON;
		mbox_free_channel(chan);
		return ERR_TIMEOUT;
	}
	mbox_free_channel(chan);

	return 0;
}
/*
 * exynos8890_do_cl_dvfs_setup - send the CL-DVFS limit/period init
 * message to the CM3. No-op while the CM3 is unavailable
 * (apm_wfi_prepare != 0).
 */
static int exynos8890_do_cl_dvfs_setup(unsigned int atlas_cl_limit, unsigned int apollo_cl_limit,
					unsigned int g3d_cl_limit, unsigned int mif_cl_limit, unsigned int cl_period)
{
	u32 msg[MBOX_LEN] = {0, 0, 0, 0};
	int ret;

	mutex_lock(&cl_mutex);
	if (apm_wfi_prepare) {
		mutex_unlock(&cl_mutex);
		return 0;
	}

	channel_ack_mode(&cl);
	protocol_name = "setup";	/* for data_history() diagnostics */

	/* Pack the per-domain margin limits and sampling period. */
	msg[0] = (NONE << COMMAND_SHIFT) | (INIT_SET << INIT_MODE_SHIFT);
	msg[1] = (atlas_cl_limit << ATLAS_SHIFT) | (apollo_cl_limit << APOLLO_SHIFT)
		| (g3d_cl_limit << G3D_SHIFT) | (mif_cl_limit << MIF_SHIFT) | (cl_period << PERIOD_SHIFT);
	msg[3] = TX_INTERRUPT_ENABLE;

	ret = exynos_send_message(&cl, msg);
	if (ret == ERR_TIMEOUT || ret == ERR_OUT) {
		data_history();
		goto timeout;
	} else if (ret) {
		goto error;
	}

	mutex_unlock(&cl_mutex);

	return 0;
/* timeout powers the CM3 down and falls through to the error unlock. */
timeout :
	exynos8890_apm_power_down();
	apm_notifier_call_chain(APM_TIMEOUT);
error :
	mutex_unlock(&cl_mutex);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(exynos8890_do_cl_dvfs_setup);

/*
 * exynos8890_do_cl_dvfs_start - tell the CM3 to resume closed-loop DVFS
 * for @cl_domain after an OS-driven voltage/frequency change.
 */
static int exynos8890_do_cl_dvfs_start(unsigned int cl_domain)
{
	u32 msg[MBOX_LEN] = {0, 0, 0, 0};
	int ret;

	mutex_lock(&cl_mutex);
	if (apm_wfi_prepare) {
		mutex_unlock(&cl_mutex);
		return 0;
	}

	channel_ack_mode(&cl);

	/* CL-DVFS[29] start, command mode none */
	msg[0] = CL_DVFS | (NONE << COMMAND_SHIFT);
	msg[3] = ((cl_domain + 1) << CL_DOMAIN_SHIFT) | TX_INTERRUPT_ENABLE;

	if (cl_domain == ID_CL1)
		protocol_name = "cl_start(ATL)--";
	else if (cl_domain == ID_CL0)
		protocol_name = "cl_start(APO)--";
	else if (cl_domain == ID_MIF)
		protocol_name = "cl_start(MIF)--";
	else if (cl_domain == ID_G3D)
		protocol_name = "cl_start(G3D)--";

	ret = exynos_send_message(&cl, msg);
	if (ret == ERR_TIMEOUT || ret == ERR_OUT) {
		data_history();
		goto timeout;
	} else if (ret) {
		goto error;
	}

	mutex_unlock(&cl_mutex);

	return 0;
/* timeout powers the CM3 down and falls through to the error unlock. */
timeout :
	exynos8890_apm_power_down();
	apm_notifier_call_chain(APM_TIMEOUT);
error :
	mutex_unlock(&cl_mutex);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(exynos8890_do_cl_dvfs_start);
/*
 * exynos8890_do_cl_dvfs_mode_enable() - ask the APM firmware to enable
 * CL-DVFS globally (all domains).
 *
 * Returns 0 on success or when the APM is not available; -EINVAL on
 * failure.  On a timeout the APM is powered down and APM_TIMEOUT is
 * broadcast before failing.
 */
static int exynos8890_do_cl_dvfs_mode_enable(void)
{
	u32 cmd[MBOX_LEN] = {0, 0, 0, 0};
	int err;

	mutex_lock(&cl_mutex);
	if (apm_wfi_prepare) {
		/* APM not up: benign no-op */
		mutex_unlock(&cl_mutex);
		return 0;
	}
	channel_ack_mode(&cl);
	protocol_name = "cl_mode_enable";
	/* global CL-DVFS start request, no register command attached */
	cmd[0] = (1 << CL_ALL_START_SHIFT);
	cmd[3] = (TX_INTERRUPT_ENABLE);

	err = exynos_send_message(&cl, cmd);
	if (err == 0) {
		mutex_unlock(&cl_mutex);
		return 0;
	}
	if (err == ERR_TIMEOUT || err == ERR_OUT) {
		/* APM unresponsive: record history, power down, notify */
		data_history();
		exynos8890_apm_power_down();
		apm_notifier_call_chain(APM_TIMEOUT);
	}
	mutex_unlock(&cl_mutex);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(exynos8890_do_cl_dvfs_mode_enable);
/*
 * exynos8890_do_cl_dvfs_mode_disable() - ask the APM firmware to disable
 * CL-DVFS globally (all domains).
 *
 * Returns 0 on success or when the APM is not available; -EINVAL on
 * failure.  On a timeout the APM is powered down and APM_TIMEOUT is
 * broadcast before failing.
 */
static int exynos8890_do_cl_dvfs_mode_disable(void)
{
	u32 cmd[MBOX_LEN] = {0, 0, 0, 0};
	int err;

	mutex_lock(&cl_mutex);
	if (apm_wfi_prepare) {
		/* APM not up: benign no-op */
		mutex_unlock(&cl_mutex);
		return 0;
	}
	channel_ack_mode(&cl);
	protocol_name = "cl_mode_disable";
	/* global CL-DVFS stop request, no register command attached */
	cmd[0] = (1 << CL_ALL_STOP_SHIFT);
	cmd[3] = (TX_INTERRUPT_ENABLE);

	err = exynos_send_message(&cl, cmd);
	if (err == 0) {
		mutex_unlock(&cl_mutex);
		return 0;
	}
	if (err == ERR_TIMEOUT || err == ERR_OUT) {
		/* APM unresponsive: record history, power down, notify */
		data_history();
		exynos8890_apm_power_down();
		apm_notifier_call_chain(APM_TIMEOUT);
	}
	mutex_unlock(&cl_mutex);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(exynos8890_do_cl_dvfs_mode_disable);
/*
 * exynos8890_do_cl_dvfs_stop() - request the APM firmware to stop CL-DVFS
 * for a single power domain at a given level.
 * @cl_domain: domain id (ID_CL0, ID_CL1, ID_MIF or ID_G3D); the firmware
 *             encoding in msg[3] is (domain id + 1).
 * @level:     target level to settle at; for G3D the level is offset by
 *             G3D_LV_OFFSET because the G3D driver does not use levels 0/1.
 *
 * Returns 0 on success or when the APM is not available; -EINVAL on
 * failure.  On a timeout the APM is powered down and APM_TIMEOUT is
 * broadcast before failing.
 */
static int exynos8890_do_cl_dvfs_stop(unsigned int cl_domain, unsigned int level)
{
	u32 msg[MBOX_LEN] = {0, 0, 0, 0};
	int ret = 0;

	mutex_lock(&cl_mutex);
	/* APM not up: benign no-op */
	if (apm_wfi_prepare) {
		mutex_unlock(&cl_mutex);
		return 0;
	}
	if (cl_domain == ID_G3D) {
		/* G3D driver not use level 0, 1 */
		level = level + G3D_LV_OFFSET;
	}
	channel_ack_mode(&cl);
	protocol_name = "cl_stop";
	/* CL-DVFS[29] stop, command mode none */
	msg[0] = (CL_DVFS_OFF << CL_DVFS_SHIFT) | (NONE << COMMAND_SHIFT);
	msg[1] = level;
	msg[3] = ((cl_domain + 1) << CL_DOMAIN_SHIFT) | (TX_INTERRUPT_ENABLE);
	/* refine debug tag per domain for the transaction history */
	if (cl_domain == ID_CL1)
		protocol_name = "cl_stop(ATL)++";
	else if (cl_domain == ID_CL0)
		protocol_name = "cl_stop(APO)++";
	else if (cl_domain == ID_MIF)
		protocol_name = "cl_stop(MIF)++";
	else if (cl_domain == ID_G3D)
		protocol_name = "cl_stop(G3D)++";
	ret = exynos_send_message(&cl, msg);
	if (ret == ERR_TIMEOUT || ret == ERR_OUT) {
		data_history();
		goto timeout;
	} else if (ret) {
		goto error;
	}
	mutex_unlock(&cl_mutex);
	return ret;
	/* out means turn off apm device and then mode change */
timeout :
	exynos8890_apm_power_down();
	apm_notifier_call_chain(APM_TIMEOUT);
	/* NOTE: timeout deliberately falls through into error */
error :
	mutex_unlock(&cl_mutex);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(exynos8890_do_cl_dvfs_stop);
/*
 * exynos8890_do_g3d_power_on_noti_apm() - notify the APM firmware that
 * the G3D (GPU) power domain has been powered on.
 *
 * Returns 0 on success or when the APM is not available; -EINVAL on
 * failure.  On a timeout the APM is powered down and APM_TIMEOUT is
 * broadcast before failing.
 */
static int exynos8890_do_g3d_power_on_noti_apm(void)
{
	u32 cmd[MBOX_LEN] = {0, 0, 0, 0};
	int err;

	mutex_lock(&cl_mutex);
	if (apm_wfi_prepare) {
		/* APM not up: benign no-op */
		mutex_unlock(&cl_mutex);
		return 0;
	}
	channel_ack_mode(&cl);
	protocol_name = "g3d_power_on";
	/* bit 13 carries the G3D power-on notification */
	cmd[3] = TX_INTERRUPT_ENABLE | (1 << 13);

	err = exynos_send_message(&cl, cmd);
	if (err == 0) {
		mutex_unlock(&cl_mutex);
		return 0;
	}
	if (err == ERR_TIMEOUT || err == ERR_OUT) {
		/* APM unresponsive: record history, power down, notify */
		data_history();
		exynos8890_apm_power_down();
		apm_notifier_call_chain(APM_TIMEOUT);
	}
	mutex_unlock(&cl_mutex);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(exynos8890_do_g3d_power_on_noti_apm);
/*
 * exynos8890_do_g3d_power_down_noti_apm() - notify the APM firmware that
 * the G3D (GPU) power domain is about to be powered down.
 *
 * Returns 0 on success or when the APM is not available; -EINVAL on
 * failure.  On a timeout the APM is powered down and APM_TIMEOUT is
 * broadcast before failing.
 */
static int exynos8890_do_g3d_power_down_noti_apm(void)
{
	u32 cmd[MBOX_LEN] = {0, 0, 0, 0};
	int err;

	mutex_lock(&cl_mutex);
	if (apm_wfi_prepare) {
		/* APM not up: benign no-op */
		mutex_unlock(&cl_mutex);
		return 0;
	}
	channel_ack_mode(&cl);
	protocol_name = "g3d_power_off";
	/* bit 12 carries the G3D power-down notification */
	cmd[3] = TX_INTERRUPT_ENABLE | (1 << 12);

	err = exynos_send_message(&cl, cmd);
	if (err == 0) {
		mutex_unlock(&cl_mutex);
		return 0;
	}
	if (err == ERR_TIMEOUT || err == ERR_OUT) {
		/* APM unresponsive: record history, power down, notify */
		data_history();
		exynos8890_apm_power_down();
		apm_notifier_call_chain(APM_TIMEOUT);
	}
	mutex_unlock(&cl_mutex);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(exynos8890_do_g3d_power_down_noti_apm);
/*
 * exynos8890_apm_enter_wfi() - send the "go to WFI" message to the APM
 * CM3 core.
 *
 * This is a best-effort, fire-and-forget notification: the function
 * returns 0 even if the mailbox send times out, so callers can proceed
 * with suspend regardless.  Returns a negative errno only when no
 * mailbox channel could be obtained.
 */
int exynos8890_apm_enter_wfi(void)
{
	u32 msg[MBOX_LEN] = {0, 0, 0, 0};
	struct mbox_chan *chan;

	mutex_lock(&cl_mutex);
	/* APM already timed out: nothing to tell it */
	if (apm_wfi_prepare == APM_TIMEOUT) {
		mutex_unlock(&cl_mutex);
		return 0;
	}
	channel_ack_mode(&cl);
	protocol_name = "enter_wfi";
	/* bit 23 encodes the enter-WFI request */
	msg[0] = 1 << 23;
	msg[3] = TX_INTERRUPT_ENABLE;
	chan = mbox_request_channel(&cl, 0);
	if (IS_ERR(chan)) {
		pr_err("mailbox : Did not make a mailbox channel\n");
		mutex_unlock(&cl_mutex);
		return PTR_ERR(chan);
	}
	/*
	 * Fix: the original discarded the send result in an empty if-body.
	 * Log a timeout so the failure is visible, but keep returning 0 so
	 * caller behavior is unchanged (best-effort semantics).
	 */
	if (mbox_send_message(chan, (void *)msg))
		pr_err("%s : Mailbox timeout error\n", __func__);
	mbox_free_channel(chan);
	mutex_unlock(&cl_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(exynos8890_apm_enter_wfi);
/**
 * exynos8890_apm_update_bits(): read-modify-write a PMIC register through
 * the APM firmware: only the bits selected by @mask are updated.
 * @type: Register pmic section (pm_section(0), rtc_section(1))
 * @reg: register address
 * @mask: bits of the register to modify
 * @value: new value for the masked bits
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.  On a mailbox timeout the APM is powered
 * down and APM_TIMEOUT is broadcast before failing.
 */
int exynos8890_apm_update_bits(unsigned int type, unsigned int reg,
			unsigned int mask, unsigned int value)
{
	u32 msg[MBOX_LEN] = {0, 0, 0, 0};
	int ret;

	mutex_lock(&cl_mutex);
	channel_ack_mode(&cl);
	protocol_name = "update_bits";
	/* CL-DVFS[29] stop, command mode write(0x0), mask mode enable */
	msg[0] = ((type << PM_SECTION_SHIFT) | (MASK << MASK_SHIFT) | (mask));
	msg[1] = reg;
	msg[2] = value;
	msg[3] = TX_INTERRUPT_ENABLE;
	ret = exynos_send_message(&cl, msg);
	if (ret == ERR_TIMEOUT || ret == ERR_OUT) {
		data_history();
		goto timeout;
	} else if (ret) {
		goto error;
	}
	mutex_unlock(&cl_mutex);
	return ret;
timeout :
	exynos8890_apm_power_down();
	apm_notifier_call_chain(APM_TIMEOUT);
	/* NOTE: timeout deliberately falls through into error */
error :
	mutex_unlock(&cl_mutex);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(exynos8890_apm_update_bits);
/**
 * exynos8890_apm_write(): write one PMIC register through the APM firmware.
 * @type: Register pmic section (pm_section(0), rtc_section(1))
 * @reg: register address
 * @value: value to write
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.  On a mailbox timeout the APM is powered
 * down and APM_TIMEOUT is broadcast before failing.
 */
int exynos8890_apm_write(unsigned int type, unsigned int reg, unsigned int value)
{
	u32 msg[MBOX_LEN] = {0, 0, 0, 0};
	int ret;

	mutex_lock(&cl_mutex);
	channel_ack_mode(&cl);
	protocol_name = "write";
	/* CL-DVFS[29] stop, command mode write(0x0) */
	msg[0] = (type << PM_SECTION_SHIFT);
	msg[1] = reg;
	msg[2] = value;
	msg[3] = TX_INTERRUPT_ENABLE;
#ifdef CONFIG_EXYNOS_APM_VOLTAGE_DEBUG
	/*
	 * Debug bookkeeping: mirror the voltage we are about to program so
	 * it can be compared against what the PMIC later reports.
	 * Conversion: raw step * PMIC_STEP + MIN_VOL (uV, presumably --
	 * units not visible here, confirm against the PMIC driver).
	 */
	if (reg == PMIC_MIF_OUT) {
		mif_in_voltage = ((value * (u32)PMIC_STEP) + MIN_VOL);
	} else if (reg == PMIC_ATL_OUT) {
		atl_in_voltage = ((value * (u32)PMIC_STEP) + MIN_VOL);
	} else if (reg == PMIC_APO_OUT) {
		apo_in_voltage = ((value * (u32)PMIC_STEP) + MIN_VOL);
	} else if (reg == PMIC_G3D_OUT) {
		g3d_in_voltage = ((value * (u32)PMIC_STEP) + MIN_VOL);
	}
#endif
	ret = exynos_send_message(&cl, msg);
	if (ret == ERR_TIMEOUT || ret == ERR_OUT) {
		data_history();
		goto timeout;
	} else if (ret) {
		goto error;
	}
	mutex_unlock(&cl_mutex);
	return ret;
timeout :
	exynos8890_apm_power_down();
	apm_notifier_call_chain(APM_TIMEOUT);
	/* NOTE: timeout deliberately falls through into error */
error :
	mutex_unlock(&cl_mutex);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(exynos8890_apm_write);
/**
 * exynos8890_apm_bulk_write(): write @count consecutive bytes starting at
 * @reg through the APM firmware in a single mailbox transaction.
 * @type: Register pmic section (pm_section(0), rtc_section(1))
 * @reg: first register address
 * @buf: bytes to write
 * @count: number of bytes; the payload is packed into msg[1] (bytes 0-3)
 *         and msg[2] (bytes 4-7), so count must not exceed 8
 *         -- TODO confirm callers enforce this, it is not checked here.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.  On a mailbox timeout the APM is powered
 * down and APM_TIMEOUT is broadcast before failing.
 */
int exynos8890_apm_bulk_write(unsigned int type, unsigned char reg, unsigned char *buf, unsigned int count)
{
	u32 msg[MBOX_LEN] = {0, 0, 0, 0};
	unsigned int i;
	int ret;

	mutex_lock(&cl_mutex);
	channel_ack_mode(&cl);
	protocol_name = "bulk_write";
	msg[0] = (type << PM_SECTION_SHIFT) | ((count-1) << MULTI_BYTE_CNT_SHIFT) | reg;
	/* pack buf little-end-first: bytes 0-3 into msg[1], 4-7 into msg[2] */
	for (i = 0; i < count; i++) {
		if (i < BYTE_4)
			msg[1] |= buf[i] << BYTE_SHIFT * i;
		else
			msg[2] |= buf[i] << BYTE_SHIFT * (i - BYTE_4);
	}
	msg[3] = TX_INTERRUPT_ENABLE;
	ret = exynos_send_message(&cl, msg);
	if (ret == ERR_TIMEOUT || ret == ERR_OUT) {
		data_history();
		goto timeout;
	} else if (ret) {
		goto error;
	}
	mutex_unlock(&cl_mutex);
	return ret;
timeout :
	exynos8890_apm_power_down();
	apm_notifier_call_chain(APM_TIMEOUT);
	/* NOTE: timeout deliberately falls through into error */
error :
	mutex_unlock(&cl_mutex);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(exynos8890_apm_bulk_write);
/**
 * exynos8890_apm_read(): read one PMIC register through the APM firmware.
 * @type: Register pmic section (pm_section(0), rtc_section(1))
 * @reg: register address
 * @val: out parameter; receives the value returned by check_rx_data()
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.  On a mailbox timeout or GPIO error the
 * APM is powered down and APM_TIMEOUT is broadcast before failing.
 */
int exynos8890_apm_read(unsigned int type, unsigned int reg, unsigned int *val)
{
	u32 msg[MBOX_LEN] = {0, 0, 0, 0};
	struct mbox_chan *chan;

	mutex_lock(&cl_mutex);
	channel_ack_mode(&cl);
	protocol_name = "read";
	/* CL-DVFS[29] stop, command mode read(0x1) */
	msg[0] = (READ_MODE << COMMAND_SHIFT) | (type << PM_SECTION_SHIFT);
	msg[1] = reg;
	msg[3] = TX_INTERRUPT_ENABLE;
	chan = mbox_request_channel(&cl, 0);
	if (IS_ERR(chan)) {
		pr_err("mailbox : Did not make a mailbox channel\n");
		mutex_unlock(&cl_mutex);
		return PTR_ERR(chan);
	}
	if (!mbox_send_message(chan, (void *)msg)) {
		/*
		 * NOTE(review): check_rx_data()'s return is stored directly
		 * into *val -- presumably it returns the read data for
		 * READ_MODE transactions; only APM_GPIO_ERR is treated as
		 * failure here. Confirm other error returns are impossible
		 * on this path.
		 */
		*val = check_rx_data((void *)msg);
		if (*val == APM_GPIO_ERR) {
			pr_err("%s, gpio error\n", __func__);
			mbox_free_channel(chan);
			data_history();
			goto timeout;
		}
	} else {
		pr_err("%s : Mailbox timeout error \n", __func__);
		apm_wfi_prepare = APM_ON;
		mbox_free_channel(chan);
		data_history();
		goto timeout;
	}
	mbox_free_channel(chan);
	mutex_unlock(&cl_mutex);
	return 0;
timeout :
	exynos8890_apm_power_down();
	apm_notifier_call_chain(APM_TIMEOUT);
	mutex_unlock(&cl_mutex);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(exynos8890_apm_read);
/**
 * exynos8890_apm_bulk_read(): read @count consecutive bytes starting at
 * @reg through the APM firmware in a single mailbox transaction.
 * @type: Register pmic section (pm_section(0), rtc_section(1))
 * @reg: first register address
 * @buf: receives the bytes read; bytes 0-3 come from RX word 1, bytes
 *       4-7 from RX word 2, so count must not exceed 8
 *       -- TODO confirm callers enforce this, it is not checked here.
 * @count: number of bytes to read
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.  On a mailbox timeout the APM is powered
 * down and APM_TIMEOUT is broadcast before failing.
 */
int exynos8890_apm_bulk_read(unsigned int type, unsigned char reg, unsigned char *buf, unsigned int count)
{
	u32 msg[MBOX_LEN] = {0, 0, 0, 0};
	u32 result[2] = {0, 0};
	unsigned int i;
	int ret;

	mutex_lock(&cl_mutex);
	channel_ack_mode(&cl);
	protocol_name = "bulk_read";
	msg[0] = (READ_MODE << COMMAND_SHIFT) | (type << PM_SECTION_SHIFT)
		| ((count-1) << MULTI_BYTE_CNT_SHIFT) | (reg);
	msg[3] = TX_INTERRUPT_ENABLE;
	ret = exynos_send_message_bulk_read(&cl, msg);
	if (ret == ERR_TIMEOUT) {
		data_history();
		goto timeout;
	} else if (ret) {
		/*
		 * Fix: the original declared ret as unsigned int and only
		 * handled ERR_TIMEOUT, so a failed channel request
		 * (negative errno) or ERR_RETRY fell through and stale
		 * mailbox registers were returned as valid data.  Fail
		 * like the sibling helpers do instead.
		 */
		mutex_unlock(&cl_mutex);
		return -EINVAL;
	}
	result[0] = exynos_mailbox_reg_read(EXYNOS_MAILBOX_RX(1));
	result[1] = exynos_mailbox_reg_read(EXYNOS_MAILBOX_RX(2));
	/* unpack little-end-first: bytes 0-3 from word 1, 4-7 from word 2 */
	for (i = 0; i < count; i++) {
		if (i < BYTE_4)
			buf[i] = (result[0] >> i * BYTE_SHIFT) & BYTE_MASK;
		else
			buf[i] = (result[1] >> (i - BYTE_4) * BYTE_SHIFT) & BYTE_MASK;
	}
	mutex_unlock(&cl_mutex);
	return 0;
timeout :
	exynos8890_apm_power_down();
	apm_notifier_call_chain(APM_TIMEOUT);
	mutex_unlock(&cl_mutex);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(exynos8890_apm_bulk_read);
/*
 * exynos_mbox_client_init() - bind the device that owns the mailbox
 * client; must be called before any of the messaging helpers above.
 * @dev: device to associate with the global mailbox client 'cl'
 */
void exynos_mbox_client_init(struct device *dev)
{
	cl.dev = dev;
}
EXPORT_SYMBOL_GPL(exynos_mbox_client_init);
/* CL-DVFS / APM power-management operations exported to the generic layer. */
struct cl_ops exynos_cl_function_ops = {
	.cl_dvfs_setup = exynos8890_do_cl_dvfs_setup,
	.cl_dvfs_start = exynos8890_do_cl_dvfs_start,
	.cl_dvfs_stop = exynos8890_do_cl_dvfs_stop,
	.cl_dvfs_enable = exynos8890_do_cl_dvfs_mode_enable,
	.cl_dvfs_disable = exynos8890_do_cl_dvfs_mode_disable,
	.g3d_power_on = exynos8890_do_g3d_power_on_noti_apm,
	.g3d_power_down = exynos8890_do_g3d_power_down_noti_apm,
	.enter_wfi = exynos8890_apm_enter_wfi,
	.apm_reset = exynos8890_apm_reset_release,
	.apm_power_up = exynos8890_apm_power_up,
	.apm_power_down = exynos8890_apm_power_down,
};
/* PMIC register-access operations routed through the APM firmware. */
struct apm_ops exynos_apm_function_ops = {
	.apm_update_bits = exynos8890_apm_update_bits,
	.apm_write = exynos8890_apm_write,
	.apm_bulk_write = exynos8890_apm_bulk_write,
	.apm_read = exynos8890_apm_read,
	.apm_bulk_read = exynos8890_apm_bulk_read,
};

File diff suppressed because it is too large Load diff