Fixed MTP to work with TWRP

awab228 2018-06-19 23:16:04 +02:00
commit f6dfaef42e
50,820 changed files with 20,846,062 additions and 0 deletions

drivers/hwspinlock/Kconfig

@@ -0,0 +1,32 @@
#
# Generic HWSPINLOCK framework
#
# HWSPINLOCK always gets selected by whoever wants it.
config HWSPINLOCK
tristate
menu "Hardware Spinlock drivers"
config HWSPINLOCK_OMAP
tristate "OMAP Hardware Spinlock device"
depends on ARCH_OMAP4 || SOC_OMAP5 || SOC_DRA7XX || SOC_AM33XX || SOC_AM43XX
select HWSPINLOCK
help
Say y here to support the OMAP Hardware Spinlock device (first
introduced in OMAP4).
If unsure, say N.
config HSEM_U8500
tristate "STE Hardware Semaphore functionality"
depends on ARCH_U8500
select HWSPINLOCK
help
Say y here to support the STE Hardware Semaphore functionality, which
provides a synchronisation mechanism for the various processors on the
SoC.
If unsure, say N.
endmenu
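#
# Illustrative sketch (not part of this commit): a new driver entry would
# sit inside the "Hardware Spinlock drivers" menu above and select the
# hidden HWSPINLOCK symbol, exactly like the two real entries do. The
# MYPLAT names below are placeholders.
#
# config HWSPINLOCK_MYPLAT
#	tristate "MYPLAT Hardware Spinlock device"
#	depends on ARCH_MYPLAT
#	select HWSPINLOCK
#	help
#	  Say y here to support the MYPLAT hardware spinlock device.
#	  If unsure, say N.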

drivers/hwspinlock/Makefile

@@ -0,0 +1,7 @@
#
# Generic Hardware Spinlock framework
#
obj-$(CONFIG_HWSPINLOCK) += hwspinlock_core.o
obj-$(CONFIG_HWSPINLOCK_OMAP) += omap_hwspinlock.o
obj-$(CONFIG_HSEM_U8500) += u8500_hsem.o
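# Illustrative sketch (not part of this commit): the hypothetical MYPLAT
# Kconfig entry above would need a matching object rule here:
# obj-$(CONFIG_HWSPINLOCK_MYPLAT) += myplat_hwspinlock.o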

drivers/hwspinlock/hwspinlock_core.c

@@ -0,0 +1,601 @@
/*
* Hardware spinlock framework
*
* Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com
*
* Contact: Ohad Ben-Cohen <ohad@wizery.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#define pr_fmt(fmt) "%s: " fmt, __func__
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/err.h>
#include <linux/jiffies.h>
#include <linux/radix-tree.h>
#include <linux/hwspinlock.h>
#include <linux/pm_runtime.h>
#include <linux/mutex.h>
#include "hwspinlock_internal.h"
/* radix tree tags */
#define HWSPINLOCK_UNUSED (0) /* tags an hwspinlock as unused */
/*
* A radix tree is used to maintain the available hwspinlock instances.
* The tree associates hwspinlock pointers with their integer key id,
* and provides easy-to-use API which makes the hwspinlock core code simple
* and easy to read.
*
* Radix trees are quick on lookups, and reasonably efficient in terms of
* storage, especially with high density usages such as this framework
* requires (a continuous range of integer keys, beginning with zero, is
* used as the IDs of the hwspinlock instances).
*
* The radix tree API supports tagging items in the tree, which this
* framework uses to mark unused hwspinlock instances (see the
* HWSPINLOCK_UNUSED tag above). As a result, the process of querying the
* tree, looking for an unused hwspinlock instance, is now reduced to a
* single radix tree API call.
*/
static RADIX_TREE(hwspinlock_tree, GFP_KERNEL);
/*
* Synchronization of access to the tree is achieved using this mutex,
* as the radix-tree API requires that users provide all synchronisation.
* A mutex is needed because we're using non-atomic radix tree allocations.
*/
static DEFINE_MUTEX(hwspinlock_tree_lock);
/**
* __hwspin_trylock() - attempt to lock a specific hwspinlock
* @hwlock: an hwspinlock which we want to trylock
* @mode: controls whether local interrupts are disabled or not
* @flags: a pointer where the caller's interrupt state will be saved (if
* requested)
*
* This function attempts to lock an hwspinlock, and will immediately
* fail if the hwspinlock is already taken.
*
* Upon a successful return from this function, preemption (and possibly
* interrupts) is disabled, so the caller must not sleep, and is advised to
* release the hwspinlock as soon as possible. This is required in order to
* minimize remote cores polling on the hardware interconnect.
*
* The user decides whether local interrupts are disabled or not, and if yes,
* whether he wants their previous state to be saved. It is up to the user
* to choose the appropriate @mode of operation, exactly the same way users
* should decide between spin_trylock, spin_trylock_irq and
* spin_trylock_irqsave.
*
* Returns 0 if we successfully locked the hwspinlock or -EBUSY if
* the hwspinlock was already taken.
* This function will never sleep.
*/
int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
int ret;
BUG_ON(!hwlock);
BUG_ON(!flags && mode == HWLOCK_IRQSTATE);
/*
* This spin_lock{_irq, _irqsave} serves three purposes:
*
* 1. Disable preemption, in order to minimize the period of time
* in which the hwspinlock is taken. This is important in order
* to minimize the possible polling on the hardware interconnect
* by a remote user of this lock.
* 2. Make the hwspinlock SMP-safe (so we can take it from
* additional contexts on the local host).
* 3. Ensure that in_atomic/might_sleep checks catch potential
* problems with hwspinlock usage (e.g. scheduler checks like
* 'scheduling while atomic' etc.)
*/
if (mode == HWLOCK_IRQSTATE)
ret = spin_trylock_irqsave(&hwlock->lock, *flags);
else if (mode == HWLOCK_IRQ)
ret = spin_trylock_irq(&hwlock->lock);
else
ret = spin_trylock(&hwlock->lock);
/* is lock already taken by another context on the local cpu? */
if (!ret)
return -EBUSY;
/* try to take the hwspinlock device */
ret = hwlock->bank->ops->trylock(hwlock);
/* if hwlock is already taken, undo spin_trylock_* and exit */
if (!ret) {
if (mode == HWLOCK_IRQSTATE)
spin_unlock_irqrestore(&hwlock->lock, *flags);
else if (mode == HWLOCK_IRQ)
spin_unlock_irq(&hwlock->lock);
else
spin_unlock(&hwlock->lock);
return -EBUSY;
}
/*
* We can be sure the other core's memory operations
* are observable to us only _after_ we successfully take
* the hwspinlock, and we must make sure that subsequent memory
* operations (both reads and writes) will not be reordered before
* we actually took the hwspinlock.
*
* Note: the implicit memory barrier of the spinlock above is too
* early, so we need this additional explicit memory barrier.
*/
mb();
return 0;
}
EXPORT_SYMBOL_GPL(__hwspin_trylock);
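/*
 * Illustrative usage sketch (not part of this file): callers normally go
 * through the static inline wrappers in include/linux/hwspinlock.h
 * (hwspin_trylock(), hwspin_trylock_irq(), hwspin_trylock_irqsave()),
 * which fill in @mode. For example, assuming @hwlock was previously
 * requested:
 */
#if 0	/* example only */
static int example_try_once(struct hwspinlock *hwlock)
{
	unsigned long flags;
	int ret;

	/* a single attempt; never sleeps, returns -EBUSY if already taken */
	ret = hwspin_trylock_irqsave(hwlock, &flags);
	if (ret)
		return ret;

	/* short critical section: preemption and local irqs are disabled */

	hwspin_unlock_irqrestore(hwlock, &flags);
	return 0;
}
#endif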
/**
* __hwspin_lock_timeout() - lock an hwspinlock with timeout limit
* @hwlock: the hwspinlock to be locked
* @timeout: timeout value in msecs
* @mode: mode which controls whether local interrupts are disabled or not
* @flags: a pointer to where the caller's interrupt state will be saved (if
* requested)
*
* This function locks the given @hwlock. If the @hwlock
* is already taken, the function will busy loop waiting for it to
* be released, but give up after @timeout msecs have elapsed.
*
* Upon a successful return from this function, preemption is disabled
* (and possibly local interrupts, too), so the caller must not sleep,
* and is advised to release the hwspinlock as soon as possible.
* This is required in order to minimize remote cores polling on the
* hardware interconnect.
*
* The user decides whether local interrupts are disabled or not, and if yes,
* whether he wants their previous state to be saved. It is up to the user
* to choose the appropriate @mode of operation, exactly the same way users
* should decide between spin_lock, spin_lock_irq and spin_lock_irqsave.
*
* Returns 0 when the @hwlock was successfully taken, and an appropriate
* error code otherwise (most notably -ETIMEDOUT if the @hwlock is still
* busy after @timeout msecs). The function will never sleep.
*/
int __hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to,
int mode, unsigned long *flags)
{
int ret;
unsigned long expire;
expire = msecs_to_jiffies(to) + jiffies;
for (;;) {
/* Try to take the hwspinlock */
ret = __hwspin_trylock(hwlock, mode, flags);
if (ret != -EBUSY)
break;
/*
* The lock is already taken, let's check if the user wants
* us to try again
*/
if (time_is_before_eq_jiffies(expire))
return -ETIMEDOUT;
/*
* Allow platform-specific relax handlers to prevent
* hogging the interconnect (no sleeping, though)
*/
if (hwlock->bank->ops->relax)
hwlock->bank->ops->relax(hwlock);
}
return ret;
}
EXPORT_SYMBOL_GPL(__hwspin_lock_timeout);
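/*
 * Illustrative usage sketch (not part of this file): the timeout variants
 * are likewise reached through wrappers such as hwspin_lock_timeout():
 */
#if 0	/* example only */
static int example_lock_with_timeout(struct hwspinlock *hwlock)
{
	int ret;

	/* busy-wait for up to 10 msecs, then give up with -ETIMEDOUT */
	ret = hwspin_lock_timeout(hwlock, 10);
	if (ret)
		return ret;

	/* short, non-sleeping critical section */

	hwspin_unlock(hwlock);
	return 0;
}
#endif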
/**
* __hwspin_unlock() - unlock a specific hwspinlock
* @hwlock: a previously-acquired hwspinlock which we want to unlock
* @mode: controls whether local interrupts need to be restored or not
* @flags: previous caller's interrupt state to restore (if requested)
*
* This function will unlock a specific hwspinlock, enable preemption and
* (possibly) enable interrupts or restore their previous state.
* @hwlock must be already locked before calling this function: it is a bug
* to call unlock on a @hwlock that is already unlocked.
*
* The user decides whether local interrupts should be enabled or not, and
* if yes, whether he wants their previous state to be restored. It is up
* to the user to choose the appropriate @mode of operation, exactly the
* same way users decide between spin_unlock, spin_unlock_irq and
* spin_unlock_irqrestore.
*
* The function will never sleep.
*/
void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
BUG_ON(!hwlock);
BUG_ON(!flags && mode == HWLOCK_IRQSTATE);
/*
* We must make sure that memory operations (both reads and writes),
* done before unlocking the hwspinlock, will not be reordered
* after the lock is released.
*
* That's the purpose of this explicit memory barrier.
*
* Note: the memory barrier induced by the spin_unlock below is too
* late; the other core is going to access memory soon after it will
* take the hwspinlock, and by then we want to be sure our memory
* operations are already observable.
*/
mb();
hwlock->bank->ops->unlock(hwlock);
/* Undo the spin_trylock{_irq, _irqsave} called while locking */
if (mode == HWLOCK_IRQSTATE)
spin_unlock_irqrestore(&hwlock->lock, *flags);
else if (mode == HWLOCK_IRQ)
spin_unlock_irq(&hwlock->lock);
else
spin_unlock(&hwlock->lock);
}
EXPORT_SYMBOL_GPL(__hwspin_unlock);
static int hwspin_lock_register_single(struct hwspinlock *hwlock, int id)
{
struct hwspinlock *tmp;
int ret;
mutex_lock(&hwspinlock_tree_lock);
ret = radix_tree_insert(&hwspinlock_tree, id, hwlock);
if (ret) {
if (ret == -EEXIST)
pr_err("hwspinlock id %d already exists!\n", id);
goto out;
}
/* mark this hwspinlock as available */
tmp = radix_tree_tag_set(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
/* self-sanity check which should never fail */
WARN_ON(tmp != hwlock);
out:
mutex_unlock(&hwspinlock_tree_lock);
return ret;
}
static struct hwspinlock *hwspin_lock_unregister_single(unsigned int id)
{
struct hwspinlock *hwlock = NULL;
int ret;
mutex_lock(&hwspinlock_tree_lock);
/* make sure the hwspinlock is not in use (tag is set) */
ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
if (ret == 0) {
pr_err("hwspinlock %d still in use (or not present)\n", id);
goto out;
}
hwlock = radix_tree_delete(&hwspinlock_tree, id);
if (!hwlock) {
pr_err("failed to delete hwspinlock %d\n", id);
goto out;
}
out:
mutex_unlock(&hwspinlock_tree_lock);
return hwlock;
}
/**
* hwspin_lock_register() - register a new hw spinlock device
* @bank: the hwspinlock device, which usually provides numerous hw locks
* @dev: the backing device
* @ops: hwspinlock handlers for this device
* @base_id: id of the first hardware spinlock in this bank
* @num_locks: number of hwspinlocks provided by this device
*
* This function should be called from the underlying platform-specific
* implementation, to register a new hwspinlock device instance.
*
* Should be called from a process context (might sleep)
*
* Returns 0 on success, or an appropriate error code on failure
*/
int hwspin_lock_register(struct hwspinlock_device *bank, struct device *dev,
const struct hwspinlock_ops *ops, int base_id, int num_locks)
{
struct hwspinlock *hwlock;
int ret = 0, i;
if (!bank || !ops || !dev || !num_locks || !ops->trylock ||
!ops->unlock) {
pr_err("invalid parameters\n");
return -EINVAL;
}
bank->dev = dev;
bank->ops = ops;
bank->base_id = base_id;
bank->num_locks = num_locks;
for (i = 0; i < num_locks; i++) {
hwlock = &bank->lock[i];
spin_lock_init(&hwlock->lock);
hwlock->bank = bank;
ret = hwspin_lock_register_single(hwlock, base_id + i);
if (ret)
goto reg_failed;
}
return 0;
reg_failed:
while (--i >= 0)
hwspin_lock_unregister_single(base_id + i);
return ret;
}
EXPORT_SYMBOL_GPL(hwspin_lock_register);
/**
* hwspin_lock_unregister() - unregister an hw spinlock device
* @bank: the hwspinlock device, which usually provides numerous hw locks
*
* This function should be called from the underlying platform-specific
* implementation, to unregister an existing (and unused) hwspinlock.
*
* Should be called from a process context (might sleep)
*
* Returns 0 on success, or an appropriate error code on failure
*/
int hwspin_lock_unregister(struct hwspinlock_device *bank)
{
struct hwspinlock *hwlock, *tmp;
int i;
for (i = 0; i < bank->num_locks; i++) {
hwlock = &bank->lock[i];
tmp = hwspin_lock_unregister_single(bank->base_id + i);
if (!tmp)
return -EBUSY;
/* self-sanity check that should never fail */
WARN_ON(tmp != hwlock);
}
return 0;
}
EXPORT_SYMBOL_GPL(hwspin_lock_unregister);
/**
* __hwspin_lock_request() - tag an hwspinlock as used and power it up
*
* This is an internal function that prepares an hwspinlock instance
* before it is given to the user. The function assumes that
* hwspinlock_tree_lock is taken.
*
* Returns 0 or positive to indicate success, and a negative value to
* indicate an error (with the appropriate error code)
*/
static int __hwspin_lock_request(struct hwspinlock *hwlock)
{
struct device *dev = hwlock->bank->dev;
struct hwspinlock *tmp;
int ret;
/* prevent underlying implementation from being removed */
if (!try_module_get(dev->driver->owner)) {
dev_err(dev, "%s: can't get owner\n", __func__);
return -EINVAL;
}
/* notify PM core that power is now needed */
ret = pm_runtime_get_sync(dev);
if (ret < 0) {
dev_err(dev, "%s: can't power on device\n", __func__);
pm_runtime_put_noidle(dev);
module_put(dev->driver->owner);
return ret;
}
/* mark hwspinlock as used, should not fail */
tmp = radix_tree_tag_clear(&hwspinlock_tree, hwlock_to_id(hwlock),
HWSPINLOCK_UNUSED);
/* self-sanity check that should never fail */
WARN_ON(tmp != hwlock);
return ret;
}
/**
* hwspin_lock_get_id() - retrieve id number of a given hwspinlock
* @hwlock: a valid hwspinlock instance
*
* Returns the id number of a given @hwlock, or -EINVAL if @hwlock is invalid.
*/
int hwspin_lock_get_id(struct hwspinlock *hwlock)
{
if (!hwlock) {
pr_err("invalid hwlock\n");
return -EINVAL;
}
return hwlock_to_id(hwlock);
}
EXPORT_SYMBOL_GPL(hwspin_lock_get_id);
/**
* hwspin_lock_request() - request an hwspinlock
*
* This function should be called by users of the hwspinlock device,
* in order to dynamically assign them an unused hwspinlock.
* Usually the user of this lock will then have to communicate the lock's id
* to the remote core before it can be used for synchronization (to get the
* id of a given hwlock, use hwspin_lock_get_id()).
*
* Should be called from a process context (might sleep)
*
* Returns the address of the assigned hwspinlock, or NULL on error
*/
struct hwspinlock *hwspin_lock_request(void)
{
struct hwspinlock *hwlock;
int ret;
mutex_lock(&hwspinlock_tree_lock);
/* look for an unused lock */
ret = radix_tree_gang_lookup_tag(&hwspinlock_tree, (void **)&hwlock,
0, 1, HWSPINLOCK_UNUSED);
if (ret == 0) {
pr_warn("a free hwspinlock is not available\n");
hwlock = NULL;
goto out;
}
/* sanity check that should never fail */
WARN_ON(ret > 1);
/* mark as used and power up */
ret = __hwspin_lock_request(hwlock);
if (ret < 0)
hwlock = NULL;
out:
mutex_unlock(&hwspinlock_tree_lock);
return hwlock;
}
EXPORT_SYMBOL_GPL(hwspin_lock_request);
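/*
 * Illustrative usage sketch (not part of this file): the full dynamic
 * allocation flow. How the id reaches the remote core is platform
 * specific; the mailbox mentioned below is just an assumption.
 */
#if 0	/* example only */
static int example_dynamic_lock(void)
{
	struct hwspinlock *hwlock;
	int id;

	hwlock = hwspin_lock_request();
	if (!hwlock)
		return -EBUSY;

	id = hwspin_lock_get_id(hwlock);
	/* publish 'id' to the remote core, e.g. over a mailbox */

	/* ... use hwspin_lock_timeout()/hwspin_unlock() as shown above ... */

	return hwspin_lock_free(hwlock);
}
#endif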
/**
* hwspin_lock_request_specific() - request for a specific hwspinlock
* @id: index of the specific hwspinlock that is requested
*
* This function should be called by users of the hwspinlock module,
* in order to assign them a specific hwspinlock.
* Usually early board code will be calling this function in order to
* reserve specific hwspinlock ids for predefined purposes.
*
* Should be called from a process context (might sleep)
*
* Returns the address of the assigned hwspinlock, or NULL on error
*/
struct hwspinlock *hwspin_lock_request_specific(unsigned int id)
{
struct hwspinlock *hwlock;
int ret;
mutex_lock(&hwspinlock_tree_lock);
/* make sure this hwspinlock exists */
hwlock = radix_tree_lookup(&hwspinlock_tree, id);
if (!hwlock) {
pr_warn("hwspinlock %u does not exist\n", id);
goto out;
}
/* sanity check (this shouldn't happen) */
WARN_ON(hwlock_to_id(hwlock) != id);
/* make sure this hwspinlock is unused */
ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
if (ret == 0) {
pr_warn("hwspinlock %u is already in use\n", id);
hwlock = NULL;
goto out;
}
/* mark as used and power up */
ret = __hwspin_lock_request(hwlock);
if (ret < 0)
hwlock = NULL;
out:
mutex_unlock(&hwspinlock_tree_lock);
return hwlock;
}
EXPORT_SYMBOL_GPL(hwspin_lock_request_specific);
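/*
 * Illustrative usage sketch (not part of this file): early board code
 * claiming a lock id that both cores agreed on at integration time
 * (id 0 here is an arbitrary example):
 */
#if 0	/* example only */
static struct hwspinlock *example_reserved_lock;

static void example_reserve_lock(void)
{
	example_reserved_lock = hwspin_lock_request_specific(0);
	if (!example_reserved_lock)
		pr_err("failed to reserve hwspinlock 0\n");
}
#endif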
/**
* hwspin_lock_free() - free a specific hwspinlock
* @hwlock: the specific hwspinlock to free
*
* This function marks @hwlock as free again.
* Should only be called with an @hwlock that was retrieved from
* an earlier call to hwspin_lock_request{_specific}.
*
* Should be called from a process context (might sleep)
*
* Returns 0 on success, or an appropriate error code on failure
*/
int hwspin_lock_free(struct hwspinlock *hwlock)
{
struct device *dev;
struct hwspinlock *tmp;
int ret;
if (!hwlock) {
pr_err("invalid hwlock\n");
return -EINVAL;
}
dev = hwlock->bank->dev;
mutex_lock(&hwspinlock_tree_lock);
/* make sure the hwspinlock is used */
ret = radix_tree_tag_get(&hwspinlock_tree, hwlock_to_id(hwlock),
HWSPINLOCK_UNUSED);
if (ret == 1) {
dev_err(dev, "%s: hwlock is already free\n", __func__);
dump_stack();
ret = -EINVAL;
goto out;
}
/* notify the underlying device that power is not needed */
ret = pm_runtime_put(dev);
if (ret < 0)
goto out;
/* mark this hwspinlock as available */
tmp = radix_tree_tag_set(&hwspinlock_tree, hwlock_to_id(hwlock),
HWSPINLOCK_UNUSED);
/* sanity check (this shouldn't happen) */
WARN_ON(tmp != hwlock);
module_put(dev->driver->owner);
out:
mutex_unlock(&hwspinlock_tree_lock);
return ret;
}
EXPORT_SYMBOL_GPL(hwspin_lock_free);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Hardware spinlock interface");
MODULE_AUTHOR("Ohad Ben-Cohen <ohad@wizery.com>");

drivers/hwspinlock/hwspinlock_internal.h

@@ -0,0 +1,77 @@
/*
* Hardware spinlocks internal header
*
* Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com
*
* Contact: Ohad Ben-Cohen <ohad@wizery.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef __HWSPINLOCK_HWSPINLOCK_H
#define __HWSPINLOCK_HWSPINLOCK_H
#include <linux/spinlock.h>
#include <linux/device.h>
struct hwspinlock_device;
/**
* struct hwspinlock_ops - platform-specific hwspinlock handlers
*
* @trylock: make a single attempt to take the lock. returns 0 on
* failure and true on success. may _not_ sleep.
* @unlock: release the lock. always succeed. may _not_ sleep.
* @relax: optional, platform-specific relax handler, called by hwspinlock
* core while spinning on a lock, between two successive
* invocations of @trylock. may _not_ sleep.
*/
struct hwspinlock_ops {
int (*trylock)(struct hwspinlock *lock);
void (*unlock)(struct hwspinlock *lock);
void (*relax)(struct hwspinlock *lock);
};
/**
* struct hwspinlock - this struct represents a single hwspinlock instance
* @bank: the hwspinlock_device structure which owns this lock
* @lock: initialized and used by hwspinlock core
* @priv: private data, owned by the underlying platform-specific hwspinlock drv
*/
struct hwspinlock {
struct hwspinlock_device *bank;
spinlock_t lock;
void *priv;
};
/**
* struct hwspinlock_device - a device which usually spans numerous hwspinlocks
* @dev: underlying device, will be used to invoke runtime PM api
* @ops: platform-specific hwspinlock handlers
* @base_id: id index of the first lock in this device
* @num_locks: number of locks in this device
* @lock: dynamically allocated array of 'struct hwspinlock'
*/
struct hwspinlock_device {
struct device *dev;
const struct hwspinlock_ops *ops;
int base_id;
int num_locks;
struct hwspinlock lock[0];
};
static inline int hwlock_to_id(struct hwspinlock *hwlock)
{
int local_id = hwlock - &hwlock->bank->lock[0];
return hwlock->bank->base_id + local_id;
}
#endif /* __HWSPINLOCK_HWSPINLOCK_H */
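/*
 * Illustrative note (not part of this file): because the locks live in a
 * flexible array at the end of struct hwspinlock_device, hwlock_to_id()
 * is plain pointer arithmetic. E.g. for a bank registered with
 * base_id = 8, the third lock resolves to the global id 10:
 *
 *	int id = hwlock_to_id(&bank->lock[2]);	// local_id 2 + base_id 8 == 10
 */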

drivers/hwspinlock/omap_hwspinlock.c

@@ -0,0 +1,203 @@
/*
* OMAP hardware spinlock driver
*
* Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com
*
* Contact: Simon Que <sque@ti.com>
* Hari Kanigeri <h-kanigeri2@ti.com>
* Ohad Ben-Cohen <ohad@wizery.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/bitops.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/hwspinlock.h>
#include <linux/platform_device.h>
#include "hwspinlock_internal.h"
/* Spinlock register offsets */
#define SYSSTATUS_OFFSET 0x0014
#define LOCK_BASE_OFFSET 0x0800
#define SPINLOCK_NUMLOCKS_BIT_OFFSET (24)
/* Possible values of SPINLOCK_LOCK_REG */
#define SPINLOCK_NOTTAKEN (0) /* free */
#define SPINLOCK_TAKEN (1) /* locked */
static int omap_hwspinlock_trylock(struct hwspinlock *lock)
{
void __iomem *lock_addr = lock->priv;
/* attempt to acquire the lock by reading its value */
return (SPINLOCK_NOTTAKEN == readl(lock_addr));
}
static void omap_hwspinlock_unlock(struct hwspinlock *lock)
{
void __iomem *lock_addr = lock->priv;
/* release the lock by writing 0 to it */
writel(SPINLOCK_NOTTAKEN, lock_addr);
}
/*
* relax the OMAP interconnect while spinning on it.
*
* The specs recommend a retry delay just over half of the
* time a requester is expected to hold the lock.
*
* The number below is taken from a hardware specs example,
* so it is somewhat arbitrary.
*/
static void omap_hwspinlock_relax(struct hwspinlock *lock)
{
ndelay(50);
}
static const struct hwspinlock_ops omap_hwspinlock_ops = {
.trylock = omap_hwspinlock_trylock,
.unlock = omap_hwspinlock_unlock,
.relax = omap_hwspinlock_relax,
};
static int omap_hwspinlock_probe(struct platform_device *pdev)
{
struct hwspinlock_pdata *pdata = pdev->dev.platform_data;
struct hwspinlock_device *bank;
struct hwspinlock *hwlock;
struct resource *res;
void __iomem *io_base;
int num_locks, i, ret;
if (!pdata)
return -ENODEV;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENODEV;
io_base = ioremap(res->start, resource_size(res));
if (!io_base)
return -ENOMEM;
/*
* make sure the module is enabled and clocked before reading
* the module SYSSTATUS register
*/
pm_runtime_enable(&pdev->dev);
ret = pm_runtime_get_sync(&pdev->dev);
if (ret < 0) {
pm_runtime_put_noidle(&pdev->dev);
goto iounmap_base;
}
/* Determine number of locks */
i = readl(io_base + SYSSTATUS_OFFSET);
i >>= SPINLOCK_NUMLOCKS_BIT_OFFSET;
/*
* runtime PM will make sure the clock of this module is
* enabled again iff at least one lock is requested
*/
ret = pm_runtime_put(&pdev->dev);
if (ret < 0)
goto iounmap_base;
/* one of the four lsb's must be set, and nothing else */
if (hweight_long(i & 0xf) != 1 || i > 8) {
ret = -EINVAL;
goto iounmap_base;
}
num_locks = i * 32; /* actual number of locks in this device */
bank = kzalloc(sizeof(*bank) + num_locks * sizeof(*hwlock), GFP_KERNEL);
if (!bank) {
ret = -ENOMEM;
goto iounmap_base;
}
platform_set_drvdata(pdev, bank);
for (i = 0, hwlock = &bank->lock[0]; i < num_locks; i++, hwlock++)
hwlock->priv = io_base + LOCK_BASE_OFFSET + sizeof(u32) * i;
ret = hwspin_lock_register(bank, &pdev->dev, &omap_hwspinlock_ops,
pdata->base_id, num_locks);
if (ret)
goto reg_fail;
return 0;
reg_fail:
kfree(bank);
iounmap_base:
pm_runtime_disable(&pdev->dev);
iounmap(io_base);
return ret;
}
static int omap_hwspinlock_remove(struct platform_device *pdev)
{
struct hwspinlock_device *bank = platform_get_drvdata(pdev);
void __iomem *io_base = bank->lock[0].priv - LOCK_BASE_OFFSET;
int ret;
ret = hwspin_lock_unregister(bank);
if (ret) {
dev_err(&pdev->dev, "%s failed: %d\n", __func__, ret);
return ret;
}
pm_runtime_disable(&pdev->dev);
iounmap(io_base);
kfree(bank);
return 0;
}
static struct platform_driver omap_hwspinlock_driver = {
.probe = omap_hwspinlock_probe,
.remove = omap_hwspinlock_remove,
.driver = {
.name = "omap_hwspinlock",
.owner = THIS_MODULE,
},
};
static int __init omap_hwspinlock_init(void)
{
return platform_driver_register(&omap_hwspinlock_driver);
}
/* board init code might need to reserve hwspinlocks for predefined purposes */
postcore_initcall(omap_hwspinlock_init);
static void __exit omap_hwspinlock_exit(void)
{
platform_driver_unregister(&omap_hwspinlock_driver);
}
module_exit(omap_hwspinlock_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Hardware spinlock driver for OMAP");
MODULE_AUTHOR("Simon Que <sque@ti.com>");
MODULE_AUTHOR("Hari Kanigeri <h-kanigeri2@ti.com>");
MODULE_AUTHOR("Ohad Ben-Cohen <ohad@wizery.com>");

drivers/hwspinlock/u8500_hsem.c

@@ -0,0 +1,197 @@
/*
* u8500 HWSEM driver
*
* Copyright (C) 2010-2011 ST-Ericsson
*
* Implements u8500 semaphore handling for protocol 1, no interrupts.
*
* Author: Mathieu Poirier <mathieu.poirier@linaro.org>
* Heavily borrowed from the work of :
* Simon Que <sque@ti.com>
* Hari Kanigeri <h-kanigeri2@ti.com>
* Ohad Ben-Cohen <ohad@wizery.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/hwspinlock.h>
#include <linux/platform_device.h>
#include "hwspinlock_internal.h"
/*
* Implementation of STE's HSem protocol 1 without interrupts.
* The only masterID we allow is '0x01' to force people to use
* HSems for synchronisation between processors rather than processes
* on the ARM core.
*/
#define U8500_MAX_SEMAPHORE 32 /* a total of 32 semaphores */
#define RESET_SEMAPHORE (0) /* free */
/*
* CPU ID for the master running the u8500 kernel.
* Hwspinlocks should only be used to synchronise operations
* between the Cortex A9 core and the other CPUs. Hence
* the masterID is forced to a preset value.
*/
#define HSEM_MASTER_ID 0x01
#define HSEM_REGISTER_OFFSET 0x08
#define HSEM_CTRL_REG 0x00
#define HSEM_ICRALL 0x90
#define HSEM_PROTOCOL_1 0x01
static int u8500_hsem_trylock(struct hwspinlock *lock)
{
void __iomem *lock_addr = lock->priv;
writel(HSEM_MASTER_ID, lock_addr);
/* check only the first 4 bits and compare them to the masterID.
* If equal, we own the semaphore; otherwise
* someone else has it.
*/
return (HSEM_MASTER_ID == (0x0F & readl(lock_addr)));
}
static void u8500_hsem_unlock(struct hwspinlock *lock)
{
void __iomem *lock_addr = lock->priv;
/* release the lock by writing 0 to it */
writel(RESET_SEMAPHORE, lock_addr);
}
/*
* u8500: what value is recommended here?
*/
static void u8500_hsem_relax(struct hwspinlock *lock)
{
ndelay(50);
}
static const struct hwspinlock_ops u8500_hwspinlock_ops = {
.trylock = u8500_hsem_trylock,
.unlock = u8500_hsem_unlock,
.relax = u8500_hsem_relax,
};
static int u8500_hsem_probe(struct platform_device *pdev)
{
struct hwspinlock_pdata *pdata = pdev->dev.platform_data;
struct hwspinlock_device *bank;
struct hwspinlock *hwlock;
struct resource *res;
void __iomem *io_base;
int i, ret, num_locks = U8500_MAX_SEMAPHORE;
ulong val;
if (!pdata)
return -ENODEV;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENODEV;
io_base = ioremap(res->start, resource_size(res));
if (!io_base)
return -ENOMEM;
/* make sure protocol 1 is selected */
val = readl(io_base + HSEM_CTRL_REG);
writel((val & ~HSEM_PROTOCOL_1), io_base + HSEM_CTRL_REG);
/* clear all interrupts */
writel(0xFFFF, io_base + HSEM_ICRALL);
bank = kzalloc(sizeof(*bank) + num_locks * sizeof(*hwlock), GFP_KERNEL);
if (!bank) {
ret = -ENOMEM;
goto iounmap_base;
}
platform_set_drvdata(pdev, bank);
for (i = 0, hwlock = &bank->lock[0]; i < num_locks; i++, hwlock++)
hwlock->priv = io_base + HSEM_REGISTER_OFFSET + sizeof(u32) * i;
/* no PM needed for HSem, but required to comply with the hwspinlock core */
pm_runtime_enable(&pdev->dev);
ret = hwspin_lock_register(bank, &pdev->dev, &u8500_hwspinlock_ops,
pdata->base_id, num_locks);
if (ret)
goto reg_fail;
return 0;
reg_fail:
pm_runtime_disable(&pdev->dev);
kfree(bank);
iounmap_base:
iounmap(io_base);
return ret;
}
static int u8500_hsem_remove(struct platform_device *pdev)
{
struct hwspinlock_device *bank = platform_get_drvdata(pdev);
void __iomem *io_base = bank->lock[0].priv - HSEM_REGISTER_OFFSET;
int ret;
/* clear all interrupts */
writel(0xFFFF, io_base + HSEM_ICRALL);
ret = hwspin_lock_unregister(bank);
if (ret) {
dev_err(&pdev->dev, "%s failed: %d\n", __func__, ret);
return ret;
}
pm_runtime_disable(&pdev->dev);
iounmap(io_base);
kfree(bank);
return 0;
}
static struct platform_driver u8500_hsem_driver = {
.probe = u8500_hsem_probe,
.remove = u8500_hsem_remove,
.driver = {
.name = "u8500_hsem",
.owner = THIS_MODULE,
},
};
static int __init u8500_hsem_init(void)
{
return platform_driver_register(&u8500_hsem_driver);
}
/* board init code might need to reserve hwspinlocks for predefined purposes */
postcore_initcall(u8500_hsem_init);
static void __exit u8500_hsem_exit(void)
{
platform_driver_unregister(&u8500_hsem_driver);
}
module_exit(u8500_hsem_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Hardware Spinlock driver for u8500");
MODULE_AUTHOR("Mathieu Poirier <mathieu.poirier@linaro.org>");