mirror of https://github.com/AetherDroid/android_kernel_samsung_on5xelte.git
synced 2025-09-08 01:08:03 -04:00

commit f6dfaef42e: "Fixed MTP to work with TWRP"
50820 changed files with 20846062 additions and 0 deletions
drivers/dma-buf/Makefile (new file, 1 line)
@@ -0,0 +1 @@
obj-y := dma-buf.o fence.o reservation.o seqno-fence.o
drivers/dma-buf/dma-buf.c (new file, 929 lines)
@@ -0,0 +1,929 @@
/*
 * Framework for buffer objects that can be shared across devices/subsystems.
 *
 * Copyright(C) 2011 Linaro Limited. All rights reserved.
 * Author: Sumit Semwal <sumit.semwal@ti.com>
 *
 * Many thanks to linaro-mm-sig list, and specially
 * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
 * Daniel Vetter <daniel@ffwll.ch> for their support in creation and
 * refining of this idea.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>
#include <linux/fence.h>
#include <linux/anon_inodes.h>
#include <linux/export.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/reservation.h>

static inline int is_dma_buf_file(struct file *);

struct dma_buf_list {
	struct list_head head;
	struct mutex lock;
};

static struct dma_buf_list db_list;

static int dma_buf_release(struct inode *inode, struct file *file)
{
	struct dma_buf *dmabuf;

	if (!is_dma_buf_file(file))
		return -EINVAL;

	dmabuf = file->private_data;

	BUG_ON(dmabuf->vmapping_counter);

	/*
	 * Any fences that a dma-buf poll can wait on should be signaled
	 * before releasing dma-buf. This is the responsibility of each
	 * driver that uses the reservation objects.
	 *
	 * If you hit this BUG() it means someone dropped their ref to the
	 * dma-buf while still having pending operation to the buffer.
	 */
	BUG_ON(dmabuf->cb_shared.active || dmabuf->cb_excl.active);

	dmabuf->ops->release(dmabuf);

	mutex_lock(&db_list.lock);
	list_del(&dmabuf->list_node);
	mutex_unlock(&db_list.lock);

	if (dmabuf->resv == (struct reservation_object *)&dmabuf[1])
		reservation_object_fini(dmabuf->resv);

	kfree(dmabuf);
	return 0;
}

static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
{
	struct dma_buf *dmabuf;

	if (!is_dma_buf_file(file))
		return -EINVAL;

	dmabuf = file->private_data;

	/* check for overflowing the buffer's size */
	if (vma->vm_pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) >
	    dmabuf->size >> PAGE_SHIFT)
		return -EINVAL;

	return dmabuf->ops->mmap(dmabuf, vma);
}

static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
{
	struct dma_buf *dmabuf;
	loff_t base;

	if (!is_dma_buf_file(file))
		return -EBADF;

	dmabuf = file->private_data;

	/* only support discovering the end of the buffer,
	   but also allow SEEK_SET to maintain the idiomatic
	   SEEK_END(0), SEEK_CUR(0) pattern */
	if (whence == SEEK_END)
		base = dmabuf->size;
	else if (whence == SEEK_SET)
		base = 0;
	else
		return -EINVAL;

	if (offset != 0)
		return -EINVAL;

	return base + offset;
}
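/*
 * Usage sketch (editorial, not part of this file): because the fops set
 * FMODE_LSEEK on the anon file, userspace can discover a dma-buf's size
 * with lseek(2). As implemented in dma_buf_llseek() above, only SEEK_END
 * and SEEK_SET with offset 0 are accepted. Assumes fd is a dma-buf file
 * descriptor obtained from some exporting driver.
 */
#include <unistd.h>

static long dmabuf_size(int fd)
{
	off_t size = lseek(fd, 0, SEEK_END);	/* returns dmabuf->size */

	if (size < 0)
		return -1;
	if (lseek(fd, 0, SEEK_SET) < 0)	/* rewind; SEEK_CUR is -EINVAL here */
		return -1;
	return (long)size;
}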
static void dma_buf_poll_cb(struct fence *fence, struct fence_cb *cb)
{
	struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb;
	unsigned long flags;

	spin_lock_irqsave(&dcb->poll->lock, flags);
	wake_up_locked_poll(dcb->poll, dcb->active);
	dcb->active = 0;
	spin_unlock_irqrestore(&dcb->poll->lock, flags);
}

static unsigned int dma_buf_poll(struct file *file, poll_table *poll)
{
	struct dma_buf *dmabuf;
	struct reservation_object *resv;
	struct reservation_object_list *fobj;
	struct fence *fence_excl;
	unsigned long events;
	unsigned shared_count, seq;

	dmabuf = file->private_data;
	if (!dmabuf || !dmabuf->resv)
		return POLLERR;

	resv = dmabuf->resv;

	poll_wait(file, &dmabuf->poll, poll);

	events = poll_requested_events(poll) & (POLLIN | POLLOUT);
	if (!events)
		return 0;

retry:
	seq = read_seqcount_begin(&resv->seq);
	rcu_read_lock();

	fobj = rcu_dereference(resv->fence);
	if (fobj)
		shared_count = fobj->shared_count;
	else
		shared_count = 0;
	fence_excl = rcu_dereference(resv->fence_excl);
	if (read_seqcount_retry(&resv->seq, seq)) {
		rcu_read_unlock();
		goto retry;
	}

	if (fence_excl && (!(events & POLLOUT) || shared_count == 0)) {
		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl;
		unsigned long pevents = POLLIN;

		if (shared_count == 0)
			pevents |= POLLOUT;

		spin_lock_irq(&dmabuf->poll.lock);
		if (dcb->active) {
			dcb->active |= pevents;
			events &= ~pevents;
		} else
			dcb->active = pevents;
		spin_unlock_irq(&dmabuf->poll.lock);

		if (events & pevents) {
			if (!fence_get_rcu(fence_excl)) {
				/* force a recheck */
				events &= ~pevents;
				dma_buf_poll_cb(NULL, &dcb->cb);
			} else if (!fence_add_callback(fence_excl, &dcb->cb,
						       dma_buf_poll_cb)) {
				events &= ~pevents;
				fence_put(fence_excl);
			} else {
				/*
				 * No callback queued, wake up any additional
				 * waiters.
				 */
				fence_put(fence_excl);
				dma_buf_poll_cb(NULL, &dcb->cb);
			}
		}
	}

	if ((events & POLLOUT) && shared_count > 0) {
		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_shared;
		int i;

		/* Only queue a new callback if no event has fired yet */
		spin_lock_irq(&dmabuf->poll.lock);
		if (dcb->active)
			events &= ~POLLOUT;
		else
			dcb->active = POLLOUT;
		spin_unlock_irq(&dmabuf->poll.lock);

		if (!(events & POLLOUT))
			goto out;

		for (i = 0; i < shared_count; ++i) {
			struct fence *fence = rcu_dereference(fobj->shared[i]);

			if (!fence_get_rcu(fence)) {
				/*
				 * fence refcount dropped to zero, this means
				 * that fobj has been freed
				 *
				 * call dma_buf_poll_cb and force a recheck!
				 */
				events &= ~POLLOUT;
				dma_buf_poll_cb(NULL, &dcb->cb);
				break;
			}
			if (!fence_add_callback(fence, &dcb->cb,
						dma_buf_poll_cb)) {
				fence_put(fence);
				events &= ~POLLOUT;
				break;
			}
			fence_put(fence);
		}

		/* No callback queued, wake up any additional waiters. */
		if (i == shared_count)
			dma_buf_poll_cb(NULL, &dcb->cb);
	}

out:
	rcu_read_unlock();
	return events;
}

static const struct file_operations dma_buf_fops = {
	.release	= dma_buf_release,
	.mmap		= dma_buf_mmap_internal,
	.llseek		= dma_buf_llseek,
	.poll		= dma_buf_poll,
};

/*
 * is_dma_buf_file - Check if struct file* is associated with dma_buf
 */
static inline int is_dma_buf_file(struct file *file)
{
	return file->f_op == &dma_buf_fops;
}

/**
 * get_dma_buf_file - Finds dma_buf from a file descriptor
 *
 * @filp:	[in]	file descriptor to extract dma_buf.
 *
 * Returns the pointer to dma_buf stored in @filp after incrementing count.
 * The returned dma_buf must be released with dma_buf_put().
 * Returns NULL if @filp is not the file descriptor of dma_buf.
 */
struct dma_buf *get_dma_buf_file(struct file *filp)
{
	struct dma_buf *dmabuf;

	if (!is_dma_buf_file(filp))
		return NULL;

	dmabuf = filp->private_data;

	get_dma_buf(dmabuf);

	return dmabuf;
}

/**
 * dma_buf_export_named - Creates a new dma_buf, and associates an anon file
 * with this buffer, so it can be exported.
 * Also connect the allocator specific data and ops to the buffer.
 * Additionally, provide a name string for exporter; useful in debugging.
 *
 * @priv:	[in]	Attach private data of allocator to this buffer
 * @ops:	[in]	Attach allocator-defined dma buf ops to the new buffer.
 * @size:	[in]	Size of the buffer
 * @flags:	[in]	mode flags for the file.
 * @exp_name:	[in]	name of the exporting module - useful for debugging.
 * @resv:	[in]	reservation-object, NULL to allocate default one.
 *
 * Returns, on success, a newly created dma_buf object, which wraps the
 * supplied private data and operations for dma_buf_ops. On either missing
 * ops, or error in allocating struct dma_buf, will return negative error.
 *
 */
struct dma_buf *dma_buf_export_named(void *priv, const struct dma_buf_ops *ops,
				size_t size, int flags, const char *exp_name,
				struct reservation_object *resv)
{
	struct dma_buf *dmabuf;
	struct file *file;
	size_t alloc_size = sizeof(struct dma_buf);

	if (!resv)
		alloc_size += sizeof(struct reservation_object);
	else
		/* prevent &dma_buf[1] == dma_buf->resv */
		alloc_size += 1;

	if (WARN_ON(!priv || !ops
			  || !ops->map_dma_buf
			  || !ops->unmap_dma_buf
			  || !ops->release
			  || !ops->kmap_atomic
			  || !ops->kmap
			  || !ops->mmap)) {
		return ERR_PTR(-EINVAL);
	}

	dmabuf = kzalloc(alloc_size, GFP_KERNEL);
	if (dmabuf == NULL)
		return ERR_PTR(-ENOMEM);

	dmabuf->priv = priv;
	dmabuf->ops = ops;
	dmabuf->size = size;
	dmabuf->exp_name = exp_name;
	init_waitqueue_head(&dmabuf->poll);
	dmabuf->cb_excl.poll = dmabuf->cb_shared.poll = &dmabuf->poll;
	dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0;

	if (!resv) {
		resv = (struct reservation_object *)&dmabuf[1];
		reservation_object_init(resv);
	}
	dmabuf->resv = resv;

	file = anon_inode_getfile("dmabuf", &dma_buf_fops, dmabuf, flags);
	if (IS_ERR(file)) {
		kfree(dmabuf);
		return ERR_CAST(file);
	}

	file->f_mode |= FMODE_LSEEK;
	dmabuf->file = file;

	mutex_init(&dmabuf->lock);
	INIT_LIST_HEAD(&dmabuf->attachments);

	mutex_lock(&db_list.lock);
	list_add(&dmabuf->list_node, &db_list.head);
	mutex_unlock(&db_list.lock);

	return dmabuf;
}
EXPORT_SYMBOL_GPL(dma_buf_export_named);

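/*
 * Exporter sketch (editorial, not part of this file): how an allocator
 * might wrap a driver-private buffer with dma_buf_export_named() and hand
 * userspace a file descriptor via dma_buf_fd() below. struct my_buffer
 * and all my_* callbacks are hypothetical placeholders; the ops marked
 * "required" are the ones dma_buf_export_named() insists on.
 */
static const struct dma_buf_ops my_dmabuf_ops = {
	.map_dma_buf	= my_map_dma_buf,	/* required */
	.unmap_dma_buf	= my_unmap_dma_buf,	/* required */
	.release	= my_release,		/* required */
	.kmap_atomic	= my_kmap_atomic,	/* required */
	.kmap		= my_kmap,		/* required */
	.mmap		= my_mmap,		/* required */
};

static int my_export_to_fd(struct my_buffer *buf)
{
	struct dma_buf *dmabuf;

	dmabuf = dma_buf_export_named(buf, &my_dmabuf_ops, buf->size,
				      O_RDWR, "my_driver", NULL);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	return dma_buf_fd(dmabuf, O_CLOEXEC);	/* fd for userspace */
}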
/**
 * dma_buf_fd - returns a file descriptor for the given dma_buf
 * @dmabuf:	[in]	pointer to dma_buf for which fd is required.
 * @flags:	[in]	flags to give to fd
 *
 * On success, returns an associated 'fd'. Else, returns error.
 */
int dma_buf_fd(struct dma_buf *dmabuf, int flags)
{
	int fd;

	if (!dmabuf || !dmabuf->file)
		return -EINVAL;

	fd = get_unused_fd_flags(flags);
	if (fd < 0)
		return fd;

	fd_install(fd, dmabuf->file);

	return fd;
}
EXPORT_SYMBOL_GPL(dma_buf_fd);

/**
 * dma_buf_get - returns the dma_buf structure related to an fd
 * @fd:	[in]	fd associated with the dma_buf to be returned
 *
 * On success, returns the dma_buf structure associated with an fd; uses
 * file's refcounting done by fget to increase refcount. Returns ERR_PTR
 * otherwise.
 */
struct dma_buf *dma_buf_get(int fd)
{
	struct file *file;

	file = fget(fd);

	if (!file)
		return ERR_PTR(-EBADF);

	if (!is_dma_buf_file(file)) {
		fput(file);
		return ERR_PTR(-EINVAL);
	}

	return file->private_data;
}
EXPORT_SYMBOL_GPL(dma_buf_get);

/**
 * dma_buf_put - decreases refcount of the buffer
 * @dmabuf:	[in]	buffer to reduce refcount of
 *
 * Uses file's refcounting done implicitly by fput()
 */
void dma_buf_put(struct dma_buf *dmabuf)
{
	if (WARN_ON(!dmabuf || !dmabuf->file))
		return;

	fput(dmabuf->file);
}
EXPORT_SYMBOL_GPL(dma_buf_put);

/**
 * dma_buf_attach - Add the device to dma_buf's attachments list; optionally,
 * calls attach() of dma_buf_ops to allow device-specific attach functionality
 * @dmabuf:	[in]	buffer to attach device to.
 * @dev:	[in]	device to be attached.
 *
 * Returns struct dma_buf_attachment * for this attachment; returns ERR_PTR on
 * error.
 */
struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
					  struct device *dev)
{
	struct dma_buf_attachment *attach;
	int ret;

	if (WARN_ON(!dmabuf || !dev))
		return ERR_PTR(-EINVAL);

	attach = kzalloc(sizeof(struct dma_buf_attachment), GFP_KERNEL);
	if (attach == NULL)
		return ERR_PTR(-ENOMEM);

	attach->dev = dev;
	attach->dmabuf = dmabuf;

	mutex_lock(&dmabuf->lock);

	if (dmabuf->ops->attach) {
		ret = dmabuf->ops->attach(dmabuf, dev, attach);
		if (ret)
			goto err_attach;
	}
	list_add(&attach->node, &dmabuf->attachments);

	mutex_unlock(&dmabuf->lock);
	return attach;

err_attach:
	kfree(attach);
	mutex_unlock(&dmabuf->lock);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(dma_buf_attach);

/**
 * dma_buf_detach - Remove the given attachment from dmabuf's attachments list;
 * optionally calls detach() of dma_buf_ops for device-specific detach
 * @dmabuf:	[in]	buffer to detach from.
 * @attach:	[in]	attachment to be detached; is freed after this call.
 *
 */
void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
{
	if (WARN_ON(!dmabuf || !attach))
		return;

	mutex_lock(&dmabuf->lock);
	list_del(&attach->node);
	if (dmabuf->ops->detach)
		dmabuf->ops->detach(dmabuf, attach);

	mutex_unlock(&dmabuf->lock);
	kfree(attach);
}
EXPORT_SYMBOL_GPL(dma_buf_detach);

/**
 * dma_buf_map_attachment - Returns the scatterlist table of the attachment;
 * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
 * dma_buf_ops.
 * @attach:	[in]	attachment whose scatterlist is to be returned
 * @direction:	[in]	direction of DMA transfer
 *
 * Returns sg_table containing the scatterlist to be returned; returns ERR_PTR
 * on error.
 */
struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
					enum dma_data_direction direction)
{
	struct sg_table *sg_table = ERR_PTR(-EINVAL);

	might_sleep();

	if (WARN_ON(!attach || !attach->dmabuf))
		return ERR_PTR(-EINVAL);

	sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
	if (!sg_table)
		sg_table = ERR_PTR(-ENOMEM);

	return sg_table;
}
EXPORT_SYMBOL_GPL(dma_buf_map_attachment);

/**
 * dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer;
 * might deallocate the scatterlist associated. Is a wrapper for
 * unmap_dma_buf() of dma_buf_ops.
 * @attach:	[in]	attachment to unmap buffer from
 * @sg_table:	[in]	scatterlist info of the buffer to unmap
 * @direction:	[in]	direction of DMA transfer
 *
 */
void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
				struct sg_table *sg_table,
				enum dma_data_direction direction)
{
	might_sleep();

	if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
		return;

	attach->dmabuf->ops->unmap_dma_buf(attach, sg_table,
						direction);
}
EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);

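/*
 * Importer sketch (editorial, not part of this file): the attach/map flow
 * a device driver would use on a dma-buf fd received from userspace.
 * my_import is a hypothetical placeholder; dev is the importing struct
 * device, and error handling is abbreviated but complete.
 */
static int my_import(struct device *dev, int fd)
{
	struct dma_buf *dmabuf;
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	int ret = 0;

	dmabuf = dma_buf_get(fd);		/* takes a file reference */
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	attach = dma_buf_attach(dmabuf, dev);
	if (IS_ERR(attach)) {
		ret = PTR_ERR(attach);
		goto put;
	}

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto detach;
	}

	/* ... program the device with sgt, run the DMA ... */

	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
detach:
	dma_buf_detach(dmabuf, attach);
put:
	dma_buf_put(dmabuf);
	return ret;
}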
/**
 * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from the
 * cpu in the kernel context. Calls begin_cpu_access to allow exporter-specific
 * preparations. Coherency is only guaranteed in the specified range for the
 * specified access direction.
 * @dmabuf:	[in]	buffer to prepare cpu access for.
 * @start:	[in]	start of range for cpu access.
 * @len:	[in]	length of range for cpu access.
 * @direction:	[in]	direction of cpu access.
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start, size_t len,
			     enum dma_data_direction direction)
{
	int ret = 0;

	if (WARN_ON(!dmabuf))
		return -EINVAL;

	if (dmabuf->ops->begin_cpu_access)
		ret = dmabuf->ops->begin_cpu_access(dmabuf, start, len, direction);

	return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);

/**
 * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
 * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific
 * actions. Coherency is only guaranteed in the specified range for the
 * specified access direction.
 * @dmabuf:	[in]	buffer to complete cpu access for.
 * @start:	[in]	start of range for cpu access.
 * @len:	[in]	length of range for cpu access.
 * @direction:	[in]	direction of cpu access.
 *
 * This call must always succeed.
 */
void dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start, size_t len,
			    enum dma_data_direction direction)
{
	WARN_ON(!dmabuf);

	if (dmabuf->ops->end_cpu_access)
		dmabuf->ops->end_cpu_access(dmabuf, start, len, direction);
}
EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);

/**
 * dma_buf_kmap_atomic - Map a page of the buffer object into kernel address
 * space. The same restrictions as for kmap_atomic and friends apply.
 * @dmabuf:	[in]	buffer to map page from.
 * @page_num:	[in]	page in PAGE_SIZE units to map.
 *
 * This call must always succeed, any necessary preparations that might fail
 * need to be done in begin_cpu_access.
 */
void *dma_buf_kmap_atomic(struct dma_buf *dmabuf, unsigned long page_num)
{
	WARN_ON(!dmabuf);

	return dmabuf->ops->kmap_atomic(dmabuf, page_num);
}
EXPORT_SYMBOL_GPL(dma_buf_kmap_atomic);

/**
 * dma_buf_kunmap_atomic - Unmap a page obtained by dma_buf_kmap_atomic.
 * @dmabuf:	[in]	buffer to unmap page from.
 * @page_num:	[in]	page in PAGE_SIZE units to unmap.
 * @vaddr:	[in]	kernel space pointer obtained from dma_buf_kmap_atomic.
 *
 * This call must always succeed.
 */
void dma_buf_kunmap_atomic(struct dma_buf *dmabuf, unsigned long page_num,
			   void *vaddr)
{
	WARN_ON(!dmabuf);

	if (dmabuf->ops->kunmap_atomic)
		dmabuf->ops->kunmap_atomic(dmabuf, page_num, vaddr);
}
EXPORT_SYMBOL_GPL(dma_buf_kunmap_atomic);

/**
 * dma_buf_kmap - Map a page of the buffer object into kernel address space. The
 * same restrictions as for kmap and friends apply.
 * @dmabuf:	[in]	buffer to map page from.
 * @page_num:	[in]	page in PAGE_SIZE units to map.
 *
 * This call must always succeed, any necessary preparations that might fail
 * need to be done in begin_cpu_access.
 */
void *dma_buf_kmap(struct dma_buf *dmabuf, unsigned long page_num)
{
	WARN_ON(!dmabuf);

	return dmabuf->ops->kmap(dmabuf, page_num);
}
EXPORT_SYMBOL_GPL(dma_buf_kmap);

/**
 * dma_buf_kunmap - Unmap a page obtained by dma_buf_kmap.
 * @dmabuf:	[in]	buffer to unmap page from.
 * @page_num:	[in]	page in PAGE_SIZE units to unmap.
 * @vaddr:	[in]	kernel space pointer obtained from dma_buf_kmap.
 *
 * This call must always succeed.
 */
void dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long page_num,
		    void *vaddr)
{
	WARN_ON(!dmabuf);

	if (dmabuf->ops->kunmap)
		dmabuf->ops->kunmap(dmabuf, page_num, vaddr);
}
EXPORT_SYMBOL_GPL(dma_buf_kunmap);

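/*
 * CPU-access sketch (editorial, not part of this file): kmap/kunmap must
 * be bracketed by begin/end_cpu_access so the exporter can flush or
 * invalidate caches. Page index 0 and DMA_FROM_DEVICE are arbitrary
 * example parameters; my_read_first_page is a hypothetical name.
 */
static int my_read_first_page(struct dma_buf *dmabuf)
{
	void *vaddr;
	int ret;

	ret = dma_buf_begin_cpu_access(dmabuf, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	if (ret)
		return ret;

	vaddr = dma_buf_kmap(dmabuf, 0);	/* map page 0 of the buffer */
	/* ... read through vaddr ... */
	dma_buf_kunmap(dmabuf, 0, vaddr);

	dma_buf_end_cpu_access(dmabuf, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	return 0;
}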
/**
 * dma_buf_mmap - Sets up a userspace mmap with the given vma
 * @dmabuf:	[in]	buffer that should back the vma
 * @vma:	[in]	vma for the mmap
 * @pgoff:	[in]	offset in pages where this mmap should start within the
 *			dma-buf buffer.
 *
 * This function adjusts the passed in vma so that it points at the file of the
 * dma_buf operation. It also adjusts the starting pgoff and does bounds
 * checking on the size of the vma. Then it calls the exporter's mmap function
 * to set up the mapping.
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
		 unsigned long pgoff)
{
	struct file *oldfile;
	int ret;

	if (WARN_ON(!dmabuf || !vma))
		return -EINVAL;

	/* check for offset overflow */
	if (pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) < pgoff)
		return -EOVERFLOW;

	/* check for overflowing the buffer's size */
	if (pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) >
	    dmabuf->size >> PAGE_SHIFT)
		return -EINVAL;

	/* readjust the vma */
	get_file(dmabuf->file);
	oldfile = vma->vm_file;
	vma->vm_file = dmabuf->file;
	vma->vm_pgoff = pgoff;

	ret = dmabuf->ops->mmap(dmabuf, vma);
	if (ret) {
		/* restore old parameters on failure */
		vma->vm_file = oldfile;
		fput(dmabuf->file);
	} else {
		if (oldfile)
			fput(oldfile);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_mmap);

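/*
 * Userspace sketch (editorial, not part of this file): a dma-buf fd can
 * also be mapped directly with mmap(2), which lands in
 * dma_buf_mmap_internal() above and from there in the exporter's mmap
 * callback. size must not exceed the buffer's size.
 */
#include <sys/mman.h>

static void *map_dmabuf(int fd, size_t size)
{
	void *p = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

	return (p == MAP_FAILED) ? NULL : p;
}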
/**
 * dma_buf_vmap - Create virtual mapping for the buffer object into kernel
 * address space. Same restrictions as for vmap and friends apply.
 * @dmabuf:	[in]	buffer to vmap
 *
 * This call may fail due to lack of virtual mapping address space.
 * These calls are optional in drivers. The intended use for them
 * is for mapping objects linear in kernel space for high use objects.
 * Please attempt to use kmap/kunmap before thinking about these interfaces.
 *
 * Returns NULL on error.
 */
void *dma_buf_vmap(struct dma_buf *dmabuf)
{
	void *ptr;

	if (WARN_ON(!dmabuf))
		return NULL;

	if (!dmabuf->ops->vmap)
		return NULL;

	mutex_lock(&dmabuf->lock);
	if (dmabuf->vmapping_counter) {
		dmabuf->vmapping_counter++;
		BUG_ON(!dmabuf->vmap_ptr);
		ptr = dmabuf->vmap_ptr;
		goto out_unlock;
	}

	BUG_ON(dmabuf->vmap_ptr);

	ptr = dmabuf->ops->vmap(dmabuf);
	if (WARN_ON_ONCE(IS_ERR(ptr)))
		ptr = NULL;
	if (!ptr)
		goto out_unlock;

	dmabuf->vmap_ptr = ptr;
	dmabuf->vmapping_counter = 1;

out_unlock:
	mutex_unlock(&dmabuf->lock);
	return ptr;
}
EXPORT_SYMBOL_GPL(dma_buf_vmap);

/**
 * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
 * @dmabuf:	[in]	buffer to vunmap
 * @vaddr:	[in]	vmap to vunmap
 */
void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
{
	if (WARN_ON(!dmabuf))
		return;

	BUG_ON(!dmabuf->vmap_ptr);
	BUG_ON(dmabuf->vmapping_counter == 0);
	BUG_ON(dmabuf->vmap_ptr != vaddr);

	mutex_lock(&dmabuf->lock);
	if (--dmabuf->vmapping_counter == 0) {
		if (dmabuf->ops->vunmap)
			dmabuf->ops->vunmap(dmabuf, vaddr);
		dmabuf->vmap_ptr = NULL;
	}
	mutex_unlock(&dmabuf->lock);
}
EXPORT_SYMBOL_GPL(dma_buf_vunmap);

#ifdef CONFIG_DEBUG_FS
static int dma_buf_describe(struct seq_file *s)
{
	int ret;
	struct dma_buf *buf_obj;
	struct dma_buf_attachment *attach_obj;
	int count = 0, attach_count;
	size_t size = 0;

	ret = mutex_lock_interruptible(&db_list.lock);

	if (ret)
		return ret;

	seq_puts(s, "\nDma-buf Objects:\n");
	seq_puts(s, "size\tflags\tmode\tcount\texp_name\n");

	list_for_each_entry(buf_obj, &db_list.head, list_node) {
		ret = mutex_lock_interruptible(&buf_obj->lock);

		if (ret) {
			seq_puts(s,
				 "\tERROR locking buffer object: skipping\n");
			continue;
		}

		seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\n",
				buf_obj->size,
				buf_obj->file->f_flags, buf_obj->file->f_mode,
				file_count(buf_obj->file),
				buf_obj->exp_name);

		seq_puts(s, "\tAttached Devices:\n");
		attach_count = 0;

		list_for_each_entry(attach_obj, &buf_obj->attachments, node) {
			seq_puts(s, "\t");

			seq_printf(s, "%s\n", dev_name(attach_obj->dev));
			attach_count++;
		}

		seq_printf(s, "Total %d devices attached\n\n",
				attach_count);

		count++;
		size += buf_obj->size;
		mutex_unlock(&buf_obj->lock);
	}

	seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size);

	mutex_unlock(&db_list.lock);
	return 0;
}

static int dma_buf_show(struct seq_file *s, void *unused)
{
	void (*func)(struct seq_file *) = s->private;

	func(s);
	return 0;
}

static int dma_buf_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, dma_buf_show, inode->i_private);
}

static const struct file_operations dma_buf_debug_fops = {
	.open		= dma_buf_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static struct dentry *dma_buf_debugfs_dir;

static int dma_buf_init_debugfs(void)
{
	int err = 0;

	dma_buf_debugfs_dir = debugfs_create_dir("dma_buf", NULL);

	if (IS_ERR(dma_buf_debugfs_dir)) {
		err = PTR_ERR(dma_buf_debugfs_dir);
		dma_buf_debugfs_dir = NULL;
		return err;
	}

	err = dma_buf_debugfs_create_file("bufinfo", dma_buf_describe);

	if (err)
		pr_debug("dma_buf: debugfs: failed to create node bufinfo\n");

	return err;
}

static void dma_buf_uninit_debugfs(void)
{
	if (dma_buf_debugfs_dir)
		debugfs_remove_recursive(dma_buf_debugfs_dir);
}

int dma_buf_debugfs_create_file(const char *name,
				int (*write)(struct seq_file *))
{
	struct dentry *d;

	d = debugfs_create_file(name, S_IRUGO, dma_buf_debugfs_dir,
			write, &dma_buf_debug_fops);

	return PTR_ERR_OR_ZERO(d);
}
#else
static inline int dma_buf_init_debugfs(void)
{
	return 0;
}
static inline void dma_buf_uninit_debugfs(void)
{
}
#endif

static int __init dma_buf_init(void)
{
	mutex_init(&db_list.lock);
	INIT_LIST_HEAD(&db_list.head);
	dma_buf_init_debugfs();
	return 0;
}
subsys_initcall(dma_buf_init);

static void __exit dma_buf_deinit(void)
{
	dma_buf_uninit_debugfs();
}
__exitcall(dma_buf_deinit);
drivers/dma-buf/fence.c (new file, 431 lines)
@@ -0,0 +1,431 @@
/*
 * Fence mechanism for dma-buf and to allow for asynchronous dma access
 *
 * Copyright (C) 2012 Canonical Ltd
 * Copyright (C) 2012 Texas Instruments
 *
 * Authors:
 * Rob Clark <robdclark@gmail.com>
 * Maarten Lankhorst <maarten.lankhorst@canonical.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/atomic.h>
#include <linux/fence.h>

#define CREATE_TRACE_POINTS
#include <trace/events/fence.h>

EXPORT_TRACEPOINT_SYMBOL(fence_annotate_wait_on);
EXPORT_TRACEPOINT_SYMBOL(fence_emit);

/*
 * fence context counter: each execution context should have its own
 * fence context, this allows checking if fences belong to the same
 * context or not. One device can have multiple separate contexts,
 * and they're used if some engine can run independently of another.
 */
static atomic_t fence_context_counter = ATOMIC_INIT(0);

/**
 * fence_context_alloc - allocate an array of fence contexts
 * @num:	[in]	amount of contexts to allocate
 *
 * This function will return the first index of the number of fences allocated.
 * The fence context is used for setting fence->context to a unique number.
 */
unsigned fence_context_alloc(unsigned num)
{
	BUG_ON(!num);
	return atomic_add_return(num, &fence_context_counter) - num;
}
EXPORT_SYMBOL(fence_context_alloc);

/**
 * fence_signal_locked - signal completion of a fence
 * @fence: the fence to signal
 *
 * Signal completion for software callbacks on a fence, this will unblock
 * fence_wait() calls and run all the callbacks added with
 * fence_add_callback(). Can be called multiple times, but since a fence
 * can only go from unsignaled to signaled state, it will only be effective
 * the first time.
 *
 * Unlike fence_signal, this function must be called with fence->lock held.
 */
int fence_signal_locked(struct fence *fence)
{
	struct fence_cb *cur, *tmp;
	int ret = 0;

	if (WARN_ON(!fence))
		return -EINVAL;

	if (!ktime_to_ns(fence->timestamp)) {
		fence->timestamp = ktime_get();
		smp_mb__before_atomic();
	}

	if (test_and_set_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
		ret = -EINVAL;

		/*
		 * we might have raced with the unlocked fence_signal,
		 * still run through all callbacks
		 */
	} else
		trace_fence_signaled(fence);

	list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
		list_del_init(&cur->node);
		cur->func(fence, cur);
	}
	return ret;
}
EXPORT_SYMBOL(fence_signal_locked);

/**
 * fence_signal - signal completion of a fence
 * @fence: the fence to signal
 *
 * Signal completion for software callbacks on a fence, this will unblock
 * fence_wait() calls and run all the callbacks added with
 * fence_add_callback(). Can be called multiple times, but since a fence
 * can only go from unsignaled to signaled state, it will only be effective
 * the first time.
 */
int fence_signal(struct fence *fence)
{
	unsigned long flags;

	if (!fence)
		return -EINVAL;

	if (!ktime_to_ns(fence->timestamp)) {
		fence->timestamp = ktime_get();
		smp_mb__before_atomic();
	}

	if (test_and_set_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return -EINVAL;

	trace_fence_signaled(fence);

	if (test_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags)) {
		struct fence_cb *cur, *tmp;

		spin_lock_irqsave(fence->lock, flags);
		list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
			list_del_init(&cur->node);
			cur->func(fence, cur);
		}
		spin_unlock_irqrestore(fence->lock, flags);
	}
	return 0;
}
EXPORT_SYMBOL(fence_signal);

/**
 * fence_wait_timeout - sleep until the fence gets signaled
 * or until timeout elapses
 * @fence:	[in]	the fence to wait on
 * @intr:	[in]	if true, do an interruptible wait
 * @timeout:	[in]	timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the
 * remaining timeout in jiffies on success. Other error values may be
 * returned on custom implementations.
 *
 * Performs a synchronous wait on this fence. It is assumed the caller
 * directly or indirectly (buf-mgr between reservation and committing)
 * holds a reference to the fence, otherwise the fence might be
 * freed before return, resulting in undefined behavior.
 */
signed long
fence_wait_timeout(struct fence *fence, bool intr, signed long timeout)
{
	signed long ret;

	if (WARN_ON(timeout < 0))
		return -EINVAL;

	trace_fence_wait_start(fence);
	ret = fence->ops->wait(fence, intr, timeout);
	trace_fence_wait_end(fence);
	return ret;
}
EXPORT_SYMBOL(fence_wait_timeout);

void fence_release(struct kref *kref)
{
	struct fence *fence =
			container_of(kref, struct fence, refcount);

	trace_fence_destroy(fence);

	BUG_ON(!list_empty(&fence->cb_list));

	if (fence->ops->release)
		fence->ops->release(fence);
	else
		fence_free(fence);
}
EXPORT_SYMBOL(fence_release);

void fence_free(struct fence *fence)
{
	kfree_rcu(fence, rcu);
}
EXPORT_SYMBOL(fence_free);

/**
 * fence_enable_sw_signaling - enable signaling on fence
 * @fence:	[in]	the fence to enable
 *
 * this will request for sw signaling to be enabled, to make the fence
 * complete as soon as possible
 */
void fence_enable_sw_signaling(struct fence *fence)
{
	unsigned long flags;

	if (!test_and_set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags) &&
	    !test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
		trace_fence_enable_signal(fence);

		spin_lock_irqsave(fence->lock, flags);

		if (!fence->ops->enable_signaling(fence))
			fence_signal_locked(fence);

		spin_unlock_irqrestore(fence->lock, flags);
	}
}
EXPORT_SYMBOL(fence_enable_sw_signaling);

/**
 * fence_add_callback - add a callback to be called when the fence
 * is signaled
 * @fence:	[in]	the fence to wait on
 * @cb:		[in]	the callback to register
 * @func:	[in]	the function to call
 *
 * cb will be initialized by fence_add_callback, no initialization
 * by the caller is required. Any number of callbacks can be registered
 * to a fence, but a callback can only be registered to one fence at a time.
 *
 * Note that the callback can be called from an atomic context. If
 * fence is already signaled, this function will return -ENOENT (and
 * *not* call the callback)
 *
 * Add a software callback to the fence. Same restrictions apply to
 * refcount as it does to fence_wait, however the caller doesn't need to
 * keep a refcount to fence afterwards: when software access is enabled,
 * the creator of the fence is required to keep the fence alive until
 * after it signals with fence_signal. The callback itself can be called
 * from irq context.
 *
 */
int fence_add_callback(struct fence *fence, struct fence_cb *cb,
		       fence_func_t func)
{
	unsigned long flags;
	int ret = 0;
	bool was_set;

	if (WARN_ON(!fence || !func))
		return -EINVAL;

	if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
		INIT_LIST_HEAD(&cb->node);
		return -ENOENT;
	}

	spin_lock_irqsave(fence->lock, flags);

	was_set = test_and_set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags);

	if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		ret = -ENOENT;
	else if (!was_set) {
		trace_fence_enable_signal(fence);

		if (!fence->ops->enable_signaling(fence)) {
			fence_signal_locked(fence);
			ret = -ENOENT;
		}
	}

	if (!ret) {
		cb->func = func;
		list_add_tail(&cb->node, &fence->cb_list);
	} else
		INIT_LIST_HEAD(&cb->node);
	spin_unlock_irqrestore(fence->lock, flags);

	return ret;
}
EXPORT_SYMBOL(fence_add_callback);

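/*
 * Consumer sketch (editorial, not part of this file): getting notified
 * when a fence signals instead of blocking on it. The callback may run in
 * irq context, so it only schedules deferred work; struct my_job and the
 * my_* names are hypothetical placeholders.
 */
struct my_job {
	struct fence_cb cb;
	struct work_struct work;
};

static void my_fence_cb(struct fence *fence, struct fence_cb *cb)
{
	struct my_job *job = container_of(cb, struct my_job, cb);

	schedule_work(&job->work);		/* defer the real processing */
}

static void my_run_after(struct fence *fence, struct my_job *job)
{
	if (fence_add_callback(fence, &job->cb, my_fence_cb) == -ENOENT)
		schedule_work(&job->work);	/* already signaled: run now */
}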
/**
 * fence_remove_callback - remove a callback from the signaling list
 * @fence:	[in]	the fence to wait on
 * @cb:		[in]	the callback to remove
 *
 * Remove a previously queued callback from the fence. This function returns
 * true if the callback is successfully removed, or false if the fence has
 * already been signaled.
 *
 * *WARNING*:
 * Cancelling a callback should only be done if you really know what you're
 * doing, since deadlocks and race conditions could occur all too easily. For
 * this reason, it should only ever be done on hardware lockup recovery,
 * with a reference held to the fence.
 */
bool
fence_remove_callback(struct fence *fence, struct fence_cb *cb)
{
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(fence->lock, flags);

	ret = !list_empty(&cb->node);
	if (ret)
		list_del_init(&cb->node);

	spin_unlock_irqrestore(fence->lock, flags);

	return ret;
}
EXPORT_SYMBOL(fence_remove_callback);

struct default_wait_cb {
	struct fence_cb base;
	struct task_struct *task;
};

static void
fence_default_wait_cb(struct fence *fence, struct fence_cb *cb)
{
	struct default_wait_cb *wait =
		container_of(cb, struct default_wait_cb, base);

	wake_up_state(wait->task, TASK_NORMAL);
}

/**
 * fence_default_wait - default sleep until the fence gets signaled
 * or until timeout elapses
 * @fence:	[in]	the fence to wait on
 * @intr:	[in]	if true, do an interruptible wait
 * @timeout:	[in]	timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the
 * remaining timeout in jiffies on success.
 */
signed long
fence_default_wait(struct fence *fence, bool intr, signed long timeout)
{
	struct default_wait_cb cb;
	unsigned long flags;
	signed long ret = timeout;
	bool was_set;

	if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return timeout;

	spin_lock_irqsave(fence->lock, flags);

	if (intr && signal_pending(current)) {
		ret = -ERESTARTSYS;
		goto out;
	}

	was_set = test_and_set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags);

	if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		goto out;

	if (!was_set) {
		trace_fence_enable_signal(fence);

		if (!fence->ops->enable_signaling(fence)) {
			fence_signal_locked(fence);
			goto out;
		}
	}

	cb.base.func = fence_default_wait_cb;
	cb.task = current;
	list_add(&cb.base.node, &fence->cb_list);

	while (!test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags) && ret > 0) {
		if (intr)
			__set_current_state(TASK_INTERRUPTIBLE);
		else
			__set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock_irqrestore(fence->lock, flags);

		ret = schedule_timeout(ret);

		spin_lock_irqsave(fence->lock, flags);
		if (ret > 0 && intr && signal_pending(current))
			ret = -ERESTARTSYS;
	}

	if (!list_empty(&cb.base.node))
		list_del(&cb.base.node);
	__set_current_state(TASK_RUNNING);

out:
	spin_unlock_irqrestore(fence->lock, flags);
	return ret;
}
EXPORT_SYMBOL(fence_default_wait);

/**
 * fence_init - Initialize a custom fence.
 * @fence:	[in]	the fence to initialize
 * @ops:	[in]	the fence_ops for operations on this fence
 * @lock:	[in]	the irqsafe spinlock to use for locking this fence
 * @context:	[in]	the execution context this fence is run on
 * @seqno:	[in]	a linear increasing sequence number for this context
 *
 * Initializes an allocated fence, the caller doesn't have to keep its
 * refcount after committing with this fence, but it will need to hold a
 * refcount again if fence_ops.enable_signaling gets called. This can
 * be used for implementing other types of fence.
 *
 * context and seqno are used for easy comparison between fences, allowing
 * to check which fence is later by simply using fence_later.
 */
void
fence_init(struct fence *fence, const struct fence_ops *ops,
	   spinlock_t *lock, unsigned context, unsigned seqno)
{
	BUG_ON(!lock);
	BUG_ON(!ops || !ops->wait || !ops->enable_signaling ||
	       !ops->get_driver_name || !ops->get_timeline_name);

	kref_init(&fence->refcount);
	fence->ops = ops;
	INIT_LIST_HEAD(&fence->cb_list);
	fence->lock = lock;
	fence->context = context;
	fence->seqno = seqno;
	fence->flags = 0UL;

	trace_fence_init(fence);
}
EXPORT_SYMBOL(fence_init);
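/*
 * Implementer sketch (editorial, not part of this file): the minimum a
 * driver needs to create and later signal a software fence with
 * fence_init() above. All my_* names are hypothetical; the context value
 * would come from fence_context_alloc().
 */
static DEFINE_SPINLOCK(my_fence_lock);

static const char *my_get_driver_name(struct fence *fence)
{
	return "my_driver";
}

static const char *my_get_timeline_name(struct fence *fence)
{
	return "my_timeline";
}

static bool my_enable_signaling(struct fence *fence)
{
	return true;	/* promise that fence_signal() will eventually run */
}

static const struct fence_ops my_fence_ops = {
	.get_driver_name	= my_get_driver_name,
	.get_timeline_name	= my_get_timeline_name,
	.enable_signaling	= my_enable_signaling,
	.wait			= fence_default_wait,
};

static struct fence *my_fence_create(unsigned context, unsigned seqno)
{
	struct fence *fence = kzalloc(sizeof(*fence), GFP_KERNEL);

	if (!fence)
		return NULL;

	fence_init(fence, &my_fence_ops, &my_fence_lock, context, seqno);
	return fence;	/* signal with fence_signal(), drop with fence_put() */
}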
drivers/dma-buf/reservation.c (new file, 475 lines)
@@ -0,0 +1,475 @@
/*
 * Copyright (C) 2012-2014 Canonical Ltd (Maarten Lankhorst)
 *
 * Based on bo.c which bears the following copyright notice,
 * but is dual licensed:
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <linux/reservation.h>
#include <linux/export.h>

DEFINE_WW_CLASS(reservation_ww_class);
EXPORT_SYMBOL(reservation_ww_class);

struct lock_class_key reservation_seqcount_class;
EXPORT_SYMBOL(reservation_seqcount_class);

const char reservation_seqcount_string[] = "reservation_seqcount";
EXPORT_SYMBOL(reservation_seqcount_string);

/*
 * Reserve space to add a shared fence to a reservation_object,
 * must be called with obj->lock held.
 */
int reservation_object_reserve_shared(struct reservation_object *obj)
{
	struct reservation_object_list *fobj, *old;
	u32 max;

	old = reservation_object_get_list(obj);

	if (old && old->shared_max) {
		if (old->shared_count < old->shared_max) {
			/* perform an in-place update */
			kfree(obj->staged);
			obj->staged = NULL;
			return 0;
		} else
			max = old->shared_max * 2;
	} else
		max = 4;

	/*
	 * resize obj->staged or allocate if it doesn't exist,
	 * noop if already correct size
	 */
	fobj = krealloc(obj->staged, offsetof(typeof(*fobj), shared[max]),
			GFP_KERNEL);
	if (!fobj)
		return -ENOMEM;

	obj->staged = fobj;
	fobj->shared_max = max;
	return 0;
}
EXPORT_SYMBOL(reservation_object_reserve_shared);

static void
reservation_object_add_shared_inplace(struct reservation_object *obj,
				      struct reservation_object_list *fobj,
				      struct fence *fence)
{
	u32 i;

	fence_get(fence);

	preempt_disable();
	write_seqcount_begin(&obj->seq);

	for (i = 0; i < fobj->shared_count; ++i) {
		struct fence *old_fence;

		old_fence = rcu_dereference_protected(fobj->shared[i],
						reservation_object_held(obj));

		if (old_fence->context == fence->context) {
			/* memory barrier is added by write_seqcount_begin */
			RCU_INIT_POINTER(fobj->shared[i], fence);
			write_seqcount_end(&obj->seq);
			preempt_enable();

			fence_put(old_fence);
			return;
		}
	}

	/*
	 * memory barrier is added by write_seqcount_begin,
	 * fobj->shared_count is protected by this lock too
	 */
	RCU_INIT_POINTER(fobj->shared[fobj->shared_count], fence);
	fobj->shared_count++;

	write_seqcount_end(&obj->seq);
	preempt_enable();
}

static void
reservation_object_add_shared_replace(struct reservation_object *obj,
				      struct reservation_object_list *old,
				      struct reservation_object_list *fobj,
				      struct fence *fence)
{
	unsigned i;
	struct fence *old_fence = NULL;

	fence_get(fence);

	if (!old) {
		RCU_INIT_POINTER(fobj->shared[0], fence);
		fobj->shared_count = 1;
		goto done;
	}

	/*
	 * no need to bump fence refcounts, rcu_read access
	 * requires the use of kref_get_unless_zero, and the
	 * references from the old struct are carried over to
	 * the new.
	 */
	fobj->shared_count = old->shared_count;

	for (i = 0; i < old->shared_count; ++i) {
		struct fence *check;

		check = rcu_dereference_protected(old->shared[i],
						reservation_object_held(obj));

		if (!old_fence && check->context == fence->context) {
			old_fence = check;
			RCU_INIT_POINTER(fobj->shared[i], fence);
		} else
			RCU_INIT_POINTER(fobj->shared[i], check);
	}
	if (!old_fence) {
		RCU_INIT_POINTER(fobj->shared[fobj->shared_count], fence);
		fobj->shared_count++;
	}

done:
	preempt_disable();
	write_seqcount_begin(&obj->seq);
	/*
	 * RCU_INIT_POINTER can be used here,
	 * seqcount provides the necessary barriers
	 */
	RCU_INIT_POINTER(obj->fence, fobj);
	write_seqcount_end(&obj->seq);
	preempt_enable();

	if (old)
		kfree_rcu(old, rcu);

	if (old_fence)
		fence_put(old_fence);
}

/*
 * Add a fence to a shared slot, obj->lock must be held, and
 * reservation_object_reserve_shared() has been called.
 */
void reservation_object_add_shared_fence(struct reservation_object *obj,
					 struct fence *fence)
{
	struct reservation_object_list *old, *fobj = obj->staged;

	old = reservation_object_get_list(obj);
	obj->staged = NULL;

	if (!fobj) {
		BUG_ON(old->shared_count >= old->shared_max);
		reservation_object_add_shared_inplace(obj, old, fence);
	} else
		reservation_object_add_shared_replace(obj, old, fobj, fence);
}
EXPORT_SYMBOL(reservation_object_add_shared_fence);

void reservation_object_add_excl_fence(struct reservation_object *obj,
				       struct fence *fence)
{
	struct fence *old_fence = reservation_object_get_excl(obj);
	struct reservation_object_list *old;
	u32 i = 0;

	old = reservation_object_get_list(obj);
	if (old)
		i = old->shared_count;

	if (fence)
		fence_get(fence);

	preempt_disable();
	write_seqcount_begin(&obj->seq);
	/* write_seqcount_begin provides the necessary memory barrier */
	RCU_INIT_POINTER(obj->fence_excl, fence);
	if (old)
		old->shared_count = 0;
	write_seqcount_end(&obj->seq);
	preempt_enable();

	/* inplace update, no shared fences */
	while (i--)
		fence_put(rcu_dereference_protected(old->shared[i],
						reservation_object_held(obj)));

	if (old_fence)
		fence_put(old_fence);
}
EXPORT_SYMBOL(reservation_object_add_excl_fence);

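/*
 * Writer sketch (editorial, not part of this file): the locked protocol
 * for the two add functions above. The caller takes obj->lock (a
 * ww_mutex); a NULL acquire context keeps the sketch simple, and
 * my_publish_shared is a hypothetical name.
 */
static int my_publish_shared(struct reservation_object *obj,
			     struct fence *fence)
{
	int ret;

	ww_mutex_lock(&obj->lock, NULL);

	ret = reservation_object_reserve_shared(obj);	/* may allocate */
	if (!ret)
		reservation_object_add_shared_fence(obj, fence);

	ww_mutex_unlock(&obj->lock);
	return ret;
}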
int reservation_object_get_fences_rcu(struct reservation_object *obj,
				      struct fence **pfence_excl,
				      unsigned *pshared_count,
				      struct fence ***pshared)
{
	unsigned shared_count = 0;
	unsigned retry = 1;
	struct fence **shared = NULL, *fence_excl = NULL;
	int ret = 0;

	while (retry) {
		struct reservation_object_list *fobj;
		unsigned seq;

		seq = read_seqcount_begin(&obj->seq);

		rcu_read_lock();

		fobj = rcu_dereference(obj->fence);
		if (fobj) {
			struct fence **nshared;
			size_t sz = sizeof(*shared) * fobj->shared_max;

			nshared = krealloc(shared, sz,
					   GFP_NOWAIT | __GFP_NOWARN);
			if (!nshared) {
				rcu_read_unlock();
				nshared = krealloc(shared, sz, GFP_KERNEL);
				if (nshared) {
					shared = nshared;
					continue;
				}

				ret = -ENOMEM;
				shared_count = 0;
				break;
			}
			shared = nshared;
			memcpy(shared, fobj->shared, sz);
			shared_count = fobj->shared_count;
		} else
			shared_count = 0;
		fence_excl = rcu_dereference(obj->fence_excl);

		retry = read_seqcount_retry(&obj->seq, seq);
		if (retry)
			goto unlock;

		if (!fence_excl || fence_get_rcu(fence_excl)) {
			unsigned i;

			for (i = 0; i < shared_count; ++i) {
				if (fence_get_rcu(shared[i]))
					continue;

				/* uh oh, refcount failed, abort and retry */
				while (i--)
					fence_put(shared[i]);

				if (fence_excl) {
					fence_put(fence_excl);
					fence_excl = NULL;
				}

				retry = 1;
				break;
			}
		} else
			retry = 1;

unlock:
		rcu_read_unlock();
	}
	*pshared_count = shared_count;
	if (shared_count)
		*pshared = shared;
	else {
		*pshared = NULL;
		kfree(shared);
	}
	*pfence_excl = fence_excl;

	return ret;
}
EXPORT_SYMBOL_GPL(reservation_object_get_fences_rcu);

long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
					 bool wait_all, bool intr,
					 unsigned long timeout)
{
	struct fence *fence;
	unsigned seq, shared_count, i = 0;
	long ret = timeout;

retry:
	fence = NULL;
	shared_count = 0;
	seq = read_seqcount_begin(&obj->seq);
	rcu_read_lock();

	if (wait_all) {
		struct reservation_object_list *fobj = rcu_dereference(obj->fence);

		if (fobj)
			shared_count = fobj->shared_count;

		if (read_seqcount_retry(&obj->seq, seq))
			goto unlock_retry;

		for (i = 0; i < shared_count; ++i) {
			struct fence *lfence = rcu_dereference(fobj->shared[i]);

			if (test_bit(FENCE_FLAG_SIGNALED_BIT, &lfence->flags))
				continue;

			if (!fence_get_rcu(lfence))
				goto unlock_retry;

			if (fence_is_signaled(lfence)) {
				fence_put(lfence);
				continue;
			}

			fence = lfence;
			break;
		}
	}

	if (!shared_count) {
		struct fence *fence_excl = rcu_dereference(obj->fence_excl);

		if (read_seqcount_retry(&obj->seq, seq))
			goto unlock_retry;

		if (fence_excl &&
		    !test_bit(FENCE_FLAG_SIGNALED_BIT, &fence_excl->flags)) {
			if (!fence_get_rcu(fence_excl))
				goto unlock_retry;

			if (fence_is_signaled(fence_excl))
				fence_put(fence_excl);
			else
				fence = fence_excl;
		}
	}

	rcu_read_unlock();
	if (fence) {
		ret = fence_wait_timeout(fence, intr, ret);
		fence_put(fence);
		if (ret > 0 && wait_all && (i + 1 < shared_count))
			goto retry;
	}
	return ret;

unlock_retry:
	rcu_read_unlock();
	goto retry;
}
EXPORT_SYMBOL_GPL(reservation_object_wait_timeout_rcu);

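/*
 * Reader sketch (editorial, not part of this file): waiting lock-free for
 * a buffer to go idle, e.g. before CPU access. wait_all = true also waits
 * for shared (reader) fences; HZ (one second) is an arbitrary example
 * timeout, and my_wait_idle is a hypothetical name.
 */
static int my_wait_idle(struct reservation_object *obj)
{
	long ret;

	ret = reservation_object_wait_timeout_rcu(obj, true, true, HZ);
	if (ret == 0)
		return -ETIMEDOUT;
	return ret < 0 ? ret : 0;	/* -ERESTARTSYS on signal, 0 on success */
}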
static inline int
reservation_object_test_signaled_single(struct fence *passed_fence)
{
	struct fence *fence, *lfence = passed_fence;
	int ret = 1;

	if (!test_bit(FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) {
		fence = fence_get_rcu(lfence);
		if (!fence)
			return -1;

		ret = !!fence_is_signaled(fence);
		fence_put(fence);
	}
	return ret;
}

bool reservation_object_test_signaled_rcu(struct reservation_object *obj,
					  bool test_all)
{
	unsigned seq, shared_count;
	int ret = true;

retry:
	shared_count = 0;
	seq = read_seqcount_begin(&obj->seq);
	rcu_read_lock();

	if (test_all) {
		unsigned i;

		struct reservation_object_list *fobj = rcu_dereference(obj->fence);

		if (fobj)
			shared_count = fobj->shared_count;

		if (read_seqcount_retry(&obj->seq, seq))
			goto unlock_retry;

		for (i = 0; i < shared_count; ++i) {
			struct fence *fence = rcu_dereference(fobj->shared[i]);

			ret = reservation_object_test_signaled_single(fence);
			if (ret < 0)
				goto unlock_retry;
			else if (!ret)
				break;
		}

		/*
		 * There could be a read_seqcount_retry here, but nothing cares
		 * about whether it's the old or newer fence pointers that are
		 * signaled. That race could still have happened after checking
		 * read_seqcount_retry. If you care, use ww_mutex_lock.
		 */
	}

	if (!shared_count) {
		struct fence *fence_excl = rcu_dereference(obj->fence_excl);

		if (read_seqcount_retry(&obj->seq, seq))
			goto unlock_retry;

		if (fence_excl) {
			ret = reservation_object_test_signaled_single(fence_excl);
			if (ret < 0)
				goto unlock_retry;
		}
	}

	rcu_read_unlock();
	return ret;

unlock_retry:
	rcu_read_unlock();
	goto retry;
}
EXPORT_SYMBOL_GPL(reservation_object_test_signaled_rcu);
drivers/dma-buf/seqno-fence.c (new file, 73 lines)
@@ -0,0 +1,73 @@
/*
 * seqno-fence, using a dma-buf to synchronize fencing
 *
 * Copyright (C) 2012 Texas Instruments
 * Copyright (C) 2012-2014 Canonical Ltd
 * Authors:
 *   Rob Clark <robdclark@gmail.com>
 *   Maarten Lankhorst <maarten.lankhorst@canonical.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/seqno-fence.h>

static const char *seqno_fence_get_driver_name(struct fence *fence)
{
	struct seqno_fence *seqno_fence = to_seqno_fence(fence);

	return seqno_fence->ops->get_driver_name(fence);
}

static const char *seqno_fence_get_timeline_name(struct fence *fence)
{
	struct seqno_fence *seqno_fence = to_seqno_fence(fence);

	return seqno_fence->ops->get_timeline_name(fence);
}

static bool seqno_enable_signaling(struct fence *fence)
{
	struct seqno_fence *seqno_fence = to_seqno_fence(fence);

	return seqno_fence->ops->enable_signaling(fence);
}

static bool seqno_signaled(struct fence *fence)
{
	struct seqno_fence *seqno_fence = to_seqno_fence(fence);

	return seqno_fence->ops->signaled && seqno_fence->ops->signaled(fence);
}

static void seqno_release(struct fence *fence)
{
	struct seqno_fence *f = to_seqno_fence(fence);

	dma_buf_put(f->sync_buf);
	if (f->ops->release)
		f->ops->release(fence);
	else
		fence_free(&f->base);
}

static signed long seqno_wait(struct fence *fence, bool intr, signed long timeout)
{
	struct seqno_fence *f = to_seqno_fence(fence);

	return f->ops->wait(fence, intr, timeout);
}

const struct fence_ops seqno_fence_ops = {
	.get_driver_name	= seqno_fence_get_driver_name,
	.get_timeline_name	= seqno_fence_get_timeline_name,
	.enable_signaling	= seqno_enable_signaling,
	.signaled		= seqno_signaled,
	.wait			= seqno_wait,
	.release		= seqno_release,
};
EXPORT_SYMBOL(seqno_fence_ops);
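/*
 * Exporter sketch (editorial, not part of this file), with a caveat: it
 * assumes the seqno_fence_init() helper and seqno_fence_condition enum
 * from <linux/seqno-fence.h> as shipped in this kernel generation; verify
 * against the tree before relying on it. sync_buf is a dma-buf whose word
 * at seqno_ofs is written by the device; seqno_fence_init() is expected
 * to take its own reference on sync_buf, which seqno_release() above
 * drops. All my_* names are hypothetical.
 */
static DEFINE_SPINLOCK(my_seqno_lock);

static struct fence *my_seqno_fence_create(struct dma_buf *sync_buf,
					   u32 context, u32 seqno,
					   const struct fence_ops *my_ops)
{
	struct seqno_fence *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return NULL;

	seqno_fence_init(f, &my_seqno_lock, sync_buf, context,
			 0 /* seqno_ofs */, seqno,
			 SEQNO_FENCE_WAIT_GEQUAL, my_ops);
	return &f->base;
}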