Mirror of https://github.com/AetherDroid/android_kernel_samsung_on5xelte.git (synced 2025-09-08 17:18:05 -04:00)

Fixed MTP to work with TWRP

This commit is contained in: commit f6dfaef42e

50820 changed files with 20846062 additions and 0 deletions
106	drivers/media/v4l2-core/Kconfig	(new file)
@@ -0,0 +1,106 @@
#
# Generic video config states
#

# Enable the V4L2 core and API
config VIDEO_V4L2
	tristate
	depends on (I2C || I2C=n) && VIDEO_DEV
	default (I2C || I2C=n) && VIDEO_DEV

config VIDEO_ADV_DEBUG
	bool "Enable advanced debug functionality on V4L2 drivers"
	default n
	---help---
	  Say Y here to enable advanced debugging functionality on some
	  V4L devices.
	  In doubt, say N.

config VIDEO_FIXED_MINOR_RANGES
	bool "Enable old-style fixed minor ranges on drivers/video devices"
	default n
	---help---
	  Say Y here to enable the old-style fixed-range minor assignments.
	  Only useful if you rely on the old behavior and use mknod instead
	  of udev.

	  When in doubt, say N.

config VIDEO_PCI_SKELETON
	tristate "Skeleton PCI V4L2 driver"
	depends on PCI && BUILD_DOCSRC
	depends on VIDEO_V4L2 && VIDEOBUF2_CORE
	depends on VIDEOBUF2_MEMOPS && VIDEOBUF2_DMA_CONTIG
	---help---
	  Enable build of the skeleton PCI driver, used as a reference
	  when developing new drivers.

# Used by drivers that need tuner.ko
config VIDEO_TUNER
	tristate
	depends on MEDIA_TUNER

# Used by drivers that need v4l2-mem2mem.ko
config V4L2_MEM2MEM_DEV
	tristate
	depends on VIDEOBUF2_CORE

# Used by drivers that need Videobuf modules
config VIDEOBUF_GEN
	tristate

config VIDEOBUF_DMA_SG
	tristate
	depends on HAS_DMA
	select VIDEOBUF_GEN

config VIDEOBUF_VMALLOC
	tristate
	select VIDEOBUF_GEN

config VIDEOBUF_DMA_CONTIG
	tristate
	depends on HAS_DMA
	select VIDEOBUF_GEN

config VIDEOBUF_DVB
	tristate
	select VIDEOBUF_GEN

# Used by drivers that need Videobuf2 modules
config VIDEOBUF2_CORE
	select DMA_SHARED_BUFFER
	tristate

config VIDEOBUF2_MEMOPS
	tristate

config VIDEOBUF2_DMA_CONTIG
	tristate
	depends on HAS_DMA
	select VIDEOBUF2_CORE
	select VIDEOBUF2_MEMOPS
	select DMA_SHARED_BUFFER

config VIDEOBUF2_VMALLOC
	tristate
	select VIDEOBUF2_CORE
	select VIDEOBUF2_MEMOPS
	select DMA_SHARED_BUFFER

config VIDEOBUF2_DMA_SG
	tristate
	#depends on HAS_DMA
	select VIDEOBUF2_CORE
	select VIDEOBUF2_MEMOPS

config VIDEOBUF2_ION
	bool "Video buffer 2 by Android ION"
	depends on ION_EXYNOS
	select VIDEOBUF2_CORE
	select VIDEOBUF2_MEMOPS
	help
	  Internal memory management for Video Buffer 2 by ION

config VIDEOBUF2_DVB
	tristate
	select VIDEOBUF2_CORE
42	drivers/media/v4l2-core/Makefile	(new file)
@@ -0,0 +1,42 @@
#
# Makefile for the V4L2 core
#

tuner-objs	:=	tuner-core.o

videodev-objs	:=	v4l2-dev.o v4l2-ioctl.o v4l2-device.o v4l2-fh.o \
			v4l2-event.o v4l2-ctrls.o v4l2-subdev.o v4l2-clk.o \
			v4l2-async.o
ifeq ($(CONFIG_COMPAT),y)
  videodev-objs += v4l2-compat-ioctl32.o
endif
ifeq ($(CONFIG_OF),y)
  videodev-objs += v4l2-of.o
endif

obj-$(CONFIG_VIDEO_V4L2) += videodev.o
obj-$(CONFIG_VIDEO_V4L2) += v4l2-common.o
obj-$(CONFIG_VIDEO_V4L2) += v4l2-dv-timings.o

obj-$(CONFIG_VIDEO_TUNER) += tuner.o

obj-$(CONFIG_V4L2_MEM2MEM_DEV) += v4l2-mem2mem.o

obj-$(CONFIG_VIDEOBUF_GEN) += videobuf-core.o
obj-$(CONFIG_VIDEOBUF_DMA_SG) += videobuf-dma-sg.o
obj-$(CONFIG_VIDEOBUF_DMA_CONTIG) += videobuf-dma-contig.o
obj-$(CONFIG_VIDEOBUF_VMALLOC) += videobuf-vmalloc.o
obj-$(CONFIG_VIDEOBUF_DVB) += videobuf-dvb.o

obj-$(CONFIG_VIDEOBUF2_CORE) += videobuf2-core.o
obj-$(CONFIG_VIDEOBUF2_MEMOPS) += videobuf2-memops.o
obj-$(CONFIG_VIDEOBUF2_VMALLOC) += videobuf2-vmalloc.o
obj-$(CONFIG_VIDEOBUF2_DMA_CONTIG) += videobuf2-dma-contig.o
obj-$(CONFIG_VIDEOBUF2_DMA_SG) += videobuf2-dma-sg.o
obj-$(CONFIG_VIDEOBUF2_DVB) += videobuf2-dvb.o
obj-$(CONFIG_VIDEOBUF2_ION) += videobuf2-ion.o

ccflags-y += -I$(srctree)/drivers/media/dvb-core
ccflags-y += -I$(srctree)/drivers/media/dvb-frontends
ccflags-y += -I$(srctree)/drivers/media/tuners
1361	drivers/media/v4l2-core/tuner-core.c	(new file — diff suppressed because it is too large)
312	drivers/media/v4l2-core/v4l2-async.c	(new file)
@@ -0,0 +1,312 @@
/*
 * V4L2 asynchronous subdevice registration API
 *
 * Copyright (C) 2012-2013, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/device.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/types.h>

#include <media/v4l2-async.h>
#include <media/v4l2-device.h>
#include <media/v4l2-subdev.h>

static bool match_i2c(struct device *dev, struct v4l2_async_subdev *asd)
{
#if IS_ENABLED(CONFIG_I2C)
	struct i2c_client *client = i2c_verify_client(dev);
	return client &&
		asd->match.i2c.adapter_id == client->adapter->nr &&
		asd->match.i2c.address == client->addr;
#else
	return false;
#endif
}

static bool match_devname(struct device *dev, struct v4l2_async_subdev *asd)
{
	return !strcmp(asd->match.device_name.name, dev_name(dev));
}

static bool match_of(struct device *dev, struct v4l2_async_subdev *asd)
{
	return dev->of_node == asd->match.of.node;
}

static LIST_HEAD(subdev_list);
static LIST_HEAD(notifier_list);
static DEFINE_MUTEX(list_lock);

static struct v4l2_async_subdev *v4l2_async_belongs(struct v4l2_async_notifier *notifier,
						    struct v4l2_subdev *sd)
{
	struct v4l2_async_subdev *asd;
	bool (*match)(struct device *, struct v4l2_async_subdev *);

	list_for_each_entry(asd, &notifier->waiting, list) {
		/* bus_type has been verified valid before */
		switch (asd->match_type) {
		case V4L2_ASYNC_MATCH_CUSTOM:
			match = asd->match.custom.match;
			if (!match)
				/* Match always */
				return asd;
			break;
		case V4L2_ASYNC_MATCH_DEVNAME:
			match = match_devname;
			break;
		case V4L2_ASYNC_MATCH_I2C:
			match = match_i2c;
			break;
		case V4L2_ASYNC_MATCH_OF:
			match = match_of;
			break;
		default:
			/* Cannot happen, unless someone breaks us */
			WARN_ON(true);
			return NULL;
		}

		/* match cannot be NULL here */
		if (match(sd->dev, asd))
			return asd;
	}

	return NULL;
}

static int v4l2_async_test_notify(struct v4l2_async_notifier *notifier,
				  struct v4l2_subdev *sd,
				  struct v4l2_async_subdev *asd)
{
	int ret;

	/* Remove from the waiting list */
	list_del(&asd->list);
	sd->asd = asd;
	sd->notifier = notifier;

	if (notifier->bound) {
		ret = notifier->bound(notifier, sd, asd);
		if (ret < 0)
			return ret;
	}
	/* Move from the global subdevice list to notifier's done */
	list_move(&sd->async_list, &notifier->done);

	ret = v4l2_device_register_subdev(notifier->v4l2_dev, sd);
	if (ret < 0) {
		if (notifier->unbind)
			notifier->unbind(notifier, sd, asd);
		return ret;
	}

	if (list_empty(&notifier->waiting) && notifier->complete)
		return notifier->complete(notifier);

	return 0;
}

static void v4l2_async_cleanup(struct v4l2_subdev *sd)
{
	v4l2_device_unregister_subdev(sd);
	/* Subdevice driver will reprobe and put the subdev back onto the list */
	list_del_init(&sd->async_list);
	sd->asd = NULL;
	sd->dev = NULL;
}

int v4l2_async_notifier_register(struct v4l2_device *v4l2_dev,
				 struct v4l2_async_notifier *notifier)
{
	struct v4l2_subdev *sd, *tmp;
	struct v4l2_async_subdev *asd;
	int i;

	if (!notifier->num_subdevs || notifier->num_subdevs > V4L2_MAX_SUBDEVS)
		return -EINVAL;

	notifier->v4l2_dev = v4l2_dev;
	INIT_LIST_HEAD(&notifier->waiting);
	INIT_LIST_HEAD(&notifier->done);

	for (i = 0; i < notifier->num_subdevs; i++) {
		asd = notifier->subdevs[i];

		switch (asd->match_type) {
		case V4L2_ASYNC_MATCH_CUSTOM:
		case V4L2_ASYNC_MATCH_DEVNAME:
		case V4L2_ASYNC_MATCH_I2C:
		case V4L2_ASYNC_MATCH_OF:
			break;
		default:
			dev_err(notifier->v4l2_dev ? notifier->v4l2_dev->dev : NULL,
				"Invalid match type %u on %p\n",
				asd->match_type, asd);
			return -EINVAL;
		}
		list_add_tail(&asd->list, &notifier->waiting);
	}

	mutex_lock(&list_lock);

	/* Keep also completed notifiers on the list */
	list_add(&notifier->list, &notifier_list);

	list_for_each_entry_safe(sd, tmp, &subdev_list, async_list) {
		int ret;

		asd = v4l2_async_belongs(notifier, sd);
		if (!asd)
			continue;

		ret = v4l2_async_test_notify(notifier, sd, asd);
		if (ret < 0) {
			mutex_unlock(&list_lock);
			return ret;
		}
	}

	mutex_unlock(&list_lock);

	return 0;
}
EXPORT_SYMBOL(v4l2_async_notifier_register);

void v4l2_async_notifier_unregister(struct v4l2_async_notifier *notifier)
{
	struct v4l2_subdev *sd, *tmp;
	unsigned int notif_n_subdev = notifier->num_subdevs;
	unsigned int n_subdev = min(notif_n_subdev, V4L2_MAX_SUBDEVS);
	struct device **dev;
	int i = 0;

	if (!notifier->v4l2_dev)
		return;

	dev = kmalloc(n_subdev * sizeof(*dev), GFP_KERNEL);
	if (!dev) {
		dev_err(notifier->v4l2_dev->dev,
			"Failed to allocate device cache!\n");
	}

	mutex_lock(&list_lock);

	list_del(&notifier->list);

	list_for_each_entry_safe(sd, tmp, &notifier->done, async_list) {
		struct device *d;

		d = get_device(sd->dev);

		v4l2_async_cleanup(sd);

		/* If we handled USB devices, we'd have to lock the parent too */
		device_release_driver(d);

		if (notifier->unbind)
			notifier->unbind(notifier, sd, sd->asd);

		/*
		 * Store device at the device cache, in order to call
		 * put_device() on the final step
		 */
		if (dev)
			dev[i++] = d;
		else
			put_device(d);
	}

	mutex_unlock(&list_lock);

	/*
	 * Call device_attach() to reprobe devices
	 *
	 * NOTE: If dev allocation fails, i is 0, and the whole loop won't be
	 * executed.
	 */
	while (i--) {
		struct device *d = dev[i];

		if (d && device_attach(d) < 0) {
			const char *name = "(none)";
			int lock = device_trylock(d);

			if (lock && d->driver)
				name = d->driver->name;
			dev_err(d, "Failed to re-probe to %s\n", name);
			if (lock)
				device_unlock(d);
		}
		put_device(d);
	}
	kfree(dev);

	notifier->v4l2_dev = NULL;

	/*
	 * Don't care about the waiting list, it is initialised and populated
	 * upon notifier registration.
	 */
}
EXPORT_SYMBOL(v4l2_async_notifier_unregister);

int v4l2_async_register_subdev(struct v4l2_subdev *sd)
{
	struct v4l2_async_notifier *notifier;

	mutex_lock(&list_lock);

	INIT_LIST_HEAD(&sd->async_list);

	list_for_each_entry(notifier, &notifier_list, list) {
		struct v4l2_async_subdev *asd = v4l2_async_belongs(notifier, sd);
		if (asd) {
			int ret = v4l2_async_test_notify(notifier, sd, asd);
			mutex_unlock(&list_lock);
			return ret;
		}
	}

	/* None matched, wait for hot-plugging */
	list_add(&sd->async_list, &subdev_list);

	mutex_unlock(&list_lock);

	return 0;
}
EXPORT_SYMBOL(v4l2_async_register_subdev);

void v4l2_async_unregister_subdev(struct v4l2_subdev *sd)
{
	struct v4l2_async_notifier *notifier = sd->notifier;

	if (!sd->asd) {
		if (!list_empty(&sd->async_list))
			v4l2_async_cleanup(sd);
		return;
	}

	mutex_lock(&list_lock);

	list_add(&sd->asd->list, &notifier->waiting);

	v4l2_async_cleanup(sd);

	if (notifier->unbind)
		notifier->unbind(notifier, sd, sd->asd);

	mutex_unlock(&list_lock);
}
EXPORT_SYMBOL(v4l2_async_unregister_subdev);
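
For orientation, here is a minimal sketch of how this registration API is consumed (not part of this commit; the bridge/sensor names and callback bodies are hypothetical). A bridge driver describes the sub-devices it waits for and registers a notifier; each sub-device driver announces itself from its own probe:

/* Hypothetical bridge-driver fragment: wait for one sensor, matched by
 * its device-tree node. */
static struct v4l2_async_subdev sensor_asd = {
	.match_type = V4L2_ASYNC_MATCH_OF,
	/* .match.of.node would be taken from the bridge's DT endpoint */
};

static struct v4l2_async_subdev *bridge_subdevs[] = { &sensor_asd };

static int bridge_bound(struct v4l2_async_notifier *notifier,
			struct v4l2_subdev *sd,
			struct v4l2_async_subdev *asd)
{
	/* Runs from v4l2_async_test_notify() once the sensor has matched. */
	return 0;
}

static struct v4l2_async_notifier bridge_notifier = {
	.subdevs = bridge_subdevs,
	.num_subdevs = ARRAY_SIZE(bridge_subdevs),
	.bound = bridge_bound,
};

/* Bridge probe:  v4l2_async_notifier_register(&bridge_v4l2_dev, &bridge_notifier);
 * Sensor probe:  v4l2_async_register_subdev(&sensor_sd); */

Whichever side probes first simply parks on the global subdev_list or notifier_list; the match in v4l2_async_belongs() completes the pairing when the other side arrives.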
281	drivers/media/v4l2-core/v4l2-clk.c	(new file)
@@ -0,0 +1,281 @@
/*
 * V4L2 clock service
 *
 * Copyright (C) 2012-2013, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/atomic.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/string.h>

#include <media/v4l2-clk.h>
#include <media/v4l2-subdev.h>

static DEFINE_MUTEX(clk_lock);
static LIST_HEAD(clk_list);

static struct v4l2_clk *v4l2_clk_find(const char *dev_id, const char *id)
{
	struct v4l2_clk *clk;

	list_for_each_entry(clk, &clk_list, list) {
		if (strcmp(dev_id, clk->dev_id))
			continue;

		if (!id || !clk->id || !strcmp(clk->id, id))
			return clk;
	}

	return ERR_PTR(-ENODEV);
}

struct v4l2_clk *v4l2_clk_get(struct device *dev, const char *id)
{
	struct v4l2_clk *clk;

	mutex_lock(&clk_lock);
	clk = v4l2_clk_find(dev_name(dev), id);

	if (!IS_ERR(clk))
		atomic_inc(&clk->use_count);
	mutex_unlock(&clk_lock);

	return clk;
}
EXPORT_SYMBOL(v4l2_clk_get);

void v4l2_clk_put(struct v4l2_clk *clk)
{
	struct v4l2_clk *tmp;

	if (IS_ERR(clk))
		return;

	mutex_lock(&clk_lock);

	list_for_each_entry(tmp, &clk_list, list)
		if (tmp == clk)
			atomic_dec(&clk->use_count);

	mutex_unlock(&clk_lock);
}
EXPORT_SYMBOL(v4l2_clk_put);

static int v4l2_clk_lock_driver(struct v4l2_clk *clk)
{
	struct v4l2_clk *tmp;
	int ret = -ENODEV;

	mutex_lock(&clk_lock);

	list_for_each_entry(tmp, &clk_list, list)
		if (tmp == clk) {
			ret = !try_module_get(clk->ops->owner);
			if (ret)
				ret = -EFAULT;
			break;
		}

	mutex_unlock(&clk_lock);

	return ret;
}

static void v4l2_clk_unlock_driver(struct v4l2_clk *clk)
{
	module_put(clk->ops->owner);
}

int v4l2_clk_enable(struct v4l2_clk *clk)
{
	int ret = v4l2_clk_lock_driver(clk);

	if (ret < 0)
		return ret;

	mutex_lock(&clk->lock);

	if (++clk->enable == 1 && clk->ops->enable) {
		ret = clk->ops->enable(clk);
		if (ret < 0)
			clk->enable--;
	}

	mutex_unlock(&clk->lock);

	return ret;
}
EXPORT_SYMBOL(v4l2_clk_enable);

/*
 * You might Oops if you try to disable a disabled clock, because then the
 * driver isn't locked and could have been unloaded by now, so, don't do that
 */
void v4l2_clk_disable(struct v4l2_clk *clk)
{
	int enable;

	mutex_lock(&clk->lock);

	enable = --clk->enable;
	if (WARN(enable < 0, "Unbalanced %s() on %s:%s!\n", __func__,
		 clk->dev_id, clk->id))
		clk->enable++;
	else if (!enable && clk->ops->disable)
		clk->ops->disable(clk);

	mutex_unlock(&clk->lock);

	v4l2_clk_unlock_driver(clk);
}
EXPORT_SYMBOL(v4l2_clk_disable);

unsigned long v4l2_clk_get_rate(struct v4l2_clk *clk)
{
	int ret = v4l2_clk_lock_driver(clk);

	if (ret < 0)
		return ret;

	mutex_lock(&clk->lock);
	if (!clk->ops->get_rate)
		ret = -ENOSYS;
	else
		ret = clk->ops->get_rate(clk);
	mutex_unlock(&clk->lock);

	v4l2_clk_unlock_driver(clk);

	return ret;
}
EXPORT_SYMBOL(v4l2_clk_get_rate);

int v4l2_clk_set_rate(struct v4l2_clk *clk, unsigned long rate)
{
	int ret = v4l2_clk_lock_driver(clk);

	if (ret < 0)
		return ret;

	mutex_lock(&clk->lock);
	if (!clk->ops->set_rate)
		ret = -ENOSYS;
	else
		ret = clk->ops->set_rate(clk, rate);
	mutex_unlock(&clk->lock);

	v4l2_clk_unlock_driver(clk);

	return ret;
}
EXPORT_SYMBOL(v4l2_clk_set_rate);

struct v4l2_clk *v4l2_clk_register(const struct v4l2_clk_ops *ops,
				   const char *dev_id,
				   const char *id, void *priv)
{
	struct v4l2_clk *clk;
	int ret;

	if (!ops || !dev_id)
		return ERR_PTR(-EINVAL);

	clk = kzalloc(sizeof(struct v4l2_clk), GFP_KERNEL);
	if (!clk)
		return ERR_PTR(-ENOMEM);

	clk->id = kstrdup(id, GFP_KERNEL);
	clk->dev_id = kstrdup(dev_id, GFP_KERNEL);
	if ((id && !clk->id) || !clk->dev_id) {
		ret = -ENOMEM;
		goto ealloc;
	}
	clk->ops = ops;
	clk->priv = priv;
	atomic_set(&clk->use_count, 0);
	mutex_init(&clk->lock);

	mutex_lock(&clk_lock);
	if (!IS_ERR(v4l2_clk_find(dev_id, id))) {
		mutex_unlock(&clk_lock);
		ret = -EEXIST;
		goto eexist;
	}
	list_add_tail(&clk->list, &clk_list);
	mutex_unlock(&clk_lock);

	return clk;

eexist:
ealloc:
	kfree(clk->id);
	kfree(clk->dev_id);
	kfree(clk);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(v4l2_clk_register);

void v4l2_clk_unregister(struct v4l2_clk *clk)
{
	if (WARN(atomic_read(&clk->use_count),
		 "%s(): Refusing to unregister ref-counted %s:%s clock!\n",
		 __func__, clk->dev_id, clk->id))
		return;

	mutex_lock(&clk_lock);
	list_del(&clk->list);
	mutex_unlock(&clk_lock);

	kfree(clk->id);
	kfree(clk->dev_id);
	kfree(clk);
}
EXPORT_SYMBOL(v4l2_clk_unregister);

struct v4l2_clk_fixed {
	unsigned long rate;
	struct v4l2_clk_ops ops;
};

static unsigned long fixed_get_rate(struct v4l2_clk *clk)
{
	struct v4l2_clk_fixed *priv = clk->priv;
	return priv->rate;
}

struct v4l2_clk *__v4l2_clk_register_fixed(const char *dev_id,
		const char *id, unsigned long rate, struct module *owner)
{
	struct v4l2_clk *clk;
	struct v4l2_clk_fixed *priv = kzalloc(sizeof(*priv), GFP_KERNEL);

	if (!priv)
		return ERR_PTR(-ENOMEM);

	priv->rate = rate;
	priv->ops.get_rate = fixed_get_rate;
	priv->ops.owner = owner;

	clk = v4l2_clk_register(&priv->ops, dev_id, id, priv);
	if (IS_ERR(clk))
		kfree(priv);

	return clk;
}
EXPORT_SYMBOL(__v4l2_clk_register_fixed);

void v4l2_clk_unregister_fixed(struct v4l2_clk *clk)
{
	kfree(clk->priv);
	v4l2_clk_unregister(clk);
}
EXPORT_SYMBOL(v4l2_clk_unregister_fixed);
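
A minimal usage sketch for the clock service above (assuming the v4l2_clk_register_fixed() convenience wrapper from media/v4l2-clk.h; the device and clock names are illustrative): the host registers a fixed-rate clock under the *consumer's* device name, because v4l2_clk_get() looks clocks up by dev_name() of the caller.

#include <linux/i2c.h>
#include <media/v4l2-clk.h>

/* Producer (e.g. the camera host) — "1-0036" would be the sensor's
 * i2c device name. */
static struct v4l2_clk *mclk;

static int host_publish_mclk(void)
{
	mclk = v4l2_clk_register_fixed("1-0036", "mclk", 24000000);
	return IS_ERR(mclk) ? PTR_ERR(mclk) : 0;
}

/* Consumer (the sensor driver) brackets its use of the clock: */
static int sensor_power_on(struct i2c_client *client)
{
	struct v4l2_clk *clk = v4l2_clk_get(&client->dev, "mclk");

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	v4l2_clk_enable(clk);	/* ops->enable fires on the 0 -> 1 edge */
	/* ... program the sensor over i2c ... */
	v4l2_clk_disable(clk);
	v4l2_clk_put(clk);
	return 0;
}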
532	drivers/media/v4l2-core/v4l2-common.c	(new file)
@@ -0,0 +1,532 @@
/*
 *	Video for Linux Two
 *
 *	A generic video device interface for the LINUX operating system
 *	using a set of device structures/vectors for low level operations.
 *
 *	This file replaces the videodev.c file that comes with the
 *	regular kernel distribution.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 * Author:	Bill Dirks <bill@thedirks.org>
 *		based on code by Alan Cox, <alan@cymru.net>
 *
 */

/*
 * Video capture interface for Linux
 *
 *	A generic video device interface for the LINUX operating system
 *	using a set of device structures/vectors for low level operations.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 * Author:	Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *
 * Fixes:
 */

/*
 * Video4linux 1/2 integration by Justin Schoeman
 * <justin@suntiger.ee.up.ac.za>
 * 2.4 PROCFS support ported from 2.4 kernels by
 *	Iñaki García Etxebarria <garetxe@euskalnet.net>
 * Makefile fix by "W. Michael Petullo" <mike@flyn.org>
 * 2.4 devfs support ported from 2.4 kernels by
 *	Dan Merillat <dan@merillat.org>
 * Added Gerd Knorrs v4l1 enhancements (Justin Schoeman)
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/i2c.h>
#if defined(CONFIG_SPI)
#include <linux/spi/spi.h>
#endif
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/div64.h>
#include <media/v4l2-common.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ctrls.h>

#include <linux/videodev2.h>

MODULE_AUTHOR("Bill Dirks, Justin Schoeman, Gerd Knorr");
MODULE_DESCRIPTION("misc helper functions for v4l2 device drivers");
MODULE_LICENSE("GPL");

/*
 *
 *	V 4 L 2   D R I V E R   H E L P E R   A P I
 *
 */

/*
 *  Video Standard Operations (contributed by Michael Schimek)
 */

/* Helper functions for control handling */

/* Check for correctness of the ctrl's value based on the data from
   struct v4l2_queryctrl and the available menu items. Note that
   menu_items may be NULL, in that case it is ignored. */
int v4l2_ctrl_check(struct v4l2_ext_control *ctrl, struct v4l2_queryctrl *qctrl,
		const char * const *menu_items)
{
	if (qctrl->flags & V4L2_CTRL_FLAG_DISABLED)
		return -EINVAL;
	if (qctrl->flags & V4L2_CTRL_FLAG_GRABBED)
		return -EBUSY;
	if (qctrl->type == V4L2_CTRL_TYPE_STRING)
		return 0;
	if (qctrl->type == V4L2_CTRL_TYPE_BUTTON ||
	    qctrl->type == V4L2_CTRL_TYPE_INTEGER64 ||
	    qctrl->type == V4L2_CTRL_TYPE_CTRL_CLASS)
		return 0;
	if (ctrl->value < qctrl->minimum || ctrl->value > qctrl->maximum)
		return -ERANGE;
	if (qctrl->type == V4L2_CTRL_TYPE_MENU && menu_items != NULL) {
		if (menu_items[ctrl->value] == NULL ||
		    menu_items[ctrl->value][0] == '\0')
			return -EINVAL;
	}
	if (qctrl->type == V4L2_CTRL_TYPE_BITMASK &&
	    (ctrl->value & ~qctrl->maximum))
		return -ERANGE;
	return 0;
}
EXPORT_SYMBOL(v4l2_ctrl_check);

/* Fill in a struct v4l2_queryctrl */
int v4l2_ctrl_query_fill(struct v4l2_queryctrl *qctrl, s32 _min, s32 _max, s32 _step, s32 _def)
{
	const char *name;
	s64 min = _min;
	s64 max = _max;
	u64 step = _step;
	s64 def = _def;

	v4l2_ctrl_fill(qctrl->id, &name, &qctrl->type,
		       &min, &max, &step, &def, &qctrl->flags);

	if (name == NULL)
		return -EINVAL;

	qctrl->minimum = min;
	qctrl->maximum = max;
	qctrl->step = step;
	qctrl->default_value = def;
	qctrl->reserved[0] = qctrl->reserved[1] = 0;
	strlcpy(qctrl->name, name, sizeof(qctrl->name));
	return 0;
}
EXPORT_SYMBOL(v4l2_ctrl_query_fill);

/* Fill in a struct v4l2_querymenu based on the struct v4l2_queryctrl and
   the menu. The qctrl pointer may be NULL, in which case it is ignored.
   If menu_items is NULL, then the menu items are retrieved using
   v4l2_ctrl_get_menu. */
int v4l2_ctrl_query_menu(struct v4l2_querymenu *qmenu, struct v4l2_queryctrl *qctrl,
	       const char * const *menu_items)
{
	int i;

	qmenu->reserved = 0;
	if (menu_items == NULL)
		menu_items = v4l2_ctrl_get_menu(qmenu->id);
	if (menu_items == NULL ||
	    (qctrl && (qmenu->index < qctrl->minimum || qmenu->index > qctrl->maximum)))
		return -EINVAL;
	for (i = 0; i < qmenu->index && menu_items[i]; i++) ;
	if (menu_items[i] == NULL || menu_items[i][0] == '\0')
		return -EINVAL;
	strlcpy(qmenu->name, menu_items[qmenu->index], sizeof(qmenu->name));
	return 0;
}
EXPORT_SYMBOL(v4l2_ctrl_query_menu);

/* Fill in a struct v4l2_querymenu based on the specified array of valid
   menu items (terminated by V4L2_CTRL_MENU_IDS_END).
   Use this if there are 'holes' in the list of valid menu items. */
int v4l2_ctrl_query_menu_valid_items(struct v4l2_querymenu *qmenu, const u32 *ids)
{
	const char * const *menu_items = v4l2_ctrl_get_menu(qmenu->id);

	qmenu->reserved = 0;
	if (menu_items == NULL || ids == NULL)
		return -EINVAL;
	while (*ids != V4L2_CTRL_MENU_IDS_END) {
		if (*ids++ == qmenu->index) {
			strlcpy(qmenu->name, menu_items[qmenu->index],
				sizeof(qmenu->name));
			return 0;
		}
	}
	return -EINVAL;
}
EXPORT_SYMBOL(v4l2_ctrl_query_menu_valid_items);

/* ctrl_classes points to an array of u32 pointers, the last element is
   a NULL pointer. Each u32 array is a 0-terminated array of control IDs.
   Each array must be sorted low to high and belong to the same control
   class. The array of u32 pointers must also be sorted, from low class IDs
   to high class IDs.

   This function returns the first ID that follows after the given ID.
   When no more controls are available 0 is returned. */
u32 v4l2_ctrl_next(const u32 * const * ctrl_classes, u32 id)
{
	u32 ctrl_class = V4L2_CTRL_ID2CLASS(id);
	const u32 *pctrl;

	if (ctrl_classes == NULL)
		return 0;

	/* if no query is desired, then check if the ID is part of ctrl_classes */
	if ((id & V4L2_CTRL_FLAG_NEXT_CTRL) == 0) {
		/* find class */
		while (*ctrl_classes && V4L2_CTRL_ID2CLASS(**ctrl_classes) != ctrl_class)
			ctrl_classes++;
		if (*ctrl_classes == NULL)
			return 0;
		pctrl = *ctrl_classes;
		/* find control ID */
		while (*pctrl && *pctrl != id) pctrl++;
		return *pctrl ? id : 0;
	}
	id &= V4L2_CTRL_ID_MASK;
	id++;	/* select next control */
	/* find first class that matches (or is greater than) the class of
	   the ID */
	while (*ctrl_classes && V4L2_CTRL_ID2CLASS(**ctrl_classes) < ctrl_class)
		ctrl_classes++;
	/* no more classes */
	if (*ctrl_classes == NULL)
		return 0;
	pctrl = *ctrl_classes;
	/* find first ctrl within the class that is >= ID */
	while (*pctrl && *pctrl < id) pctrl++;
	if (*pctrl)
		return *pctrl;
	/* we are at the end of the controls of the current class. */
	/* continue with next class if available */
	ctrl_classes++;
	if (*ctrl_classes == NULL)
		return 0;
	return **ctrl_classes;
}
EXPORT_SYMBOL(v4l2_ctrl_next);

/* I2C Helper functions */

#if IS_ENABLED(CONFIG_I2C)

void v4l2_i2c_subdev_init(struct v4l2_subdev *sd, struct i2c_client *client,
		const struct v4l2_subdev_ops *ops)
{
	v4l2_subdev_init(sd, ops);
	sd->flags |= V4L2_SUBDEV_FL_IS_I2C;
	/* the owner is the same as the i2c_client's driver owner */
	sd->owner = client->dev.driver->owner;
	sd->dev = &client->dev;
	/* i2c_client and v4l2_subdev point to one another */
	v4l2_set_subdevdata(sd, client);
	i2c_set_clientdata(client, sd);
	/* initialize name */
	snprintf(sd->name, sizeof(sd->name), "%s %d-%04x",
		client->dev.driver->name, i2c_adapter_id(client->adapter),
		client->addr);
}
EXPORT_SYMBOL_GPL(v4l2_i2c_subdev_init);

/* Load an i2c sub-device. */
struct v4l2_subdev *v4l2_i2c_new_subdev_board(struct v4l2_device *v4l2_dev,
		struct i2c_adapter *adapter, struct i2c_board_info *info,
		const unsigned short *probe_addrs)
{
	struct v4l2_subdev *sd = NULL;
	struct i2c_client *client;

	BUG_ON(!v4l2_dev);

	request_module(I2C_MODULE_PREFIX "%s", info->type);

	/* Create the i2c client */
	if (info->addr == 0 && probe_addrs)
		client = i2c_new_probed_device(adapter, info, probe_addrs,
					       NULL);
	else
		client = i2c_new_device(adapter, info);

	/* Note: by loading the module first we are certain that c->driver
	   will be set if the driver was found. If the module was not loaded
	   first, then the i2c core tries to delay-load the module for us,
	   and then c->driver is still NULL until the module is finally
	   loaded. This delay-load mechanism doesn't work if other drivers
	   want to use the i2c device, so explicitly loading the module
	   is the best alternative. */
	if (client == NULL || client->dev.driver == NULL)
		goto error;

	/* Lock the module so we can safely get the v4l2_subdev pointer */
	if (!try_module_get(client->dev.driver->owner))
		goto error;
	sd = i2c_get_clientdata(client);

	/* Register with the v4l2_device which increases the module's
	   use count as well. */
	if (v4l2_device_register_subdev(v4l2_dev, sd))
		sd = NULL;
	/* Decrease the module use count to match the first try_module_get. */
	module_put(client->dev.driver->owner);

error:
	/* If we have a client but no subdev, then something went wrong and
	   we must unregister the client. */
	if (client && sd == NULL)
		i2c_unregister_device(client);
	return sd;
}
EXPORT_SYMBOL_GPL(v4l2_i2c_new_subdev_board);

struct v4l2_subdev *v4l2_i2c_new_subdev(struct v4l2_device *v4l2_dev,
		struct i2c_adapter *adapter, const char *client_type,
		u8 addr, const unsigned short *probe_addrs)
{
	struct i2c_board_info info;

	/* Setup the i2c board info with the device type and
	   the device address. */
	memset(&info, 0, sizeof(info));
	strlcpy(info.type, client_type, sizeof(info.type));
	info.addr = addr;

	return v4l2_i2c_new_subdev_board(v4l2_dev, adapter, &info, probe_addrs);
}
EXPORT_SYMBOL_GPL(v4l2_i2c_new_subdev);

/* Return i2c client address of v4l2_subdev. */
unsigned short v4l2_i2c_subdev_addr(struct v4l2_subdev *sd)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);

	return client ? client->addr : I2C_CLIENT_END;
}
EXPORT_SYMBOL_GPL(v4l2_i2c_subdev_addr);

/* Return a list of I2C tuner addresses to probe. Use only if the tuner
   addresses are unknown. */
const unsigned short *v4l2_i2c_tuner_addrs(enum v4l2_i2c_tuner_type type)
{
	static const unsigned short radio_addrs[] = {
#if IS_ENABLED(CONFIG_MEDIA_TUNER_TEA5761)
		0x10,
#endif
		0x60,
		I2C_CLIENT_END
	};
	static const unsigned short demod_addrs[] = {
		0x42, 0x43, 0x4a, 0x4b,
		I2C_CLIENT_END
	};
	static const unsigned short tv_addrs[] = {
		0x42, 0x43, 0x4a, 0x4b,		/* tda8290 */
		0x60, 0x61, 0x62, 0x63, 0x64,
		I2C_CLIENT_END
	};

	switch (type) {
	case ADDRS_RADIO:
		return radio_addrs;
	case ADDRS_DEMOD:
		return demod_addrs;
	case ADDRS_TV:
		return tv_addrs;
	case ADDRS_TV_WITH_DEMOD:
		return tv_addrs + 4;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(v4l2_i2c_tuner_addrs);

#endif /* defined(CONFIG_I2C) */

#if defined(CONFIG_SPI)

/* Load an spi sub-device. */

void v4l2_spi_subdev_init(struct v4l2_subdev *sd, struct spi_device *spi,
		const struct v4l2_subdev_ops *ops)
{
	v4l2_subdev_init(sd, ops);
	sd->flags |= V4L2_SUBDEV_FL_IS_SPI;
	/* the owner is the same as the spi_device's driver owner */
	sd->owner = spi->dev.driver->owner;
	sd->dev = &spi->dev;
	/* spi_device and v4l2_subdev point to one another */
	v4l2_set_subdevdata(sd, spi);
	spi_set_drvdata(spi, sd);
	/* initialize name */
	strlcpy(sd->name, spi->dev.driver->name, sizeof(sd->name));
}
EXPORT_SYMBOL_GPL(v4l2_spi_subdev_init);

struct v4l2_subdev *v4l2_spi_new_subdev(struct v4l2_device *v4l2_dev,
		struct spi_master *master, struct spi_board_info *info)
{
	struct v4l2_subdev *sd = NULL;
	struct spi_device *spi = NULL;

	BUG_ON(!v4l2_dev);

	if (info->modalias[0])
		request_module(info->modalias);

	spi = spi_new_device(master, info);

	if (spi == NULL || spi->dev.driver == NULL)
		goto error;

	if (!try_module_get(spi->dev.driver->owner))
		goto error;

	sd = spi_get_drvdata(spi);

	/* Register with the v4l2_device which increases the module's
	   use count as well. */
	if (v4l2_device_register_subdev(v4l2_dev, sd))
		sd = NULL;

	/* Decrease the module use count to match the first try_module_get. */
	module_put(spi->dev.driver->owner);

error:
	/* If we have a client but no subdev, then something went wrong and
	   we must unregister the client. */
	if (spi && sd == NULL)
		spi_unregister_device(spi);

	return sd;
}
EXPORT_SYMBOL_GPL(v4l2_spi_new_subdev);

#endif /* defined(CONFIG_SPI) */

/* Clamp x to be between min and max, aligned to a multiple of 2^align. min
 * and max don't have to be aligned, but there must be at least one valid
 * value. E.g., min=17,max=31,align=4 is not allowed as there are no multiples
 * of 16 between 17 and 31. */
static unsigned int clamp_align(unsigned int x, unsigned int min,
				unsigned int max, unsigned int align)
{
	/* Bits that must be zero to be aligned */
	unsigned int mask = ~((1 << align) - 1);

	/* Clamp to aligned min and max */
	x = clamp(x, (min + ~mask) & mask, max & mask);

	/* Round to nearest aligned value */
	if (align)
		x = (x + (1 << (align - 1))) & mask;

	return x;
}

/* Bound an image to have a width between wmin and wmax, and height between
 * hmin and hmax, inclusive. Additionally, the width will be a multiple of
 * 2^walign, the height will be a multiple of 2^halign, and the overall size
 * (width*height) will be a multiple of 2^salign. The image may be shrunk
 * or enlarged to fit the alignment constraints.
 *
 * The width or height maximum must not be smaller than the corresponding
 * minimum. The alignments must not be so high there are no possible image
 * sizes within the allowed bounds. wmin and hmin must be at least 1
 * (don't use 0). If you don't care about a certain alignment, specify 0,
 * as 2^0 is 1 and one byte alignment is equivalent to no alignment. If
 * you only want to adjust downward, specify a maximum that's the same as
 * the initial value.
 */
void v4l_bound_align_image(u32 *w, unsigned int wmin, unsigned int wmax,
			   unsigned int walign,
			   u32 *h, unsigned int hmin, unsigned int hmax,
			   unsigned int halign, unsigned int salign)
{
	*w = clamp_align(*w, wmin, wmax, walign);
	*h = clamp_align(*h, hmin, hmax, halign);

	/* Usually we don't need to align the size and are done now. */
	if (!salign)
		return;

	/* How much alignment do we have? */
	walign = __ffs(*w);
	halign = __ffs(*h);
	/* Enough to satisfy the image alignment? */
	if (walign + halign < salign) {
		/* Max walign where there is still a valid width */
		unsigned int wmaxa = __fls(wmax ^ (wmin - 1));
		/* Max halign where there is still a valid height */
		unsigned int hmaxa = __fls(hmax ^ (hmin - 1));

		/* up the smaller alignment until we have enough */
		do {
			if (halign >= hmaxa ||
			    (walign <= halign && walign < wmaxa)) {
				*w = clamp_align(*w, wmin, wmax, walign + 1);
				walign = __ffs(*w);
			} else {
				*h = clamp_align(*h, hmin, hmax, halign + 1);
				halign = __ffs(*h);
			}
		} while (halign + walign < salign);
	}
}
EXPORT_SYMBOL_GPL(v4l_bound_align_image);

const struct v4l2_frmsize_discrete *v4l2_find_nearest_format(
		const struct v4l2_discrete_probe *probe,
		s32 width, s32 height)
{
	int i;
	u32 error, min_error = UINT_MAX;
	const struct v4l2_frmsize_discrete *size, *best = NULL;

	if (!probe)
		return best;

	for (i = 0, size = probe->sizes; i < probe->num_sizes; i++, size++) {
		error = abs(size->width - width) + abs(size->height - height);
		if (error < min_error) {
			min_error = error;
			best = size;
		}
		if (!error)
			break;
	}

	return best;
}
EXPORT_SYMBOL_GPL(v4l2_find_nearest_format);

void v4l2_get_timestamp(struct timeval *tv)
{
	struct timespec ts;

	ktime_get_ts(&ts);
	tv->tv_sec = ts.tv_sec;
	tv->tv_usec = ts.tv_nsec / NSEC_PER_USEC;
}
EXPORT_SYMBOL_GPL(v4l2_get_timestamp);
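
To make v4l_bound_align_image() concrete, a minimal sketch of the typical try_fmt use (the limits and the mydrv_ name are hypothetical): clamp a user-requested size to 32x32..1920x1080 with the width a multiple of 4 (walign = 2) and the height a multiple of 2 (halign = 1), and no constraint on the combined size (salign = 0).

#include <linux/videodev2.h>
#include <media/v4l2-common.h>

static void mydrv_adjust_fmt(struct v4l2_pix_format *pix)
{
	/* Aligns and clamps in place; width/height may shrink or grow
	 * to the nearest value the hardware can actually produce. */
	v4l_bound_align_image(&pix->width, 32, 1920, 2,
			      &pix->height, 32, 1080, 1, 0);
}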
1042	drivers/media/v4l2-core/v4l2-compat-ioctl32.c	(new file — diff suppressed because it is too large)
3428	drivers/media/v4l2-core/v4l2-ctrls.c	(new file — diff suppressed because it is too large)
1048	drivers/media/v4l2-core/v4l2-dev.c	(new file — diff suppressed because it is too large)
297	drivers/media/v4l2-core/v4l2-device.c	(new file)
@@ -0,0 +1,297 @@
/*
    V4L2 device support.

    Copyright (C) 2008  Hans Verkuil <hverkuil@xs4all.nl>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <linux/types.h>
#include <linux/ioctl.h>
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/slab.h>
#if defined(CONFIG_SPI)
#include <linux/spi/spi.h>
#endif
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ctrls.h>

int v4l2_device_register(struct device *dev, struct v4l2_device *v4l2_dev)
{
	if (v4l2_dev == NULL)
		return -EINVAL;

	INIT_LIST_HEAD(&v4l2_dev->subdevs);
	spin_lock_init(&v4l2_dev->lock);
	mutex_init(&v4l2_dev->ioctl_lock);
	v4l2_prio_init(&v4l2_dev->prio);
	kref_init(&v4l2_dev->ref);
	get_device(dev);
	v4l2_dev->dev = dev;
	if (dev == NULL) {
		/* If dev == NULL, then name must be filled in by the caller */
		if (WARN_ON(!v4l2_dev->name[0]))
			return -EINVAL;
		return 0;
	}

	/* Set name to driver name + device name if it is empty. */
	if (!v4l2_dev->name[0])
		snprintf(v4l2_dev->name, sizeof(v4l2_dev->name), "%s %s",
			dev->driver->name, dev_name(dev));
	if (!dev_get_drvdata(dev))
		dev_set_drvdata(dev, v4l2_dev);
	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_device_register);

static void v4l2_device_release(struct kref *ref)
{
	struct v4l2_device *v4l2_dev =
		container_of(ref, struct v4l2_device, ref);

	if (v4l2_dev->release)
		v4l2_dev->release(v4l2_dev);
}

int v4l2_device_put(struct v4l2_device *v4l2_dev)
{
	return kref_put(&v4l2_dev->ref, v4l2_device_release);
}
EXPORT_SYMBOL_GPL(v4l2_device_put);

int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
			 atomic_t *instance)
{
	int num = atomic_inc_return(instance) - 1;
	int len = strlen(basename);

	if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
		snprintf(v4l2_dev->name, sizeof(v4l2_dev->name),
				"%s-%d", basename, num);
	else
		snprintf(v4l2_dev->name, sizeof(v4l2_dev->name),
				"%s%d", basename, num);
	return num;
}
EXPORT_SYMBOL_GPL(v4l2_device_set_name);

void v4l2_device_disconnect(struct v4l2_device *v4l2_dev)
{
	if (v4l2_dev->dev == NULL)
		return;

	if (dev_get_drvdata(v4l2_dev->dev) == v4l2_dev)
		dev_set_drvdata(v4l2_dev->dev, NULL);
	put_device(v4l2_dev->dev);
	v4l2_dev->dev = NULL;
}
EXPORT_SYMBOL_GPL(v4l2_device_disconnect);

void v4l2_device_unregister(struct v4l2_device *v4l2_dev)
{
	struct v4l2_subdev *sd, *next;

	/* Just return if v4l2_dev is NULL or if it was already
	 * unregistered before. */
	if (v4l2_dev == NULL || !v4l2_dev->name[0])
		return;
	v4l2_device_disconnect(v4l2_dev);

	/* Unregister subdevs */
	list_for_each_entry_safe(sd, next, &v4l2_dev->subdevs, list) {
		v4l2_device_unregister_subdev(sd);
#if IS_ENABLED(CONFIG_I2C)
		if (sd->flags & V4L2_SUBDEV_FL_IS_I2C) {
			struct i2c_client *client = v4l2_get_subdevdata(sd);

			/* We need to unregister the i2c client explicitly.
			   We cannot rely on i2c_del_adapter to always
			   unregister clients for us, since if the i2c bus
			   is a platform bus, then it is never deleted. */
			if (client)
				i2c_unregister_device(client);
			continue;
		}
#endif
#if defined(CONFIG_SPI)
		if (sd->flags & V4L2_SUBDEV_FL_IS_SPI) {
			struct spi_device *spi = v4l2_get_subdevdata(sd);

			if (spi)
				spi_unregister_device(spi);
			continue;
		}
#endif
	}
	/* Mark as unregistered, thus preventing duplicate unregistrations */
	v4l2_dev->name[0] = '\0';
}
EXPORT_SYMBOL_GPL(v4l2_device_unregister);

int v4l2_device_register_subdev(struct v4l2_device *v4l2_dev,
				struct v4l2_subdev *sd)
{
#if defined(CONFIG_MEDIA_CONTROLLER)
	struct media_entity *entity = &sd->entity;
#endif
	int err;

	/* Check for valid input */
	if (v4l2_dev == NULL || sd == NULL || !sd->name[0])
		return -EINVAL;

	/* Warn if we apparently re-register a subdev */
	WARN_ON(sd->v4l2_dev != NULL);

	/*
	 * The reason to acquire the module here is to avoid unloading
	 * a module of sub-device which is registered to a media
	 * device. To make it possible to unload modules for media
	 * devices that also register sub-devices, do not
	 * try_module_get() such sub-device owners.
	 */
	sd->owner_v4l2_dev = v4l2_dev->dev && v4l2_dev->dev->driver &&
		sd->owner == v4l2_dev->dev->driver->owner;

	if (!sd->owner_v4l2_dev && !try_module_get(sd->owner))
		return -ENODEV;

	sd->v4l2_dev = v4l2_dev;
	if (sd->internal_ops && sd->internal_ops->registered) {
		err = sd->internal_ops->registered(sd);
		if (err)
			goto error_module;
	}

	/* This just returns 0 if either of the two args is NULL */
	err = v4l2_ctrl_add_handler(v4l2_dev->ctrl_handler, sd->ctrl_handler, NULL);
	if (err)
		goto error_unregister;

#if defined(CONFIG_MEDIA_CONTROLLER)
	/* Register the entity. */
	if (v4l2_dev->mdev) {
		err = media_device_register_entity(v4l2_dev->mdev, entity);
		if (err < 0)
			goto error_unregister;
	}
#endif

	spin_lock(&v4l2_dev->lock);
	list_add_tail(&sd->list, &v4l2_dev->subdevs);
	spin_unlock(&v4l2_dev->lock);

	return 0;

error_unregister:
	if (sd->internal_ops && sd->internal_ops->unregistered)
		sd->internal_ops->unregistered(sd);
error_module:
	if (!sd->owner_v4l2_dev)
		module_put(sd->owner);
	sd->v4l2_dev = NULL;
	return err;
}
EXPORT_SYMBOL_GPL(v4l2_device_register_subdev);

static void v4l2_device_release_subdev_node(struct video_device *vdev)
{
	struct v4l2_subdev *sd = video_get_drvdata(vdev);
	sd->devnode = NULL;
	kfree(vdev);
}

int v4l2_device_register_subdev_nodes(struct v4l2_device *v4l2_dev)
{
	struct video_device *vdev;
	struct v4l2_subdev *sd;
	int err;

	/* Register a device node for every subdev marked with the
	 * V4L2_SUBDEV_FL_HAS_DEVNODE flag.
	 */
	list_for_each_entry(sd, &v4l2_dev->subdevs, list) {
		if (!(sd->flags & V4L2_SUBDEV_FL_HAS_DEVNODE))
			continue;

		vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
		if (!vdev) {
			err = -ENOMEM;
			goto clean_up;
		}

		video_set_drvdata(vdev, sd);
		strlcpy(vdev->name, sd->name, sizeof(vdev->name));
		vdev->v4l2_dev = v4l2_dev;
		vdev->fops = &v4l2_subdev_fops;
		vdev->release = v4l2_device_release_subdev_node;
		vdev->ctrl_handler = sd->ctrl_handler;
		err = __video_register_device(vdev, VFL_TYPE_SUBDEV, -1, 1,
					      sd->owner);
		if (err < 0) {
			kfree(vdev);
			goto clean_up;
		}
#if defined(CONFIG_MEDIA_CONTROLLER)
		sd->entity.info.v4l.major = VIDEO_MAJOR;
		sd->entity.info.v4l.minor = vdev->minor;
#endif
		sd->devnode = vdev;
	}
	return 0;

clean_up:
	list_for_each_entry(sd, &v4l2_dev->subdevs, list) {
		if (!sd->devnode)
			break;
		video_unregister_device(sd->devnode);
	}

	return err;
}
EXPORT_SYMBOL_GPL(v4l2_device_register_subdev_nodes);

void v4l2_device_unregister_subdev(struct v4l2_subdev *sd)
{
	struct v4l2_device *v4l2_dev;

	/* return if it isn't registered */
	if (sd == NULL || sd->v4l2_dev == NULL)
		return;

	v4l2_dev = sd->v4l2_dev;

	spin_lock(&v4l2_dev->lock);
	list_del(&sd->list);
	spin_unlock(&v4l2_dev->lock);

	if (sd->internal_ops && sd->internal_ops->unregistered)
		sd->internal_ops->unregistered(sd);
	sd->v4l2_dev = NULL;

#if defined(CONFIG_MEDIA_CONTROLLER)
	if (v4l2_dev->mdev) {
		media_entity_remove_links(&sd->entity);
		media_device_unregister_entity(&sd->entity);
	}
#endif
	video_unregister_device(sd->devnode);
	if (!sd->owner_v4l2_dev)
		module_put(sd->owner);
}
EXPORT_SYMBOL_GPL(v4l2_device_unregister_subdev);
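
For orientation, a minimal probe sketch tying the two registration layers together (the driver, the struct and the sensor names are hypothetical; only the v4l2_* calls come from the code above):

#include <linux/platform_device.h>
#include <media/v4l2-device.h>
#include <media/v4l2-common.h>

struct mybridge {
	struct v4l2_device v4l2_dev;
	struct v4l2_subdev *sensor;
	struct i2c_adapter *adap;	/* obtained elsewhere, e.g. i2c_get_adapter() */
};

static int mybridge_probe(struct platform_device *pdev)
{
	struct mybridge *bridge;
	int ret;

	bridge = devm_kzalloc(&pdev->dev, sizeof(*bridge), GFP_KERNEL);
	if (!bridge)
		return -ENOMEM;

	/* Names the v4l2_device "<driver> <dev>" and hooks it to pdev. */
	ret = v4l2_device_register(&pdev->dev, &bridge->v4l2_dev);
	if (ret)
		return ret;

	/* Synchronously load and register an i2c sub-device; the "mysensor"
	 * module alias and address 0x36 are illustrative. */
	bridge->sensor = v4l2_i2c_new_subdev(&bridge->v4l2_dev, bridge->adap,
					     "mysensor", 0x36, NULL);
	if (!bridge->sensor) {
		v4l2_device_unregister(&bridge->v4l2_dev);
		return -ENODEV;
	}

	return 0;
}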
630
drivers/media/v4l2-core/v4l2-dv-timings.c
Normal file
630
drivers/media/v4l2-core/v4l2-dv-timings.c
Normal file
|
@ -0,0 +1,630 @@
|
|||
/*
|
||||
* v4l2-dv-timings - dv-timings helper functions
|
||||
*
|
||||
* Copyright 2013 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
|
||||
*
|
||||
* This program is free software; you may redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; version 2 of the License.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
||||
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*
|
||||
*/
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/videodev2.h>
|
||||
#include <linux/v4l2-dv-timings.h>
|
||||
#include <media/v4l2-dv-timings.h>
|
||||
|
||||
MODULE_AUTHOR("Hans Verkuil");
|
||||
MODULE_DESCRIPTION("V4L2 DV Timings Helper Functions");
|
||||
MODULE_LICENSE("GPL");
|
||||
|
||||
const struct v4l2_dv_timings v4l2_dv_timings_presets[] = {
|
||||
V4L2_DV_BT_CEA_640X480P59_94,
|
||||
V4L2_DV_BT_CEA_720X480I59_94,
|
||||
V4L2_DV_BT_CEA_720X480P59_94,
|
||||
V4L2_DV_BT_CEA_720X576I50,
|
||||
V4L2_DV_BT_CEA_720X576P50,
|
||||
V4L2_DV_BT_CEA_1280X720P24,
|
||||
V4L2_DV_BT_CEA_1280X720P25,
|
||||
V4L2_DV_BT_CEA_1280X720P30,
|
||||
V4L2_DV_BT_CEA_1280X720P50,
|
||||
V4L2_DV_BT_CEA_1280X720P60,
|
||||
V4L2_DV_BT_CEA_1920X1080P24,
|
||||
V4L2_DV_BT_CEA_1920X1080P25,
|
||||
V4L2_DV_BT_CEA_1920X1080P30,
|
||||
V4L2_DV_BT_CEA_1920X1080I50,
|
||||
V4L2_DV_BT_CEA_1920X1080P50,
|
||||
V4L2_DV_BT_CEA_1920X1080I60,
|
||||
V4L2_DV_BT_CEA_1920X1080P60,
|
||||
V4L2_DV_BT_DMT_640X350P85,
|
||||
V4L2_DV_BT_DMT_640X400P85,
|
||||
V4L2_DV_BT_DMT_720X400P85,
|
||||
V4L2_DV_BT_DMT_640X480P72,
|
||||
V4L2_DV_BT_DMT_640X480P75,
|
||||
V4L2_DV_BT_DMT_640X480P85,
|
||||
V4L2_DV_BT_DMT_800X600P56,
|
||||
V4L2_DV_BT_DMT_800X600P60,
|
||||
V4L2_DV_BT_DMT_800X600P72,
|
||||
V4L2_DV_BT_DMT_800X600P75,
|
||||
V4L2_DV_BT_DMT_800X600P85,
|
||||
V4L2_DV_BT_DMT_800X600P120_RB,
|
||||
V4L2_DV_BT_DMT_848X480P60,
|
||||
V4L2_DV_BT_DMT_1024X768I43,
|
||||
V4L2_DV_BT_DMT_1024X768P60,
|
||||
V4L2_DV_BT_DMT_1024X768P70,
|
||||
V4L2_DV_BT_DMT_1024X768P75,
|
||||
V4L2_DV_BT_DMT_1024X768P85,
|
||||
V4L2_DV_BT_DMT_1024X768P120_RB,
|
||||
V4L2_DV_BT_DMT_1152X864P75,
|
||||
V4L2_DV_BT_DMT_1280X768P60_RB,
|
||||
V4L2_DV_BT_DMT_1280X768P60,
|
||||
V4L2_DV_BT_DMT_1280X768P75,
|
||||
V4L2_DV_BT_DMT_1280X768P85,
|
||||
V4L2_DV_BT_DMT_1280X768P120_RB,
|
||||
V4L2_DV_BT_DMT_1280X800P60_RB,
|
||||
V4L2_DV_BT_DMT_1280X800P60,
|
||||
V4L2_DV_BT_DMT_1280X800P75,
|
||||
V4L2_DV_BT_DMT_1280X800P85,
|
||||
V4L2_DV_BT_DMT_1280X800P120_RB,
|
||||
V4L2_DV_BT_DMT_1280X960P60,
|
||||
V4L2_DV_BT_DMT_1280X960P85,
|
||||
V4L2_DV_BT_DMT_1280X960P120_RB,
|
||||
V4L2_DV_BT_DMT_1280X1024P60,
|
||||
V4L2_DV_BT_DMT_1280X1024P75,
|
||||
V4L2_DV_BT_DMT_1280X1024P85,
|
||||
V4L2_DV_BT_DMT_1280X1024P120_RB,
|
||||
V4L2_DV_BT_DMT_1360X768P60,
|
||||
V4L2_DV_BT_DMT_1360X768P120_RB,
|
||||
V4L2_DV_BT_DMT_1366X768P60,
|
||||
V4L2_DV_BT_DMT_1366X768P60_RB,
|
||||
V4L2_DV_BT_DMT_1400X1050P60_RB,
|
||||
V4L2_DV_BT_DMT_1400X1050P60,
|
||||
V4L2_DV_BT_DMT_1400X1050P75,
|
||||
V4L2_DV_BT_DMT_1400X1050P85,
|
||||
V4L2_DV_BT_DMT_1400X1050P120_RB,
|
||||
V4L2_DV_BT_DMT_1440X900P60_RB,
|
||||
V4L2_DV_BT_DMT_1440X900P60,
|
||||
V4L2_DV_BT_DMT_1440X900P75,
|
||||
V4L2_DV_BT_DMT_1440X900P85,
|
||||
V4L2_DV_BT_DMT_1440X900P120_RB,
|
||||
V4L2_DV_BT_DMT_1600X900P60_RB,
|
||||
V4L2_DV_BT_DMT_1600X1200P60,
|
||||
V4L2_DV_BT_DMT_1600X1200P65,
|
||||
V4L2_DV_BT_DMT_1600X1200P70,
|
||||
V4L2_DV_BT_DMT_1600X1200P75,
|
||||
V4L2_DV_BT_DMT_1600X1200P85,
|
||||
V4L2_DV_BT_DMT_1600X1200P120_RB,
|
||||
V4L2_DV_BT_DMT_1680X1050P60_RB,
|
||||
V4L2_DV_BT_DMT_1680X1050P60,
|
||||
V4L2_DV_BT_DMT_1680X1050P75,
|
||||
V4L2_DV_BT_DMT_1680X1050P85,
|
||||
V4L2_DV_BT_DMT_1680X1050P120_RB,
|
||||
V4L2_DV_BT_DMT_1792X1344P60,
|
||||
V4L2_DV_BT_DMT_1792X1344P75,
|
||||
V4L2_DV_BT_DMT_1792X1344P120_RB,
|
||||
V4L2_DV_BT_DMT_1856X1392P60,
|
||||
V4L2_DV_BT_DMT_1856X1392P75,
|
||||
V4L2_DV_BT_DMT_1856X1392P120_RB,
|
||||
V4L2_DV_BT_DMT_1920X1200P60_RB,
|
||||
V4L2_DV_BT_DMT_1920X1200P60,
|
||||
V4L2_DV_BT_DMT_1920X1200P75,
|
||||
V4L2_DV_BT_DMT_1920X1200P85,
|
||||
V4L2_DV_BT_DMT_1920X1200P120_RB,
|
||||
V4L2_DV_BT_DMT_1920X1440P60,
|
||||
V4L2_DV_BT_DMT_1920X1440P75,
|
||||
V4L2_DV_BT_DMT_1920X1440P120_RB,
|
||||
V4L2_DV_BT_DMT_2048X1152P60_RB,
|
||||
V4L2_DV_BT_DMT_2560X1600P60_RB,
|
||||
V4L2_DV_BT_DMT_2560X1600P60,
|
||||
V4L2_DV_BT_DMT_2560X1600P75,
|
||||
V4L2_DV_BT_DMT_2560X1600P85,
|
||||
V4L2_DV_BT_DMT_2560X1600P120_RB,
|
||||
V4L2_DV_BT_CEA_3840X2160P24,
|
||||
V4L2_DV_BT_CEA_3840X2160P25,
|
||||
V4L2_DV_BT_CEA_3840X2160P30,
|
||||
V4L2_DV_BT_CEA_3840X2160P50,
|
||||
V4L2_DV_BT_CEA_3840X2160P60,
|
||||
V4L2_DV_BT_CEA_4096X2160P24,
|
||||
V4L2_DV_BT_CEA_4096X2160P25,
|
||||
V4L2_DV_BT_CEA_4096X2160P30,
|
||||
V4L2_DV_BT_CEA_4096X2160P50,
|
||||
V4L2_DV_BT_DMT_4096X2160P59_94_RB,
|
||||
V4L2_DV_BT_CEA_4096X2160P60,
|
||||
{ }
|
||||
};
|
||||
EXPORT_SYMBOL_GPL(v4l2_dv_timings_presets);

bool v4l2_valid_dv_timings(const struct v4l2_dv_timings *t,
			   const struct v4l2_dv_timings_cap *dvcap,
			   v4l2_check_dv_timings_fnc fnc,
			   void *fnc_handle)
{
	const struct v4l2_bt_timings *bt = &t->bt;
	const struct v4l2_bt_timings_cap *cap = &dvcap->bt;
	u32 caps = cap->capabilities;

	if (t->type != V4L2_DV_BT_656_1120)
		return false;
	if (t->type != dvcap->type ||
	    bt->height < cap->min_height ||
	    bt->height > cap->max_height ||
	    bt->width < cap->min_width ||
	    bt->width > cap->max_width ||
	    bt->pixelclock < cap->min_pixelclock ||
	    bt->pixelclock > cap->max_pixelclock ||
	    (cap->standards && bt->standards &&
	     !(bt->standards & cap->standards)) ||
	    (bt->interlaced && !(caps & V4L2_DV_BT_CAP_INTERLACED)) ||
	    (!bt->interlaced && !(caps & V4L2_DV_BT_CAP_PROGRESSIVE)))
		return false;
	return fnc == NULL || fnc(t, fnc_handle);
}
EXPORT_SYMBOL_GPL(v4l2_valid_dv_timings);
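
A minimal, hypothetical sketch of how a driver typically calls this helper from its VIDIOC_S_DV_TIMINGS handler; my_s_dv_timings and my_timings_cap are assumed driver-local names, not part of this file:

/* Sketch: validate user-supplied timings against the driver's
 * advertised capabilities before programming the hardware. */
static int my_s_dv_timings(struct file *file, void *fh,
			   struct v4l2_dv_timings *timings)
{
	/* my_timings_cap is a v4l2_dv_timings_cap the driver fills in */
	if (!v4l2_valid_dv_timings(timings, &my_timings_cap, NULL, NULL))
		return -ERANGE;
	/* ... program the hardware with *timings ... */
	return 0;
}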

int v4l2_enum_dv_timings_cap(struct v4l2_enum_dv_timings *t,
			     const struct v4l2_dv_timings_cap *cap,
			     v4l2_check_dv_timings_fnc fnc,
			     void *fnc_handle)
{
	u32 i, idx;

	memset(t->reserved, 0, sizeof(t->reserved));
	for (i = idx = 0; v4l2_dv_timings_presets[i].bt.width; i++) {
		if (v4l2_valid_dv_timings(v4l2_dv_timings_presets + i, cap,
					  fnc, fnc_handle) &&
		    idx++ == t->index) {
			t->timings = v4l2_dv_timings_presets[i];
			return 0;
		}
	}
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(v4l2_enum_dv_timings_cap);

bool v4l2_find_dv_timings_cap(struct v4l2_dv_timings *t,
			      const struct v4l2_dv_timings_cap *cap,
			      unsigned pclock_delta,
			      v4l2_check_dv_timings_fnc fnc,
			      void *fnc_handle)
{
	int i;

	if (!v4l2_valid_dv_timings(t, cap, fnc, fnc_handle))
		return false;

	for (i = 0; i < v4l2_dv_timings_presets[i].bt.width; i++) {
		if (v4l2_valid_dv_timings(v4l2_dv_timings_presets + i, cap,
					  fnc, fnc_handle) &&
		    v4l2_match_dv_timings(t, v4l2_dv_timings_presets + i,
					  pclock_delta)) {
			*t = v4l2_dv_timings_presets[i];
			return true;
		}
	}
	return false;
}
EXPORT_SYMBOL_GPL(v4l2_find_dv_timings_cap);

/**
 * v4l2_match_dv_timings - check if two timings match
 * @t1 - compare this v4l2_dv_timings struct...
 * @t2 - with this struct.
 * @pclock_delta - the allowed pixelclock deviation.
 *
 * Compare t1 with t2 with a given margin of error for the pixelclock.
 */
bool v4l2_match_dv_timings(const struct v4l2_dv_timings *t1,
			   const struct v4l2_dv_timings *t2,
			   unsigned pclock_delta)
{
	if (t1->type != t2->type || t1->type != V4L2_DV_BT_656_1120)
		return false;
	if (t1->bt.width == t2->bt.width &&
	    t1->bt.height == t2->bt.height &&
	    t1->bt.interlaced == t2->bt.interlaced &&
	    t1->bt.polarities == t2->bt.polarities &&
	    t1->bt.pixelclock >= t2->bt.pixelclock - pclock_delta &&
	    t1->bt.pixelclock <= t2->bt.pixelclock + pclock_delta &&
	    t1->bt.hfrontporch == t2->bt.hfrontporch &&
	    t1->bt.vfrontporch == t2->bt.vfrontporch &&
	    t1->bt.vsync == t2->bt.vsync &&
	    t1->bt.vbackporch == t2->bt.vbackporch &&
	    (!t1->bt.interlaced ||
	     (t1->bt.il_vfrontporch == t2->bt.il_vfrontporch &&
	      t1->bt.il_vsync == t2->bt.il_vsync &&
	      t1->bt.il_vbackporch == t2->bt.il_vbackporch)))
		return true;
	return false;
}
EXPORT_SYMBOL_GPL(v4l2_match_dv_timings);
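
A hedged sketch of how a receiver driver might use this: compare measured timings against the CEA 1080p60 preset while tolerating a small pixel clock deviation (the 250 kHz slack is an arbitrary example value; my_is_1080p60 is a hypothetical helper name):

static bool my_is_1080p60(const struct v4l2_dv_timings *measured)
{
	static const struct v4l2_dv_timings cea_1080p60 =
		V4L2_DV_BT_CEA_1920X1080P60;

	/* true if all fields match and the pixelclock is within 250 kHz */
	return v4l2_match_dv_timings(measured, &cea_1080p60, 250000);
}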

void v4l2_print_dv_timings(const char *dev_prefix, const char *prefix,
			   const struct v4l2_dv_timings *t, bool detailed)
{
	const struct v4l2_bt_timings *bt = &t->bt;
	u32 htot, vtot;

	if (t->type != V4L2_DV_BT_656_1120)
		return;

	htot = V4L2_DV_BT_FRAME_WIDTH(bt);
	vtot = V4L2_DV_BT_FRAME_HEIGHT(bt);

	if (prefix == NULL)
		prefix = "";

	pr_info("%s: %s%ux%u%s%u (%ux%u)\n", dev_prefix, prefix,
		bt->width, bt->height, bt->interlaced ? "i" : "p",
		(htot * vtot) > 0 ? ((u32)bt->pixelclock / (htot * vtot)) : 0,
		htot, vtot);

	if (!detailed)
		return;

	pr_info("%s: horizontal: fp = %u, %ssync = %u, bp = %u\n",
		dev_prefix, bt->hfrontporch,
		(bt->polarities & V4L2_DV_HSYNC_POS_POL) ? "+" : "-",
		bt->hsync, bt->hbackporch);
	pr_info("%s: vertical: fp = %u, %ssync = %u, bp = %u\n",
		dev_prefix, bt->vfrontporch,
		(bt->polarities & V4L2_DV_VSYNC_POS_POL) ? "+" : "-",
		bt->vsync, bt->vbackporch);
	pr_info("%s: pixelclock: %llu\n", dev_prefix, bt->pixelclock);
	pr_info("%s: flags (0x%x):%s%s%s%s\n", dev_prefix, bt->flags,
		(bt->flags & V4L2_DV_FL_REDUCED_BLANKING) ?
		" REDUCED_BLANKING" : "",
		(bt->flags & V4L2_DV_FL_CAN_REDUCE_FPS) ?
		" CAN_REDUCE_FPS" : "",
		(bt->flags & V4L2_DV_FL_REDUCED_FPS) ?
		" REDUCED_FPS" : "",
		(bt->flags & V4L2_DV_FL_HALF_LINE) ?
		" HALF_LINE" : "");
	pr_info("%s: standards (0x%x):%s%s%s%s\n", dev_prefix, bt->standards,
		(bt->standards & V4L2_DV_BT_STD_CEA861) ? " CEA" : "",
		(bt->standards & V4L2_DV_BT_STD_DMT) ? " DMT" : "",
		(bt->standards & V4L2_DV_BT_STD_CVT) ? " CVT" : "",
		(bt->standards & V4L2_DV_BT_STD_GTF) ? " GTF" : "");
}
EXPORT_SYMBOL_GPL(v4l2_print_dv_timings);

/*
 * CVT defines
 * Based on Coordinated Video Timings Standard
 * version 1.1 September 10, 2003
 */

#define CVT_PXL_CLK_GRAN	250000	/* pixel clock granularity */

/* Normal blanking */
#define CVT_MIN_V_BPORCH	7	/* lines */
#define CVT_MIN_V_PORCH_RND	3	/* lines */
#define CVT_MIN_VSYNC_BP	550	/* min time of vsync + back porch (us) */

/* Normal blanking for CVT uses GTF to calculate horizontal blanking */
#define CVT_CELL_GRAN		8	/* character cell granularity */
#define CVT_M			600	/* blanking formula gradient */
#define CVT_C			40	/* blanking formula offset */
#define CVT_K			128	/* blanking formula scaling factor */
#define CVT_J			20	/* blanking formula scaling factor */
#define CVT_C_PRIME		(((CVT_C - CVT_J) * CVT_K / 256) + CVT_J)
#define CVT_M_PRIME		(CVT_K * CVT_M / 256)

/* Reduced Blanking */
#define CVT_RB_MIN_V_BPORCH	7	/* lines */
#define CVT_RB_V_FPORCH		3	/* lines */
#define CVT_RB_MIN_V_BLANK	460	/* us */
#define CVT_RB_H_SYNC		32	/* pixels */
#define CVT_RB_H_BPORCH		80	/* pixels */
#define CVT_RB_H_BLANK		160	/* pixels */

/** v4l2_detect_cvt - detect if the given timings follow the CVT standard
 * @frame_height - the total height of the frame (including blanking) in lines.
 * @hfreq - the horizontal frequency in Hz.
 * @vsync - the height of the vertical sync in lines.
 * @polarities - the horizontal and vertical polarities (same as struct
 *		v4l2_bt_timings polarities).
 * @fmt - the resulting timings.
 *
 * This function will attempt to detect if the given values correspond to a
 * valid CVT format. If so, then it will return true, and fmt will be filled
 * in with the found CVT timings.
 *
 * TODO: VESA defined a new version 2 of their reduced blanking
 * formula. Support for that is currently missing in this CVT
 * detection function.
 */
bool v4l2_detect_cvt(unsigned frame_height, unsigned hfreq, unsigned vsync,
		u32 polarities, struct v4l2_dv_timings *fmt)
{
	int v_fp, v_bp, h_fp, h_bp, hsync;
	int frame_width, image_height, image_width;
	bool reduced_blanking;
	unsigned pix_clk;

	if (vsync < 4 || vsync > 7)
		return false;

	if (polarities == V4L2_DV_VSYNC_POS_POL)
		reduced_blanking = false;
	else if (polarities == V4L2_DV_HSYNC_POS_POL)
		reduced_blanking = true;
	else
		return false;

	/* Vertical */
	if (reduced_blanking) {
		v_fp = CVT_RB_V_FPORCH;
		v_bp = (CVT_RB_MIN_V_BLANK * hfreq + 1999999) / 1000000;
		v_bp -= vsync + v_fp;

		if (v_bp < CVT_RB_MIN_V_BPORCH)
			v_bp = CVT_RB_MIN_V_BPORCH;
	} else {
		v_fp = CVT_MIN_V_PORCH_RND;
		v_bp = (CVT_MIN_VSYNC_BP * hfreq + 1999999) / 1000000 - vsync;

		if (v_bp < CVT_MIN_V_BPORCH)
			v_bp = CVT_MIN_V_BPORCH;
	}
	image_height = (frame_height - v_fp - vsync - v_bp + 1) & ~0x1;

	/* Aspect ratio based on vsync */
	switch (vsync) {
	case 4:
		image_width = (image_height * 4) / 3;
		break;
	case 5:
		image_width = (image_height * 16) / 9;
		break;
	case 6:
		image_width = (image_height * 16) / 10;
		break;
	case 7:
		/* special case */
		if (image_height == 1024)
			image_width = (image_height * 5) / 4;
		else if (image_height == 768)
			image_width = (image_height * 15) / 9;
		else
			return false;
		break;
	default:
		return false;
	}

	image_width = image_width & ~7;

	/* Horizontal */
	if (reduced_blanking) {
		pix_clk = (image_width + CVT_RB_H_BLANK) * hfreq;
		pix_clk = (pix_clk / CVT_PXL_CLK_GRAN) * CVT_PXL_CLK_GRAN;

		h_bp = CVT_RB_H_BPORCH;
		hsync = CVT_RB_H_SYNC;
		h_fp = CVT_RB_H_BLANK - h_bp - hsync;

		frame_width = image_width + CVT_RB_H_BLANK;
	} else {
		unsigned ideal_duty_cycle_per_myriad =
			100 * CVT_C_PRIME - (CVT_M_PRIME * 100000) / hfreq;
		int h_blank;

		if (ideal_duty_cycle_per_myriad < 2000)
			ideal_duty_cycle_per_myriad = 2000;

		h_blank = image_width * ideal_duty_cycle_per_myriad /
			  (10000 - ideal_duty_cycle_per_myriad);
		h_blank = (h_blank / (2 * CVT_CELL_GRAN)) * 2 * CVT_CELL_GRAN;

		pix_clk = (image_width + h_blank) * hfreq;
		pix_clk = (pix_clk / CVT_PXL_CLK_GRAN) * CVT_PXL_CLK_GRAN;

		h_bp = h_blank / 2;
		frame_width = image_width + h_blank;

		hsync = (frame_width * 8 + 50) / 100;
		hsync = hsync - hsync % CVT_CELL_GRAN;
		h_fp = h_blank - hsync - h_bp;
	}

	fmt->type = V4L2_DV_BT_656_1120;
	fmt->bt.polarities = polarities;
	fmt->bt.width = image_width;
	fmt->bt.height = image_height;
	fmt->bt.hfrontporch = h_fp;
	fmt->bt.vfrontporch = v_fp;
	fmt->bt.hsync = hsync;
	fmt->bt.vsync = vsync;
	fmt->bt.hbackporch = frame_width - image_width - h_fp - hsync;
	fmt->bt.vbackporch = frame_height - image_height - v_fp - vsync;
	fmt->bt.pixelclock = pix_clk;
	fmt->bt.standards = V4L2_DV_BT_STD_CVT;
	if (reduced_blanking)
		fmt->bt.flags |= V4L2_DV_FL_REDUCED_BLANKING;
	return true;
}
EXPORT_SYMBOL_GPL(v4l2_detect_cvt);
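
A hypothetical worked call: a receiver measured a 750-line total frame at hfreq = 45000 Hz with a 5-line vsync and positive vsync polarity (all example values). vsync == 5 selects the 16:9 branch above, so on success fmt holds a CVT 16:9 mode with normal (non-reduced) blanking:

static void my_try_cvt(void)
{
	struct v4l2_dv_timings fmt = { };

	/* v_bp works out to 21 lines, image_height to 722 and
	 * image_width to 1280 for these inputs */
	if (v4l2_detect_cvt(750, 45000, 5, V4L2_DV_VSYNC_POS_POL, &fmt))
		v4l2_print_dv_timings("mydev", "CVT: ", &fmt, true);
}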

/*
 * GTF defines
 * Based on Generalized Timing Formula Standard
 * Version 1.1 September 2, 1999
 */

#define GTF_PXL_CLK_GRAN	250000	/* pixel clock granularity */

#define GTF_MIN_VSYNC_BP	550	/* min time of vsync + back porch (us) */
#define GTF_V_FP		1	/* vertical front porch (lines) */
#define GTF_CELL_GRAN		8	/* character cell granularity */

/* Default */
#define GTF_D_M			600	/* blanking formula gradient */
#define GTF_D_C			40	/* blanking formula offset */
#define GTF_D_K			128	/* blanking formula scaling factor */
#define GTF_D_J			20	/* blanking formula scaling factor */
#define GTF_D_C_PRIME	((((GTF_D_C - GTF_D_J) * GTF_D_K) / 256) + GTF_D_J)
#define GTF_D_M_PRIME	((GTF_D_K * GTF_D_M) / 256)

/* Secondary */
#define GTF_S_M			3600	/* blanking formula gradient */
#define GTF_S_C			40	/* blanking formula offset */
#define GTF_S_K			128	/* blanking formula scaling factor */
#define GTF_S_J			35	/* blanking formula scaling factor */
#define GTF_S_C_PRIME	((((GTF_S_C - GTF_S_J) * GTF_S_K) / 256) + GTF_S_J)
#define GTF_S_M_PRIME	((GTF_S_K * GTF_S_M) / 256)

/** v4l2_detect_gtf - detect if the given timings follow the GTF standard
 * @frame_height - the total height of the frame (including blanking) in lines.
 * @hfreq - the horizontal frequency in Hz.
 * @vsync - the height of the vertical sync in lines.
 * @polarities - the horizontal and vertical polarities (same as struct
 *		v4l2_bt_timings polarities).
 * @aspect - preferred aspect ratio. GTF has no method of determining the
 *		aspect ratio in order to derive the image width from the
 *		image height, so it has to be passed explicitly. Usually
 *		the native screen aspect ratio is used for this. If it
 *		is not filled in correctly, then 16:9 will be assumed.
 * @fmt - the resulting timings.
 *
 * This function will attempt to detect if the given values correspond to a
 * valid GTF format. If so, then it will return true, and fmt will be filled
 * in with the found GTF timings.
 */
bool v4l2_detect_gtf(unsigned frame_height,
		unsigned hfreq,
		unsigned vsync,
		u32 polarities,
		struct v4l2_fract aspect,
		struct v4l2_dv_timings *fmt)
{
	int pix_clk;
	int v_fp, v_bp, h_fp, hsync;
	int frame_width, image_height, image_width;
	bool default_gtf;
	int h_blank;

	if (vsync != 3)
		return false;

	if (polarities == V4L2_DV_VSYNC_POS_POL)
		default_gtf = true;
	else if (polarities == V4L2_DV_HSYNC_POS_POL)
		default_gtf = false;
	else
		return false;

	/* Vertical */
	v_fp = GTF_V_FP;
	v_bp = (GTF_MIN_VSYNC_BP * hfreq + 999999) / 1000000 - vsync;
	image_height = (frame_height - v_fp - vsync - v_bp + 1) & ~0x1;

	if (aspect.numerator == 0 || aspect.denominator == 0) {
		aspect.numerator = 16;
		aspect.denominator = 9;
	}
	image_width = ((image_height * aspect.numerator) / aspect.denominator);
	image_width = (image_width + GTF_CELL_GRAN/2) & ~(GTF_CELL_GRAN - 1);

	/* Horizontal */
	if (default_gtf)
		h_blank = ((image_width * GTF_D_C_PRIME * hfreq) -
			   (image_width * GTF_D_M_PRIME * 1000) +
			   (hfreq * (100 - GTF_D_C_PRIME) + GTF_D_M_PRIME * 1000) / 2) /
			  (hfreq * (100 - GTF_D_C_PRIME) + GTF_D_M_PRIME * 1000);
	else
		h_blank = ((image_width * GTF_S_C_PRIME * hfreq) -
			   (image_width * GTF_S_M_PRIME * 1000) +
			   (hfreq * (100 - GTF_S_C_PRIME) + GTF_S_M_PRIME * 1000) / 2) /
			  (hfreq * (100 - GTF_S_C_PRIME) + GTF_S_M_PRIME * 1000);

	h_blank = h_blank - h_blank % (2 * GTF_CELL_GRAN);
	frame_width = image_width + h_blank;

	pix_clk = (image_width + h_blank) * hfreq;
	pix_clk = pix_clk / GTF_PXL_CLK_GRAN * GTF_PXL_CLK_GRAN;

	hsync = (frame_width * 8 + 50) / 100;
	hsync = hsync - hsync % GTF_CELL_GRAN;

	h_fp = h_blank / 2 - hsync;

	fmt->type = V4L2_DV_BT_656_1120;
	fmt->bt.polarities = polarities;
	fmt->bt.width = image_width;
	fmt->bt.height = image_height;
	fmt->bt.hfrontporch = h_fp;
	fmt->bt.vfrontporch = v_fp;
	fmt->bt.hsync = hsync;
	fmt->bt.vsync = vsync;
	fmt->bt.hbackporch = frame_width - image_width - h_fp - hsync;
	fmt->bt.vbackporch = frame_height - image_height - v_fp - vsync;
	fmt->bt.pixelclock = pix_clk;
	fmt->bt.standards = V4L2_DV_BT_STD_GTF;
	if (!default_gtf)
		fmt->bt.flags |= V4L2_DV_FL_REDUCED_BLANKING;
	return true;
}
EXPORT_SYMBOL_GPL(v4l2_detect_gtf);
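
A hedged sketch of a caller: unlike CVT, GTF cannot infer the aspect ratio from vsync, so the caller passes one in (an all-zero v4l2_fract makes the helper assume 16:9). my_try_gtf is a hypothetical name:

static void my_try_gtf(unsigned frame_height, unsigned hfreq)
{
	struct v4l2_fract aspect = { 4, 3 };	/* native 4:3 panel, assumed */
	struct v4l2_dv_timings fmt = { };

	/* GTF always uses a 3-line vsync */
	if (v4l2_detect_gtf(frame_height, hfreq, 3, V4L2_DV_VSYNC_POS_POL,
			    aspect, &fmt))
		v4l2_print_dv_timings("mydev", "GTF: ", &fmt, true);
}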

/** v4l2_calc_aspect_ratio - calculate the aspect ratio based on bytes
 *	0x15 and 0x16 from the EDID.
 * @hor_landscape - byte 0x15 from the EDID.
 * @vert_portrait - byte 0x16 from the EDID.
 *
 * Determines the aspect ratio from the EDID.
 * See VESA Enhanced EDID standard, release A, rev 2, section 3.6.2:
 * "Horizontal and Vertical Screen Size or Aspect Ratio"
 */
struct v4l2_fract v4l2_calc_aspect_ratio(u8 hor_landscape, u8 vert_portrait)
{
	struct v4l2_fract aspect = { 16, 9 };
	u32 tmp;
	u8 ratio;

	/* Nothing filled in, fallback to 16:9 */
	if (!hor_landscape && !vert_portrait)
		return aspect;
	/* Both filled in, so they are interpreted as the screen size in cm */
	if (hor_landscape && vert_portrait) {
		aspect.numerator = hor_landscape;
		aspect.denominator = vert_portrait;
		return aspect;
	}
	/* Only one is filled in, so interpret them as a ratio:
	   (val + 99) / 100 */
	ratio = hor_landscape | vert_portrait;
	/* Change some rounded values into the exact aspect ratio */
	if (ratio == 79) {
		aspect.numerator = 16;
		aspect.denominator = 9;
	} else if (ratio == 34) {
		aspect.numerator = 4;
		aspect.denominator = 3;
	} else if (ratio == 68) {
		aspect.numerator = 15;
		aspect.denominator = 9;
	} else {
		aspect.numerator = hor_landscape + 99;
		aspect.denominator = 100;
	}
	if (hor_landscape)
		return aspect;
	/* The aspect ratio is for portrait, so swap numerator and denominator */
	tmp = aspect.denominator;
	aspect.denominator = aspect.numerator;
	aspect.numerator = tmp;
	return aspect;
}
EXPORT_SYMBOL_GPL(v4l2_calc_aspect_ratio);
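
A short illustration of the three EDID cases the function above handles, with example byte values:

static void my_aspect_examples(void)
{
	struct v4l2_fract a;

	a = v4l2_calc_aspect_ratio(53, 30);	/* both set: 53cm x 30cm -> 53:30 */
	a = v4l2_calc_aspect_ratio(79, 0);	/* one set, snapped: landscape 16:9 */
	a = v4l2_calc_aspect_ratio(0, 79);	/* portrait: swapped to 9:16 */
	a = v4l2_calc_aspect_ratio(0, 0);	/* neither set: 16:9 fallback */
	(void)a;
}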
356
drivers/media/v4l2-core/v4l2-event.c
Normal file
@@ -0,0 +1,356 @@
/*
 * v4l2-event.c
 *
 * V4L2 events.
 *
 * Copyright (C) 2009--2010 Nokia Corporation.
 *
 * Contact: Sakari Ailus <sakari.ailus@iki.fi>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */

#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/export.h>

static unsigned sev_pos(const struct v4l2_subscribed_event *sev, unsigned idx)
{
	idx += sev->first;
	return idx >= sev->elems ? idx - sev->elems : idx;
}
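
sev->events is used as a fixed-size ring buffer; sev_pos() maps a logical index (0 = oldest queued event) to a physical slot. A worked illustration, values chosen for the example:

/* With elems == 4 and first == 3, logical indices wrap around:
 *
 *   sev_pos(sev, 0) == 3
 *   sev_pos(sev, 1) == 0
 *   sev_pos(sev, 2) == 1
 *   sev_pos(sev, 3) == 2
 */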

static int __v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event)
{
	struct v4l2_kevent *kev;
	unsigned long flags;

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);

	if (list_empty(&fh->available)) {
		spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
		return -ENOENT;
	}

	WARN_ON(fh->navailable == 0);

	kev = list_first_entry(&fh->available, struct v4l2_kevent, list);
	list_del(&kev->list);
	fh->navailable--;

	kev->event.pending = fh->navailable;
	*event = kev->event;
	kev->sev->first = sev_pos(kev->sev, 1);
	kev->sev->in_use--;

	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	return 0;
}

int v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event,
		       int nonblocking)
{
	int ret;

	if (nonblocking)
		return __v4l2_event_dequeue(fh, event);

	/* Release the vdev lock while waiting */
	if (fh->vdev->lock)
		mutex_unlock(fh->vdev->lock);

	do {
		ret = wait_event_interruptible(fh->wait,
					       fh->navailable != 0);
		if (ret < 0)
			break;

		ret = __v4l2_event_dequeue(fh, event);
	} while (ret == -ENOENT);

	if (fh->vdev->lock)
		mutex_lock(fh->vdev->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_event_dequeue);
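
For orientation, a hypothetical user-space counterpart: VIDIOC_DQEVENT ends up in v4l2_event_dequeue() above, and without O_NONBLOCK the call sleeps in wait_event_interruptible() until an event is queued. drain_one_event is an assumed example name:

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static void drain_one_event(int fd)
{
	struct v4l2_event ev;

	if (ioctl(fd, VIDIOC_DQEVENT, &ev) == 0)
		printf("event type %u, sequence %u, %u still pending\n",
		       ev.type, ev.sequence, ev.pending);
}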

/* Caller must hold fh->vdev->fh_lock! */
static struct v4l2_subscribed_event *v4l2_event_subscribed(
		struct v4l2_fh *fh, u32 type, u32 id)
{
	struct v4l2_subscribed_event *sev;

	assert_spin_locked(&fh->vdev->fh_lock);

	list_for_each_entry(sev, &fh->subscribed, list)
		if (sev->type == type && sev->id == id)
			return sev;

	return NULL;
}

static void __v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev,
		const struct timespec *ts)
{
	struct v4l2_subscribed_event *sev;
	struct v4l2_kevent *kev;
	bool copy_payload = true;

	/* Are we subscribed? */
	sev = v4l2_event_subscribed(fh, ev->type, ev->id);
	if (sev == NULL)
		return;

	/*
	 * If the event has been added to the fh->subscribed list, but its
	 * add op has not completed yet elems will be 0, treat this as
	 * not being subscribed.
	 */
	if (!sev->elems)
		return;

	/* Increase event sequence number on fh. */
	fh->sequence++;

	/* Do we have any free events? */
	if (sev->in_use == sev->elems) {
		/* no, remove the oldest one */
		kev = sev->events + sev_pos(sev, 0);
		list_del(&kev->list);
		sev->in_use--;
		sev->first = sev_pos(sev, 1);
		fh->navailable--;
		if (sev->elems == 1) {
			if (sev->ops && sev->ops->replace) {
				sev->ops->replace(&kev->event, ev);
				copy_payload = false;
			}
		} else if (sev->ops && sev->ops->merge) {
			struct v4l2_kevent *second_oldest =
				sev->events + sev_pos(sev, 0);
			sev->ops->merge(&kev->event, &second_oldest->event);
		}
	}

	/* Take one and fill it. */
	kev = sev->events + sev_pos(sev, sev->in_use);
	kev->event.type = ev->type;
	if (copy_payload)
		kev->event.u = ev->u;
	kev->event.id = ev->id;
	kev->event.timestamp = *ts;
	kev->event.sequence = fh->sequence;
	sev->in_use++;
	list_add_tail(&kev->list, &fh->available);

	fh->navailable++;

	wake_up_all(&fh->wait);
}

void v4l2_event_queue(struct video_device *vdev, const struct v4l2_event *ev)
{
	struct v4l2_fh *fh;
	unsigned long flags;
	struct timespec timestamp;

	ktime_get_ts(&timestamp);

	spin_lock_irqsave(&vdev->fh_lock, flags);

	list_for_each_entry(fh, &vdev->fh_list, list)
		__v4l2_event_queue_fh(fh, ev, &timestamp);

	spin_unlock_irqrestore(&vdev->fh_lock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_event_queue);

void v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev)
{
	unsigned long flags;
	struct timespec timestamp;

	ktime_get_ts(&timestamp);

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);
	__v4l2_event_queue_fh(fh, ev, &timestamp);
	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_event_queue_fh);

int v4l2_event_pending(struct v4l2_fh *fh)
{
	return fh->navailable;
}
EXPORT_SYMBOL_GPL(v4l2_event_pending);

int v4l2_event_subscribe(struct v4l2_fh *fh,
			 const struct v4l2_event_subscription *sub, unsigned elems,
			 const struct v4l2_subscribed_event_ops *ops)
{
	struct v4l2_subscribed_event *sev, *found_ev;
	unsigned long flags;
	unsigned i;

	if (sub->type == V4L2_EVENT_ALL)
		return -EINVAL;

	if (elems < 1)
		elems = 1;

	sev = kzalloc(sizeof(*sev) + sizeof(struct v4l2_kevent) * elems, GFP_KERNEL);
	if (!sev)
		return -ENOMEM;
	for (i = 0; i < elems; i++)
		sev->events[i].sev = sev;
	sev->type = sub->type;
	sev->id = sub->id;
	sev->flags = sub->flags;
	sev->fh = fh;
	sev->ops = ops;

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);
	found_ev = v4l2_event_subscribed(fh, sub->type, sub->id);
	if (!found_ev)
		list_add(&sev->list, &fh->subscribed);
	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	if (found_ev) {
		kfree(sev);
		return 0; /* Already listening */
	}

	if (sev->ops && sev->ops->add) {
		int ret = sev->ops->add(sev, elems);
		if (ret) {
			sev->ops = NULL;
			v4l2_event_unsubscribe(fh, sub);
			return ret;
		}
	}

	/* Mark as ready for use */
	sev->elems = elems;

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_event_subscribe);
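
A hedged sketch of a driver's .subscribe_event ioctl op built on this helper; the elems argument sizes the per-subscription ring buffer (here 2 for EOS, an arbitrary choice), and my_subscribe_event is an assumed driver name:

static int my_subscribe_event(struct v4l2_fh *fh,
			      const struct v4l2_event_subscription *sub)
{
	switch (sub->type) {
	case V4L2_EVENT_EOS:
		/* keep up to 2 end-of-stream events queued */
		return v4l2_event_subscribe(fh, sub, 2, NULL);
	default:
		/* elems == 0 is clamped to 1 by the helper */
		return v4l2_event_subscribe(fh, sub, 0, NULL);
	}
}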

void v4l2_event_unsubscribe_all(struct v4l2_fh *fh)
{
	struct v4l2_event_subscription sub;
	struct v4l2_subscribed_event *sev;
	unsigned long flags;

	do {
		sev = NULL;

		spin_lock_irqsave(&fh->vdev->fh_lock, flags);
		if (!list_empty(&fh->subscribed)) {
			sev = list_first_entry(&fh->subscribed,
					struct v4l2_subscribed_event, list);
			sub.type = sev->type;
			sub.id = sev->id;
		}
		spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
		if (sev)
			v4l2_event_unsubscribe(fh, &sub);
	} while (sev);
}
EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe_all);

int v4l2_event_unsubscribe(struct v4l2_fh *fh,
			   const struct v4l2_event_subscription *sub)
{
	struct v4l2_subscribed_event *sev;
	unsigned long flags;
	int i;

	if (sub->type == V4L2_EVENT_ALL) {
		v4l2_event_unsubscribe_all(fh);
		return 0;
	}

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);

	sev = v4l2_event_subscribed(fh, sub->type, sub->id);
	if (sev != NULL) {
		/* Remove any pending events for this subscription */
		for (i = 0; i < sev->in_use; i++) {
			list_del(&sev->events[sev_pos(sev, i)].list);
			fh->navailable--;
		}
		list_del(&sev->list);
	}

	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	if (sev && sev->ops && sev->ops->del)
		sev->ops->del(sev);

	kfree(sev);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe);

int v4l2_event_subdev_unsubscribe(struct v4l2_subdev *sd, struct v4l2_fh *fh,
				  struct v4l2_event_subscription *sub)
{
	return v4l2_event_unsubscribe(fh, sub);
}
EXPORT_SYMBOL_GPL(v4l2_event_subdev_unsubscribe);

static void v4l2_event_src_replace(struct v4l2_event *old,
				   const struct v4l2_event *new)
{
	u32 old_changes = old->u.src_change.changes;

	old->u.src_change = new->u.src_change;
	old->u.src_change.changes |= old_changes;
}

static void v4l2_event_src_merge(const struct v4l2_event *old,
				 struct v4l2_event *new)
{
	new->u.src_change.changes |= old->u.src_change.changes;
}

static const struct v4l2_subscribed_event_ops v4l2_event_src_ch_ops = {
	.replace = v4l2_event_src_replace,
	.merge = v4l2_event_src_merge,
};

int v4l2_src_change_event_subscribe(struct v4l2_fh *fh,
				    const struct v4l2_event_subscription *sub)
{
	if (sub->type == V4L2_EVENT_SOURCE_CHANGE)
		return v4l2_event_subscribe(fh, sub, 0, &v4l2_event_src_ch_ops);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(v4l2_src_change_event_subscribe);

int v4l2_src_change_event_subdev_subscribe(struct v4l2_subdev *sd,
		struct v4l2_fh *fh, struct v4l2_event_subscription *sub)
{
	return v4l2_src_change_event_subscribe(fh, sub);
}
EXPORT_SYMBOL_GPL(v4l2_src_change_event_subdev_subscribe);
125
drivers/media/v4l2-core/v4l2-fh.c
Normal file
@@ -0,0 +1,125 @@
/*
 * v4l2-fh.c
 *
 * V4L2 file handles.
 *
 * Copyright (C) 2009--2010 Nokia Corporation.
 *
 * Contact: Sakari Ailus <sakari.ailus@iki.fi>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */

#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>
#include <media/v4l2-ioctl.h>

void v4l2_fh_init(struct v4l2_fh *fh, struct video_device *vdev)
{
	fh->vdev = vdev;
	/* Inherit from video_device. May be overridden by the driver. */
	fh->ctrl_handler = vdev->ctrl_handler;
	INIT_LIST_HEAD(&fh->list);
	set_bit(V4L2_FL_USES_V4L2_FH, &fh->vdev->flags);
	/*
	 * determine_valid_ioctls() does not know if struct v4l2_fh
	 * is used by this driver, but here we do. So enable the
	 * prio ioctls here.
	 */
	set_bit(_IOC_NR(VIDIOC_G_PRIORITY), vdev->valid_ioctls);
	set_bit(_IOC_NR(VIDIOC_S_PRIORITY), vdev->valid_ioctls);
	fh->prio = V4L2_PRIORITY_UNSET;
	init_waitqueue_head(&fh->wait);
	INIT_LIST_HEAD(&fh->available);
	INIT_LIST_HEAD(&fh->subscribed);
	fh->sequence = -1;
}
EXPORT_SYMBOL_GPL(v4l2_fh_init);

void v4l2_fh_add(struct v4l2_fh *fh)
{
	unsigned long flags;

	v4l2_prio_open(fh->vdev->prio, &fh->prio);
	spin_lock_irqsave(&fh->vdev->fh_lock, flags);
	list_add(&fh->list, &fh->vdev->fh_list);
	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_fh_add);

int v4l2_fh_open(struct file *filp)
{
	struct video_device *vdev = video_devdata(filp);
	struct v4l2_fh *fh = kzalloc(sizeof(*fh), GFP_KERNEL);

	filp->private_data = fh;
	if (fh == NULL)
		return -ENOMEM;
	v4l2_fh_init(fh, vdev);
	v4l2_fh_add(fh);
	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_fh_open);
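
A minimal sketch of how a driver without per-open state of its own can use these helpers directly as its open/release file operations (my_fops is an assumed name):

static const struct v4l2_file_operations my_fops = {
	.owner		= THIS_MODULE,
	.open		= v4l2_fh_open,
	.release	= v4l2_fh_release,	/* defined below in this file */
	.unlocked_ioctl	= video_ioctl2,
};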

void v4l2_fh_del(struct v4l2_fh *fh)
{
	unsigned long flags;

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);
	list_del_init(&fh->list);
	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
	v4l2_prio_close(fh->vdev->prio, fh->prio);
}
EXPORT_SYMBOL_GPL(v4l2_fh_del);

void v4l2_fh_exit(struct v4l2_fh *fh)
{
	if (fh->vdev == NULL)
		return;
	v4l2_event_unsubscribe_all(fh);
	fh->vdev = NULL;
}
EXPORT_SYMBOL_GPL(v4l2_fh_exit);

int v4l2_fh_release(struct file *filp)
{
	struct v4l2_fh *fh = filp->private_data;

	if (fh) {
		v4l2_fh_del(fh);
		v4l2_fh_exit(fh);
		kfree(fh);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_fh_release);

int v4l2_fh_is_singular(struct v4l2_fh *fh)
{
	unsigned long flags;
	int is_singular;

	if (fh == NULL || fh->vdev == NULL)
		return 0;
	spin_lock_irqsave(&fh->vdev->fh_lock, flags);
	is_singular = list_is_singular(&fh->list);
	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
	return is_singular;
}
EXPORT_SYMBOL_GPL(v4l2_fh_is_singular);
2583
drivers/media/v4l2-core/v4l2-ioctl.c
Normal file
File diff suppressed because it is too large
873
drivers/media/v4l2-core/v4l2-mem2mem.c
Normal file
@@ -0,0 +1,873 @@
/*
 * Memory-to-memory device framework for Video for Linux 2 and videobuf.
 *
 * Helper functions for devices that use videobuf buffers for both their
 * source and destination.
 *
 * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
 * Pawel Osciak, <pawel@osciak.com>
 * Marek Szyprowski, <m.szyprowski@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <media/videobuf2-core.h>
#include <media/v4l2-mem2mem.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>

MODULE_DESCRIPTION("Mem to mem device framework for videobuf");
MODULE_AUTHOR("Pawel Osciak, <pawel@osciak.com>");
MODULE_LICENSE("GPL");

static bool debug;
module_param(debug, bool, 0644);

#define dprintk(fmt, arg...)						\
	do {								\
		if (debug)						\
			printk(KERN_DEBUG "%s: " fmt, __func__, ## arg);\
	} while (0)


/* Instance is already queued on the job_queue */
#define TRANS_QUEUED		(1 << 0)
/* Instance is currently running in hardware */
#define TRANS_RUNNING		(1 << 1)
/* Instance is currently aborting */
#define TRANS_ABORT		(1 << 2)


/* Offset base for buffers on the destination queue - used to distinguish
 * between source and destination buffers when mmapping - they receive the same
 * offsets but for different queues */
#define DST_QUEUE_OFF_BASE	(1 << 30)
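
A worked illustration of the offset trick described above (values chosen for the example):

/* DST_QUEUE_OFF_BASE is 1 GiB, far above any real per-queue mmap cookie.
 * A capture buffer whose vb2 offset is 0x1000 is reported to userspace
 * as 0x40001000; v4l2_m2m_mmap() further down subtracts the base again
 * before handing the offset back to videobuf2, so both queues can use
 * the same underlying per-queue offsets without colliding. */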


/**
 * struct v4l2_m2m_dev - per-device context
 * @curr_ctx:		currently running instance
 * @job_queue:		instances queued to run
 * @job_spinlock:	protects job_queue
 * @m2m_ops:		driver callbacks
 */
struct v4l2_m2m_dev {
	struct v4l2_m2m_ctx	*curr_ctx;

	struct list_head	job_queue;
	spinlock_t		job_spinlock;

	const struct v4l2_m2m_ops *m2m_ops;
};

static struct v4l2_m2m_queue_ctx *get_queue_ctx(struct v4l2_m2m_ctx *m2m_ctx,
						enum v4l2_buf_type type)
{
	if (V4L2_TYPE_IS_OUTPUT(type))
		return &m2m_ctx->out_q_ctx;
	else
		return &m2m_ctx->cap_q_ctx;
}

/**
 * v4l2_m2m_get_vq() - return vb2_queue for the given type
 */
struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
				  enum v4l2_buf_type type)
{
	struct v4l2_m2m_queue_ctx *q_ctx;

	q_ctx = get_queue_ctx(m2m_ctx, type);
	if (!q_ctx)
		return NULL;

	return &q_ctx->q;
}
EXPORT_SYMBOL(v4l2_m2m_get_vq);

/**
 * v4l2_m2m_next_buf() - return next buffer from the list of ready buffers
 */
void *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx)
{
	struct v4l2_m2m_buffer *b = NULL;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);

	if (list_empty(&q_ctx->rdy_queue)) {
		spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
		return NULL;
	}

	b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
	return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_next_buf);

/**
 * v4l2_m2m_buf_remove() - take off a buffer from the list of ready buffers and
 * return it
 */
void *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx)
{
	struct v4l2_m2m_buffer *b = NULL;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	if (list_empty(&q_ctx->rdy_queue)) {
		spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
		return NULL;
	}
	b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
	list_del(&b->list);
	q_ctx->num_rdy--;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

	return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove);

/*
 * Scheduling handlers
 */

/**
 * v4l2_m2m_get_curr_priv() - return driver private data for the currently
 * running instance or NULL if no instance is running
 */
void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev)
{
	unsigned long flags;
	void *ret = NULL;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (m2m_dev->curr_ctx)
		ret = m2m_dev->curr_ctx->priv;
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL(v4l2_m2m_get_curr_priv);

/**
 * v4l2_m2m_try_run() - select next job to perform and run it if possible
 *
 * Get next transaction (if present) from the waiting jobs list and run it.
 */
static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev)
{
	unsigned long flags;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (NULL != m2m_dev->curr_ctx) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("Another instance is running, won't run now\n");
		return;
	}

	if (list_empty(&m2m_dev->job_queue)) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("No job pending\n");
		return;
	}

	m2m_dev->curr_ctx = list_first_entry(&m2m_dev->job_queue,
					     struct v4l2_m2m_ctx, queue);
	m2m_dev->curr_ctx->job_flags |= TRANS_RUNNING;
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	m2m_dev->m2m_ops->device_run(m2m_dev->curr_ctx->priv);
}

/**
 * v4l2_m2m_try_schedule() - check whether an instance is ready to be added to
 * the pending job queue and add it if so.
 * @m2m_ctx:	m2m context assigned to the instance to be checked
 *
 * There are three basic requirements an instance has to meet to be able to run:
 * 1) at least one source buffer has to be queued,
 * 2) at least one destination buffer has to be queued,
 * 3) streaming has to be on.
 *
 * If a queue is buffered (for example a decoder hardware ringbuffer that has
 * to be drained before doing streamoff), allow scheduling without v4l2 buffers
 * on that queue.
 *
 * There may also be additional, custom requirements. In such case the driver
 * should supply a custom callback (job_ready in v4l2_m2m_ops) that should
 * return 1 if the instance is ready.
 * An example of the above could be an instance that requires more than one
 * src/dst buffer per transaction.
 */
void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
{
	struct v4l2_m2m_dev *m2m_dev;
	unsigned long flags_job, flags_out, flags_cap;

	m2m_dev = m2m_ctx->m2m_dev;
	dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx);

	if (!m2m_ctx->out_q_ctx.q.streaming
	    || !m2m_ctx->cap_q_ctx.q.streaming) {
		dprintk("Streaming needs to be on for both queues\n");
		return;
	}

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);

	/* If the context is aborted then don't schedule it */
	if (m2m_ctx->job_flags & TRANS_ABORT) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("Aborted context\n");
		return;
	}

	if (m2m_ctx->job_flags & TRANS_QUEUED) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("On job queue already\n");
		return;
	}

	spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);
	if (list_empty(&m2m_ctx->out_q_ctx.rdy_queue)
	    && !m2m_ctx->out_q_ctx.buffered) {
		spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock,
					flags_out);
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("No input buffers available\n");
		return;
	}
	spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
	if (list_empty(&m2m_ctx->cap_q_ctx.rdy_queue)
	    && !m2m_ctx->cap_q_ctx.buffered) {
		spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock,
					flags_cap);
		spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock,
					flags_out);
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("No output buffers available\n");
		return;
	}
	spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
	spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);

	if (m2m_dev->m2m_ops->job_ready
		&& (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("Driver not ready\n");
		return;
	}

	list_add_tail(&m2m_ctx->queue, &m2m_dev->job_queue);
	m2m_ctx->job_flags |= TRANS_QUEUED;

	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);

	v4l2_m2m_try_run(m2m_dev);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_try_schedule);
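
A hedged sketch of the custom job_ready callback mentioned in the comment above: an instance that consumes two source buffers per transaction only reports ready once both are queued. struct my_ctx and its m2m_ctx member are assumed driver-private names:

static int my_job_ready(void *priv)
{
	struct my_ctx *ctx = priv;	/* driver context, hypothetical */

	/* helper from v4l2-mem2mem.h: count of buffers on the src rdy_queue */
	return v4l2_m2m_num_src_bufs_ready(ctx->m2m_ctx) >= 2;
}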

/**
 * v4l2_m2m_cancel_job() - cancel pending jobs for the context
 *
 * In case of streamoff or release called on any context,
 * 1] If the context is currently running, then abort job will be called
 * 2] If the context is queued, then the context will be removed from
 *    the job_queue
 */
static void v4l2_m2m_cancel_job(struct v4l2_m2m_ctx *m2m_ctx)
{
	struct v4l2_m2m_dev *m2m_dev;
	unsigned long flags;

	m2m_dev = m2m_ctx->m2m_dev;
	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);

	m2m_ctx->job_flags |= TRANS_ABORT;
	if (m2m_ctx->job_flags & TRANS_RUNNING) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
		dprintk("m2m_ctx %p running, will wait to complete", m2m_ctx);
		wait_event(m2m_ctx->finished,
				!(m2m_ctx->job_flags & TRANS_RUNNING));
	} else if (m2m_ctx->job_flags & TRANS_QUEUED) {
		list_del(&m2m_ctx->queue);
		m2m_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("m2m_ctx: %p had been on queue and was removed\n",
			m2m_ctx);
	} else {
		/* Do nothing, was not on queue/running */
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
	}
}

/**
 * v4l2_m2m_job_finish() - inform the framework that a job has been finished
 * and have it clean up
 *
 * Called by a driver to yield back the device after it has finished with it.
 * Should be called as soon as possible after reaching a state which allows
 * other instances to take control of the device.
 *
 * This function has to be called only after device_run() callback has been
 * called on the driver. To prevent recursion, it should not be called directly
 * from the device_run() callback though.
 */
void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
			 struct v4l2_m2m_ctx *m2m_ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (!m2m_dev->curr_ctx || m2m_dev->curr_ctx != m2m_ctx) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("Called by an instance not currently running\n");
		return;
	}

	list_del(&m2m_dev->curr_ctx->queue);
	m2m_dev->curr_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
	wake_up(&m2m_dev->curr_ctx->finished);
	m2m_dev->curr_ctx = NULL;

	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	/* This instance might have more buffers ready, but since we do not
	 * allow more than one job on the job_queue per instance, each has
	 * to be scheduled separately after the previous one finishes. */
	v4l2_m2m_try_schedule(m2m_ctx);
	v4l2_m2m_try_run(m2m_dev);
}
EXPORT_SYMBOL(v4l2_m2m_job_finish);

/**
 * v4l2_m2m_reqbufs() - multi-queue-aware REQBUFS multiplexer
 */
int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		     struct v4l2_requestbuffers *reqbufs)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, reqbufs->type);
	return vb2_reqbufs(vq, reqbufs);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_reqbufs);

/**
 * v4l2_m2m_querybuf() - multi-queue-aware QUERYBUF multiplexer
 *
 * See v4l2_m2m_mmap() documentation for details.
 */
int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		      struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;
	int ret = 0;
	unsigned int i;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	ret = vb2_querybuf(vq, buf);

	/* Adjust MMAP memory offsets for the CAPTURE queue */
	if (buf->memory == V4L2_MEMORY_MMAP && !V4L2_TYPE_IS_OUTPUT(vq->type)) {
		if (V4L2_TYPE_IS_MULTIPLANAR(vq->type)) {
			for (i = 0; i < buf->length; ++i)
				buf->m.planes[i].m.mem_offset
					+= DST_QUEUE_OFF_BASE;
		} else {
			buf->m.offset += DST_QUEUE_OFF_BASE;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf);

/**
 * v4l2_m2m_qbuf() - enqueue a source or destination buffer, depending on
 * the type
 */
int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		  struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	ret = vb2_qbuf(vq, buf);
	if (!ret)
		v4l2_m2m_try_schedule(m2m_ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_qbuf);

/**
 * v4l2_m2m_dqbuf() - dequeue a source or destination buffer, depending on
 * the type
 */
int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		   struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	return vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf);

/**
 * v4l2_m2m_create_bufs() - create a source or destination buffer, depending
 * on the type
 */
int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_create_buffers *create)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, create->format.type);
	return vb2_create_bufs(vq, create);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_create_bufs);

/**
 * v4l2_m2m_expbuf() - export a source or destination buffer, depending on
 * the type
 */
int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		    struct v4l2_exportbuffer *eb)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, eb->type);
	return vb2_expbuf(vq, eb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_expbuf);
/**
 * v4l2_m2m_streamon() - turn on streaming for a video queue
 */
int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		      enum v4l2_buf_type type)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, type);
	ret = vb2_streamon(vq, type);
	if (!ret)
		v4l2_m2m_try_schedule(m2m_ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamon);

/**
 * v4l2_m2m_streamoff() - turn off streaming for a video queue
 */
int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		       enum v4l2_buf_type type)
{
	struct v4l2_m2m_dev *m2m_dev;
	struct v4l2_m2m_queue_ctx *q_ctx;
	unsigned long flags_job, flags;
	int ret;

	/* wait until the current context is dequeued from job_queue */
	v4l2_m2m_cancel_job(m2m_ctx);

	q_ctx = get_queue_ctx(m2m_ctx, type);
	ret = vb2_streamoff(&q_ctx->q, type);
	if (ret)
		return ret;

	m2m_dev = m2m_ctx->m2m_dev;
	spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);
	/* We should not be scheduled anymore, since we're dropping a queue. */
	if (m2m_ctx->job_flags & TRANS_QUEUED)
		list_del(&m2m_ctx->queue);
	m2m_ctx->job_flags = 0;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	/* Drop queue, since streamoff returns device to the same state as after
	 * calling reqbufs. */
	INIT_LIST_HEAD(&q_ctx->rdy_queue);
	q_ctx->num_rdy = 0;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

	if (m2m_dev->curr_ctx == m2m_ctx) {
		m2m_dev->curr_ctx = NULL;
		wake_up(&m2m_ctx->finished);
	}
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamoff);

/**
 * v4l2_m2m_poll() - poll replacement, for destination buffers only
 *
 * Call from the driver's poll() function. Will poll both queues. If a buffer
 * is available to dequeue (with dqbuf) from the source queue, this will
 * indicate that a non-blocking write can be performed, while read will be
 * returned in case of the destination queue.
 */
unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			   struct poll_table_struct *wait)
{
	struct video_device *vfd = video_devdata(file);
	unsigned long req_events = poll_requested_events(wait);
	struct vb2_queue *src_q, *dst_q;
	struct vb2_buffer *src_vb = NULL, *dst_vb = NULL;
	unsigned int rc = 0;
	unsigned long flags;

	if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
		struct v4l2_fh *fh = file->private_data;

		if (v4l2_event_pending(fh))
			rc = POLLPRI;
		else if (req_events & POLLPRI)
			poll_wait(file, &fh->wait, wait);
		if (!(req_events & (POLLOUT | POLLWRNORM | POLLIN | POLLRDNORM)))
			return rc;
	}

	src_q = v4l2_m2m_get_src_vq(m2m_ctx);
	dst_q = v4l2_m2m_get_dst_vq(m2m_ctx);

	/*
	 * There has to be at least one buffer queued on each queued_list, which
	 * means either in driver already or waiting for driver to claim it
	 * and start processing.
	 */
	if ((!src_q->streaming || list_empty(&src_q->queued_list))
		&& (!dst_q->streaming || list_empty(&dst_q->queued_list))) {
		rc |= POLLERR;
		goto end;
	}

	if (m2m_ctx->m2m_dev->m2m_ops->unlock)
		m2m_ctx->m2m_dev->m2m_ops->unlock(m2m_ctx->priv);
	else if (m2m_ctx->q_lock)
		mutex_unlock(m2m_ctx->q_lock);

	if (list_empty(&src_q->done_list))
		poll_wait(file, &src_q->done_wq, wait);
	if (list_empty(&dst_q->done_list))
		poll_wait(file, &dst_q->done_wq, wait);

	if (m2m_ctx->m2m_dev->m2m_ops->lock)
		m2m_ctx->m2m_dev->m2m_ops->lock(m2m_ctx->priv);
	else if (m2m_ctx->q_lock) {
		if (mutex_lock_interruptible(m2m_ctx->q_lock)) {
			rc |= POLLERR;
			goto end;
		}
	}

	spin_lock_irqsave(&src_q->done_lock, flags);
	if (!list_empty(&src_q->done_list))
		src_vb = list_first_entry(&src_q->done_list, struct vb2_buffer,
						done_entry);
	if (src_vb && (src_vb->state == VB2_BUF_STATE_DONE
			|| src_vb->state == VB2_BUF_STATE_ERROR))
		rc |= POLLOUT | POLLWRNORM;
	spin_unlock_irqrestore(&src_q->done_lock, flags);

	spin_lock_irqsave(&dst_q->done_lock, flags);
	if (!list_empty(&dst_q->done_list))
		dst_vb = list_first_entry(&dst_q->done_list, struct vb2_buffer,
						done_entry);
	if (dst_vb && (dst_vb->state == VB2_BUF_STATE_DONE
			|| dst_vb->state == VB2_BUF_STATE_ERROR))
		rc |= POLLIN | POLLRDNORM;
	spin_unlock_irqrestore(&dst_q->done_lock, flags);

end:
	return rc;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_poll);

/**
 * v4l2_m2m_mmap() - source and destination queues-aware mmap multiplexer
 *
 * Call from driver's mmap() function. Will handle mmap() for both queues
 * seamlessly for videobuffer, which will receive normal per-queue offsets and
 * proper videobuf queue pointers. The differentiation is made outside videobuf
 * by adding a predefined offset to buffers from one of the queues and
 * subtracting it before passing it back to videobuf. Only drivers (and
 * thus applications) receive modified offsets.
 */
int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		  struct vm_area_struct *vma)
{
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	struct vb2_queue *vq;

	if (offset < DST_QUEUE_OFF_BASE) {
		vq = v4l2_m2m_get_src_vq(m2m_ctx);
	} else {
		vq = v4l2_m2m_get_dst_vq(m2m_ctx);
		vma->vm_pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT);
	}

	return vb2_mmap(vq, vma);
}
EXPORT_SYMBOL(v4l2_m2m_mmap);
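
A hypothetical user-space view of the offset round trip: QUERYBUF on the CAPTURE queue returns an offset with the base added (see v4l2_m2m_querybuf() above), and passing that offset straight back to mmap() makes v4l2_m2m_mmap() route the request to the destination queue. map_capture_buf is an assumed example name:

#include <sys/mman.h>
#include <linux/videodev2.h>

/* buf was filled in by VIDIOC_QUERYBUF on the CAPTURE queue, so
 * buf->m.offset already carries the DST_QUEUE_OFF_BASE bias */
static void *map_capture_buf(int fd, const struct v4l2_buffer *buf)
{
	return mmap(NULL, buf->length, PROT_READ, MAP_SHARED,
		    fd, buf->m.offset);
}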

/**
 * v4l2_m2m_init() - initialize per-driver m2m data
 *
 * Usually called from driver's probe() function.
 */
struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops)
{
	struct v4l2_m2m_dev *m2m_dev;

	if (!m2m_ops || WARN_ON(!m2m_ops->device_run) ||
			WARN_ON(!m2m_ops->job_abort))
		return ERR_PTR(-EINVAL);

	m2m_dev = kzalloc(sizeof *m2m_dev, GFP_KERNEL);
	if (!m2m_dev)
		return ERR_PTR(-ENOMEM);

	m2m_dev->curr_ctx = NULL;
	m2m_dev->m2m_ops = m2m_ops;
	INIT_LIST_HEAD(&m2m_dev->job_queue);
	spin_lock_init(&m2m_dev->job_spinlock);

	return m2m_dev;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_init);

/**
 * v4l2_m2m_release() - cleans up and frees a m2m_dev structure
 *
 * Usually called from driver's remove() function.
 */
void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev)
{
	kfree(m2m_dev);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_release);

/**
 * v4l2_m2m_ctx_init() - allocate and initialize a m2m context
 * @priv - driver's instance private data
 * @m2m_dev - a previously initialized m2m_dev struct
 * @vq_init - a callback for queue type-specific initialization function to be
 * used for initializing videobuf_queues
 *
 * Usually called from driver's open() function.
 */
struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
		void *drv_priv,
		int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq))
{
	struct v4l2_m2m_ctx *m2m_ctx;
	struct v4l2_m2m_queue_ctx *out_q_ctx, *cap_q_ctx;
	int ret;

	m2m_ctx = kzalloc(sizeof *m2m_ctx, GFP_KERNEL);
	if (!m2m_ctx)
		return ERR_PTR(-ENOMEM);

	m2m_ctx->priv = drv_priv;
	m2m_ctx->m2m_dev = m2m_dev;
	init_waitqueue_head(&m2m_ctx->finished);

	out_q_ctx = &m2m_ctx->out_q_ctx;
	cap_q_ctx = &m2m_ctx->cap_q_ctx;

	INIT_LIST_HEAD(&out_q_ctx->rdy_queue);
	INIT_LIST_HEAD(&cap_q_ctx->rdy_queue);
	spin_lock_init(&out_q_ctx->rdy_spinlock);
	spin_lock_init(&cap_q_ctx->rdy_spinlock);

	INIT_LIST_HEAD(&m2m_ctx->queue);

	ret = queue_init(drv_priv, &out_q_ctx->q, &cap_q_ctx->q);

	if (ret)
		goto err;
	/*
	 * If both queues use same mutex assign it as the common buffer
	 * queues lock to the m2m context. This lock is used in the
	 * v4l2_m2m_ioctl_* helpers.
	 */
	if (out_q_ctx->q.lock == cap_q_ctx->q.lock)
		m2m_ctx->q_lock = out_q_ctx->q.lock;

	return m2m_ctx;
err:
	kfree(m2m_ctx);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_init);
|
||||
|
||||
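A minimal sketch of how a driver typically ties these helpers together in its open() handler; the mydrv_* names, the context layout and the queue_init body are hypothetical placeholders, not part of this file:

/* Hypothetical driver glue, assuming struct mydrv_dev holds the
 * v4l2_m2m_dev pointer obtained from v4l2_m2m_init() at probe time,
 * and struct mydrv_ctx embeds a struct v4l2_fh named fh. */
static int mydrv_queue_init(void *priv, struct vb2_queue *src_vq,
			    struct vb2_queue *dst_vq)
{
	/* Fill in io_modes, ops, mem_ops, buf_struct_size, lock, ...
	 * for both queues here, then initialize them: */
	return vb2_queue_init(src_vq) ?: vb2_queue_init(dst_vq);
}

static int mydrv_open(struct file *file)
{
	struct mydrv_dev *dev = video_drvdata(file);
	struct mydrv_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

	if (!ctx)
		return -ENOMEM;
	v4l2_fh_init(&ctx->fh, video_devdata(file));
	ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx,
					    mydrv_queue_init);
	if (IS_ERR(ctx->fh.m2m_ctx)) {
		int ret = PTR_ERR(ctx->fh.m2m_ctx);

		v4l2_fh_exit(&ctx->fh);
		kfree(ctx);
		return ret;
	}
	file->private_data = &ctx->fh;
	v4l2_fh_add(&ctx->fh);
	return 0;
}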
/**
 * v4l2_m2m_ctx_release() - release m2m context
 *
 * Usually called from driver's release() function.
 */
void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx)
{
	/* wait until the current context is dequeued from job_queue */
	v4l2_m2m_cancel_job(m2m_ctx);

	vb2_queue_release(&m2m_ctx->cap_q_ctx.q);
	vb2_queue_release(&m2m_ctx->out_q_ctx.q);

	kfree(m2m_ctx);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_release);

/**
 * v4l2_m2m_buf_queue() - add a buffer to the proper ready buffers list.
 *
 * Call from buf_queue(), videobuf_queue_ops callback.
 */
void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx, struct vb2_buffer *vb)
{
	struct v4l2_m2m_buffer *b = container_of(vb, struct v4l2_m2m_buffer, vb);
	struct v4l2_m2m_queue_ctx *q_ctx;
	unsigned long flags;

	q_ctx = get_queue_ctx(m2m_ctx, vb->vb2_queue->type);
	if (!q_ctx)
		return;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	list_add_tail(&b->list, &q_ctx->rdy_queue);
	q_ctx->num_rdy++;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue);

/* Videobuf2 ioctl helpers */

int v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv,
				struct v4l2_requestbuffers *rb)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_reqbufs(file, fh->m2m_ctx, rb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_reqbufs);

int v4l2_m2m_ioctl_create_bufs(struct file *file, void *priv,
				struct v4l2_create_buffers *create)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_create_bufs(file, fh->m2m_ctx, create);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_create_bufs);

int v4l2_m2m_ioctl_querybuf(struct file *file, void *priv,
				struct v4l2_buffer *buf)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_querybuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_querybuf);

int v4l2_m2m_ioctl_qbuf(struct file *file, void *priv,
				struct v4l2_buffer *buf)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_qbuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_qbuf);

int v4l2_m2m_ioctl_dqbuf(struct file *file, void *priv,
				struct v4l2_buffer *buf)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_dqbuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_dqbuf);

int v4l2_m2m_ioctl_expbuf(struct file *file, void *priv,
				struct v4l2_exportbuffer *eb)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_expbuf(file, fh->m2m_ctx, eb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_expbuf);

int v4l2_m2m_ioctl_streamon(struct file *file, void *priv,
				enum v4l2_buf_type type)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_streamon(file, fh->m2m_ctx, type);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamon);

int v4l2_m2m_ioctl_streamoff(struct file *file, void *priv,
				enum v4l2_buf_type type)
{
	struct v4l2_fh *fh = file->private_data;

	return v4l2_m2m_streamoff(file, fh->m2m_ctx, type);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamoff);

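With the helpers above, a driver's ioctl table reduces to direct assignments. A sketch; the driver-specific format handlers are omitted and the table name is hypothetical:

/* Hypothetical ioctl table built from the helpers above; vidioc_g_fmt /
 * vidioc_s_fmt and friends would be driver-specific and are not shown. */
static const struct v4l2_ioctl_ops mydrv_ioctl_ops = {
	.vidioc_reqbufs		= v4l2_m2m_ioctl_reqbufs,
	.vidioc_create_bufs	= v4l2_m2m_ioctl_create_bufs,
	.vidioc_querybuf	= v4l2_m2m_ioctl_querybuf,
	.vidioc_qbuf		= v4l2_m2m_ioctl_qbuf,
	.vidioc_dqbuf		= v4l2_m2m_ioctl_dqbuf,
	.vidioc_expbuf		= v4l2_m2m_ioctl_expbuf,
	.vidioc_streamon	= v4l2_m2m_ioctl_streamon,
	.vidioc_streamoff	= v4l2_m2m_ioctl_streamoff,
};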
/*
 * v4l2_file_operations helpers. It is assumed here that the same lock is
 * used for the output and the capture buffer queue.
 */

int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct v4l2_fh *fh = file->private_data;
	struct v4l2_m2m_ctx *m2m_ctx = fh->m2m_ctx;
	int ret;

	if (m2m_ctx->q_lock && mutex_lock_interruptible(m2m_ctx->q_lock))
		return -ERESTARTSYS;

	ret = v4l2_m2m_mmap(file, m2m_ctx, vma);

	if (m2m_ctx->q_lock)
		mutex_unlock(m2m_ctx->q_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_fop_mmap);

unsigned int v4l2_m2m_fop_poll(struct file *file, poll_table *wait)
{
	struct v4l2_fh *fh = file->private_data;
	struct v4l2_m2m_ctx *m2m_ctx = fh->m2m_ctx;
	unsigned int ret;

	if (m2m_ctx->q_lock)
		mutex_lock(m2m_ctx->q_lock);

	ret = v4l2_m2m_poll(file, m2m_ctx, wait);

	if (m2m_ctx->q_lock)
		mutex_unlock(m2m_ctx->q_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_fop_poll);

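These two drop straight into a driver's file_operations table. A sketch; mydrv_open was outlined earlier and mydrv_release is its hypothetical teardown counterpart:

/* Hypothetical file_operations table; open/release are driver-specific. */
static const struct v4l2_file_operations mydrv_fops = {
	.owner		= THIS_MODULE,
	.open		= mydrv_open,
	.release	= mydrv_release,
	.poll		= v4l2_m2m_fop_poll,
	.unlocked_ioctl	= video_ioctl2,
	.mmap		= v4l2_m2m_fop_mmap,
};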
144
drivers/media/v4l2-core/v4l2-of.c
Normal file

@@ -0,0 +1,144 @@
/*
 * V4L2 OF binding parsing library
 *
 * Copyright (C) 2012 - 2013 Samsung Electronics Co., Ltd.
 * Author: Sylwester Nawrocki <s.nawrocki@samsung.com>
 *
 * Copyright (C) 2012 Renesas Electronics Corp.
 * Author: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/string.h>
#include <linux/types.h>

#include <media/v4l2-of.h>

static void v4l2_of_parse_csi_bus(const struct device_node *node,
				  struct v4l2_of_endpoint *endpoint)
{
	struct v4l2_of_bus_mipi_csi2 *bus = &endpoint->bus.mipi_csi2;
	u32 data_lanes[ARRAY_SIZE(bus->data_lanes)];
	struct property *prop;
	bool have_clk_lane = false;
	unsigned int flags = 0;
	u32 v;

	prop = of_find_property(node, "data-lanes", NULL);
	if (prop) {
		const __be32 *lane = NULL;
		int i;

		for (i = 0; i < ARRAY_SIZE(data_lanes); i++) {
			lane = of_prop_next_u32(prop, lane, &data_lanes[i]);
			if (!lane)
				break;
		}
		bus->num_data_lanes = i;
		while (i--)
			bus->data_lanes[i] = data_lanes[i];
	}

	if (!of_property_read_u32(node, "clock-lanes", &v)) {
		bus->clock_lane = v;
		have_clk_lane = true;
	}

	if (of_get_property(node, "clock-noncontinuous", &v))
		flags |= V4L2_MBUS_CSI2_NONCONTINUOUS_CLOCK;
	else if (have_clk_lane || bus->num_data_lanes > 0)
		flags |= V4L2_MBUS_CSI2_CONTINUOUS_CLOCK;

	bus->flags = flags;
	endpoint->bus_type = V4L2_MBUS_CSI2;
}

static void v4l2_of_parse_parallel_bus(const struct device_node *node,
				       struct v4l2_of_endpoint *endpoint)
{
	struct v4l2_of_bus_parallel *bus = &endpoint->bus.parallel;
	unsigned int flags = 0;
	u32 v;

	if (!of_property_read_u32(node, "hsync-active", &v))
		flags |= v ? V4L2_MBUS_HSYNC_ACTIVE_HIGH :
			V4L2_MBUS_HSYNC_ACTIVE_LOW;

	if (!of_property_read_u32(node, "vsync-active", &v))
		flags |= v ? V4L2_MBUS_VSYNC_ACTIVE_HIGH :
			V4L2_MBUS_VSYNC_ACTIVE_LOW;

	if (!of_property_read_u32(node, "pclk-sample", &v))
		flags |= v ? V4L2_MBUS_PCLK_SAMPLE_RISING :
			V4L2_MBUS_PCLK_SAMPLE_FALLING;

	if (!of_property_read_u32(node, "field-even-active", &v))
		flags |= v ? V4L2_MBUS_FIELD_EVEN_HIGH :
			V4L2_MBUS_FIELD_EVEN_LOW;
	if (flags)
		endpoint->bus_type = V4L2_MBUS_PARALLEL;
	else
		endpoint->bus_type = V4L2_MBUS_BT656;

	if (!of_property_read_u32(node, "data-active", &v))
		flags |= v ? V4L2_MBUS_DATA_ACTIVE_HIGH :
			V4L2_MBUS_DATA_ACTIVE_LOW;

	if (of_get_property(node, "slave-mode", &v))
		flags |= V4L2_MBUS_SLAVE;
	else
		flags |= V4L2_MBUS_MASTER;

	if (!of_property_read_u32(node, "bus-width", &v))
		bus->bus_width = v;

	if (!of_property_read_u32(node, "data-shift", &v))
		bus->data_shift = v;

	if (!of_property_read_u32(node, "sync-on-green-active", &v))
		flags |= v ? V4L2_MBUS_VIDEO_SOG_ACTIVE_HIGH :
			V4L2_MBUS_VIDEO_SOG_ACTIVE_LOW;

	bus->flags = flags;
}

/**
 * v4l2_of_parse_endpoint() - parse all endpoint node properties
 * @node: pointer to endpoint device_node
 * @endpoint: pointer to the V4L2 OF endpoint data structure
 *
 * All properties are optional. If none are found, we don't set any flags.
 * This means the port has a static configuration and no properties have
 * to be specified explicitly.
 * If any properties that identify the bus as parallel are found and
 * slave-mode isn't set, we set V4L2_MBUS_MASTER. Similarly, if we recognise
 * the bus as serial CSI-2 and clock-noncontinuous isn't set, we set the
 * V4L2_MBUS_CSI2_CONTINUOUS_CLOCK flag.
 * The caller should hold a reference to @node.
 *
 * Return: 0.
 */
int v4l2_of_parse_endpoint(const struct device_node *node,
			   struct v4l2_of_endpoint *endpoint)
{
	of_graph_parse_endpoint(node, &endpoint->base);
	endpoint->bus_type = 0;
	memset(&endpoint->bus, 0, sizeof(endpoint->bus));

	v4l2_of_parse_csi_bus(node, endpoint);
	/*
	 * Parse the parallel video bus properties only if none
	 * of the MIPI CSI-2 specific properties were found.
	 */
	if (endpoint->bus.mipi_csi2.flags == 0)
		v4l2_of_parse_parallel_bus(node, endpoint);

	return 0;
}
EXPORT_SYMBOL(v4l2_of_parse_endpoint);
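A short sketch of the intended call pattern, assuming a driver that already holds its own device_node; of_graph_get_next_endpoint() is the standard OF-graph lookup, while the surrounding names are hypothetical:

/* Hypothetical probe-time parsing of the first endpoint under dev_node. */
#include <linux/of_graph.h>

static int mydrv_parse_dt(struct device *dev, struct device_node *dev_node)
{
	struct v4l2_of_endpoint endpoint;
	struct device_node *ep;

	ep = of_graph_get_next_endpoint(dev_node, NULL);
	if (!ep)
		return -ENODEV;

	v4l2_of_parse_endpoint(ep, &endpoint);
	/* endpoint.bus is a union: pick .parallel or .mipi_csi2
	 * according to the reported bus_type. */
	dev_info(dev, "endpoint bus_type %u\n", endpoint.bus_type);

	of_node_put(ep);	/* parse_endpoint does not consume the ref */
	return 0;
}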
587
drivers/media/v4l2-core/v4l2-subdev.c
Normal file

@@ -0,0 +1,587 @@
/*
 * V4L2 sub-device
 *
 * Copyright (C) 2010 Nokia Corporation
 *
 * Contact: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 *	    Sakari Ailus <sakari.ailus@iki.fi>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/ioctl.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/videodev2.h>
#include <linux/export.h>

#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>

static int subdev_fh_init(struct v4l2_subdev_fh *fh, struct v4l2_subdev *sd)
{
#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
	fh->pad = kzalloc(sizeof(*fh->pad) * sd->entity.num_pads, GFP_KERNEL);
	if (fh->pad == NULL)
		return -ENOMEM;
#endif
	return 0;
}

static void subdev_fh_free(struct v4l2_subdev_fh *fh)
{
#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
	kfree(fh->pad);
	fh->pad = NULL;
#endif
}

static int subdev_open(struct file *file)
{
	struct video_device *vdev = video_devdata(file);
	struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
	struct v4l2_subdev_fh *subdev_fh;
#if defined(CONFIG_MEDIA_CONTROLLER)
	struct media_entity *entity = NULL;
#endif
	int ret;

	subdev_fh = kzalloc(sizeof(*subdev_fh), GFP_KERNEL);
	if (subdev_fh == NULL)
		return -ENOMEM;

	ret = subdev_fh_init(subdev_fh, sd);
	if (ret) {
		kfree(subdev_fh);
		return ret;
	}

	v4l2_fh_init(&subdev_fh->vfh, vdev);
	v4l2_fh_add(&subdev_fh->vfh);
	file->private_data = &subdev_fh->vfh;
#if defined(CONFIG_MEDIA_CONTROLLER)
	if (sd->v4l2_dev->mdev) {
		entity = media_entity_get(&sd->entity);
		if (!entity) {
			ret = -EBUSY;
			goto err;
		}
	}
#endif

	if (sd->internal_ops && sd->internal_ops->open) {
		ret = sd->internal_ops->open(sd, subdev_fh);
		if (ret < 0)
			goto err;
	}

	return 0;

err:
#if defined(CONFIG_MEDIA_CONTROLLER)
	if (entity)
		media_entity_put(entity);
#endif
	v4l2_fh_del(&subdev_fh->vfh);
	v4l2_fh_exit(&subdev_fh->vfh);
	subdev_fh_free(subdev_fh);
	kfree(subdev_fh);

	return ret;
}

static int subdev_close(struct file *file)
{
	struct video_device *vdev = video_devdata(file);
	struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
	struct v4l2_fh *vfh = file->private_data;
	struct v4l2_subdev_fh *subdev_fh = to_v4l2_subdev_fh(vfh);

	if (sd->internal_ops && sd->internal_ops->close)
		sd->internal_ops->close(sd, subdev_fh);
#if defined(CONFIG_MEDIA_CONTROLLER)
	if (sd->v4l2_dev->mdev)
		media_entity_put(&sd->entity);
#endif
	v4l2_fh_del(vfh);
	v4l2_fh_exit(vfh);
	subdev_fh_free(subdev_fh);
	kfree(subdev_fh);
	file->private_data = NULL;

	return 0;
}

#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
static int check_format(struct v4l2_subdev *sd,
			struct v4l2_subdev_format *format)
{
	if (format->which != V4L2_SUBDEV_FORMAT_TRY &&
	    format->which != V4L2_SUBDEV_FORMAT_ACTIVE)
		return -EINVAL;

	if (format->pad >= sd->entity.num_pads)
		return -EINVAL;

	return 0;
}

static int check_crop(struct v4l2_subdev *sd, struct v4l2_subdev_crop *crop)
{
	if (crop->which != V4L2_SUBDEV_FORMAT_TRY &&
	    crop->which != V4L2_SUBDEV_FORMAT_ACTIVE)
		return -EINVAL;

	if (crop->pad >= sd->entity.num_pads)
		return -EINVAL;

	return 0;
}

static int check_selection(struct v4l2_subdev *sd,
			   struct v4l2_subdev_selection *sel)
{
	if (sel->which != V4L2_SUBDEV_FORMAT_TRY &&
	    sel->which != V4L2_SUBDEV_FORMAT_ACTIVE)
		return -EINVAL;

	if (sel->pad >= sd->entity.num_pads)
		return -EINVAL;

	return 0;
}

static int check_edid(struct v4l2_subdev *sd, struct v4l2_subdev_edid *edid)
{
	if (edid->pad >= sd->entity.num_pads)
		return -EINVAL;

	if (edid->blocks && edid->edid == NULL)
		return -EINVAL;

	return 0;
}
#endif

static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
{
	struct video_device *vdev = video_devdata(file);
	struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
	struct v4l2_fh *vfh = file->private_data;
#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
	struct v4l2_subdev_fh *subdev_fh = to_v4l2_subdev_fh(vfh);
	int rval;
#endif

	switch (cmd) {
	case VIDIOC_QUERYCTRL:
		return v4l2_queryctrl(vfh->ctrl_handler, arg);

	case VIDIOC_QUERY_EXT_CTRL:
		return v4l2_query_ext_ctrl(vfh->ctrl_handler, arg);

	case VIDIOC_QUERYMENU:
		return v4l2_querymenu(vfh->ctrl_handler, arg);

	case VIDIOC_G_CTRL:
		return v4l2_g_ctrl(vfh->ctrl_handler, arg);

	case VIDIOC_S_CTRL:
		return v4l2_s_ctrl(vfh, vfh->ctrl_handler, arg);

	case VIDIOC_G_EXT_CTRLS:
		return v4l2_g_ext_ctrls(vfh->ctrl_handler, arg);

	case VIDIOC_S_EXT_CTRLS:
		return v4l2_s_ext_ctrls(vfh, vfh->ctrl_handler, arg);

	case VIDIOC_TRY_EXT_CTRLS:
		return v4l2_try_ext_ctrls(vfh->ctrl_handler, arg);

	case VIDIOC_DQEVENT:
		if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS))
			return -ENOIOCTLCMD;

		return v4l2_event_dequeue(vfh, arg, file->f_flags & O_NONBLOCK);

	case VIDIOC_SUBSCRIBE_EVENT:
		return v4l2_subdev_call(sd, core, subscribe_event, vfh, arg);

	case VIDIOC_UNSUBSCRIBE_EVENT:
		return v4l2_subdev_call(sd, core, unsubscribe_event, vfh, arg);

#ifdef CONFIG_VIDEO_ADV_DEBUG
	case VIDIOC_DBG_G_REGISTER:
	{
		struct v4l2_dbg_register *p = arg;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		return v4l2_subdev_call(sd, core, g_register, p);
	}
	case VIDIOC_DBG_S_REGISTER:
	{
		struct v4l2_dbg_register *p = arg;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		return v4l2_subdev_call(sd, core, s_register, p);
	}
#endif

	case VIDIOC_LOG_STATUS: {
		int ret;

		pr_info("%s: ================= START STATUS =================\n",
			sd->name);
		ret = v4l2_subdev_call(sd, core, log_status);
		pr_info("%s: ================== END STATUS ==================\n",
			sd->name);
		return ret;
	}

#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
	case VIDIOC_SUBDEV_G_FMT: {
		struct v4l2_subdev_format *format = arg;

		rval = check_format(sd, format);
		if (rval)
			return rval;

		return v4l2_subdev_call(sd, pad, get_fmt, subdev_fh, format);
	}

	case VIDIOC_SUBDEV_S_FMT: {
		struct v4l2_subdev_format *format = arg;

		rval = check_format(sd, format);
		if (rval)
			return rval;

		return v4l2_subdev_call(sd, pad, set_fmt, subdev_fh, format);
	}

	case VIDIOC_SUBDEV_G_CROP: {
		struct v4l2_subdev_crop *crop = arg;
		struct v4l2_subdev_selection sel;

		rval = check_crop(sd, crop);
		if (rval)
			return rval;

		rval = v4l2_subdev_call(sd, pad, get_crop, subdev_fh, crop);
		if (rval != -ENOIOCTLCMD)
			return rval;

		memset(&sel, 0, sizeof(sel));
		sel.which = crop->which;
		sel.pad = crop->pad;
		sel.target = V4L2_SEL_TGT_CROP;

		rval = v4l2_subdev_call(
			sd, pad, get_selection, subdev_fh, &sel);

		crop->rect = sel.r;

		return rval;
	}

	case VIDIOC_SUBDEV_S_CROP: {
		struct v4l2_subdev_crop *crop = arg;
		struct v4l2_subdev_selection sel;

		rval = check_crop(sd, crop);
		if (rval)
			return rval;

		rval = v4l2_subdev_call(sd, pad, set_crop, subdev_fh, crop);
		if (rval != -ENOIOCTLCMD)
			return rval;

		memset(&sel, 0, sizeof(sel));
		sel.which = crop->which;
		sel.pad = crop->pad;
		sel.target = V4L2_SEL_TGT_CROP;
		sel.r = crop->rect;

		rval = v4l2_subdev_call(
			sd, pad, set_selection, subdev_fh, &sel);

		crop->rect = sel.r;

		return rval;
	}

	case VIDIOC_SUBDEV_ENUM_MBUS_CODE: {
		struct v4l2_subdev_mbus_code_enum *code = arg;

		if (code->pad >= sd->entity.num_pads)
			return -EINVAL;

		return v4l2_subdev_call(sd, pad, enum_mbus_code, subdev_fh,
					code);
	}

	case VIDIOC_SUBDEV_ENUM_FRAME_SIZE: {
		struct v4l2_subdev_frame_size_enum *fse = arg;

		if (fse->pad >= sd->entity.num_pads)
			return -EINVAL;

		return v4l2_subdev_call(sd, pad, enum_frame_size, subdev_fh,
					fse);
	}

	case VIDIOC_SUBDEV_G_FRAME_INTERVAL: {
		struct v4l2_subdev_frame_interval *fi = arg;

		if (fi->pad >= sd->entity.num_pads)
			return -EINVAL;

		return v4l2_subdev_call(sd, video, g_frame_interval, arg);
	}

	case VIDIOC_SUBDEV_S_FRAME_INTERVAL: {
		struct v4l2_subdev_frame_interval *fi = arg;

		if (fi->pad >= sd->entity.num_pads)
			return -EINVAL;

		return v4l2_subdev_call(sd, video, s_frame_interval, arg);
	}

	case VIDIOC_SUBDEV_ENUM_FRAME_INTERVAL: {
		struct v4l2_subdev_frame_interval_enum *fie = arg;

		if (fie->pad >= sd->entity.num_pads)
			return -EINVAL;

		return v4l2_subdev_call(sd, pad, enum_frame_interval, subdev_fh,
					fie);
	}

	case VIDIOC_SUBDEV_G_SELECTION: {
		struct v4l2_subdev_selection *sel = arg;

		rval = check_selection(sd, sel);
		if (rval)
			return rval;

		return v4l2_subdev_call(
			sd, pad, get_selection, subdev_fh, sel);
	}

	case VIDIOC_SUBDEV_S_SELECTION: {
		struct v4l2_subdev_selection *sel = arg;

		rval = check_selection(sd, sel);
		if (rval)
			return rval;

		return v4l2_subdev_call(
			sd, pad, set_selection, subdev_fh, sel);
	}

	case VIDIOC_G_EDID: {
		struct v4l2_subdev_edid *edid = arg;

		rval = check_edid(sd, edid);
		if (rval)
			return rval;

		return v4l2_subdev_call(sd, pad, get_edid, edid);
	}

	case VIDIOC_S_EDID: {
		struct v4l2_subdev_edid *edid = arg;

		rval = check_edid(sd, edid);
		if (rval)
			return rval;

		return v4l2_subdev_call(sd, pad, set_edid, edid);
	}

	case VIDIOC_SUBDEV_DV_TIMINGS_CAP: {
		struct v4l2_dv_timings_cap *cap = arg;

		if (cap->pad >= sd->entity.num_pads)
			return -EINVAL;

		return v4l2_subdev_call(sd, pad, dv_timings_cap, cap);
	}

	case VIDIOC_SUBDEV_ENUM_DV_TIMINGS: {
		struct v4l2_enum_dv_timings *dvt = arg;

		if (dvt->pad >= sd->entity.num_pads)
			return -EINVAL;

		return v4l2_subdev_call(sd, pad, enum_dv_timings, dvt);
	}

	case VIDIOC_SUBDEV_QUERY_DV_TIMINGS:
		return v4l2_subdev_call(sd, video, query_dv_timings, arg);

	case VIDIOC_SUBDEV_G_DV_TIMINGS:
		return v4l2_subdev_call(sd, video, g_dv_timings, arg);

	case VIDIOC_SUBDEV_S_DV_TIMINGS:
		return v4l2_subdev_call(sd, video, s_dv_timings, arg);
#endif
	default:
		return v4l2_subdev_call(sd, core, ioctl, cmd, arg);
	}

	return 0;
}

static long subdev_ioctl(struct file *file, unsigned int cmd,
	unsigned long arg)
{
	return video_usercopy(file, cmd, arg, subdev_do_ioctl);
}

#ifdef CONFIG_COMPAT
static long subdev_compat_ioctl32(struct file *file, unsigned int cmd,
	unsigned long arg)
{
	struct video_device *vdev = video_devdata(file);
	struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);

	return v4l2_subdev_call(sd, core, compat_ioctl32, cmd, arg);
}
#endif

static unsigned int subdev_poll(struct file *file, poll_table *wait)
{
	struct video_device *vdev = video_devdata(file);
	struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
	struct v4l2_fh *fh = file->private_data;

	if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS))
		return POLLERR;

	poll_wait(file, &fh->wait, wait);

	if (v4l2_event_pending(fh))
		return POLLPRI;

	return 0;
}

const struct v4l2_file_operations v4l2_subdev_fops = {
	.owner = THIS_MODULE,
	.open = subdev_open,
	.unlocked_ioctl = subdev_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl32 = subdev_compat_ioctl32,
#endif
	.release = subdev_close,
	.poll = subdev_poll,
};

#ifdef CONFIG_MEDIA_CONTROLLER
int v4l2_subdev_link_validate_default(struct v4l2_subdev *sd,
				      struct media_link *link,
				      struct v4l2_subdev_format *source_fmt,
				      struct v4l2_subdev_format *sink_fmt)
{
	/* The width, height and code must match. */
	if (source_fmt->format.width != sink_fmt->format.width
	    || source_fmt->format.height != sink_fmt->format.height
	    || source_fmt->format.code != sink_fmt->format.code)
		return -EINVAL;

	/* The field order must match, or the sink field order must be NONE
	 * to support interlaced hardware connected to bridges that support
	 * progressive formats only.
	 */
	if (source_fmt->format.field != sink_fmt->format.field &&
	    sink_fmt->format.field != V4L2_FIELD_NONE)
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_subdev_link_validate_default);

static int
v4l2_subdev_link_validate_get_format(struct media_pad *pad,
				     struct v4l2_subdev_format *fmt)
{
	if (media_entity_type(pad->entity) == MEDIA_ENT_T_V4L2_SUBDEV) {
		struct v4l2_subdev *sd =
			media_entity_to_v4l2_subdev(pad->entity);

		fmt->which = V4L2_SUBDEV_FORMAT_ACTIVE;
		fmt->pad = pad->index;
		return v4l2_subdev_call(sd, pad, get_fmt, NULL, fmt);
	}

	WARN(pad->entity->type != MEDIA_ENT_T_DEVNODE_V4L,
	     "Driver bug! Wrong media entity type 0x%08x, entity %s\n",
	     pad->entity->type, pad->entity->name);

	return -EINVAL;
}

int v4l2_subdev_link_validate(struct media_link *link)
{
	struct v4l2_subdev *sink;
	struct v4l2_subdev_format sink_fmt, source_fmt;
	int rval;

	rval = v4l2_subdev_link_validate_get_format(
		link->source, &source_fmt);
	if (rval < 0)
		return 0;

	rval = v4l2_subdev_link_validate_get_format(
		link->sink, &sink_fmt);
	if (rval < 0)
		return 0;

	sink = media_entity_to_v4l2_subdev(link->sink->entity);

	rval = v4l2_subdev_call(sink, pad, link_validate, link,
				&source_fmt, &sink_fmt);
	if (rval != -ENOIOCTLCMD)
		return rval;

	return v4l2_subdev_link_validate_default(
		sink, link, &source_fmt, &sink_fmt);
}
EXPORT_SYMBOL_GPL(v4l2_subdev_link_validate);
#endif /* CONFIG_MEDIA_CONTROLLER */

void v4l2_subdev_init(struct v4l2_subdev *sd, const struct v4l2_subdev_ops *ops)
{
	INIT_LIST_HEAD(&sd->list);
	BUG_ON(!ops);
	sd->ops = ops;
	sd->v4l2_dev = NULL;
	sd->flags = 0;
	sd->name[0] = '\0';
	sd->grp_id = 0;
	sd->dev_priv = NULL;
	sd->host_priv = NULL;
#if defined(CONFIG_MEDIA_CONTROLLER)
	sd->entity.name = sd->name;
	sd->entity.type = MEDIA_ENT_T_V4L2_SUBDEV;
#endif
}
EXPORT_SYMBOL(v4l2_subdev_init);
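The usual pairing with v4l2_subdev_init() in a sensor or bridge driver looks roughly like this; the ops tables and the "mysensor" name are hypothetical placeholders, not part of this file:

/* Hypothetical subdev setup: fill in the ops tables, initialize the
 * subdev, then register it with a v4l2_device elsewhere in the driver. */
static const struct v4l2_subdev_core_ops mysensor_core_ops = { /* ... */ };
static const struct v4l2_subdev_video_ops mysensor_video_ops = { /* ... */ };

static const struct v4l2_subdev_ops mysensor_ops = {
	.core  = &mysensor_core_ops,
	.video = &mysensor_video_ops,
};

static void mysensor_init_subdev(struct v4l2_subdev *sd,
				 struct i2c_client *client)
{
	v4l2_subdev_init(sd, &mysensor_ops);
	sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
	strlcpy(sd->name, "mysensor", sizeof(sd->name));
	v4l2_set_subdevdata(sd, client);
}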
1194
drivers/media/v4l2-core/videobuf-core.c
Normal file
(File diff suppressed because it is too large.)

412
drivers/media/v4l2-core/videobuf-dma-contig.c
Normal file

@@ -0,0 +1,412 @@
/*
 * helper functions for physically contiguous capture buffers
 *
 * The functions support hardware lacking scatter gather support
 * (i.e. the buffers must be linear in physical memory)
 *
 * Copyright (c) 2008 Magnus Damm
 *
 * Based on videobuf-vmalloc.c,
 * (c) 2007 Mauro Carvalho Chehab, <mchehab@infradead.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <media/videobuf-dma-contig.h>

struct videobuf_dma_contig_memory {
	u32 magic;
	void *vaddr;
	dma_addr_t dma_handle;
	unsigned long size;
};

#define MAGIC_DC_MEM 0x0733ac61
#define MAGIC_CHECK(is, should)						    \
	if (unlikely((is) != (should))) {				    \
		pr_err("magic mismatch: %x expected %x\n", (is), (should)); \
		BUG();							    \
	}

static int __videobuf_dc_alloc(struct device *dev,
			       struct videobuf_dma_contig_memory *mem,
			       unsigned long size, gfp_t flags)
{
	mem->size = size;
	mem->vaddr = dma_alloc_coherent(dev, mem->size,
					&mem->dma_handle, flags);

	if (!mem->vaddr) {
		dev_err(dev, "memory alloc size %ld failed\n", mem->size);
		return -ENOMEM;
	}

	dev_dbg(dev, "dma mapped data is at %p (%ld)\n", mem->vaddr, mem->size);

	return 0;
}

static void __videobuf_dc_free(struct device *dev,
			       struct videobuf_dma_contig_memory *mem)
{
	dma_free_coherent(dev, mem->size, mem->vaddr, mem->dma_handle);

	mem->vaddr = NULL;
}

static void videobuf_vm_open(struct vm_area_struct *vma)
{
	struct videobuf_mapping *map = vma->vm_private_data;

	dev_dbg(map->q->dev, "vm_open %p [count=%u,vma=%08lx-%08lx]\n",
		map, map->count, vma->vm_start, vma->vm_end);

	map->count++;
}

static void videobuf_vm_close(struct vm_area_struct *vma)
{
	struct videobuf_mapping *map = vma->vm_private_data;
	struct videobuf_queue *q = map->q;
	int i;

	dev_dbg(q->dev, "vm_close %p [count=%u,vma=%08lx-%08lx]\n",
		map, map->count, vma->vm_start, vma->vm_end);

	map->count--;
	if (0 == map->count) {
		struct videobuf_dma_contig_memory *mem;

		dev_dbg(q->dev, "munmap %p q=%p\n", map, q);
		videobuf_queue_lock(q);

		/* We need first to cancel streams, before unmapping */
		if (q->streaming)
			videobuf_queue_cancel(q);

		for (i = 0; i < VIDEO_MAX_FRAME; i++) {
			if (NULL == q->bufs[i])
				continue;

			if (q->bufs[i]->map != map)
				continue;

			mem = q->bufs[i]->priv;
			if (mem) {
				/* This callback is called only if the kernel
				   has allocated memory and this memory is
				   mmapped. In this case, the memory should
				   be freed in order to undo the mapping.
				 */

				MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

				/* vfree is not atomic - can't be
				   called with IRQs disabled
				 */
				dev_dbg(q->dev, "buf[%d] freeing %p\n",
					i, mem->vaddr);

				__videobuf_dc_free(q->dev, mem);
				mem->vaddr = NULL;
			}

			q->bufs[i]->map = NULL;
			q->bufs[i]->baddr = 0;
		}

		kfree(map);

		videobuf_queue_unlock(q);
	}
}

static const struct vm_operations_struct videobuf_vm_ops = {
	.open	= videobuf_vm_open,
	.close	= videobuf_vm_close,
};

/**
 * videobuf_dma_contig_user_put() - reset pointer to user space buffer
 * @mem: per-buffer private videobuf-dma-contig data
 *
 * This function resets the user space pointer
 */
static void videobuf_dma_contig_user_put(struct videobuf_dma_contig_memory *mem)
{
	mem->dma_handle = 0;
	mem->size = 0;
}

/**
 * videobuf_dma_contig_user_get() - setup user space memory pointer
 * @mem: per-buffer private videobuf-dma-contig data
 * @vb: video buffer to map
 *
 * This function validates and sets up a pointer to user space memory.
 * Only physically contiguous pfn-mapped memory is accepted.
 *
 * Returns 0 if successful.
 */
static int videobuf_dma_contig_user_get(struct videobuf_dma_contig_memory *mem,
					struct videobuf_buffer *vb)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long prev_pfn, this_pfn;
	unsigned long pages_done, user_address;
	unsigned int offset;
	int ret;

	offset = vb->baddr & ~PAGE_MASK;
	mem->size = PAGE_ALIGN(vb->size + offset);
	ret = -EINVAL;

	down_read(&mm->mmap_sem);

	vma = find_vma(mm, vb->baddr);
	if (!vma)
		goto out_up;

	if ((vb->baddr + mem->size) > vma->vm_end)
		goto out_up;

	pages_done = 0;
	prev_pfn = 0; /* kill warning */
	user_address = vb->baddr;

	while (pages_done < (mem->size >> PAGE_SHIFT)) {
		ret = follow_pfn(vma, user_address, &this_pfn);
		if (ret)
			break;

		if (pages_done == 0)
			mem->dma_handle = (this_pfn << PAGE_SHIFT) + offset;
		else if (this_pfn != (prev_pfn + 1))
			ret = -EFAULT;

		if (ret)
			break;

		prev_pfn = this_pfn;
		user_address += PAGE_SIZE;
		pages_done++;
	}

out_up:
	up_read(&current->mm->mmap_sem);

	return ret;
}

static struct videobuf_buffer *__videobuf_alloc(size_t size)
{
	struct videobuf_dma_contig_memory *mem;
	struct videobuf_buffer *vb;

	vb = kzalloc(size + sizeof(*mem), GFP_KERNEL);
	if (vb) {
		vb->priv = ((char *)vb) + size;
		mem = vb->priv;
		mem->magic = MAGIC_DC_MEM;
	}

	return vb;
}

static void *__videobuf_to_vaddr(struct videobuf_buffer *buf)
{
	struct videobuf_dma_contig_memory *mem = buf->priv;

	BUG_ON(!mem);
	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

	return mem->vaddr;
}

static int __videobuf_iolock(struct videobuf_queue *q,
			     struct videobuf_buffer *vb,
			     struct v4l2_framebuffer *fbuf)
{
	struct videobuf_dma_contig_memory *mem = vb->priv;

	BUG_ON(!mem);
	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

	switch (vb->memory) {
	case V4L2_MEMORY_MMAP:
		dev_dbg(q->dev, "%s memory method MMAP\n", __func__);

		/* All handling should be done by __videobuf_mmap_mapper() */
		if (!mem->vaddr) {
			dev_err(q->dev, "memory is not alloced/mmapped.\n");
			return -EINVAL;
		}
		break;
	case V4L2_MEMORY_USERPTR:
		dev_dbg(q->dev, "%s memory method USERPTR\n", __func__);

		/* handle pointer from user space */
		if (vb->baddr)
			return videobuf_dma_contig_user_get(mem, vb);

		/* allocate memory for the read() method */
		if (__videobuf_dc_alloc(q->dev, mem, PAGE_ALIGN(vb->size),
					GFP_KERNEL))
			return -ENOMEM;
		break;
	case V4L2_MEMORY_OVERLAY:
	default:
		dev_dbg(q->dev, "%s memory method OVERLAY/unknown\n", __func__);
		return -EINVAL;
	}

	return 0;
}

static int __videobuf_mmap_mapper(struct videobuf_queue *q,
				  struct videobuf_buffer *buf,
				  struct vm_area_struct *vma)
{
	struct videobuf_dma_contig_memory *mem;
	struct videobuf_mapping *map;
	int retval;
	unsigned long size;

	dev_dbg(q->dev, "%s\n", __func__);

	/* create mapping + update buffer list */
	map = kzalloc(sizeof(struct videobuf_mapping), GFP_KERNEL);
	if (!map)
		return -ENOMEM;

	buf->map = map;
	map->q = q;

	buf->baddr = vma->vm_start;

	mem = buf->priv;
	BUG_ON(!mem);
	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

	if (__videobuf_dc_alloc(q->dev, mem, PAGE_ALIGN(buf->bsize),
				GFP_KERNEL | __GFP_COMP))
		goto error;

	/* Try to remap memory */
	size = vma->vm_end - vma->vm_start;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	/* the "vm_pgoff" is just used in v4l2 to find the
	 * corresponding buffer data structure which is allocated
	 * earlier and it does not mean the offset from the physical
	 * buffer start address as usual. So set it to 0 to pass
	 * the sanity check in vm_iomap_memory().
	 */
	vma->vm_pgoff = 0;

	retval = vm_iomap_memory(vma, mem->dma_handle, size);
	if (retval) {
		dev_err(q->dev, "mmap: remap failed with error %d. ",
			retval);
		dma_free_coherent(q->dev, mem->size,
				  mem->vaddr, mem->dma_handle);
		goto error;
	}

	vma->vm_ops = &videobuf_vm_ops;
	vma->vm_flags |= VM_DONTEXPAND;
	vma->vm_private_data = map;

	dev_dbg(q->dev, "mmap %p: q=%p %08lx-%08lx (%lx) pgoff %08lx buf %d\n",
		map, q, vma->vm_start, vma->vm_end,
		(long int)buf->bsize, vma->vm_pgoff, buf->i);

	videobuf_vm_open(vma);

	return 0;

error:
	kfree(map);
	return -ENOMEM;
}

static struct videobuf_qtype_ops qops = {
	.magic		= MAGIC_QTYPE_OPS,
	.alloc_vb	= __videobuf_alloc,
	.iolock		= __videobuf_iolock,
	.mmap_mapper	= __videobuf_mmap_mapper,
	.vaddr		= __videobuf_to_vaddr,
};

void videobuf_queue_dma_contig_init(struct videobuf_queue *q,
				    const struct videobuf_queue_ops *ops,
				    struct device *dev,
				    spinlock_t *irqlock,
				    enum v4l2_buf_type type,
				    enum v4l2_field field,
				    unsigned int msize,
				    void *priv,
				    struct mutex *ext_lock)
{
	videobuf_queue_core_init(q, ops, dev, irqlock, type, field, msize,
				 priv, &qops, ext_lock);
}
EXPORT_SYMBOL_GPL(videobuf_queue_dma_contig_init);

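For orientation, a hedged sketch of how a capture driver would adopt this allocator at init time and fetch the bus address of a queued buffer later; the mydrv names, the queue_ops table and the register offset are hypothetical, not part of this file:

/* Hypothetical driver init: route all buffer handling through the
 * dma-contig backend, then use videobuf_to_dma_contig() when queuing
 * a buffer to program the DMA engine. */
static void mydrv_init_queue(struct mydrv_dev *dev)
{
	videobuf_queue_dma_contig_init(&dev->vb_vidq, &mydrv_video_qops,
				       dev->dev, &dev->slock,
				       V4L2_BUF_TYPE_VIDEO_CAPTURE,
				       V4L2_FIELD_INTERLACED,
				       sizeof(struct videobuf_buffer),
				       dev, NULL /* no ext_lock */);
}

static void mydrv_start_dma(struct mydrv_dev *dev, struct videobuf_buffer *vb)
{
	dma_addr_t addr = videobuf_to_dma_contig(vb);

	/* program the capture engine with the physical address */
	writel(addr, dev->regs + MYDRV_REG_DMA_ADDR);
}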
dma_addr_t videobuf_to_dma_contig(struct videobuf_buffer *buf)
{
	struct videobuf_dma_contig_memory *mem = buf->priv;

	BUG_ON(!mem);
	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

	return mem->dma_handle;
}
EXPORT_SYMBOL_GPL(videobuf_to_dma_contig);

void videobuf_dma_contig_free(struct videobuf_queue *q,
			      struct videobuf_buffer *buf)
{
	struct videobuf_dma_contig_memory *mem = buf->priv;

	/* mmapped memory can't be freed here, otherwise the mmapped region
	   would be released while still needed. In that case, the memory
	   release should happen inside videobuf_vm_close().
	   So, memory should be freed here only if it was allocated for
	   the read() operation.
	 */
	if (buf->memory != V4L2_MEMORY_USERPTR)
		return;

	if (!mem)
		return;

	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

	/* handle user space pointer case */
	if (buf->baddr) {
		videobuf_dma_contig_user_put(mem);
		return;
	}

	/* read() method */
	if (mem->vaddr) {
		__videobuf_dc_free(q->dev, mem);
		mem->vaddr = NULL;
	}
}
EXPORT_SYMBOL_GPL(videobuf_dma_contig_free);

MODULE_DESCRIPTION("helper module to manage video4linux dma contig buffers");
MODULE_AUTHOR("Magnus Damm");
MODULE_LICENSE("GPL");
689
drivers/media/v4l2-core/videobuf-dma-sg.c
Normal file

@@ -0,0 +1,689 @@
/*
 * helper functions for SG DMA video4linux capture buffers
 *
 * The functions expect the hardware being able to scatter gather
 * (i.e. the buffers are not linear in physical memory, but fragmented
 * into PAGE_SIZE chunks). They also assume the driver does not need
 * to touch the video data.
 *
 * (c) 2007 Mauro Carvalho Chehab, <mchehab@infradead.org>
 *
 * Highly based on video-buf written originally by:
 * (c) 2001,02 Gerd Knorr <kraxel@bytesex.org>
 * (c) 2006 Mauro Carvalho Chehab, <mchehab@infradead.org>
 * (c) 2006 Ted Walther and John Sokol
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/interrupt.h>

#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/scatterlist.h>
#include <asm/page.h>
#include <asm/pgtable.h>

#include <media/videobuf-dma-sg.h>

#define MAGIC_DMABUF 0x19721112
#define MAGIC_SG_MEM 0x17890714

#define MAGIC_CHECK(is, should)						\
	if (unlikely((is) != (should))) {				\
		printk(KERN_ERR "magic mismatch: %x (expected %x)\n",	\
				is, should);				\
		BUG();							\
	}

static int debug;
module_param(debug, int, 0644);

MODULE_DESCRIPTION("helper module to manage video4linux dma sg buffers");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>");
MODULE_LICENSE("GPL");

#define dprintk(level, fmt, arg...)					\
	if (debug >= level)						\
		printk(KERN_DEBUG "vbuf-sg: " fmt , ## arg)

/* --------------------------------------------------------------------- */

/*
 * Return a scatterlist for some page-aligned vmalloc()'ed memory
 * block (NULL on errors). Memory for the scatterlist is allocated
 * using kmalloc. The caller must free the memory.
 */
static struct scatterlist *videobuf_vmalloc_to_sg(unsigned char *virt,
						  int nr_pages)
{
	struct scatterlist *sglist;
	struct page *pg;
	int i;

	sglist = vzalloc(nr_pages * sizeof(*sglist));
	if (NULL == sglist)
		return NULL;
	sg_init_table(sglist, nr_pages);
	for (i = 0; i < nr_pages; i++, virt += PAGE_SIZE) {
		pg = vmalloc_to_page(virt);
		if (NULL == pg)
			goto err;
		BUG_ON(PageHighMem(pg));
		sg_set_page(&sglist[i], pg, PAGE_SIZE, 0);
	}
	return sglist;

err:
	vfree(sglist);
	return NULL;
}

/*
 * Return a scatterlist for an array of userpages (NULL on errors).
 * Memory for the scatterlist is allocated using kmalloc. The caller
 * must free the memory.
 */
static struct scatterlist *videobuf_pages_to_sg(struct page **pages,
					int nr_pages, int offset, size_t size)
{
	struct scatterlist *sglist;
	int i;

	if (NULL == pages[0])
		return NULL;
	sglist = vmalloc(nr_pages * sizeof(*sglist));
	if (NULL == sglist)
		return NULL;
	sg_init_table(sglist, nr_pages);

	if (PageHighMem(pages[0]))
		/* DMA to highmem pages might not work */
		goto highmem;
	sg_set_page(&sglist[0], pages[0],
			min_t(size_t, PAGE_SIZE - offset, size), offset);
	size -= min_t(size_t, PAGE_SIZE - offset, size);
	for (i = 1; i < nr_pages; i++) {
		if (NULL == pages[i])
			goto nopage;
		if (PageHighMem(pages[i]))
			goto highmem;
		sg_set_page(&sglist[i], pages[i], min_t(size_t, PAGE_SIZE, size), 0);
		size -= min_t(size_t, PAGE_SIZE, size);
	}
	return sglist;

nopage:
	dprintk(2, "sgl: oops - no page\n");
	vfree(sglist);
	return NULL;

highmem:
	dprintk(2, "sgl: oops - highmem page\n");
	vfree(sglist);
	return NULL;
}

/* --------------------------------------------------------------------- */

struct videobuf_dmabuf *videobuf_to_dma(struct videobuf_buffer *buf)
{
	struct videobuf_dma_sg_memory *mem = buf->priv;
	BUG_ON(!mem);

	MAGIC_CHECK(mem->magic, MAGIC_SG_MEM);

	return &mem->dma;
}
EXPORT_SYMBOL_GPL(videobuf_to_dma);

void videobuf_dma_init(struct videobuf_dmabuf *dma)
{
	memset(dma, 0, sizeof(*dma));
	dma->magic = MAGIC_DMABUF;
}
EXPORT_SYMBOL_GPL(videobuf_dma_init);

static int videobuf_dma_init_user_locked(struct videobuf_dmabuf *dma,
			int direction, unsigned long data, unsigned long size)
{
	unsigned long first, last;
	int err, rw = 0;

	dma->direction = direction;
	switch (dma->direction) {
	case DMA_FROM_DEVICE:
		rw = READ;
		break;
	case DMA_TO_DEVICE:
		rw = WRITE;
		break;
	default:
		BUG();
	}

	first = (data & PAGE_MASK) >> PAGE_SHIFT;
	last  = ((data+size-1) & PAGE_MASK) >> PAGE_SHIFT;
	dma->offset = data & ~PAGE_MASK;
	dma->size = size;
	dma->nr_pages = last-first+1;
	dma->pages = kmalloc(dma->nr_pages * sizeof(struct page *), GFP_KERNEL);
	if (NULL == dma->pages)
		return -ENOMEM;

	dprintk(1, "init user [0x%lx+0x%lx => %d pages]\n",
		data, size, dma->nr_pages);

	err = get_user_pages(current, current->mm,
			     data & PAGE_MASK, dma->nr_pages,
			     rw == READ, 1, /* force */
			     dma->pages, NULL);

	if (err != dma->nr_pages) {
		dma->nr_pages = (err >= 0) ? err : 0;
		dprintk(1, "get_user_pages: err=%d [%d]\n", err, dma->nr_pages);
		return err < 0 ? err : -EINVAL;
	}
	return 0;
}

int videobuf_dma_init_user(struct videobuf_dmabuf *dma, int direction,
			   unsigned long data, unsigned long size)
{
	int ret;

	down_read(&current->mm->mmap_sem);
	ret = videobuf_dma_init_user_locked(dma, direction, data, size);
	up_read(&current->mm->mmap_sem);

	return ret;
}
EXPORT_SYMBOL_GPL(videobuf_dma_init_user);

int videobuf_dma_init_kernel(struct videobuf_dmabuf *dma, int direction,
			     int nr_pages)
{
	int i;

	dprintk(1, "init kernel [%d pages]\n", nr_pages);

	dma->direction = direction;
	dma->vaddr_pages = kcalloc(nr_pages, sizeof(*dma->vaddr_pages),
				   GFP_KERNEL);
	if (!dma->vaddr_pages)
		return -ENOMEM;

	dma->dma_addr = kcalloc(nr_pages, sizeof(*dma->dma_addr), GFP_KERNEL);
	if (!dma->dma_addr) {
		kfree(dma->vaddr_pages);
		return -ENOMEM;
	}
	for (i = 0; i < nr_pages; i++) {
		void *addr;

		addr = dma_alloc_coherent(dma->dev, PAGE_SIZE,
					  &(dma->dma_addr[i]), GFP_KERNEL);
		if (addr == NULL)
			goto out_free_pages;

		dma->vaddr_pages[i] = virt_to_page(addr);
	}
	dma->vaddr = vmap(dma->vaddr_pages, nr_pages, VM_MAP | VM_IOREMAP,
			  PAGE_KERNEL);
	if (NULL == dma->vaddr) {
		dprintk(1, "vmalloc_32(%d pages) failed\n", nr_pages);
		goto out_free_pages;
	}

	dprintk(1, "vmalloc is at addr 0x%08lx, size=%d\n",
		(unsigned long)dma->vaddr,
		nr_pages << PAGE_SHIFT);

	memset(dma->vaddr, 0, nr_pages << PAGE_SHIFT);
	dma->nr_pages = nr_pages;

	return 0;
out_free_pages:
	while (i > 0) {
		void *addr;

		i--;
		addr = page_address(dma->vaddr_pages[i]);
		dma_free_coherent(dma->dev, PAGE_SIZE, addr, dma->dma_addr[i]);
	}
	kfree(dma->dma_addr);
	dma->dma_addr = NULL;
	kfree(dma->vaddr_pages);
	dma->vaddr_pages = NULL;

	return -ENOMEM;

}
EXPORT_SYMBOL_GPL(videobuf_dma_init_kernel);

int videobuf_dma_init_overlay(struct videobuf_dmabuf *dma, int direction,
			      dma_addr_t addr, int nr_pages)
{
	dprintk(1, "init overlay [%d pages @ bus 0x%lx]\n",
		nr_pages, (unsigned long)addr);
	dma->direction = direction;

	if (0 == addr)
		return -EINVAL;

	dma->bus_addr = addr;
	dma->nr_pages = nr_pages;

	return 0;
}
EXPORT_SYMBOL_GPL(videobuf_dma_init_overlay);

int videobuf_dma_map(struct device *dev, struct videobuf_dmabuf *dma)
{
	MAGIC_CHECK(dma->magic, MAGIC_DMABUF);
	BUG_ON(0 == dma->nr_pages);

	if (dma->pages) {
		dma->sglist = videobuf_pages_to_sg(dma->pages, dma->nr_pages,
						   dma->offset, dma->size);
	}
	if (dma->vaddr) {
		dma->sglist = videobuf_vmalloc_to_sg(dma->vaddr,
						     dma->nr_pages);
	}
	if (dma->bus_addr) {
		dma->sglist = vmalloc(sizeof(*dma->sglist));
		if (NULL != dma->sglist) {
			dma->sglen = 1;
			sg_dma_address(&dma->sglist[0]) = dma->bus_addr
							& PAGE_MASK;
			dma->sglist[0].offset = dma->bus_addr & ~PAGE_MASK;
			sg_dma_len(&dma->sglist[0]) = dma->nr_pages * PAGE_SIZE;
		}
	}
	if (NULL == dma->sglist) {
		dprintk(1, "scatterlist is NULL\n");
		return -ENOMEM;
	}
	if (!dma->bus_addr) {
		dma->sglen = dma_map_sg(dev, dma->sglist,
					dma->nr_pages, dma->direction);
		if (0 == dma->sglen) {
			printk(KERN_WARNING
			       "%s: videobuf_map_sg failed\n", __func__);
			vfree(dma->sglist);
			dma->sglist = NULL;
			dma->sglen = 0;
			return -ENOMEM;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(videobuf_dma_map);

int videobuf_dma_unmap(struct device *dev, struct videobuf_dmabuf *dma)
{
	MAGIC_CHECK(dma->magic, MAGIC_DMABUF);

	if (!dma->sglen)
		return 0;

	dma_unmap_sg(dev, dma->sglist, dma->sglen, dma->direction);

	vfree(dma->sglist);
	dma->sglist = NULL;
	dma->sglen = 0;

	return 0;
}
EXPORT_SYMBOL_GPL(videobuf_dma_unmap);

int videobuf_dma_free(struct videobuf_dmabuf *dma)
{
	int i;
	MAGIC_CHECK(dma->magic, MAGIC_DMABUF);
	BUG_ON(dma->sglen);

	if (dma->pages) {
		for (i = 0; i < dma->nr_pages; i++)
			page_cache_release(dma->pages[i]);
		kfree(dma->pages);
		dma->pages = NULL;
	}

	if (dma->dma_addr) {
		for (i = 0; i < dma->nr_pages; i++) {
			void *addr;

			addr = page_address(dma->vaddr_pages[i]);
			dma_free_coherent(dma->dev, PAGE_SIZE, addr,
					  dma->dma_addr[i]);
		}
		kfree(dma->dma_addr);
		dma->dma_addr = NULL;
		kfree(dma->vaddr_pages);
		dma->vaddr_pages = NULL;
		vunmap(dma->vaddr);
		dma->vaddr = NULL;
	}

	if (dma->bus_addr)
		dma->bus_addr = 0;
	dma->direction = DMA_NONE;

	return 0;
}
EXPORT_SYMBOL_GPL(videobuf_dma_free);

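The init/map/unmap/free quartet above forms a simple lifecycle. A hedged sketch of the USERPTR path, with the surrounding driver names hypothetical and error handling reduced to the essentials:

/* Hypothetical lifecycle of a user-pointer DMA buffer: pin the pages,
 * build and map the scatterlist, run the transfer, then tear down. */
static int mydrv_dma_from_user(struct device *dev,
			       unsigned long uaddr, unsigned long len)
{
	struct videobuf_dmabuf dma;
	int ret;

	videobuf_dma_init(&dma);
	ret = videobuf_dma_init_user(&dma, DMA_FROM_DEVICE, uaddr, len);
	if (ret)
		return ret;

	ret = videobuf_dma_map(dev, &dma);
	if (ret)
		goto free;

	/* ... program the hardware with dma.sglist / dma.sglen ... */

	videobuf_dma_unmap(dev, &dma);
free:
	videobuf_dma_free(&dma);	/* also unpins the user pages */
	return ret;
}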
||||
/* --------------------------------------------------------------------- */
|
||||
|
||||
static void videobuf_vm_open(struct vm_area_struct *vma)
|
||||
{
|
||||
struct videobuf_mapping *map = vma->vm_private_data;
|
||||
|
||||
dprintk(2, "vm_open %p [count=%d,vma=%08lx-%08lx]\n", map,
|
||||
map->count, vma->vm_start, vma->vm_end);
|
||||
|
||||
map->count++;
|
||||
}
|
||||
|
||||
static void videobuf_vm_close(struct vm_area_struct *vma)
|
||||
{
|
||||
struct videobuf_mapping *map = vma->vm_private_data;
|
||||
struct videobuf_queue *q = map->q;
|
||||
struct videobuf_dma_sg_memory *mem;
|
||||
int i;
|
||||
|
||||
dprintk(2, "vm_close %p [count=%d,vma=%08lx-%08lx]\n", map,
|
||||
map->count, vma->vm_start, vma->vm_end);
|
||||
|
||||
map->count--;
|
||||
if (0 == map->count) {
|
||||
dprintk(1, "munmap %p q=%p\n", map, q);
|
||||
videobuf_queue_lock(q);
|
||||
for (i = 0; i < VIDEO_MAX_FRAME; i++) {
|
||||
if (NULL == q->bufs[i])
|
||||
continue;
|
||||
mem = q->bufs[i]->priv;
|
||||
if (!mem)
|
||||
continue;
|
||||
|
||||
MAGIC_CHECK(mem->magic, MAGIC_SG_MEM);
|
||||
|
||||
if (q->bufs[i]->map != map)
|
||||
continue;
|
||||
q->bufs[i]->map = NULL;
|
||||
q->bufs[i]->baddr = 0;
|
||||
q->ops->buf_release(q, q->bufs[i]);
|
||||
}
|
||||
videobuf_queue_unlock(q);
|
||||
kfree(map);
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
* Get a anonymous page for the mapping. Make sure we can DMA to that
|
||||
* memory location with 32bit PCI devices (i.e. don't use highmem for
|
||||
* now ...). Bounce buffers don't work very well for the data rates
|
||||
* video capture has.
|
||||
*/
|
||||
static int videobuf_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
|
||||
{
|
||||
struct page *page;
|
||||
|
||||
dprintk(3, "fault: fault @ %08lx [vma %08lx-%08lx]\n",
|
||||
(unsigned long)vmf->virtual_address,
|
||||
vma->vm_start, vma->vm_end);
|
||||
|
||||
page = alloc_page(GFP_USER | __GFP_DMA32);
|
||||
if (!page)
|
||||
return VM_FAULT_OOM;
|
||||
clear_user_highpage(page, (unsigned long)vmf->virtual_address);
|
||||
vmf->page = page;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct vm_operations_struct videobuf_vm_ops = {
|
||||
.open = videobuf_vm_open,
|
||||
.close = videobuf_vm_close,
|
||||
.fault = videobuf_vm_fault,
|
||||
};
|
||||
|
||||
/* ---------------------------------------------------------------------
|
||||
* SG handlers for the generic methods
|
||||
*/
|
||||
|
||||
/* Allocated area consists on 3 parts:
|
||||
struct video_buffer
|
||||
struct <driver>_buffer (cx88_buffer, saa7134_buf, ...)
|
||||
struct videobuf_dma_sg_memory
|
||||
*/
|
||||
|
static struct videobuf_buffer *__videobuf_alloc_vb(size_t size)
{
    struct videobuf_dma_sg_memory *mem;
    struct videobuf_buffer *vb;

    vb = kzalloc(size + sizeof(*mem), GFP_KERNEL);
    if (!vb)
        return vb;

    mem = vb->priv = ((char *)vb) + size;
    mem->magic = MAGIC_SG_MEM;

    videobuf_dma_init(&mem->dma);

    dprintk(1, "%s: allocated at %p(%ld+%ld) & %p(%ld)\n",
        __func__, vb, (long)sizeof(*vb), (long)size - sizeof(*vb),
        mem, (long)sizeof(*mem));

    return vb;
}

static void *__videobuf_to_vaddr(struct videobuf_buffer *buf)
{
    struct videobuf_dma_sg_memory *mem = buf->priv;
    BUG_ON(!mem);

    MAGIC_CHECK(mem->magic, MAGIC_SG_MEM);

    return mem->dma.vaddr;
}

static int __videobuf_iolock(struct videobuf_queue *q,
                 struct videobuf_buffer *vb,
                 struct v4l2_framebuffer *fbuf)
{
    int err, pages;
    dma_addr_t bus;
    struct videobuf_dma_sg_memory *mem = vb->priv;
    BUG_ON(!mem);

    MAGIC_CHECK(mem->magic, MAGIC_SG_MEM);

    if (!mem->dma.dev)
        mem->dma.dev = q->dev;
    else
        WARN_ON(mem->dma.dev != q->dev);

    switch (vb->memory) {
    case V4L2_MEMORY_MMAP:
    case V4L2_MEMORY_USERPTR:
        if (0 == vb->baddr) {
            /* no userspace addr -- kernel bounce buffer */
            pages = PAGE_ALIGN(vb->size) >> PAGE_SHIFT;
            err = videobuf_dma_init_kernel(&mem->dma,
                               DMA_FROM_DEVICE,
                               pages);
            if (0 != err)
                return err;
        } else if (vb->memory == V4L2_MEMORY_USERPTR) {
            /* dma directly to userspace */
            err = videobuf_dma_init_user(&mem->dma,
                             DMA_FROM_DEVICE,
                             vb->baddr, vb->bsize);
            if (0 != err)
                return err;
        } else {
            /* NOTE: HACK: videobuf_iolock on V4L2_MEMORY_MMAP
               buffers can only be called from videobuf_qbuf;
               we take current->mm->mmap_sem there, to prevent
               locking inversion, so don't take it here */
            err = videobuf_dma_init_user_locked(&mem->dma,
                                DMA_FROM_DEVICE,
                                vb->baddr, vb->bsize);
            if (0 != err)
                return err;
        }
        break;
    case V4L2_MEMORY_OVERLAY:
        if (NULL == fbuf)
            return -EINVAL;
        /* FIXME: need sanity checks for vb->boff */
        /*
         * Using a double cast to avoid compiler warnings when
         * building for PAE. Compiler doesn't like direct casting
         * of a 32 bit ptr to 64 bit integer.
         */
        bus = (dma_addr_t)(unsigned long)fbuf->base + vb->boff;
        pages = PAGE_ALIGN(vb->size) >> PAGE_SHIFT;
        err = videobuf_dma_init_overlay(&mem->dma, DMA_FROM_DEVICE,
                        bus, pages);
        if (0 != err)
            return err;
        break;
    default:
        BUG();
    }
    err = videobuf_dma_map(q->dev, &mem->dma);
    if (0 != err)
        return err;

    return 0;
}

static int __videobuf_sync(struct videobuf_queue *q,
               struct videobuf_buffer *buf)
{
    struct videobuf_dma_sg_memory *mem = buf->priv;
    BUG_ON(!mem || !mem->dma.sglen);

    MAGIC_CHECK(mem->magic, MAGIC_SG_MEM);
    MAGIC_CHECK(mem->dma.magic, MAGIC_DMABUF);

    dma_sync_sg_for_cpu(q->dev, mem->dma.sglist,
                mem->dma.sglen, mem->dma.direction);

    return 0;
}

static int __videobuf_mmap_mapper(struct videobuf_queue *q,
                  struct videobuf_buffer *buf,
                  struct vm_area_struct *vma)
{
    struct videobuf_dma_sg_memory *mem = buf->priv;
    struct videobuf_mapping *map;
    unsigned int first, last, size = 0, i;
    int retval;

    retval = -EINVAL;

    BUG_ON(!mem);
    MAGIC_CHECK(mem->magic, MAGIC_SG_MEM);

    /* look for first buffer to map */
    for (first = 0; first < VIDEO_MAX_FRAME; first++) {
        if (buf == q->bufs[first]) {
            size = PAGE_ALIGN(q->bufs[first]->bsize);
            break;
        }
    }

    /* paranoia, should never happen since buf is always valid. */
    if (!size) {
        dprintk(1, "mmap app bug: offset invalid [offset=0x%lx]\n",
            (vma->vm_pgoff << PAGE_SHIFT));
        goto done;
    }

    last = first;

    /* create mapping + update buffer list */
    retval = -ENOMEM;
    map = kmalloc(sizeof(struct videobuf_mapping), GFP_KERNEL);
    if (NULL == map)
        goto done;

    size = 0;
    for (i = first; i <= last; i++) {
        if (NULL == q->bufs[i])
            continue;
        q->bufs[i]->map   = map;
        q->bufs[i]->baddr = vma->vm_start + size;
        size += PAGE_ALIGN(q->bufs[i]->bsize);
    }

    map->count = 1;
    map->q     = q;
    vma->vm_ops = &videobuf_vm_ops;
    vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
    vma->vm_flags &= ~VM_IO; /* using shared anonymous pages */
    vma->vm_private_data = map;
    dprintk(1, "mmap %p: q=%p %08lx-%08lx pgoff %08lx bufs %d-%d\n",
        map, q, vma->vm_start, vma->vm_end, vma->vm_pgoff, first, last);
    retval = 0;

done:
    return retval;
}

static struct videobuf_qtype_ops sg_ops = {
    .magic        = MAGIC_QTYPE_OPS,

    .alloc_vb     = __videobuf_alloc_vb,
    .iolock       = __videobuf_iolock,
    .sync         = __videobuf_sync,
    .mmap_mapper  = __videobuf_mmap_mapper,
    .vaddr        = __videobuf_to_vaddr,
};

void *videobuf_sg_alloc(size_t size)
{
    struct videobuf_queue q;

    /* Required to make the generic handler call __videobuf_alloc_vb */
    q.int_ops = &sg_ops;

    q.msize = size;

    return videobuf_alloc_vb(&q);
}
EXPORT_SYMBOL_GPL(videobuf_sg_alloc);

void videobuf_queue_sg_init(struct videobuf_queue *q,
                const struct videobuf_queue_ops *ops,
                struct device *dev,
                spinlock_t *irqlock,
                enum v4l2_buf_type type,
                enum v4l2_field field,
                unsigned int msize,
                void *priv,
                struct mutex *ext_lock)
{
    videobuf_queue_core_init(q, ops, dev, irqlock, type, field, msize,
                 priv, &sg_ops, ext_lock);
}
EXPORT_SYMBOL_GPL(videobuf_queue_sg_init);

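/*
 * A minimal wiring sketch, assuming a hypothetical capture driver
 * (the mydrv_* names and dev fields are invented, not part of this
 * file):
 *
 *      static const struct videobuf_queue_ops mydrv_video_qops = {
 *          .buf_setup   = mydrv_buf_setup,
 *          .buf_prepare = mydrv_buf_prepare,
 *          .buf_queue   = mydrv_buf_queue,
 *          .buf_release = mydrv_buf_release,
 *      };
 *
 *      videobuf_queue_sg_init(&dev->vidq, &mydrv_video_qops,
 *                             &dev->pci->dev, &dev->slock,
 *                             V4L2_BUF_TYPE_VIDEO_CAPTURE,
 *                             V4L2_FIELD_INTERLACED,
 *                             sizeof(struct mydrv_buffer), dev, NULL);
 */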
398
drivers/media/v4l2-core/videobuf-dvb.c
Normal file
@ -0,0 +1,398 @@
/*
 *
 * some helper functions for simple DVB cards which simply DMA the
 * complete transport stream and let the computer sort everything else
 * (i.e. we are using the software demux, ...). Also uses the
 * video-buf to manage DMA buffers.
 *
 * (c) 2004 Gerd Knorr <kraxel@bytesex.org> [SUSE Labs]
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/kthread.h>
#include <linux/file.h>
#include <linux/slab.h>

#include <linux/freezer.h>

#include <media/videobuf-core.h>
#include <media/videobuf-dvb.h>

/* ------------------------------------------------------------------ */

MODULE_AUTHOR("Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]");
MODULE_LICENSE("GPL");

static unsigned int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "enable debug messages");

#define dprintk(fmt, arg...)    if (debug) \
    printk(KERN_DEBUG "%s/dvb: " fmt, dvb->name, ## arg)

/* ------------------------------------------------------------------ */

static int videobuf_dvb_thread(void *data)
{
    struct videobuf_dvb *dvb = data;
    struct videobuf_buffer *buf;
    unsigned long flags;
    void *outp;

    dprintk("dvb thread started\n");
    set_freezable();
    videobuf_read_start(&dvb->dvbq);

    for (;;) {
        /* fetch next buffer */
        buf = list_entry(dvb->dvbq.stream.next,
                 struct videobuf_buffer, stream);
        list_del(&buf->stream);
        videobuf_waiton(&dvb->dvbq, buf, 0, 1);

        /* no more feeds left or stop_feed() asked us to quit */
        if (0 == dvb->nfeeds)
            break;
        if (kthread_should_stop())
            break;
        try_to_freeze();

        /* feed buffer data to demux */
        outp = videobuf_queue_to_vaddr(&dvb->dvbq, buf);

        if (buf->state == VIDEOBUF_DONE)
            dvb_dmx_swfilter(&dvb->demux, outp,
                     buf->size);

        /* requeue buffer */
        list_add_tail(&buf->stream, &dvb->dvbq.stream);
        spin_lock_irqsave(dvb->dvbq.irqlock, flags);
        dvb->dvbq.ops->buf_queue(&dvb->dvbq, buf);
        spin_unlock_irqrestore(dvb->dvbq.irqlock, flags);
    }

    videobuf_read_stop(&dvb->dvbq);
    dprintk("dvb thread stopped\n");

    /* Hmm, linux becomes *very* unhappy without this ... */
    while (!kthread_should_stop()) {
        set_current_state(TASK_INTERRUPTIBLE);
        schedule();
    }
    return 0;
}

static int videobuf_dvb_start_feed(struct dvb_demux_feed *feed)
{
    struct dvb_demux *demux = feed->demux;
    struct videobuf_dvb *dvb = demux->priv;
    int rc;

    if (!demux->dmx.frontend)
        return -EINVAL;

    mutex_lock(&dvb->lock);
    dvb->nfeeds++;
    rc = dvb->nfeeds;

    if (NULL != dvb->thread)
        goto out;
    dvb->thread = kthread_run(videobuf_dvb_thread,
                  dvb, "%s dvb", dvb->name);
    if (IS_ERR(dvb->thread)) {
        rc = PTR_ERR(dvb->thread);
        dvb->thread = NULL;
    }

out:
    mutex_unlock(&dvb->lock);
    return rc;
}

static int videobuf_dvb_stop_feed(struct dvb_demux_feed *feed)
{
    struct dvb_demux *demux = feed->demux;
    struct videobuf_dvb *dvb = demux->priv;
    int err = 0;

    mutex_lock(&dvb->lock);
    dvb->nfeeds--;
    if (0 == dvb->nfeeds && NULL != dvb->thread) {
        err = kthread_stop(dvb->thread);
        dvb->thread = NULL;
    }
    mutex_unlock(&dvb->lock);
    return err;
}

static int videobuf_dvb_register_adapter(struct videobuf_dvb_frontends *fe,
                     struct module *module,
                     void *adapter_priv,
                     struct device *device,
                     char *adapter_name,
                     short *adapter_nr,
                     int mfe_shared)
{
    int result;

    mutex_init(&fe->lock);

    /* register adapter */
    result = dvb_register_adapter(&fe->adapter, adapter_name, module,
                      device, adapter_nr);
    if (result < 0) {
        printk(KERN_WARNING "%s: dvb_register_adapter failed (errno = %d)\n",
               adapter_name, result);
    }
    fe->adapter.priv = adapter_priv;
    fe->adapter.mfe_shared = mfe_shared;

    return result;
}

static int videobuf_dvb_register_frontend(struct dvb_adapter *adapter,
                      struct videobuf_dvb *dvb)
{
    int result;

    /* register frontend */
    result = dvb_register_frontend(adapter, dvb->frontend);
    if (result < 0) {
        printk(KERN_WARNING "%s: dvb_register_frontend failed (errno = %d)\n",
               dvb->name, result);
        goto fail_frontend;
    }

    /* register demux stuff */
    dvb->demux.dmx.capabilities =
        DMX_TS_FILTERING | DMX_SECTION_FILTERING |
        DMX_MEMORY_BASED_FILTERING;
    dvb->demux.priv       = dvb;
    dvb->demux.filternum  = 256;
    dvb->demux.feednum    = 256;
    dvb->demux.start_feed = videobuf_dvb_start_feed;
    dvb->demux.stop_feed  = videobuf_dvb_stop_feed;
    result = dvb_dmx_init(&dvb->demux);
    if (result < 0) {
        printk(KERN_WARNING "%s: dvb_dmx_init failed (errno = %d)\n",
               dvb->name, result);
        goto fail_dmx;
    }

    dvb->dmxdev.filternum    = 256;
    dvb->dmxdev.demux        = &dvb->demux.dmx;
    dvb->dmxdev.capabilities = 0;
    result = dvb_dmxdev_init(&dvb->dmxdev, adapter);

    if (result < 0) {
        printk(KERN_WARNING "%s: dvb_dmxdev_init failed (errno = %d)\n",
               dvb->name, result);
        goto fail_dmxdev;
    }

    dvb->fe_hw.source = DMX_FRONTEND_0;
    result = dvb->demux.dmx.add_frontend(&dvb->demux.dmx, &dvb->fe_hw);
    if (result < 0) {
        printk(KERN_WARNING "%s: add_frontend failed (DMX_FRONTEND_0, errno = %d)\n",
               dvb->name, result);
        goto fail_fe_hw;
    }

    dvb->fe_mem.source = DMX_MEMORY_FE;
    result = dvb->demux.dmx.add_frontend(&dvb->demux.dmx, &dvb->fe_mem);
    if (result < 0) {
        printk(KERN_WARNING "%s: add_frontend failed (DMX_MEMORY_FE, errno = %d)\n",
               dvb->name, result);
        goto fail_fe_mem;
    }

    result = dvb->demux.dmx.connect_frontend(&dvb->demux.dmx, &dvb->fe_hw);
    if (result < 0) {
        printk(KERN_WARNING "%s: connect_frontend failed (errno = %d)\n",
               dvb->name, result);
        goto fail_fe_conn;
    }

    /* register network adapter */
    result = dvb_net_init(adapter, &dvb->net, &dvb->demux.dmx);
    if (result < 0) {
        printk(KERN_WARNING "%s: dvb_net_init failed (errno = %d)\n",
               dvb->name, result);
        goto fail_fe_conn;
    }
    return 0;

fail_fe_conn:
    dvb->demux.dmx.remove_frontend(&dvb->demux.dmx, &dvb->fe_mem);
fail_fe_mem:
    dvb->demux.dmx.remove_frontend(&dvb->demux.dmx, &dvb->fe_hw);
fail_fe_hw:
    dvb_dmxdev_release(&dvb->dmxdev);
fail_dmxdev:
    dvb_dmx_release(&dvb->demux);
fail_dmx:
    dvb_unregister_frontend(dvb->frontend);
fail_frontend:
    dvb_frontend_detach(dvb->frontend);
    dvb->frontend = NULL;

    return result;
}

/* ------------------------------------------------------------------ */
/* Register a single adapter and one or more frontends */
int videobuf_dvb_register_bus(struct videobuf_dvb_frontends *f,
                  struct module *module,
                  void *adapter_priv,
                  struct device *device,
                  short *adapter_nr,
                  int mfe_shared)
{
    struct list_head *list, *q;
    struct videobuf_dvb_frontend *fe;
    int res;

    fe = videobuf_dvb_get_frontend(f, 1);
    if (!fe) {
        printk(KERN_WARNING "Unable to register the adapter which has no frontends\n");
        return -EINVAL;
    }

    /* Bring up the adapter */
    res = videobuf_dvb_register_adapter(f, module, adapter_priv, device,
                        fe->dvb.name, adapter_nr, mfe_shared);
    if (res < 0) {
        printk(KERN_WARNING "videobuf_dvb_register_adapter failed (errno = %d)\n", res);
        return res;
    }

    /* Attach all of the frontends to the adapter */
    mutex_lock(&f->lock);
    list_for_each_safe(list, q, &f->felist) {
        fe = list_entry(list, struct videobuf_dvb_frontend, felist);
        res = videobuf_dvb_register_frontend(&f->adapter, &fe->dvb);
        if (res < 0) {
            printk(KERN_WARNING "%s: videobuf_dvb_register_frontend failed (errno = %d)\n",
                   fe->dvb.name, res);
            goto err;
        }
    }
    mutex_unlock(&f->lock);
    return 0;

err:
    mutex_unlock(&f->lock);
    videobuf_dvb_unregister_bus(f);
    return res;
}
EXPORT_SYMBOL(videobuf_dvb_register_bus);

void videobuf_dvb_unregister_bus(struct videobuf_dvb_frontends *f)
{
    videobuf_dvb_dealloc_frontends(f);

    dvb_unregister_adapter(&f->adapter);
}
EXPORT_SYMBOL(videobuf_dvb_unregister_bus);
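/*
 * A rough usage sketch, not taken from this file: the expected call
 * order in a hypothetical bridge driver (the mydrv_* names are
 * invented; the struct fields follow <media/videobuf-dvb.h>):
 *
 *      struct videobuf_dvb_frontend *fe;
 *
 *      fe = videobuf_dvb_alloc_frontend(&dev->frontends, 1);
 *      fe->dvb.frontend = dvb_attach(mydrv_demod_attach, &dev->cfg);
 *      videobuf_queue_sg_init(&fe->dvb.dvbq, &mydrv_dvb_qops, ...);
 *      res = videobuf_dvb_register_bus(&dev->frontends, THIS_MODULE,
 *                                      dev, &dev->pci->dev,
 *                                      adapter_nr, 0);
 */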

struct videobuf_dvb_frontend *videobuf_dvb_get_frontend(
    struct videobuf_dvb_frontends *f, int id)
{
    struct list_head *list, *q;
    struct videobuf_dvb_frontend *fe, *ret = NULL;

    mutex_lock(&f->lock);

    list_for_each_safe(list, q, &f->felist) {
        fe = list_entry(list, struct videobuf_dvb_frontend, felist);
        if (fe->id == id) {
            ret = fe;
            break;
        }
    }

    mutex_unlock(&f->lock);

    return ret;
}
EXPORT_SYMBOL(videobuf_dvb_get_frontend);

int videobuf_dvb_find_frontend(struct videobuf_dvb_frontends *f,
                   struct dvb_frontend *p)
{
    struct list_head *list, *q;
    struct videobuf_dvb_frontend *fe = NULL;
    int ret = 0;

    mutex_lock(&f->lock);

    list_for_each_safe(list, q, &f->felist) {
        fe = list_entry(list, struct videobuf_dvb_frontend, felist);
        if (fe->dvb.frontend == p) {
            ret = fe->id;
            break;
        }
    }

    mutex_unlock(&f->lock);

    return ret;
}
EXPORT_SYMBOL(videobuf_dvb_find_frontend);

struct videobuf_dvb_frontend *videobuf_dvb_alloc_frontend(
    struct videobuf_dvb_frontends *f, int id)
{
    struct videobuf_dvb_frontend *fe;

    fe = kzalloc(sizeof(struct videobuf_dvb_frontend), GFP_KERNEL);
    if (fe == NULL)
        goto fail_alloc;

    fe->id = id;
    mutex_init(&fe->dvb.lock);

    mutex_lock(&f->lock);
    list_add_tail(&fe->felist, &f->felist);
    mutex_unlock(&f->lock);

fail_alloc:
    return fe;
}
EXPORT_SYMBOL(videobuf_dvb_alloc_frontend);

void videobuf_dvb_dealloc_frontends(struct videobuf_dvb_frontends *f)
{
    struct list_head *list, *q;
    struct videobuf_dvb_frontend *fe;

    mutex_lock(&f->lock);
    list_for_each_safe(list, q, &f->felist) {
        fe = list_entry(list, struct videobuf_dvb_frontend, felist);
        if (fe->dvb.net.dvbdev) {
            dvb_net_release(&fe->dvb.net);
            fe->dvb.demux.dmx.remove_frontend(&fe->dvb.demux.dmx,
                              &fe->dvb.fe_mem);
            fe->dvb.demux.dmx.remove_frontend(&fe->dvb.demux.dmx,
                              &fe->dvb.fe_hw);
            dvb_dmxdev_release(&fe->dvb.dmxdev);
            dvb_dmx_release(&fe->dvb.demux);
            dvb_unregister_frontend(fe->dvb.frontend);
        }
        if (fe->dvb.frontend)
            /* always allocated, may have been reset */
            dvb_frontend_detach(fe->dvb.frontend);
        list_del(list); /* remove list entry */
        kfree(fe);      /* free frontend allocation */
    }
    mutex_unlock(&f->lock);
}
EXPORT_SYMBOL(videobuf_dvb_dealloc_frontends);
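/*
 * Example lookup for the hypothetical driver above (the mydrv_* name
 * is invented):
 *
 *      struct videobuf_dvb_frontend *fe;
 *
 *      fe = videobuf_dvb_get_frontend(&dev->frontends, 1);
 *      if (fe)
 *          mydrv_configure_demod(fe->dvb.frontend);
 *
 * On unload, a single videobuf_dvb_unregister_bus(&dev->frontends)
 * call releases every allocated frontend through
 * videobuf_dvb_dealloc_frontends() and then drops the adapter.
 */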
349
drivers/media/v4l2-core/videobuf-vmalloc.c
Normal file
@ -0,0 +1,349 @@
/*
 * helper functions for vmalloc video4linux capture buffers
 *
 * The functions expect the hardware to be able to scatter-gather
 * (i.e. the buffers are not linear in physical memory, but fragmented
 * into PAGE_SIZE chunks). They also assume the driver does not need
 * to touch the video data.
 *
 * (c) 2007 Mauro Carvalho Chehab, <mchehab@infradead.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/interrupt.h>

#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <asm/page.h>
#include <asm/pgtable.h>

#include <media/videobuf-vmalloc.h>

#define MAGIC_DMABUF   0x17760309
#define MAGIC_VMAL_MEM 0x18221223

#define MAGIC_CHECK(is, should)                                         \
    if (unlikely((is) != (should))) {                                   \
        printk(KERN_ERR "magic mismatch: %x (expected %x)\n",           \
            is, should);                                                \
        BUG();                                                          \
    }

static int debug;
module_param(debug, int, 0644);

MODULE_DESCRIPTION("helper module to manage video4linux vmalloc buffers");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>");
MODULE_LICENSE("GPL");

#define dprintk(level, fmt, arg...)                                     \
    if (debug >= level)                                                 \
        printk(KERN_DEBUG "vbuf-vmalloc: " fmt, ## arg)


/***************************************************************************/

static void videobuf_vm_open(struct vm_area_struct *vma)
{
    struct videobuf_mapping *map = vma->vm_private_data;

    dprintk(2, "vm_open %p [count=%u,vma=%08lx-%08lx]\n", map,
        map->count, vma->vm_start, vma->vm_end);

    map->count++;
}

static void videobuf_vm_close(struct vm_area_struct *vma)
{
    struct videobuf_mapping *map = vma->vm_private_data;
    struct videobuf_queue *q = map->q;
    int i;

    dprintk(2, "vm_close %p [count=%u,vma=%08lx-%08lx]\n", map,
        map->count, vma->vm_start, vma->vm_end);

    map->count--;
    if (0 == map->count) {
        struct videobuf_vmalloc_memory *mem;

        dprintk(1, "munmap %p q=%p\n", map, q);
        videobuf_queue_lock(q);

        /* We need first to cancel streams, before unmapping */
        if (q->streaming)
            videobuf_queue_cancel(q);

        for (i = 0; i < VIDEO_MAX_FRAME; i++) {
            if (NULL == q->bufs[i])
                continue;

            if (q->bufs[i]->map != map)
                continue;

            mem = q->bufs[i]->priv;
            if (mem) {
                /* This callback is called only if kernel has
                   allocated memory and this memory is mmapped.
                   In this case, memory should be freed,
                   in order to do memory unmap.
                 */

                MAGIC_CHECK(mem->magic, MAGIC_VMAL_MEM);

                /* vfree is not atomic - can't be
                   called with IRQ's disabled
                 */
                dprintk(1, "%s: buf[%d] freeing (%p)\n",
                    __func__, i, mem->vaddr);

                vfree(mem->vaddr);
                mem->vaddr = NULL;
            }

            q->bufs[i]->map   = NULL;
            q->bufs[i]->baddr = 0;
        }

        kfree(map);

        videobuf_queue_unlock(q);
    }

    return;
}

static const struct vm_operations_struct videobuf_vm_ops = {
    .open  = videobuf_vm_open,
    .close = videobuf_vm_close,
};

/* ---------------------------------------------------------------------
 * vmalloc handlers for the generic methods
 */

/* Allocated area consists of 3 parts:
    struct video_buffer
    struct <driver>_buffer (cx88_buffer, saa7134_buf, ...)
    struct videobuf_vmalloc_memory
 */

static struct videobuf_buffer *__videobuf_alloc_vb(size_t size)
{
    struct videobuf_vmalloc_memory *mem;
    struct videobuf_buffer *vb;

    vb = kzalloc(size + sizeof(*mem), GFP_KERNEL);
    if (!vb)
        return vb;

    mem = vb->priv = ((char *)vb) + size;
    mem->magic = MAGIC_VMAL_MEM;

    dprintk(1, "%s: allocated at %p(%ld+%ld) & %p(%ld)\n",
        __func__, vb, (long)sizeof(*vb), (long)size - sizeof(*vb),
        mem, (long)sizeof(*mem));

    return vb;
}

static int __videobuf_iolock(struct videobuf_queue *q,
                 struct videobuf_buffer *vb,
                 struct v4l2_framebuffer *fbuf)
{
    struct videobuf_vmalloc_memory *mem = vb->priv;
    int pages;

    BUG_ON(!mem);

    MAGIC_CHECK(mem->magic, MAGIC_VMAL_MEM);

    switch (vb->memory) {
    case V4L2_MEMORY_MMAP:
        dprintk(1, "%s memory method MMAP\n", __func__);

        /* All handling should be done by __videobuf_mmap_mapper() */
        if (!mem->vaddr) {
            printk(KERN_ERR "memory is not alloced/mmapped.\n");
            return -EINVAL;
        }
        break;
    case V4L2_MEMORY_USERPTR:
        pages = PAGE_ALIGN(vb->size);

        dprintk(1, "%s memory method USERPTR\n", __func__);

        if (vb->baddr) {
            printk(KERN_ERR "USERPTR is currently not supported\n");
            return -EINVAL;
        }

        /* The only USERPTR currently supported is the one needed for
         * read() method.
         */

        mem->vaddr = vmalloc_user(pages);
        if (!mem->vaddr) {
            printk(KERN_ERR "vmalloc (%d pages) failed\n", pages);
            return -ENOMEM;
        }
        dprintk(1, "vmalloc is at addr %p (%d pages)\n",
            mem->vaddr, pages);

#if 0
        int rc;
        /* Kernel userptr is used also by read() method. In this case,
           there's no need to remap, since data will be copied to user
         */
        if (!vb->baddr)
            return 0;

        /* FIXME: to properly support USERPTR, remap should occur.
           The code below won't work, since mem->vma = NULL
         */
        /* Try to remap memory */
        rc = remap_vmalloc_range(mem->vma, (void *)vb->baddr, 0);
        if (rc < 0) {
            printk(KERN_ERR "mmap: remap failed with error %d", rc);
            return -ENOMEM;
        }
#endif

        break;
    case V4L2_MEMORY_OVERLAY:
    default:
        dprintk(1, "%s memory method OVERLAY/unknown\n", __func__);

        /* Currently, doesn't support V4L2_MEMORY_OVERLAY */
        printk(KERN_ERR "Memory method currently unsupported.\n");
        return -EINVAL;
    }

    return 0;
}

static int __videobuf_mmap_mapper(struct videobuf_queue *q,
                  struct videobuf_buffer *buf,
                  struct vm_area_struct *vma)
{
    struct videobuf_vmalloc_memory *mem;
    struct videobuf_mapping *map;
    int retval, pages;

    dprintk(1, "%s\n", __func__);

    /* create mapping + update buffer list */
    map = kzalloc(sizeof(struct videobuf_mapping), GFP_KERNEL);
    if (NULL == map)
        return -ENOMEM;

    buf->map = map;
    map->q = q;

    buf->baddr = vma->vm_start;

    mem = buf->priv;
    BUG_ON(!mem);
    MAGIC_CHECK(mem->magic, MAGIC_VMAL_MEM);

    pages = PAGE_ALIGN(vma->vm_end - vma->vm_start);
    mem->vaddr = vmalloc_user(pages);
    if (!mem->vaddr) {
        printk(KERN_ERR "vmalloc (%d pages) failed\n", pages);
        goto error;
    }
    dprintk(1, "vmalloc is at addr %p (%d pages)\n", mem->vaddr, pages);

    /* Try to remap memory */
    retval = remap_vmalloc_range(vma, mem->vaddr, 0);
    if (retval < 0) {
        printk(KERN_ERR "mmap: remap failed with error %d. ", retval);
        vfree(mem->vaddr);
        goto error;
    }

    vma->vm_ops          = &videobuf_vm_ops;
    vma->vm_flags       |= VM_DONTEXPAND | VM_DONTDUMP;
    vma->vm_private_data = map;

    dprintk(1, "mmap %p: q=%p %08lx-%08lx (%lx) pgoff %08lx buf %d\n",
        map, q, vma->vm_start, vma->vm_end,
        (long int)buf->bsize,
        vma->vm_pgoff, buf->i);

    videobuf_vm_open(vma);

    return 0;

error:
    mem = NULL;
    kfree(map);
    return -ENOMEM;
}

static struct videobuf_qtype_ops qops = {
    .magic        = MAGIC_QTYPE_OPS,

    .alloc_vb     = __videobuf_alloc_vb,
    .iolock       = __videobuf_iolock,
    .mmap_mapper  = __videobuf_mmap_mapper,
    .vaddr        = videobuf_to_vmalloc,
};

void videobuf_queue_vmalloc_init(struct videobuf_queue *q,
                 const struct videobuf_queue_ops *ops,
                 struct device *dev,
                 spinlock_t *irqlock,
                 enum v4l2_buf_type type,
                 enum v4l2_field field,
                 unsigned int msize,
                 void *priv,
                 struct mutex *ext_lock)
{
    videobuf_queue_core_init(q, ops, dev, irqlock, type, field, msize,
                 priv, &qops, ext_lock);
}
EXPORT_SYMBOL_GPL(videobuf_queue_vmalloc_init);

void *videobuf_to_vmalloc(struct videobuf_buffer *buf)
{
    struct videobuf_vmalloc_memory *mem = buf->priv;
    BUG_ON(!mem);
    MAGIC_CHECK(mem->magic, MAGIC_VMAL_MEM);

    return mem->vaddr;
}
EXPORT_SYMBOL_GPL(videobuf_to_vmalloc);

void videobuf_vmalloc_free(struct videobuf_buffer *buf)
{
    struct videobuf_vmalloc_memory *mem = buf->priv;

    /* mmapped memory can't be freed here, otherwise the mmapped region
       would be released, while still needed. In this case, the memory
       release should happen inside videobuf_vm_close().
       So, it should free memory only if the memory was allocated for
       a read() operation.
     */
    if ((buf->memory != V4L2_MEMORY_USERPTR) || buf->baddr)
        return;

    if (!mem)
        return;

    MAGIC_CHECK(mem->magic, MAGIC_VMAL_MEM);

    vfree(mem->vaddr);
    mem->vaddr = NULL;

    return;
}
EXPORT_SYMBOL_GPL(videobuf_vmalloc_free);

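/*
 * A hedged usage sketch (the mydrv/dev names are invented): a driver
 * filling a capture buffer from its interrupt bottom half would do
 *
 *      void *dst = videobuf_to_vmalloc(vb);
 *
 *      if (dst) {
 *          memcpy(dst, dev->bounce, vb->size);
 *          vb->state = VIDEOBUF_DONE;
 *          wake_up(&vb->done);
 *      }
 *
 * videobuf_vmalloc_free() is then only needed for read()-path buffers;
 * mmapped ones are released in videobuf_vm_close() above.
 */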
3582
drivers/media/v4l2-core/videobuf2-core.c
Normal file
File diff suppressed because it is too large
859
drivers/media/v4l2-core/videobuf2-dma-contig.c
Normal file
@ -0,0 +1,859 @@
/*
 * videobuf2-dma-contig.c - DMA contig memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/dma-buf.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include <media/videobuf2-core.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-memops.h>

struct vb2_dc_conf {
    struct device *dev;
};

struct vb2_dc_buf {
    struct device *dev;
    void *vaddr;
    unsigned long size;
    dma_addr_t dma_addr;
    enum dma_data_direction dma_dir;
    struct sg_table *dma_sgt;

    /* MMAP related */
    struct vb2_vmarea_handler handler;
    atomic_t refcount;
    struct sg_table *sgt_base;

    /* USERPTR related */
    struct vm_area_struct *vma;

    /* DMABUF related */
    struct dma_buf_attachment *db_attach;
};

/*********************************************/
/*        scatterlist table functions        */
/*********************************************/


static void vb2_dc_sgt_foreach_page(struct sg_table *sgt,
    void (*cb)(struct page *pg))
{
    struct scatterlist *s;
    unsigned int i;

    for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
        struct page *page = sg_page(s);
        unsigned int n_pages = PAGE_ALIGN(s->offset + s->length)
            >> PAGE_SHIFT;
        unsigned int j;

        for (j = 0; j < n_pages; ++j, ++page)
            cb(page);
    }
}

static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
{
    struct scatterlist *s;
    dma_addr_t expected = sg_dma_address(sgt->sgl);
    unsigned int i;
    unsigned long size = 0;

    for_each_sg(sgt->sgl, s, sgt->nents, i) {
        if (sg_dma_address(s) != expected)
            break;
        expected = sg_dma_address(s) + sg_dma_len(s);
        size += sg_dma_len(s);
    }
    return size;
}
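/*
 * Worked example (illustrative numbers only): for DMA segments
 * [addr 0x1000, len 0x1000], [addr 0x2000, len 0x2000] and
 * [addr 0x8000, len 0x1000], the loop accepts the first two (the first
 * ends exactly where the second starts), stops at the gap before
 * 0x8000, and reports a contiguous size of 0x3000.
 */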

/*********************************************/
/*         callbacks for all buffers         */
/*********************************************/

static void *vb2_dc_cookie(void *buf_priv)
{
    struct vb2_dc_buf *buf = buf_priv;

    return &buf->dma_addr;
}

static void *vb2_dc_vaddr(void *buf_priv)
{
    struct vb2_dc_buf *buf = buf_priv;

    if (!buf->vaddr && buf->db_attach)
        buf->vaddr = dma_buf_vmap(buf->db_attach->dmabuf);

    return buf->vaddr;
}

static unsigned int vb2_dc_num_users(void *buf_priv)
{
    struct vb2_dc_buf *buf = buf_priv;

    return atomic_read(&buf->refcount);
}

static void vb2_dc_prepare(void *buf_priv)
{
    struct vb2_dc_buf *buf = buf_priv;
    struct sg_table *sgt = buf->dma_sgt;

    /* DMABUF exporter will flush the cache for us */
    if (!sgt || buf->db_attach)
        return;

    dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
}

static void vb2_dc_finish(void *buf_priv)
{
    struct vb2_dc_buf *buf = buf_priv;
    struct sg_table *sgt = buf->dma_sgt;

    /* DMABUF exporter will flush the cache for us */
    if (!sgt || buf->db_attach)
        return;

    dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
}

/*********************************************/
/*        callbacks for MMAP buffers         */
/*********************************************/

static void vb2_dc_put(void *buf_priv)
{
    struct vb2_dc_buf *buf = buf_priv;

    if (!atomic_dec_and_test(&buf->refcount))
        return;

    if (buf->sgt_base) {
        sg_free_table(buf->sgt_base);
        kfree(buf->sgt_base);
    }
    dma_free_coherent(buf->dev, buf->size, buf->vaddr, buf->dma_addr);
    put_device(buf->dev);
    kfree(buf);
}

static void *vb2_dc_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_flags)
{
    struct vb2_dc_conf *conf = alloc_ctx;
    struct device *dev = conf->dev;
    struct vb2_dc_buf *buf;

    buf = kzalloc(sizeof *buf, GFP_KERNEL);
    if (!buf)
        return ERR_PTR(-ENOMEM);

    buf->vaddr = dma_alloc_coherent(dev, size, &buf->dma_addr,
                    GFP_KERNEL | gfp_flags);
    if (!buf->vaddr) {
        dev_err(dev, "dma_alloc_coherent of size %ld failed\n", size);
        kfree(buf);
        return ERR_PTR(-ENOMEM);
    }

    /* Prevent the device from being released while the buffer is used */
    buf->dev = get_device(dev);
    buf->size = size;

    buf->handler.refcount = &buf->refcount;
    buf->handler.put = vb2_dc_put;
    buf->handler.arg = buf;

    atomic_inc(&buf->refcount);

    return buf;
}

static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
    struct vb2_dc_buf *buf = buf_priv;
    int ret;

    if (!buf) {
        printk(KERN_ERR "No buffer to map\n");
        return -EINVAL;
    }

    /*
     * dma_mmap_* uses vm_pgoff as in-buffer offset, but we want to
     * map whole buffer
     */
    vma->vm_pgoff = 0;

    ret = dma_mmap_coherent(buf->dev, vma, buf->vaddr,
                buf->dma_addr, buf->size);

    if (ret) {
        pr_err("Remapping memory failed, error: %d\n", ret);
        return ret;
    }

    vma->vm_flags        |= VM_DONTEXPAND | VM_DONTDUMP;
    vma->vm_private_data  = &buf->handler;
    vma->vm_ops           = &vb2_common_vm_ops;

    vma->vm_ops->open(vma);

    pr_debug("%s: mapped dma addr 0x%08lx at 0x%08lx, size %ld\n",
        __func__, (unsigned long)buf->dma_addr, vma->vm_start,
        buf->size);

    return 0;
}
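/*
 * A hedged sketch of how a driver consumes an MMAP buffer allocated
 * above: the cookie is the buffer's dma_addr_t, normally read through
 * the vb2_dma_contig_plane_dma_addr() helper from
 * <media/videobuf2-dma-contig.h>:
 *
 *      dma_addr_t addr = vb2_dma_contig_plane_dma_addr(vb, 0);
 *
 *      mydrv_write_reg(dev, REG_DMA_BASE, addr);
 *
 * (mydrv_write_reg and REG_DMA_BASE are invented names.)
 */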

/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

struct vb2_dc_attachment {
    struct sg_table sgt;
    enum dma_data_direction dir;
};

static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
    struct dma_buf_attachment *dbuf_attach)
{
    struct vb2_dc_attachment *attach;
    unsigned int i;
    struct scatterlist *rd, *wr;
    struct sg_table *sgt;
    struct vb2_dc_buf *buf = dbuf->priv;
    int ret;

    attach = kzalloc(sizeof(*attach), GFP_KERNEL);
    if (!attach)
        return -ENOMEM;

    sgt = &attach->sgt;
    /* Copy the buf->base_sgt scatter list to the attachment, as we can't
     * map the same scatter list to multiple attachments at the same time.
     */
    ret = sg_alloc_table(sgt, buf->sgt_base->orig_nents, GFP_KERNEL);
    if (ret) {
        kfree(attach);
        return -ENOMEM;
    }

    rd = buf->sgt_base->sgl;
    wr = sgt->sgl;
    for (i = 0; i < sgt->orig_nents; ++i) {
        sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
        rd = sg_next(rd);
        wr = sg_next(wr);
    }

    attach->dir = DMA_NONE;
    dbuf_attach->priv = attach;

    return 0;
}

static void vb2_dc_dmabuf_ops_detach(struct dma_buf *dbuf,
    struct dma_buf_attachment *db_attach)
{
    struct vb2_dc_attachment *attach = db_attach->priv;
    struct sg_table *sgt;

    if (!attach)
        return;

    sgt = &attach->sgt;

    /* release the scatterlist cache */
    if (attach->dir != DMA_NONE)
        dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
            attach->dir);
    sg_free_table(sgt);
    kfree(attach);
    db_attach->priv = NULL;
}

static struct sg_table *vb2_dc_dmabuf_ops_map(
    struct dma_buf_attachment *db_attach, enum dma_data_direction dir)
{
    struct vb2_dc_attachment *attach = db_attach->priv;
    /* stealing dmabuf mutex to serialize map/unmap operations */
    struct mutex *lock = &db_attach->dmabuf->lock;
    struct sg_table *sgt;
    int ret;

    mutex_lock(lock);

    sgt = &attach->sgt;
    /* return previously mapped sg table */
    if (attach->dir == dir) {
        mutex_unlock(lock);
        return sgt;
    }

    /* release any previous cache */
    if (attach->dir != DMA_NONE) {
        dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
            attach->dir);
        attach->dir = DMA_NONE;
    }

    /* mapping to the client with new direction */
    ret = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, dir);
    if (ret <= 0) {
        pr_err("failed to map scatterlist\n");
        mutex_unlock(lock);
        return ERR_PTR(-EIO);
    }

    attach->dir = dir;

    mutex_unlock(lock);

    return sgt;
}

static void vb2_dc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
    struct sg_table *sgt, enum dma_data_direction dir)
{
    /* nothing to be done here */
}

static void vb2_dc_dmabuf_ops_release(struct dma_buf *dbuf)
{
    /* drop reference obtained in vb2_dc_get_dmabuf */
    vb2_dc_put(dbuf->priv);
}

static void *vb2_dc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
{
    struct vb2_dc_buf *buf = dbuf->priv;

    return buf->vaddr + pgnum * PAGE_SIZE;
}

static void *vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf)
{
    struct vb2_dc_buf *buf = dbuf->priv;

    return buf->vaddr;
}

static int vb2_dc_dmabuf_ops_mmap(struct dma_buf *dbuf,
    struct vm_area_struct *vma)
{
    return vb2_dc_mmap(dbuf->priv, vma);
}

static struct dma_buf_ops vb2_dc_dmabuf_ops = {
    .attach = vb2_dc_dmabuf_ops_attach,
    .detach = vb2_dc_dmabuf_ops_detach,
    .map_dma_buf = vb2_dc_dmabuf_ops_map,
    .unmap_dma_buf = vb2_dc_dmabuf_ops_unmap,
    .kmap = vb2_dc_dmabuf_ops_kmap,
    .kmap_atomic = vb2_dc_dmabuf_ops_kmap,
    .vmap = vb2_dc_dmabuf_ops_vmap,
    .mmap = vb2_dc_dmabuf_ops_mmap,
    .release = vb2_dc_dmabuf_ops_release,
};

static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
{
    int ret;
    struct sg_table *sgt;

    sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
    if (!sgt) {
        dev_err(buf->dev, "failed to alloc sg table\n");
        return NULL;
    }

    ret = dma_get_sgtable(buf->dev, sgt, buf->vaddr, buf->dma_addr,
        buf->size);
    if (ret < 0) {
        dev_err(buf->dev, "failed to get scatterlist from DMA API\n");
        kfree(sgt);
        return NULL;
    }

    return sgt;
}

static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv, unsigned long flags)
{
    struct vb2_dc_buf *buf = buf_priv;
    struct dma_buf *dbuf;

    if (!buf->sgt_base)
        buf->sgt_base = vb2_dc_get_base_sgt(buf);

    if (WARN_ON(!buf->sgt_base))
        return NULL;

    dbuf = dma_buf_export(buf, &vb2_dc_dmabuf_ops, buf->size, flags, NULL);
    if (IS_ERR(dbuf))
        return NULL;

    /* dmabuf keeps reference to vb2 buffer */
    atomic_inc(&buf->refcount);

    return dbuf;
}
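/*
 * The exporter path above is reached from userspace through
 * VIDIOC_EXPBUF; a hedged userspace sketch (use_dmabuf_fd is an
 * invented placeholder for e.g. importing into a GPU API):
 *
 *      struct v4l2_exportbuffer expbuf = {
 *          .type  = V4L2_BUF_TYPE_VIDEO_CAPTURE,
 *          .index = 0,
 *          .flags = O_CLOEXEC,
 *      };
 *
 *      if (ioctl(fd, VIDIOC_EXPBUF, &expbuf) == 0)
 *          use_dmabuf_fd(expbuf.fd);
 */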

/*********************************************/
/*       callbacks for USERPTR buffers       */
/*********************************************/

static inline int vma_is_io(struct vm_area_struct *vma)
{
    return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
}

static int vb2_dc_get_user_pfn(unsigned long start, int n_pages,
    struct vm_area_struct *vma, unsigned long *res)
{
    unsigned long pfn, start_pfn, prev_pfn;
    unsigned int i;
    int ret;

    if (!vma_is_io(vma))
        return -EFAULT;

    ret = follow_pfn(vma, start, &pfn);
    if (ret)
        return ret;

    start_pfn = pfn;
    start += PAGE_SIZE;

    for (i = 1; i < n_pages; ++i, start += PAGE_SIZE) {
        prev_pfn = pfn;
        ret = follow_pfn(vma, start, &pfn);

        if (ret) {
            pr_err("no page for address %lu\n", start);
            return ret;
        }
        if (pfn != prev_pfn + 1)
            return -EINVAL;
    }

    *res = start_pfn;
    return 0;
}

static int vb2_dc_get_user_pages(unsigned long start, struct page **pages,
    int n_pages, struct vm_area_struct *vma, int write)
{
    if (vma_is_io(vma)) {
        unsigned int i;

        for (i = 0; i < n_pages; ++i, start += PAGE_SIZE) {
            unsigned long pfn;
            int ret = follow_pfn(vma, start, &pfn);

            if (ret) {
                pr_err("no page for address %lu\n", start);
                return ret;
            }

            if (!pfn_valid(pfn))
                return -EINVAL;

            pages[i] = pfn_to_page(pfn);
        }
    } else {
        int n;

        n = get_user_pages(current, current->mm, start & PAGE_MASK,
            n_pages, write, 1, pages, NULL);
        /* negative error means that no page was pinned */
        n = max(n, 0);
        if (n != n_pages) {
            pr_err("got only %d of %d user pages\n", n, n_pages);
            while (n)
                put_page(pages[--n]);
            return -EFAULT;
        }
    }

    return 0;
}

static void vb2_dc_put_dirty_page(struct page *page)
{
    set_page_dirty_lock(page);
    put_page(page);
}

static void vb2_dc_put_userptr(void *buf_priv)
{
    struct vb2_dc_buf *buf = buf_priv;
    struct sg_table *sgt = buf->dma_sgt;

    if (sgt) {
        dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
        if (!vma_is_io(buf->vma))
            vb2_dc_sgt_foreach_page(sgt, vb2_dc_put_dirty_page);

        sg_free_table(sgt);
        kfree(sgt);
    }
    vb2_put_vma(buf->vma);
    kfree(buf);
}

/*
 * For some kinds of reserved memory there might be no struct page available,
 * so all that can be done to support such 'pages' is to try to convert
 * pfn to dma address, or at the last resort just assume that
 * dma address == physical address (as was assumed in earlier versions
 * of videobuf2-dma-contig).
 */

#ifdef __arch_pfn_to_dma
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
    return (dma_addr_t)__arch_pfn_to_dma(dev, pfn);
}
#elif defined(__pfn_to_bus)
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
    return (dma_addr_t)__pfn_to_bus(pfn);
}
#elif defined(__pfn_to_phys)
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
    return (dma_addr_t)__pfn_to_phys(pfn);
}
#else
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
    /* really, we cannot do anything better at this point */
    return (dma_addr_t)(pfn) << PAGE_SHIFT;
}
#endif
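/*
 * Illustrative example for the fallback case: with 4 KiB pages,
 * pfn 0x80000 maps to dma address 0x80000 << 12 = 0x80000000, i.e. the
 * physical address is used as the bus address unchanged.
 */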

static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
    unsigned long size, int write)
{
    struct vb2_dc_conf *conf = alloc_ctx;
    struct vb2_dc_buf *buf;
    unsigned long start;
    unsigned long end;
    unsigned long offset;
    struct page **pages;
    int n_pages;
    int ret = 0;
    struct vm_area_struct *vma;
    struct sg_table *sgt;
    unsigned long contig_size;
    unsigned long dma_align = dma_get_cache_alignment();

    /* Only cache aligned DMA transfers are reliable */
    if (!IS_ALIGNED(vaddr | size, dma_align)) {
        pr_debug("user data must be aligned to %lu bytes\n", dma_align);
        return ERR_PTR(-EINVAL);
    }

    if (!size) {
        pr_debug("size is zero\n");
        return ERR_PTR(-EINVAL);
    }

    buf = kzalloc(sizeof *buf, GFP_KERNEL);
    if (!buf)
        return ERR_PTR(-ENOMEM);

    buf->dev = conf->dev;
    buf->dma_dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

    start = vaddr & PAGE_MASK;
    offset = vaddr & ~PAGE_MASK;
    end = PAGE_ALIGN(vaddr + size);
    n_pages = (end - start) >> PAGE_SHIFT;

    pages = kmalloc(n_pages * sizeof(pages[0]), GFP_KERNEL);
    if (!pages) {
        ret = -ENOMEM;
        pr_err("failed to allocate pages table\n");
        goto fail_buf;
    }

    /* current->mm->mmap_sem is taken by videobuf2 core */
    vma = find_vma(current->mm, vaddr);
    if (!vma) {
        pr_err("no vma for address %lu\n", vaddr);
        ret = -EFAULT;
        goto fail_pages;
    }

    if (vma->vm_end < vaddr + size) {
        pr_err("vma at %lu is too small for %lu bytes\n", vaddr, size);
        ret = -EFAULT;
        goto fail_pages;
    }

    buf->vma = vb2_get_vma(vma);
    if (!buf->vma) {
        pr_err("failed to copy vma\n");
        ret = -ENOMEM;
        goto fail_pages;
    }

    /* extract page list from userspace mapping */
    ret = vb2_dc_get_user_pages(start, pages, n_pages, vma, write);
    if (ret) {
        unsigned long pfn;
        if (vb2_dc_get_user_pfn(start, n_pages, vma, &pfn) == 0) {
            buf->dma_addr = vb2_dc_pfn_to_dma(buf->dev, pfn);
            buf->size = size;
            kfree(pages);
            return buf;
        }

        pr_err("failed to get user pages\n");
        goto fail_vma;
    }

    sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
    if (!sgt) {
        pr_err("failed to allocate sg table\n");
        ret = -ENOMEM;
        goto fail_get_user_pages;
    }

    ret = sg_alloc_table_from_pages(sgt, pages, n_pages,
        offset, size, GFP_KERNEL);
    if (ret) {
        pr_err("failed to initialize sg table\n");
        goto fail_sgt;
    }

    /* pages are no longer needed */
    kfree(pages);
    pages = NULL;

    sgt->nents = dma_map_sg(buf->dev, sgt->sgl, sgt->orig_nents,
        buf->dma_dir);
    if (sgt->nents <= 0) {
        pr_err("failed to map scatterlist\n");
        ret = -EIO;
        goto fail_sgt_init;
    }

    contig_size = vb2_dc_get_contiguous_size(sgt);
    if (contig_size < size) {
        pr_err("contiguous mapping is too small %lu/%lu\n",
            contig_size, size);
        ret = -EFAULT;
        goto fail_map_sg;
    }

    buf->dma_addr = sg_dma_address(sgt->sgl);
    buf->size = size;
    buf->dma_sgt = sgt;

    return buf;

fail_map_sg:
    dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);

fail_sgt_init:
    if (!vma_is_io(buf->vma))
        vb2_dc_sgt_foreach_page(sgt, put_page);
    sg_free_table(sgt);

fail_sgt:
    kfree(sgt);

fail_get_user_pages:
    if (pages && !vma_is_io(buf->vma))
        while (n_pages)
            put_page(pages[--n_pages]);

fail_vma:
    vb2_put_vma(buf->vma);

fail_pages:
    kfree(pages); /* kfree is NULL-proof */

fail_buf:
    kfree(buf);

    return ERR_PTR(ret);
}

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

static int vb2_dc_map_dmabuf(void *mem_priv)
{
    struct vb2_dc_buf *buf = mem_priv;
    struct sg_table *sgt;
    unsigned long contig_size;

    if (WARN_ON(!buf->db_attach)) {
        pr_err("trying to pin a non attached buffer\n");
        return -EINVAL;
    }

    if (WARN_ON(buf->dma_sgt)) {
        pr_err("dmabuf buffer is already pinned\n");
        return 0;
    }

    /* get the associated scatterlist for this buffer */
    sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
    if (IS_ERR(sgt)) {
        pr_err("Error getting dmabuf scatterlist\n");
        return -EINVAL;
    }

    /* checking if dmabuf is big enough to store contiguous chunk */
    contig_size = vb2_dc_get_contiguous_size(sgt);
    if (contig_size < buf->size) {
        pr_err("contiguous chunk is too small %lu/%lu b\n",
            contig_size, buf->size);
        dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
        return -EFAULT;
    }

    buf->dma_addr = sg_dma_address(sgt->sgl);
    buf->dma_sgt = sgt;
    buf->vaddr = NULL;

    return 0;
}

static void vb2_dc_unmap_dmabuf(void *mem_priv)
{
    struct vb2_dc_buf *buf = mem_priv;
    struct sg_table *sgt = buf->dma_sgt;

    if (WARN_ON(!buf->db_attach)) {
        pr_err("trying to unpin a not attached buffer\n");
        return;
    }

    if (WARN_ON(!sgt)) {
        pr_err("dmabuf buffer is already unpinned\n");
        return;
    }

    if (buf->vaddr) {
        dma_buf_vunmap(buf->db_attach->dmabuf, buf->vaddr);
        buf->vaddr = NULL;
    }
    dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);

    buf->dma_addr = 0;
    buf->dma_sgt = NULL;
}

static void vb2_dc_detach_dmabuf(void *mem_priv)
{
    struct vb2_dc_buf *buf = mem_priv;

    /* if vb2 works correctly you should never detach mapped buffer */
    if (WARN_ON(buf->dma_addr))
        vb2_dc_unmap_dmabuf(buf);

    /* detach this attachment */
    dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
    kfree(buf);
}

static void *vb2_dc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
    unsigned long size, int write)
{
    struct vb2_dc_conf *conf = alloc_ctx;
    struct vb2_dc_buf *buf;
    struct dma_buf_attachment *dba;

    if (dbuf->size < size)
        return ERR_PTR(-EFAULT);

    buf = kzalloc(sizeof(*buf), GFP_KERNEL);
    if (!buf)
        return ERR_PTR(-ENOMEM);

    buf->dev = conf->dev;
    /* create attachment for the dmabuf with the user device */
    dba = dma_buf_attach(dbuf, buf->dev);
    if (IS_ERR(dba)) {
        pr_err("failed to attach dmabuf\n");
        kfree(buf);
        return dba;
    }

    buf->dma_dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
    buf->size = size;
    buf->db_attach = dba;

    return buf;
}

/*********************************************/
/*       DMA CONTIG exported functions       */
/*********************************************/

const struct vb2_mem_ops vb2_dma_contig_memops = {
    .alloc          = vb2_dc_alloc,
    .put            = vb2_dc_put,
    .get_dmabuf     = vb2_dc_get_dmabuf,
    .cookie         = vb2_dc_cookie,
    .vaddr          = vb2_dc_vaddr,
    .mmap           = vb2_dc_mmap,
    .get_userptr    = vb2_dc_get_userptr,
    .put_userptr    = vb2_dc_put_userptr,
    .prepare        = vb2_dc_prepare,
    .finish         = vb2_dc_finish,
    .map_dmabuf     = vb2_dc_map_dmabuf,
    .unmap_dmabuf   = vb2_dc_unmap_dmabuf,
    .attach_dmabuf  = vb2_dc_attach_dmabuf,
    .detach_dmabuf  = vb2_dc_detach_dmabuf,
    .num_users      = vb2_dc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_dma_contig_memops);

void *vb2_dma_contig_init_ctx(struct device *dev)
{
    struct vb2_dc_conf *conf;

    conf = kzalloc(sizeof *conf, GFP_KERNEL);
    if (!conf)
        return ERR_PTR(-ENOMEM);

    conf->dev = dev;

    return conf;
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_init_ctx);

void vb2_dma_contig_cleanup_ctx(void *alloc_ctx)
{
    kfree(alloc_ctx);
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_cleanup_ctx);

MODULE_DESCRIPTION("DMA-contig memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");
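/*
 * A hedged wiring sketch for a vb2 driver using this allocator
 * (dev/pdev/q names are invented; struct vb2_queue comes from
 * <media/videobuf2-core.h>):
 *
 *      dev->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
 *      if (IS_ERR(dev->alloc_ctx))
 *          return PTR_ERR(dev->alloc_ctx);
 *
 *      q->mem_ops = &vb2_dma_contig_memops;
 *
 * and, on remove:
 *
 *      vb2_dma_contig_cleanup_ctx(dev->alloc_ctx);
 */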
359
drivers/media/v4l2-core/videobuf2-dma-sg.c
Normal file
@ -0,0 +1,359 @@
/*
 * videobuf2-dma-sg.c - dma scatter/gather memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <media/videobuf2-core.h>
#include <media/videobuf2-memops.h>
#include <media/videobuf2-dma-sg.h>

static int debug;
module_param(debug, int, 0644);

#define dprintk(level, fmt, arg...)                                     \
    do {                                                                \
        if (debug >= level)                                             \
            printk(KERN_DEBUG "vb2-dma-sg: " fmt, ## arg);              \
    } while (0)

struct vb2_dma_sg_buf {
    void *vaddr;
    struct page **pages;
    int write;
    int offset;
    struct sg_table sg_table;
    size_t size;
    unsigned int num_pages;
    atomic_t refcount;
    struct vb2_vmarea_handler handler;
    struct vm_area_struct *vma;
};

static void vb2_dma_sg_put(void *buf_priv);

static int vb2_dma_sg_alloc_compacted(struct vb2_dma_sg_buf *buf,
|
||||
gfp_t gfp_flags)
|
||||
{
|
||||
unsigned int last_page = 0;
|
||||
int size = buf->size;
|
||||
|
||||
while (size > 0) {
|
||||
struct page *pages;
|
||||
int order;
|
||||
int i;
|
||||
|
||||
order = get_order(size);
|
||||
/* Dont over allocate*/
|
||||
if ((PAGE_SIZE << order) > size)
|
||||
order--;
|
||||
|
||||
pages = NULL;
|
||||
while (!pages) {
|
||||
pages = alloc_pages(GFP_KERNEL | __GFP_ZERO |
|
||||
__GFP_NOWARN | gfp_flags, order);
|
||||
if (pages)
|
||||
break;
|
||||
|
||||
if (order == 0) {
|
||||
while (last_page--)
|
||||
__free_page(buf->pages[last_page]);
|
||||
return -ENOMEM;
|
||||
}
|
||||
order--;
|
||||
}
|
||||
|
||||
split_page(pages, order);
|
||||
for (i = 0; i < (1 << order); i++)
|
||||
buf->pages[last_page++] = &pages[i];
|
||||
|
||||
size -= PAGE_SIZE << order;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
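/*
 * Worked example: for a 5-page buffer, get_order() yields 3 (8 pages),
 * which over-allocates, so the first pass drops to order 2 and grabs a
 * 4-page block; the second pass grabs the remaining single page at
 * order 0. Under memory pressure each pass also steps the order down
 * until alloc_pages() succeeds.
 */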
||||
|
||||
static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_flags)
{
	struct vb2_dma_sg_buf *buf;
	int ret;
	int num_pages;

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return NULL;

	buf->vaddr = NULL;
	buf->write = 0;
	buf->offset = 0;
	buf->size = size;
	/* size is already page aligned */
	buf->num_pages = size >> PAGE_SHIFT;

	buf->pages = kzalloc(buf->num_pages * sizeof(struct page *),
			     GFP_KERNEL);
	if (!buf->pages)
		goto fail_pages_array_alloc;

	ret = vb2_dma_sg_alloc_compacted(buf, gfp_flags);
	if (ret)
		goto fail_pages_alloc;

	ret = sg_alloc_table_from_pages(&buf->sg_table, buf->pages,
			buf->num_pages, 0, size, GFP_KERNEL);
	if (ret)
		goto fail_table_alloc;

	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_dma_sg_put;
	buf->handler.arg = buf;

	atomic_inc(&buf->refcount);

	dprintk(1, "%s: Allocated buffer of %d pages\n",
		__func__, buf->num_pages);
	return buf;

fail_table_alloc:
	num_pages = buf->num_pages;
	while (num_pages--)
		__free_page(buf->pages[num_pages]);
fail_pages_alloc:
	kfree(buf->pages);
fail_pages_array_alloc:
	kfree(buf);
	return NULL;
}

static void vb2_dma_sg_put(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	int i = buf->num_pages;

	if (atomic_dec_and_test(&buf->refcount)) {
		dprintk(1, "%s: Freeing buffer of %d pages\n", __func__,
			buf->num_pages);
		if (buf->vaddr)
			vm_unmap_ram(buf->vaddr, buf->num_pages);
		sg_free_table(&buf->sg_table);
		while (--i >= 0)
			__free_page(buf->pages[i]);
		kfree(buf->pages);
		kfree(buf);
	}
}

static inline int vma_is_io(struct vm_area_struct *vma)
{
	return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
}

static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
				    unsigned long size, int write)
{
	struct vb2_dma_sg_buf *buf;
	unsigned long first, last;
	int num_pages_from_user;
	struct vm_area_struct *vma;

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return NULL;

	buf->vaddr = NULL;
	buf->write = write;
	buf->offset = vaddr & ~PAGE_MASK;
	buf->size = size;

	first = (vaddr & PAGE_MASK) >> PAGE_SHIFT;
	last = ((vaddr + size - 1) & PAGE_MASK) >> PAGE_SHIFT;
	buf->num_pages = last - first + 1;

	buf->pages = kzalloc(buf->num_pages * sizeof(struct page *),
			     GFP_KERNEL);
	if (!buf->pages)
		goto userptr_fail_alloc_pages;

	vma = find_vma(current->mm, vaddr);
	if (!vma) {
		dprintk(1, "no vma for address %lu\n", vaddr);
		goto userptr_fail_find_vma;
	}

	if (vma->vm_end < vaddr + size) {
		dprintk(1, "vma at %lu is too small for %lu bytes\n",
			vaddr, size);
		goto userptr_fail_find_vma;
	}

	buf->vma = vb2_get_vma(vma);
	if (!buf->vma) {
		dprintk(1, "failed to copy vma\n");
		goto userptr_fail_find_vma;
	}

	if (vma_is_io(buf->vma)) {
		for (num_pages_from_user = 0;
		     num_pages_from_user < buf->num_pages;
		     ++num_pages_from_user, vaddr += PAGE_SIZE) {
			unsigned long pfn;

			if (follow_pfn(vma, vaddr, &pfn)) {
				dprintk(1, "no page for address %lu\n", vaddr);
				break;
			}
			buf->pages[num_pages_from_user] = pfn_to_page(pfn);
		}
	} else
		num_pages_from_user = get_user_pages(current, current->mm,
					     vaddr & PAGE_MASK,
					     buf->num_pages,
					     write,
					     1, /* force */
					     buf->pages,
					     NULL);

	if (num_pages_from_user != buf->num_pages)
		goto userptr_fail_get_user_pages;

	if (sg_alloc_table_from_pages(&buf->sg_table, buf->pages,
			buf->num_pages, buf->offset, size, 0))
		goto userptr_fail_alloc_table_from_pages;

	return buf;

userptr_fail_alloc_table_from_pages:
userptr_fail_get_user_pages:
	dprintk(1, "get_user_pages requested/got: %d/%d\n",
		buf->num_pages, num_pages_from_user);
	if (!vma_is_io(buf->vma))
		while (--num_pages_from_user >= 0)
			put_page(buf->pages[num_pages_from_user]);
	vb2_put_vma(buf->vma);
userptr_fail_find_vma:
	kfree(buf->pages);
userptr_fail_alloc_pages:
	kfree(buf);
	return NULL;
}

/*
 * @put_userptr: inform the allocator that a USERPTR buffer will no longer
 *		 be used
 */
static void vb2_dma_sg_put_userptr(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	int i = buf->num_pages;

	dprintk(1, "%s: Releasing userspace buffer of %d pages\n",
		__func__, buf->num_pages);
	if (buf->vaddr)
		vm_unmap_ram(buf->vaddr, buf->num_pages);
	sg_free_table(&buf->sg_table);
	while (--i >= 0) {
		if (buf->write)
			set_page_dirty_lock(buf->pages[i]);
		if (!vma_is_io(buf->vma))
			put_page(buf->pages[i]);
	}
	kfree(buf->pages);
	vb2_put_vma(buf->vma);
	kfree(buf);
}

static void *vb2_dma_sg_vaddr(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	BUG_ON(!buf);

	if (!buf->vaddr)
		buf->vaddr = vm_map_ram(buf->pages,
					buf->num_pages,
					-1,
					PAGE_KERNEL);

	/* add offset in case userptr is not page-aligned */
	return buf->vaddr + buf->offset;
}

static unsigned int vb2_dma_sg_num_users(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	return atomic_read(&buf->refcount);
}

static int vb2_dma_sg_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	unsigned long uaddr = vma->vm_start;
	unsigned long usize = vma->vm_end - vma->vm_start;
	int i = 0;

	if (!buf) {
		printk(KERN_ERR "No memory to map\n");
		return -EINVAL;
	}

	do {
		int ret;

		ret = vm_insert_page(vma, uaddr, buf->pages[i++]);
		if (ret) {
			printk(KERN_ERR "Remapping memory, error: %d\n", ret);
			return ret;
		}

		uaddr += PAGE_SIZE;
		usize -= PAGE_SIZE;
	} while (usize > 0);

	/*
	 * Use common vm_area operations to track buffer refcount.
	 */
	vma->vm_private_data = &buf->handler;
	vma->vm_ops = &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	return 0;
}

static void *vb2_dma_sg_cookie(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	return &buf->sg_table;
}

const struct vb2_mem_ops vb2_dma_sg_memops = {
	.alloc		= vb2_dma_sg_alloc,
	.put		= vb2_dma_sg_put,
	.get_userptr	= vb2_dma_sg_get_userptr,
	.put_userptr	= vb2_dma_sg_put_userptr,
	.vaddr		= vb2_dma_sg_vaddr,
	.mmap		= vb2_dma_sg_mmap,
	.num_users	= vb2_dma_sg_num_users,
	.cookie		= vb2_dma_sg_cookie,
};
EXPORT_SYMBOL_GPL(vb2_dma_sg_memops);

MODULE_DESCRIPTION("dma scatter/gather memory handling routines for videobuf2");
MODULE_AUTHOR("Andrzej Pietrasiewicz");
MODULE_LICENSE("GPL");
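
As a usage note: a capture driver opts into this allocator by pointing its vb2_queue at vb2_dma_sg_memops; the cookie above is the buffer's sg_table, reachable through the vb2_dma_sg_plane_desc() helper from videobuf2-dma-sg.h. The sketch below is illustrative only — skel_dev, skel_init_vb2_queue() and skel_start_dma() are hypothetical names, and the vb2_ops callbacks are assumed to exist elsewhere in the driver:

#include <linux/videodev2.h>
#include <media/videobuf2-core.h>
#include <media/videobuf2-dma-sg.h>

/* Hypothetical per-device state; only the vb2_queue member matters here. */
struct skel_dev {
	struct vb2_queue queue;
};

static int skel_init_vb2_queue(struct skel_dev *dev, const struct vb2_ops *ops)
{
	struct vb2_queue *q = &dev->queue;

	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	q->io_modes = VB2_MMAP | VB2_USERPTR;	/* both paths implemented above */
	q->drv_priv = dev;
	q->buf_struct_size = sizeof(struct vb2_buffer);
	q->ops = ops;				/* driver-specific callbacks */
	q->mem_ops = &vb2_dma_sg_memops;	/* select this allocator */

	return vb2_queue_init(q);
}

/* Later, e.g. when queueing a buffer to hardware: the cookie is an sg_table. */
static void skel_start_dma(struct vb2_buffer *vb)
{
	struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);

	/* program the device's scatter-gather DMA engine from sgt->sgl */
	(void)sgt;
}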
336
drivers/media/v4l2-core/videobuf2-dvb.c
Normal file
@ -0,0 +1,336 @@
/*
 *
 * some helper functions for simple DVB cards which simply DMA the
 * complete transport stream and let the computer sort everything else
 * (i.e. we are using the software demux, ...). Also uses the
 * video-buf to manage DMA buffers.
 *
 * (c) 2004 Gerd Knorr <kraxel@bytesex.org> [SUSE Labs]
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>

#include <media/videobuf2-dvb.h>

/* ------------------------------------------------------------------ */

MODULE_AUTHOR("Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]");
MODULE_LICENSE("GPL");

/* ------------------------------------------------------------------ */

static int dvb_fnc(struct vb2_buffer *vb, void *priv)
{
	struct vb2_dvb *dvb = priv;

	dvb_dmx_swfilter(&dvb->demux, vb2_plane_vaddr(vb, 0),
				      vb2_get_plane_payload(vb, 0));
	return 0;
}

static int vb2_dvb_start_feed(struct dvb_demux_feed *feed)
{
	struct dvb_demux *demux = feed->demux;
	struct vb2_dvb *dvb = demux->priv;
	int rc = 0;

	if (!demux->dmx.frontend)
		return -EINVAL;

	mutex_lock(&dvb->lock);
	dvb->nfeeds++;

	if (!dvb->dvbq.threadio) {
		rc = vb2_thread_start(&dvb->dvbq, dvb_fnc, dvb, dvb->name);
		if (rc)
			dvb->nfeeds--;
	}
	if (!rc)
		rc = dvb->nfeeds;
	mutex_unlock(&dvb->lock);
	return rc;
}

static int vb2_dvb_stop_feed(struct dvb_demux_feed *feed)
{
	struct dvb_demux *demux = feed->demux;
	struct vb2_dvb *dvb = demux->priv;
	int err = 0;

	mutex_lock(&dvb->lock);
	dvb->nfeeds--;
	if (0 == dvb->nfeeds)
		err = vb2_thread_stop(&dvb->dvbq);
	mutex_unlock(&dvb->lock);
	return err;
}

static int vb2_dvb_register_adapter(struct vb2_dvb_frontends *fe,
			  struct module *module,
			  void *adapter_priv,
			  struct device *device,
			  char *adapter_name,
			  short *adapter_nr,
			  int mfe_shared)
{
	int result;

	mutex_init(&fe->lock);

	/* register adapter */
	result = dvb_register_adapter(&fe->adapter, adapter_name, module,
		device, adapter_nr);
	if (result < 0) {
		pr_warn("%s: dvb_register_adapter failed (errno = %d)\n",
			adapter_name, result);
	}
	fe->adapter.priv = adapter_priv;
	fe->adapter.mfe_shared = mfe_shared;

	return result;
}

static int vb2_dvb_register_frontend(struct dvb_adapter *adapter,
	struct vb2_dvb *dvb)
{
	int result;

	/* register frontend */
	result = dvb_register_frontend(adapter, dvb->frontend);
	if (result < 0) {
		pr_warn("%s: dvb_register_frontend failed (errno = %d)\n",
			dvb->name, result);
		goto fail_frontend;
	}

	/* register demux stuff */
	dvb->demux.dmx.capabilities =
		DMX_TS_FILTERING | DMX_SECTION_FILTERING |
		DMX_MEMORY_BASED_FILTERING;
	dvb->demux.priv       = dvb;
	dvb->demux.filternum  = 256;
	dvb->demux.feednum    = 256;
	dvb->demux.start_feed = vb2_dvb_start_feed;
	dvb->demux.stop_feed  = vb2_dvb_stop_feed;
	result = dvb_dmx_init(&dvb->demux);
	if (result < 0) {
		pr_warn("%s: dvb_dmx_init failed (errno = %d)\n",
			dvb->name, result);
		goto fail_dmx;
	}

	dvb->dmxdev.filternum    = 256;
	dvb->dmxdev.demux        = &dvb->demux.dmx;
	dvb->dmxdev.capabilities = 0;
	result = dvb_dmxdev_init(&dvb->dmxdev, adapter);

	if (result < 0) {
		pr_warn("%s: dvb_dmxdev_init failed (errno = %d)\n",
			dvb->name, result);
		goto fail_dmxdev;
	}

	dvb->fe_hw.source = DMX_FRONTEND_0;
	result = dvb->demux.dmx.add_frontend(&dvb->demux.dmx, &dvb->fe_hw);
	if (result < 0) {
		pr_warn("%s: add_frontend failed (DMX_FRONTEND_0, errno = %d)\n",
			dvb->name, result);
		goto fail_fe_hw;
	}

	dvb->fe_mem.source = DMX_MEMORY_FE;
	result = dvb->demux.dmx.add_frontend(&dvb->demux.dmx, &dvb->fe_mem);
	if (result < 0) {
		pr_warn("%s: add_frontend failed (DMX_MEMORY_FE, errno = %d)\n",
			dvb->name, result);
		goto fail_fe_mem;
	}

	result = dvb->demux.dmx.connect_frontend(&dvb->demux.dmx, &dvb->fe_hw);
	if (result < 0) {
		pr_warn("%s: connect_frontend failed (errno = %d)\n",
			dvb->name, result);
		goto fail_fe_conn;
	}

	/* register network adapter */
	result = dvb_net_init(adapter, &dvb->net, &dvb->demux.dmx);
	if (result < 0) {
		pr_warn("%s: dvb_net_init failed (errno = %d)\n",
			dvb->name, result);
		goto fail_fe_conn;
	}
	return 0;

fail_fe_conn:
	dvb->demux.dmx.remove_frontend(&dvb->demux.dmx, &dvb->fe_mem);
fail_fe_mem:
	dvb->demux.dmx.remove_frontend(&dvb->demux.dmx, &dvb->fe_hw);
fail_fe_hw:
	dvb_dmxdev_release(&dvb->dmxdev);
fail_dmxdev:
	dvb_dmx_release(&dvb->demux);
fail_dmx:
	dvb_unregister_frontend(dvb->frontend);
fail_frontend:
	dvb_frontend_detach(dvb->frontend);
	dvb->frontend = NULL;

	return result;
}

/* ------------------------------------------------------------------ */
/* Register a single adapter and one or more frontends */
int vb2_dvb_register_bus(struct vb2_dvb_frontends *f,
			 struct module *module,
			 void *adapter_priv,
			 struct device *device,
			 short *adapter_nr,
			 int mfe_shared)
{
	struct list_head *list, *q;
	struct vb2_dvb_frontend *fe;
	int res;

	fe = vb2_dvb_get_frontend(f, 1);
	if (!fe) {
		pr_warn("Unable to register the adapter which has no frontends\n");
		return -EINVAL;
	}

	/* Bring up the adapter */
	res = vb2_dvb_register_adapter(f, module, adapter_priv, device,
		fe->dvb.name, adapter_nr, mfe_shared);
	if (res < 0) {
		pr_warn("vb2_dvb_register_adapter failed (errno = %d)\n", res);
		return res;
	}

	/* Attach all of the frontends to the adapter */
	mutex_lock(&f->lock);
	list_for_each_safe(list, q, &f->felist) {
		fe = list_entry(list, struct vb2_dvb_frontend, felist);
		res = vb2_dvb_register_frontend(&f->adapter, &fe->dvb);
		if (res < 0) {
			pr_warn("%s: vb2_dvb_register_frontend failed (errno = %d)\n",
				fe->dvb.name, res);
			goto err;
		}
	}
	mutex_unlock(&f->lock);
	return 0;

err:
	mutex_unlock(&f->lock);
	vb2_dvb_unregister_bus(f);
	return res;
}
EXPORT_SYMBOL(vb2_dvb_register_bus);

void vb2_dvb_unregister_bus(struct vb2_dvb_frontends *f)
{
	vb2_dvb_dealloc_frontends(f);

	dvb_unregister_adapter(&f->adapter);
}
EXPORT_SYMBOL(vb2_dvb_unregister_bus);

struct vb2_dvb_frontend *vb2_dvb_get_frontend(
	struct vb2_dvb_frontends *f, int id)
{
	struct list_head *list, *q;
	struct vb2_dvb_frontend *fe, *ret = NULL;

	mutex_lock(&f->lock);

	list_for_each_safe(list, q, &f->felist) {
		fe = list_entry(list, struct vb2_dvb_frontend, felist);
		if (fe->id == id) {
			ret = fe;
			break;
		}
	}

	mutex_unlock(&f->lock);

	return ret;
}
EXPORT_SYMBOL(vb2_dvb_get_frontend);

int vb2_dvb_find_frontend(struct vb2_dvb_frontends *f,
	struct dvb_frontend *p)
{
	struct list_head *list, *q;
	struct vb2_dvb_frontend *fe = NULL;
	int ret = 0;

	mutex_lock(&f->lock);

	list_for_each_safe(list, q, &f->felist) {
		fe = list_entry(list, struct vb2_dvb_frontend, felist);
		if (fe->dvb.frontend == p) {
			ret = fe->id;
			break;
		}
	}

	mutex_unlock(&f->lock);

	return ret;
}
EXPORT_SYMBOL(vb2_dvb_find_frontend);

struct vb2_dvb_frontend *vb2_dvb_alloc_frontend(
	struct vb2_dvb_frontends *f, int id)
{
	struct vb2_dvb_frontend *fe;

	fe = kzalloc(sizeof(struct vb2_dvb_frontend), GFP_KERNEL);
	if (fe == NULL)
		return NULL;

	fe->id = id;
	mutex_init(&fe->dvb.lock);

	mutex_lock(&f->lock);
	list_add_tail(&fe->felist, &f->felist);
	mutex_unlock(&f->lock);
	return fe;
}
EXPORT_SYMBOL(vb2_dvb_alloc_frontend);

void vb2_dvb_dealloc_frontends(struct vb2_dvb_frontends *f)
{
	struct list_head *list, *q;
	struct vb2_dvb_frontend *fe;

	mutex_lock(&f->lock);
	list_for_each_safe(list, q, &f->felist) {
		fe = list_entry(list, struct vb2_dvb_frontend, felist);
		if (fe->dvb.net.dvbdev) {
			dvb_net_release(&fe->dvb.net);
			fe->dvb.demux.dmx.remove_frontend(&fe->dvb.demux.dmx,
				&fe->dvb.fe_mem);
			fe->dvb.demux.dmx.remove_frontend(&fe->dvb.demux.dmx,
				&fe->dvb.fe_hw);
			dvb_dmxdev_release(&fe->dvb.dmxdev);
			dvb_dmx_release(&fe->dvb.demux);
			dvb_unregister_frontend(fe->dvb.frontend);
		}
		if (fe->dvb.frontend)
			/* always allocated, may have been reset */
			dvb_frontend_detach(fe->dvb.frontend);
		list_del(list);	/* remove list entry */
		kfree(fe);	/* free frontend allocation */
	}
	mutex_unlock(&f->lock);
}
EXPORT_SYMBOL(vb2_dvb_dealloc_frontends);
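
For reference, the registration helpers above are typically driven as follows. DVB_DEFINE_MOD_OPT_ADAPTER_NR comes from dvbdev.h; my_dvb_init() and the frontend-attach step are hypothetical placeholders for driver code that fills fe->dvb.frontend, fe->dvb.name and the dvbq queue before registering:

#include <linux/mutex.h>
#include <media/videobuf2-dvb.h>

/* Module-level adapter numbering, as DVB drivers conventionally do. */
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);

/*
 * Bring up one adapter with a single frontend. The lock and list must be
 * initialized before vb2_dvb_alloc_frontend(), which takes f->lock.
 */
static int my_dvb_init(struct device *dev, struct vb2_dvb_frontends *f,
		       void *priv)
{
	struct vb2_dvb_frontend *fe;

	mutex_init(&f->lock);
	INIT_LIST_HEAD(&f->felist);

	fe = vb2_dvb_alloc_frontend(f, 1);
	if (!fe)
		return -ENOMEM;

	/* driver-specific: attach demod/tuner, set fe->dvb.name, init dvbq */

	return vb2_dvb_register_bus(f, THIS_MODULE, priv, dev,
				    adapter_nr, 0 /* not mfe-shared */);
}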
930
drivers/media/v4l2-core/videobuf2-ion.c
Normal file
@ -0,0 +1,930 @@
/* linux/drivers/media/video/videobuf2-ion.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com/
 *
 * Implementation of Android ION memory allocator for videobuf2
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/file.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/dma-buf.h>
#include <linux/fs.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>

#include <media/videobuf2-core.h>
#include <media/videobuf2-memops.h>
#include <media/videobuf2-ion.h>

#include <linux/exynos_iovmm.h>

#include <asm/cacheflush.h>

#define vb2ion_err(dev, fmt, ...) \
	dev_err(dev, "VB2ION: " pr_fmt(fmt), ##__VA_ARGS__)

struct vb2_ion_context {
	struct device		*dev;
	struct ion_client	*client;
	unsigned long		alignment;
	long			flags;

	/* protects iommu_active_cnt and protected */
	struct mutex		lock;
	int			iommu_active_cnt;
	bool			protected;
};

struct vb2_ion_buf {
	struct vb2_ion_context		*ctx;
	struct vb2_vmarea_handler	handler;
	struct vm_area_struct		*vma;
	struct ion_handle		*handle;
	struct dma_buf			*dma_buf;
	struct dma_buf_attachment	*attachment;
	enum dma_data_direction		direction;
	void				*kva;
	unsigned long			size;
	atomic_t			ref;
	bool				cached;
	bool				ion;
	struct vb2_ion_cookie		cookie;
};

#define CACHE_FLUSH_ALL_SIZE	SZ_8M
#define DMA_SYNC_SIZE		SZ_512K
#define OUTER_FLUSH_ALL_SIZE	SZ_1M

#define ctx_cached(ctx) (!(ctx->flags & VB2ION_CTX_UNCACHED))
#define ctx_iommu(ctx) (!!(ctx->flags & VB2ION_CTX_IOMMU))

void vb2_ion_set_cached(void *ctx, bool cached)
{
	struct vb2_ion_context *vb2ctx = ctx;

	if (cached)
		vb2ctx->flags &= ~VB2ION_CTX_UNCACHED;
	else
		vb2ctx->flags |= VB2ION_CTX_UNCACHED;
}
EXPORT_SYMBOL(vb2_ion_set_cached);

int vb2_ion_set_alignment(void *ctx, size_t alignment)
{
	struct vb2_ion_context *vb2ctx = ctx;

	if ((alignment != 0) && (alignment < PAGE_SIZE))
		return -EINVAL;

	if (alignment & (alignment - 1))	/* must be a power of two */
		return -EINVAL;

	if (alignment == 0)
		vb2ctx->alignment = PAGE_SIZE;
	else
		vb2ctx->alignment = alignment;

	return 0;
}
EXPORT_SYMBOL(vb2_ion_set_alignment);

void *vb2_ion_create_context(struct device *dev, size_t alignment, long flags)
{
	struct vb2_ion_context *ctx;

	/* non-contiguous memory without H/W virtualization is not supported */
	if ((flags & VB2ION_CTX_VMCONTIG) && !(flags & VB2ION_CTX_IOMMU))
		return ERR_PTR(-EINVAL);

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	ctx->dev = dev;
	ctx->client = ion_client_create(ion_exynos, dev_name(dev));
	if (IS_ERR(ctx->client)) {
		void *retp = ctx->client;
		kfree(ctx);
		return retp;
	}

	vb2_ion_set_alignment(ctx, alignment);
	ctx->flags = flags;
	mutex_init(&ctx->lock);

	return ctx;
}
EXPORT_SYMBOL(vb2_ion_create_context);

void vb2_ion_destroy_context(void *ctx)
{
	struct vb2_ion_context *vb2ctx = ctx;

	mutex_destroy(&vb2ctx->lock);
	ion_client_destroy(vb2ctx->client);
	kfree(vb2ctx);
}
EXPORT_SYMBOL(vb2_ion_destroy_context);

void *vb2_ion_private_alloc(void *alloc_ctx, size_t size)
{
	struct vb2_ion_context *ctx = alloc_ctx;
	struct vb2_ion_buf *buf;
	int heapflags = ion_heapflag(ctx->flags);
	int flags = ion_flag(ctx->flags);
	int ret = 0;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	size = PAGE_ALIGN(size);

	flags |= ctx_cached(ctx) ?
		ION_FLAG_CACHED | ION_FLAG_CACHED_NEEDS_SYNC : 0;

	if ((ion_flag(ctx->flags) & VB2ION_CTX_DRM_VIDEO) ||
			(ion_flag(ctx->flags) & VB2ION_CTX_DRM_MFCFW) ||
			(ion_flag(ctx->flags) & VB2ION_CTX_DRM_STREAM))
		flags |= ION_FLAG_PROTECTED;

	buf->handle = ion_alloc(ctx->client, size, ctx->alignment,
				heapflags, flags);
	if (IS_ERR(buf->handle)) {
		ret = -ENOMEM;
		goto err_alloc;
	}

	buf->dma_buf = ion_share_dma_buf(ctx->client, buf->handle);
	if (IS_ERR(buf->dma_buf)) {
		ret = PTR_ERR(buf->dma_buf);
		goto err_share;
	}

	buf->attachment = dma_buf_attach(buf->dma_buf, ctx->dev);
	if (IS_ERR(buf->attachment)) {
		ret = PTR_ERR(buf->attachment);
		goto err_attach;
	}

	buf->cookie.sgt = dma_buf_map_attachment(
				buf->attachment, DMA_BIDIRECTIONAL);
	if (IS_ERR(buf->cookie.sgt)) {
		ret = PTR_ERR(buf->cookie.sgt);
		goto err_map_dmabuf;
	}

	buf->ctx = ctx;
	buf->size = size;
	buf->cached = ctx_cached(ctx);
	buf->direction = DMA_BIDIRECTIONAL;
	buf->ion = true;

	mutex_lock(&ctx->lock);
	if (ctx_iommu(ctx) && !ctx->protected) {
		buf->cookie.ioaddr = ion_iovmm_map(buf->attachment, 0,
						   buf->size, 0, 0);
		if (IS_ERR_VALUE(buf->cookie.ioaddr)) {
			ret = (int)buf->cookie.ioaddr;
			mutex_unlock(&ctx->lock);
			goto err_ion_map_io;
		}
	}
	mutex_unlock(&ctx->lock);

	return &buf->cookie;

err_ion_map_io:
	if (buf->kva)
		ion_unmap_kernel(ctx->client, buf->handle);
	dma_buf_unmap_attachment(buf->attachment, buf->cookie.sgt,
				 DMA_BIDIRECTIONAL);
err_map_dmabuf:
	dma_buf_detach(buf->dma_buf, buf->attachment);
err_attach:
	dma_buf_put(buf->dma_buf);
err_share:
	ion_free(ctx->client, buf->handle);
err_alloc:
	kfree(buf);

	pr_err("%s: Error occurred while allocating\n", __func__);
	return ERR_PTR(ret);
}

void vb2_ion_private_free(void *cookie)
{
	struct vb2_ion_buf *buf =
			container_of(cookie, struct vb2_ion_buf, cookie);
	struct vb2_ion_context *ctx;

	if (WARN_ON(IS_ERR_OR_NULL(cookie)))
		return;

	ctx = buf->ctx;
	mutex_lock(&ctx->lock);
	if (ctx_iommu(ctx) && !ctx->protected)
		ion_iovmm_unmap(buf->attachment, buf->cookie.ioaddr);
	mutex_unlock(&ctx->lock);

	dma_buf_unmap_attachment(buf->attachment, buf->cookie.sgt,
				 DMA_BIDIRECTIONAL);
	dma_buf_detach(buf->dma_buf, buf->attachment);
	dma_buf_put(buf->dma_buf);

	if (buf->kva)
		ion_unmap_kernel(ctx->client, buf->handle);
	ion_free(ctx->client, buf->handle);

	kfree(buf);
}

static void vb2_ion_put(void *buf_priv)
{
	struct vb2_ion_buf *buf = buf_priv;

	if (atomic_dec_and_test(&buf->ref))
		vb2_ion_private_free(&buf->cookie);
}

static void *vb2_ion_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_flags)
{
	struct vb2_ion_buf *buf;
	void *cookie;

	cookie = vb2_ion_private_alloc(alloc_ctx, size);
	if (IS_ERR(cookie))
		return cookie;

	buf = container_of(cookie, struct vb2_ion_buf, cookie);

	buf->handler.refcount = &buf->ref;
	buf->handler.put = vb2_ion_put;
	buf->handler.arg = buf;
	atomic_set(&buf->ref, 1);

	return buf;
}

void *vb2_ion_private_vaddr(void *cookie)
{
	struct vb2_ion_buf *buf =
			container_of(cookie, struct vb2_ion_buf, cookie);
	if (WARN_ON(IS_ERR_OR_NULL(cookie)))
		return NULL;

	if (!buf->kva) {
		buf->kva = ion_map_kernel(buf->ctx->client, buf->handle);
		if (IS_ERR_OR_NULL(buf->kva))
			buf->kva = NULL;
		else	/* only offset a valid mapping */
			buf->kva += buf->cookie.offset;
	}

	return buf->kva;
}

static void *vb2_ion_cookie(void *buf_priv)
{
	struct vb2_ion_buf *buf = buf_priv;

	if (WARN_ON(!buf))
		return NULL;

	return (void *)&buf->cookie;
}

static void *vb2_ion_vaddr(void *buf_priv)
{
	struct vb2_ion_buf *buf = buf_priv;

	if (WARN_ON(!buf))
		return NULL;

	if (buf->kva != NULL)
		return buf->kva;

	if (buf->handle)
		return vb2_ion_private_vaddr(&buf->cookie);

	if (dma_buf_begin_cpu_access(buf->dma_buf,
		0, buf->size, buf->direction))
		return NULL;

	buf->kva = dma_buf_kmap(buf->dma_buf, buf->cookie.offset / PAGE_SIZE);

	if (buf->kva == NULL)
		dma_buf_end_cpu_access(buf->dma_buf, 0,
			buf->size, buf->direction);
	else
		buf->kva += buf->cookie.offset & ~PAGE_MASK;

	return buf->kva;
}

static unsigned int vb2_ion_num_users(void *buf_priv)
{
	struct vb2_ion_buf *buf = buf_priv;

	if (WARN_ON(!buf))
		return 0;

	return atomic_read(&buf->ref);
}

static int vb2_ion_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_ion_buf *buf = buf_priv;
	unsigned long vm_start = vma->vm_start;
	unsigned long vm_end = vma->vm_end;
	struct scatterlist *sg = buf->cookie.sgt->sgl;
	unsigned long size;
	int ret = -EINVAL;

	if (buf->size < (vm_end - vm_start))
		return ret;

	if (!buf->cached)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	size = min_t(size_t, vm_end - vm_start, sg_dma_len(sg));

	ret = remap_pfn_range(vma, vm_start, page_to_pfn(sg_page(sg)),
				size, vma->vm_page_prot);

	for (sg = sg_next(sg), vm_start += size;
			!ret && sg && (vm_start < vm_end);
			vm_start += size, sg = sg_next(sg)) {
		size = min_t(size_t, vm_end - vm_start, sg_dma_len(sg));
		ret = remap_pfn_range(vma, vm_start, page_to_pfn(sg_page(sg)),
					size, vma->vm_page_prot);
	}

	if (ret)
		return ret;

	if (vm_start < vm_end)
		return -EINVAL;

	vma->vm_flags |= VM_DONTEXPAND;
	vma->vm_private_data = &buf->handler;
	vma->vm_ops = &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	return ret;
}

static int vb2_ion_map_dmabuf(void *mem_priv)
{
	struct vb2_ion_buf *buf = mem_priv;
	struct vb2_ion_context *ctx = buf->ctx;

	if (WARN_ON(!buf->attachment)) {
		pr_err("trying to pin a non attached buffer\n");
		return -EINVAL;
	}

	if (WARN_ON(buf->cookie.sgt)) {
		pr_err("dmabuf buffer is already pinned\n");
		return 0;
	}

	/* get the associated scatterlist for this buffer */
	buf->cookie.sgt = dma_buf_map_attachment(buf->attachment,
						 buf->direction);
	if (IS_ERR_OR_NULL(buf->cookie.sgt)) {
		pr_err("Error getting dmabuf scatterlist\n");
		return -EINVAL;
	}

	buf->cookie.offset = 0;
	buf->cookie.paddr = sg_phys(buf->cookie.sgt->sgl) + buf->cookie.offset;

	mutex_lock(&ctx->lock);
	if (ctx_iommu(ctx) && !ctx->protected && buf->cookie.ioaddr == 0) {
		buf->cookie.ioaddr = ion_iovmm_map(buf->attachment, 0,
					buf->size, buf->direction, 0);
		if (IS_ERR_VALUE(buf->cookie.ioaddr)) {
			pr_err("buf->cookie.ioaddr is error: %pa\n",
			       &buf->cookie.ioaddr);
			mutex_unlock(&ctx->lock);
			dma_buf_unmap_attachment(buf->attachment,
					buf->cookie.sgt, buf->direction);
			return (int)buf->cookie.ioaddr;
		}
	}
	mutex_unlock(&ctx->lock);

	return 0;
}

static void vb2_ion_unmap_dmabuf(void *mem_priv)
{
	struct vb2_ion_buf *buf = mem_priv;

	if (WARN_ON(!buf->attachment)) {
		pr_err("trying to unpin a not attached buffer\n");
		return;
	}

	if (WARN_ON(!buf->cookie.sgt)) {
		pr_err("dmabuf buffer is already unpinned\n");
		return;
	}

	dma_buf_unmap_attachment(buf->attachment,
				 buf->cookie.sgt, buf->direction);

	buf->cookie.sgt = NULL;
}

static void vb2_ion_detach_dmabuf(void *mem_priv)
{
	struct vb2_ion_buf *buf = mem_priv;
	struct vb2_ion_context *ctx = buf->ctx;

	mutex_lock(&ctx->lock);
	if (buf->cookie.ioaddr && ctx_iommu(ctx) && !ctx->protected) {
		ion_iovmm_unmap(buf->attachment, buf->cookie.ioaddr);
		buf->cookie.ioaddr = 0;
	}
	mutex_unlock(&ctx->lock);

	if (buf->kva != NULL) {
		dma_buf_kunmap(buf->dma_buf, 0, buf->kva);
		dma_buf_end_cpu_access(buf->dma_buf, 0, buf->size, 0);
	}

	/* detach this attachment */
	dma_buf_detach(buf->dma_buf, buf->attachment);
	kfree(buf);
}

static void *vb2_ion_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
				   unsigned long size, int write)
{
	struct vb2_ion_buf *buf;
	struct dma_buf_attachment *attachment;

	if (dbuf->size < size) {
		WARN(1, "dbuf->size(%zd) is smaller than size(%ld)\n",
		     dbuf->size, size);
		return ERR_PTR(-EFAULT);
	}

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf) {
		pr_err("out of memory\n");
		return ERR_PTR(-ENOMEM);
	}

	buf->ctx = alloc_ctx;
	/* create attachment for the dmabuf with the user device */
	attachment = dma_buf_attach(dbuf, buf->ctx->dev);
	if (IS_ERR(attachment)) {
		pr_err("failed to attach dmabuf\n");
		kfree(buf);
		return attachment;
	}

	buf->direction = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
	buf->size = size;
	buf->dma_buf = dbuf;
	buf->ion = true;
	buf->attachment = attachment;

	return buf;
}

static void vb2_ion_put_vma(struct vm_area_struct *vma)
{
	while (vma) {
		struct vm_area_struct *tmp;

		tmp = vma;
		vma = vma->vm_prev;
		vb2_put_vma(tmp);
	}
}

/*
 * Verify the user's input and
 * obtain the reference to the vma against the user's area.
 * Returns the first node of the doubly-linked list of replicated vmas.
 */
static struct vm_area_struct *vb2_ion_get_vma(struct device *dev,
				unsigned long vaddr, unsigned long size)
{
	struct vm_area_struct *new_vma = NULL;
	struct vm_area_struct *vma;
	unsigned long addr = vaddr;
	unsigned long len = size;

	if ((vaddr + size) <= vaddr) {
		vb2ion_err(dev, "size overflow in user area, [%#lx, %#lx)\n",
			   vaddr, size);
		return NULL;
	}

	for (vma = find_vma(current->mm, addr);
			vma && len && (addr >= vma->vm_start);
			vma = vma->vm_next) {
		struct vm_area_struct *cur_vma;

		cur_vma = vb2_get_vma(vma);

		if (new_vma) {
			if ((cur_vma->vm_file != new_vma->vm_file) ||
					(cur_vma->vm_ops != new_vma->vm_ops)) {
				vb2ion_err(dev,
					"[%#lx, %#lx) crosses disparate vmas\n",
					vaddr, size);
				vb2_put_vma(cur_vma);
				break;
			}
			new_vma->vm_next = cur_vma;
		}
		cur_vma->vm_prev = new_vma;
		new_vma = cur_vma;

		if ((addr + len) <= vma->vm_end) {
			addr = addr + len;
			len = 0;
		} else {
			len -= vma->vm_end - addr;
			addr = vma->vm_end;
		}
	}

	if (len) { /* error detected */
		vb2ion_err(dev, "Invalid user area [%#lx, %#lx)\n",
			   vaddr, size);
		vb2_ion_put_vma(new_vma);
		return NULL;
	}

	while (new_vma && new_vma->vm_prev)
		new_vma = new_vma->vm_prev;

	return new_vma;
}

static void vb2_ion_put_userptr_dmabuf(struct vb2_ion_context *ctx,
					struct vb2_ion_buf *buf)
{
	if (ctx_iommu(ctx))
		ion_iovmm_unmap(buf->attachment,
				buf->cookie.ioaddr - buf->cookie.offset);

	dma_buf_unmap_attachment(buf->attachment,
				 buf->cookie.sgt, buf->direction);
	dma_buf_detach(buf->dma_buf, buf->attachment);
}

static void *vb2_ion_get_userptr_dmabuf(struct vb2_ion_context *ctx,
				struct vb2_ion_buf *buf, unsigned long vaddr)
{
	void *ret;

	buf->attachment = dma_buf_attach(buf->dma_buf, ctx->dev);
	if (IS_ERR(buf->attachment)) {
		dev_err(ctx->dev,
			"%s: Failed to pin user buffer @ %#lx/%#lx\n",
			__func__, vaddr, buf->size);
		return ERR_CAST(buf->attachment);
	}

	buf->cookie.sgt = dma_buf_map_attachment(buf->attachment,
						 buf->direction);
	if (IS_ERR(buf->cookie.sgt)) {
		dev_err(ctx->dev,
			"%s: Failed to get sgt of user buffer @ %#lx/%#lx\n",
			__func__, vaddr, buf->size);
		ret = ERR_CAST(buf->cookie.sgt);
		goto err_map;
	}

	if (ctx_iommu(ctx)) {
		buf->cookie.ioaddr = ion_iovmm_map(buf->attachment, 0,
					buf->size, buf->direction, 0);
		if (IS_ERR_VALUE(buf->cookie.ioaddr)) {
			ret = ERR_PTR(buf->cookie.ioaddr);
			goto err_iovmm;
		}

		buf->cookie.ioaddr += buf->cookie.offset;
	}

	return NULL;
err_iovmm:
	dma_buf_unmap_attachment(buf->attachment,
				 buf->cookie.sgt, buf->direction);
err_map:
	dma_buf_detach(buf->dma_buf, buf->attachment);
	return ret;
}

static void *vb2_ion_get_userptr(void *alloc_ctx, unsigned long vaddr,
				 unsigned long size, int write)
{
	struct vb2_ion_context *ctx = alloc_ctx;
	struct vb2_ion_buf *buf = NULL;
	struct vm_area_struct *vma;
	void *p_ret = ERR_PTR(-ENOMEM);

	if (ctx->protected) {
		dev_err(ctx->dev,
			"%s: protected mode is not supported with userptr\n",
			__func__);
		return ERR_PTR(-EINVAL);
	}

	vma = vb2_ion_get_vma(ctx->dev, vaddr, size);
	if (!vma) {
		dev_err(ctx->dev,
			"%s: Failed to hold user buffer @ %#lx/%#lx\n",
			__func__, vaddr, size);
		return ERR_PTR(-EINVAL);
	}

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf) {
		dev_err(ctx->dev, "%s: Not enough memory\n", __func__);
		p_ret = ERR_PTR(-ENOMEM);
		goto err_alloc;
	}

	if (vma->vm_file) {
		get_file(vma->vm_file);
		buf->dma_buf = get_dma_buf_file(vma->vm_file);
	}

	buf->ctx = ctx;
	buf->direction = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
	buf->size = size;
	buf->vma = vma;

	if (buf->dma_buf) {
		buf->ion = true;
		buf->cookie.offset = vaddr - vma->vm_start;

		p_ret = vb2_ion_get_userptr_dmabuf(ctx, buf, vaddr);
		if (IS_ERR(p_ret))
			goto err_map;
	} else if (ctx_iommu(ctx)) {
		int prot = IOMMU_READ;

		if (write)
			prot |= IOMMU_WRITE;
		buf->cookie.ioaddr = exynos_iovmm_map_userptr(
					ctx->dev, vaddr, size, prot);
		if (IS_ERR_VALUE(buf->cookie.ioaddr)) {
			p_ret = ERR_PTR(buf->cookie.ioaddr);
			goto err_map;
		}
	}

	if ((pgprot_noncached(buf->vma->vm_page_prot)
				== buf->vma->vm_page_prot)
			|| (pgprot_writecombine(buf->vma->vm_page_prot)
				== buf->vma->vm_page_prot))
		buf->cached = false;
	else
		buf->cached = true;

	return buf;
err_map:
	if (buf->dma_buf)
		dma_buf_put(buf->dma_buf);
	if (vma->vm_file)
		fput(vma->vm_file);
	kfree(buf);
err_alloc:
	vb2_ion_put_vma(vma);

	return p_ret;
}

static void vb2_ion_put_userptr(void *mem_priv)
{
	struct vb2_ion_buf *buf = mem_priv;

	/* TODO: kva with non dmabuf */
	if (buf->kva) {
		/* the sub-page offset is (offset & ~PAGE_MASK) */
		dma_buf_kunmap(buf->dma_buf, buf->cookie.offset / PAGE_SIZE,
			buf->kva - (buf->cookie.offset & ~PAGE_MASK));
		dma_buf_end_cpu_access(buf->dma_buf, buf->cookie.offset,
			buf->size, DMA_FROM_DEVICE);
	}

	if (buf->dma_buf)
		vb2_ion_put_userptr_dmabuf(buf->ctx, buf);
	else if (ctx_iommu(buf->ctx))
		exynos_iovmm_unmap_userptr(buf->ctx->dev, buf->cookie.ioaddr);

	if (buf->dma_buf)
		dma_buf_put(buf->dma_buf);
	if (buf->vma->vm_file)
		fput(buf->vma->vm_file);

	vb2_ion_put_vma(buf->vma);

	kfree(buf);
}

const struct vb2_mem_ops vb2_ion_memops = {
	.alloc		= vb2_ion_alloc,
	.put		= vb2_ion_put,
	.cookie		= vb2_ion_cookie,
	.vaddr		= vb2_ion_vaddr,
	.mmap		= vb2_ion_mmap,
	.map_dmabuf	= vb2_ion_map_dmabuf,
	.unmap_dmabuf	= vb2_ion_unmap_dmabuf,
	.attach_dmabuf	= vb2_ion_attach_dmabuf,
	.detach_dmabuf	= vb2_ion_detach_dmabuf,
	.get_userptr	= vb2_ion_get_userptr,
	.put_userptr	= vb2_ion_put_userptr,
	.num_users	= vb2_ion_num_users,
};
EXPORT_SYMBOL_GPL(vb2_ion_memops);

void vb2_ion_sync_for_device(void *cookie, off_t offset, size_t size,
			     enum dma_data_direction dir)
{
	struct vb2_ion_buf *buf = container_of(cookie,
					struct vb2_ion_buf, cookie);

	dev_dbg(buf->ctx->dev, "syncing for device, dmabuf: %p, kva: %p, "
		"size: %zd, dir: %d\n", buf->dma_buf, buf->kva, size, dir);

	if (buf->kva) {
		BUG_ON((offset < 0) || (offset > buf->size));
		BUG_ON((offset + size) < size);
		BUG_ON((size > buf->size) || ((offset + size) > buf->size));

		exynos_ion_sync_vaddr_for_device(buf->ctx->dev,
					buf->kva, size, offset, dir);
	} else if (buf->dma_buf) {
		exynos_ion_sync_dmabuf_for_device(buf->ctx->dev,
						  buf->dma_buf, size, dir);
	} else if (buf->vma && buf->cached) {
		if (size < CACHE_FLUSH_ALL_SIZE)
			exynos_iommu_sync_for_device(buf->ctx->dev,
					buf->cookie.ioaddr, size, dir);
		else
			flush_all_cpu_caches();
	}
}
EXPORT_SYMBOL_GPL(vb2_ion_sync_for_device);

void vb2_ion_sync_for_cpu(void *cookie, off_t offset, size_t size,
			  enum dma_data_direction dir)
{
	struct vb2_ion_buf *buf = container_of(cookie,
					struct vb2_ion_buf, cookie);

	dev_dbg(buf->ctx->dev, "syncing for cpu, dmabuf: %p, kva: %p, "
		"size: %zd, dir: %d\n", buf->dma_buf, buf->kva, size, dir);

	if (buf->kva) {
		BUG_ON((offset < 0) || (offset > buf->size));
		BUG_ON((offset + size) < size);
		BUG_ON((size > buf->size) || ((offset + size) > buf->size));

		exynos_ion_sync_vaddr_for_cpu(buf->ctx->dev,
					buf->kva, size, offset, dir);
	} else if (buf->dma_buf) {
		exynos_ion_sync_dmabuf_for_cpu(buf->ctx->dev,
					       buf->dma_buf, size, dir);
	} else if (buf->vma && buf->cached) {
		if (size < CACHE_FLUSH_ALL_SIZE)
			exynos_iommu_sync_for_cpu(buf->ctx->dev,
					buf->cookie.ioaddr, size, dir);
		else
			flush_all_cpu_caches();
	}
}
EXPORT_SYMBOL_GPL(vb2_ion_sync_for_cpu);

int vb2_ion_buf_prepare(struct vb2_buffer *vb)
{
	int i;
	enum dma_data_direction dir;

	dir = V4L2_TYPE_IS_OUTPUT(vb->v4l2_buf.type) ?
					DMA_TO_DEVICE : DMA_FROM_DEVICE;

	for (i = 0; i < vb->num_planes; i++) {
		struct vb2_ion_buf *buf = vb->planes[i].mem_priv;

		vb2_ion_sync_for_device((void *) &buf->cookie, 0,
					buf->size, dir);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(vb2_ion_buf_prepare);

void vb2_ion_buf_finish(struct vb2_buffer *vb)
{
	int i;
	enum dma_data_direction dir;

	dir = V4L2_TYPE_IS_OUTPUT(vb->v4l2_buf.type) ?
					DMA_TO_DEVICE : DMA_FROM_DEVICE;

	for (i = 0; i < vb->num_planes; i++) {
		struct vb2_ion_buf *buf = vb->planes[i].mem_priv;

		vb2_ion_sync_for_cpu((void *) &buf->cookie, 0, buf->size, dir);
	}
}
EXPORT_SYMBOL_GPL(vb2_ion_buf_finish);

int vb2_ion_buf_prepare_exact(struct vb2_buffer *vb)
{
	int i;
	enum dma_data_direction dir;

	dir = V4L2_TYPE_IS_OUTPUT(vb->v4l2_buf.type) ?
					DMA_TO_DEVICE : DMA_FROM_DEVICE;

	for (i = 0; i < vb->num_planes; i++) {
		struct vb2_ion_buf *buf = vb->planes[i].mem_priv;

		vb2_ion_sync_for_device((void *) &buf->cookie, 0,
					vb2_get_plane_payload(vb, i), dir);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(vb2_ion_buf_prepare_exact);

int vb2_ion_buf_finish_exact(struct vb2_buffer *vb)
{
	int i;
	enum dma_data_direction dir;

	dir = V4L2_TYPE_IS_OUTPUT(vb->v4l2_buf.type) ?
					DMA_TO_DEVICE : DMA_FROM_DEVICE;

	for (i = 0; i < vb->num_planes; i++) {
		struct vb2_ion_buf *buf = vb->planes[i].mem_priv;

		vb2_ion_sync_for_cpu((void *) &buf->cookie, 0,
				     vb2_get_plane_payload(vb, i), dir);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(vb2_ion_buf_finish_exact);

void vb2_ion_detach_iommu(void *alloc_ctx)
{
	struct vb2_ion_context *ctx = alloc_ctx;

	if (!ctx_iommu(ctx))
		return;

	mutex_lock(&ctx->lock);
	BUG_ON(ctx->iommu_active_cnt == 0);

	if (--ctx->iommu_active_cnt == 0 && !ctx->protected)
		iovmm_deactivate(ctx->dev);
	mutex_unlock(&ctx->lock);
}
EXPORT_SYMBOL_GPL(vb2_ion_detach_iommu);

int vb2_ion_attach_iommu(void *alloc_ctx)
{
	struct vb2_ion_context *ctx = alloc_ctx;
	int ret = 0;

	if (!ctx_iommu(ctx))
		return -ENOENT;

	mutex_lock(&ctx->lock);
	if (ctx->iommu_active_cnt == 0 && !ctx->protected)
		ret = iovmm_activate(ctx->dev);
	if (!ret)
		ctx->iommu_active_cnt++;
	mutex_unlock(&ctx->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(vb2_ion_attach_iommu);

MODULE_AUTHOR("Cho KyongHo <pullip.cho@samsung.com>");
MODULE_AUTHOR("Jinsung Yang <jsgood.yang@samsung.com>");
MODULE_DESCRIPTION("Android ION allocator handling routines for videobuf2");
MODULE_LICENSE("GPL");
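
By way of illustration, a driver built on this allocator first creates a per-device context and activates the IOMMU before streaming. my_setup_vb2_ion() below is a hypothetical wrapper, a minimal sketch using only the exports above; in this kernel the context is handed back to vb2 from the driver's queue_setup() callback through the alloc_ctxs[] array:

#include <linux/err.h>
#include <linux/sizes.h>
#include <media/videobuf2-core.h>
#include <media/videobuf2-ion.h>

static void *my_setup_vb2_ion(struct device *dev, struct vb2_queue *q)
{
	void *alloc_ctx;

	/* page-aligned allocations, IOMMU-mapped for this device */
	alloc_ctx = vb2_ion_create_context(dev, SZ_4K, VB2ION_CTX_IOMMU);
	if (IS_ERR(alloc_ctx))
		return alloc_ctx;

	vb2_ion_set_cached(alloc_ctx, true);	/* CPU-cached buffers */

	if (vb2_ion_attach_iommu(alloc_ctx) < 0) {
		vb2_ion_destroy_context(alloc_ctx);
		return ERR_PTR(-EIO);
	}

	q->mem_ops = &vb2_ion_memops;		/* select the ION allocator */
	return alloc_ctx;	/* return from queue_setup() via alloc_ctxs[] */
}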
187
drivers/media/v4l2-core/videobuf2-memops.c
Normal file
@ -0,0 +1,187 @@
/*
 * videobuf2-memops.c - generic memory handling routines for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *	   Marek Szyprowski <m.szyprowski@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/file.h>

#include <media/videobuf2-core.h>
#include <media/videobuf2-memops.h>

/**
 * vb2_get_vma() - acquire and lock the virtual memory area
 * @vma:	given virtual memory area
 *
 * This function attempts to acquire an area mapped in the userspace for
 * the duration of a hardware operation. The area is "locked" by performing
 * the same set of operations that are done when a process calls fork()
 * and memory areas are duplicated.
 *
 * Returns a copy of a virtual memory region on success or NULL.
 */
struct vm_area_struct *vb2_get_vma(struct vm_area_struct *vma)
{
	struct vm_area_struct *vma_copy;

	vma_copy = kmalloc(sizeof(*vma_copy), GFP_KERNEL);
	if (vma_copy == NULL)
		return NULL;

	if (vma->vm_ops && vma->vm_ops->open)
		vma->vm_ops->open(vma);

	if (vma->vm_file)
		get_file(vma->vm_file);

	memcpy(vma_copy, vma, sizeof(*vma));

	vma_copy->vm_mm = NULL;
	vma_copy->vm_next = NULL;
	vma_copy->vm_prev = NULL;

	return vma_copy;
}
EXPORT_SYMBOL_GPL(vb2_get_vma);

/**
 * vb2_put_vma() - release a userspace virtual memory area
 * @vma:	virtual memory region associated with the area to be released
 *
 * This function releases the previously acquired memory area after a hardware
 * operation.
 */
void vb2_put_vma(struct vm_area_struct *vma)
{
	if (!vma)
		return;

	if (vma->vm_ops && vma->vm_ops->close)
		vma->vm_ops->close(vma);

	if (vma->vm_file)
		fput(vma->vm_file);

	kfree(vma);
}
EXPORT_SYMBOL_GPL(vb2_put_vma);

/**
 * vb2_get_contig_userptr() - lock physically contiguous userspace mapped memory
 * @vaddr:	starting virtual address of the area to be verified
 * @size:	size of the area
 * @res_vma:	will return locked copy of struct vm_area for the given area
 * @res_pa:	will return physical address for the given vaddr
 *
 * This function will go through memory area of size @size mapped at @vaddr and
 * verify that the underlying physical pages are contiguous. If they are
 * contiguous the virtual memory area is locked and a @res_vma is filled with
 * the copy and @res_pa set to the physical address of the buffer.
 *
 * Returns 0 on success.
 */
int vb2_get_contig_userptr(unsigned long vaddr, unsigned long size,
			   struct vm_area_struct **res_vma, dma_addr_t *res_pa)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long offset, start, end;
	unsigned long this_pfn, prev_pfn;
	dma_addr_t pa = 0;

	start = vaddr;
	offset = start & ~PAGE_MASK;
	end = start + size;

	vma = find_vma(mm, start);

	if (vma == NULL || vma->vm_end < end)
		return -EFAULT;

	for (prev_pfn = 0; start < end; start += PAGE_SIZE) {
		int ret = follow_pfn(vma, start, &this_pfn);
		if (ret)
			return ret;

		if (prev_pfn == 0)
			pa = this_pfn << PAGE_SHIFT;
		else if (this_pfn != prev_pfn + 1)
			return -EFAULT;

		prev_pfn = this_pfn;
	}

	/*
	 * Memory is contiguous, lock vma and return to the caller
	 */
	*res_vma = vb2_get_vma(vma);
	if (*res_vma == NULL)
		return -ENOMEM;

	*res_pa = pa + offset;
	return 0;
}
EXPORT_SYMBOL_GPL(vb2_get_contig_userptr);

/**
 * vb2_common_vm_open() - increase refcount of the vma
 * @vma:	virtual memory region for the mapping
 *
 * This function adds another user to the provided vma. It expects
 * struct vb2_vmarea_handler pointer in vma->vm_private_data.
 */
static void vb2_common_vm_open(struct vm_area_struct *vma)
{
	struct vb2_vmarea_handler *h = vma->vm_private_data;

	pr_debug("%s: %p, refcount: %d, vma: %08lx-%08lx\n",
	       __func__, h, atomic_read(h->refcount), vma->vm_start,
	       vma->vm_end);

	atomic_inc(h->refcount);
}

/**
 * vb2_common_vm_close() - decrease refcount of the vma
 * @vma:	virtual memory region for the mapping
 *
 * This function releases the user from the provided vma. It expects
 * struct vb2_vmarea_handler pointer in vma->vm_private_data.
 */
static void vb2_common_vm_close(struct vm_area_struct *vma)
{
	struct vb2_vmarea_handler *h = vma->vm_private_data;

	pr_debug("%s: %p, refcount: %d, vma: %08lx-%08lx\n",
	       __func__, h, atomic_read(h->refcount), vma->vm_start,
	       vma->vm_end);

	h->put(h->arg);
}

/**
 * vb2_common_vm_ops - common vm_ops used for tracking refcount of mmaped
 * video buffers
 */
const struct vm_operations_struct vb2_common_vm_ops = {
	.open = vb2_common_vm_open,
	.close = vb2_common_vm_close,
};
EXPORT_SYMBOL_GPL(vb2_common_vm_ops);

MODULE_DESCRIPTION("common memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");
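
The handler/refcount pattern these helpers support looks like this from an allocator's point of view. struct my_buf, my_buf_put() and my_mmap() are illustrative stand-ins for what vb2_dma_sg_mmap() above and vb2_vmalloc_mmap() below actually do, a sketch rather than part of this commit:

#include <linux/atomic.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <media/videobuf2-memops.h>

/* Hypothetical allocator-private buffer using the common handler. */
struct my_buf {
	atomic_t refcount;
	struct vb2_vmarea_handler handler;
	/* ... allocator-specific fields ... */
};

static void my_buf_put(void *arg)
{
	struct my_buf *buf = arg;

	if (atomic_dec_and_test(&buf->refcount))
		kfree(buf);	/* last user gone: free the buffer */
}

static int my_mmap(struct my_buf *buf, struct vm_area_struct *vma)
{
	buf->handler.refcount = &buf->refcount;
	buf->handler.put = my_buf_put;
	buf->handler.arg = buf;

	/* ... map the buffer's pages into vma here ... */

	vma->vm_private_data = &buf->handler;
	vma->vm_ops = &vb2_common_vm_ops;
	vma->vm_ops->open(vma);	/* take the mapping's reference */
	return 0;
}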
279
drivers/media/v4l2-core/videobuf2-vmalloc.c
Normal file
@ -0,0 +1,279 @@
/*
 * videobuf2-vmalloc.c - vmalloc memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/io.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <media/videobuf2-core.h>
#include <media/videobuf2-vmalloc.h>
#include <media/videobuf2-memops.h>

struct vb2_vmalloc_buf {
	void				*vaddr;
	struct page			**pages;
	struct vm_area_struct		*vma;
	int				write;
	unsigned long			size;
	unsigned int			n_pages;
	atomic_t			refcount;
	struct vb2_vmarea_handler	handler;
	struct dma_buf			*dbuf;
};

static void vb2_vmalloc_put(void *buf_priv);

static void *vb2_vmalloc_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_flags)
{
	struct vb2_vmalloc_buf *buf;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL | gfp_flags);
	if (!buf)
		return NULL;

	buf->size = size;
	buf->vaddr = vmalloc_user(buf->size);
	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_vmalloc_put;
	buf->handler.arg = buf;

	if (!buf->vaddr) {
		pr_debug("vmalloc of size %ld failed\n", buf->size);
		kfree(buf);
		return NULL;
	}

	atomic_inc(&buf->refcount);
	return buf;
}

static void vb2_vmalloc_put(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;

	if (atomic_dec_and_test(&buf->refcount)) {
		vfree(buf->vaddr);
		kfree(buf);
	}
}

static void *vb2_vmalloc_get_userptr(void *alloc_ctx, unsigned long vaddr,
				     unsigned long size, int write)
{
	struct vb2_vmalloc_buf *buf;
	unsigned long first, last;
	int n_pages, offset;
	struct vm_area_struct *vma;
	dma_addr_t physp;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return NULL;

	buf->write = write;
	offset = vaddr & ~PAGE_MASK;
	buf->size = size;

	vma = find_vma(current->mm, vaddr);
	if (vma && (vma->vm_flags & VM_PFNMAP) && (vma->vm_pgoff)) {
		if (vb2_get_contig_userptr(vaddr, size, &vma, &physp))
			goto fail_pages_array_alloc;
		buf->vma = vma;
		buf->vaddr = ioremap_nocache(physp, size);
		if (!buf->vaddr)
			goto fail_pages_array_alloc;
	} else {
		first = vaddr >> PAGE_SHIFT;
		last  = (vaddr + size - 1) >> PAGE_SHIFT;
		buf->n_pages = last - first + 1;
		buf->pages = kzalloc(buf->n_pages * sizeof(struct page *),
				     GFP_KERNEL);
		if (!buf->pages)
			goto fail_pages_array_alloc;

		/* current->mm->mmap_sem is taken by videobuf2 core */
		n_pages = get_user_pages(current, current->mm,
					 vaddr & PAGE_MASK, buf->n_pages,
					 write, 1, /* force */
					 buf->pages, NULL);
		if (n_pages != buf->n_pages)
			goto fail_get_user_pages;

		buf->vaddr = vm_map_ram(buf->pages, buf->n_pages, -1,
					PAGE_KERNEL);
		if (!buf->vaddr)
			goto fail_get_user_pages;
	}

	buf->vaddr += offset;
	return buf;

fail_get_user_pages:
	pr_debug("get_user_pages requested/got: %d/%d\n", n_pages,
		 buf->n_pages);
	while (--n_pages >= 0)
		put_page(buf->pages[n_pages]);
	kfree(buf->pages);

fail_pages_array_alloc:
	/* release the vma reference taken by vb2_get_contig_userptr() */
	if (buf->vma)
		vb2_put_vma(buf->vma);
	kfree(buf);

	return NULL;
}

static void vb2_vmalloc_put_userptr(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	unsigned long vaddr = (unsigned long)buf->vaddr & PAGE_MASK;
	unsigned int i;

	if (buf->pages) {
		if (vaddr)
			vm_unmap_ram((void *)vaddr, buf->n_pages);
		for (i = 0; i < buf->n_pages; ++i) {
			if (buf->write)
				set_page_dirty_lock(buf->pages[i]);
			put_page(buf->pages[i]);
		}
		kfree(buf->pages);
	} else {
		if (buf->vma)
			vb2_put_vma(buf->vma);
		iounmap(buf->vaddr);
	}
	kfree(buf);
}

static void *vb2_vmalloc_vaddr(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;

	if (!buf->vaddr) {
		pr_err("Address of an unallocated plane requested "
		       "or cannot map user pointer\n");
		return NULL;
	}

	return buf->vaddr;
}

static unsigned int vb2_vmalloc_num_users(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	return atomic_read(&buf->refcount);
}

static int vb2_vmalloc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	int ret;

	if (!buf) {
		pr_err("No memory to map\n");
		return -EINVAL;
	}

	ret = remap_vmalloc_range(vma, buf->vaddr, 0);
	if (ret) {
		pr_err("Remapping vmalloc memory, error: %d\n", ret);
		return ret;
	}

	/*
	 * Make sure that vm_areas for 2 buffers won't be merged together
	 */
	vma->vm_flags |= VM_DONTEXPAND;

	/*
	 * Use common vm_area operations to track buffer refcount.
	 */
	vma->vm_private_data = &buf->handler;
	vma->vm_ops = &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	return 0;
}

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

static int vb2_vmalloc_map_dmabuf(void *mem_priv)
{
	struct vb2_vmalloc_buf *buf = mem_priv;

	buf->vaddr = dma_buf_vmap(buf->dbuf);

	return buf->vaddr ? 0 : -EFAULT;
}

static void vb2_vmalloc_unmap_dmabuf(void *mem_priv)
{
	struct vb2_vmalloc_buf *buf = mem_priv;

	dma_buf_vunmap(buf->dbuf, buf->vaddr);
	buf->vaddr = NULL;
}

static void vb2_vmalloc_detach_dmabuf(void *mem_priv)
{
	struct vb2_vmalloc_buf *buf = mem_priv;

	if (buf->vaddr)
		dma_buf_vunmap(buf->dbuf, buf->vaddr);

	kfree(buf);
}

static void *vb2_vmalloc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
	unsigned long size, int write)
{
	struct vb2_vmalloc_buf *buf;

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dbuf = dbuf;
	buf->write = write;
	buf->size = size;

	return buf;
}

const struct vb2_mem_ops vb2_vmalloc_memops = {
	.alloc		= vb2_vmalloc_alloc,
	.put		= vb2_vmalloc_put,
	.get_userptr	= vb2_vmalloc_get_userptr,
	.put_userptr	= vb2_vmalloc_put_userptr,
	.map_dmabuf	= vb2_vmalloc_map_dmabuf,
	.unmap_dmabuf	= vb2_vmalloc_unmap_dmabuf,
	.attach_dmabuf	= vb2_vmalloc_attach_dmabuf,
	.detach_dmabuf	= vb2_vmalloc_detach_dmabuf,
	.vaddr		= vb2_vmalloc_vaddr,
	.mmap		= vb2_vmalloc_mmap,
	.num_users	= vb2_vmalloc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_vmalloc_memops);

MODULE_DESCRIPTION("vmalloc memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");
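
To close the loop, a driver using vb2_vmalloc_memops reads and completes buffers through the core accessors. my_fill_and_complete() is a hypothetical helper, a minimal sketch in which the memcpy stands in for the device-specific fill:

#include <linux/string.h>
#include <media/videobuf2-core.h>

static void my_fill_and_complete(struct vb2_buffer *vb, const void *src,
				 size_t len)
{
	void *dst = vb2_plane_vaddr(vb, 0);	/* kernel mapping from above */

	if (!dst) {
		vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
		return;
	}

	memcpy(dst, src, len);			/* device-specific fill */
	vb2_set_plane_payload(vb, 0, len);	/* bytes actually used */
	vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
}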