Fixed MTP to work with TWRP

This commit is contained in:
awab228 2018-06-19 23:16:04 +02:00
commit f6dfaef42e
50820 changed files with 20846062 additions and 0 deletions

14
drivers/video/adf/Kconfig Normal file
View file

@ -0,0 +1,14 @@
# Atomic Display Framework (ADF) core and optional helper modules.
menuconfig ADF
	depends on SYNC
	depends on DMA_SHARED_BUFFER
	tristate "Atomic Display Framework"

# Helper library bridging ADF drivers to the legacy fbdev API.
menuconfig ADF_FBDEV
	depends on ADF
	depends on FB
	tristate "Helper for implementing the fbdev API in ADF drivers"

# Helper for exposing memblock-reserved regions as ADF buffers.
menuconfig ADF_MEMBLOCK
	depends on ADF
	depends on HAVE_MEMBLOCK
	tristate "Helper for using memblocks as buffers in ADF drivers"

View file

@ -0,0 +1,15 @@
# sw_sync/sync headers live in the Android staging tree.
ccflags-y := -Idrivers/staging/android

# adf.c resolves includes relative to its own source directory.
CFLAGS_adf.o := -I$(src)

obj-$(CONFIG_ADF) += adf.o \
	adf_client.o \
	adf_fops.o \
	adf_format.o \
	adf_sysfs.o

# 32-bit compat ioctl translation layer.
obj-$(CONFIG_COMPAT) += adf_fops32.o

obj-$(CONFIG_ADF_FBDEV) += adf_fbdev.o
obj-$(CONFIG_ADF_MEMBLOCK) += adf_memblock.o

1188
drivers/video/adf/adf.c Normal file

File diff suppressed because it is too large Load diff

71
drivers/video/adf/adf.h Normal file
View file

@ -0,0 +1,71 @@
/*
* Copyright (C) 2013 Google, Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef __VIDEO_ADF_ADF_H
#define __VIDEO_ADF_ADF_H
#include <linux/idr.h>
#include <linux/list.h>
#include <video/adf.h>
#include "sync.h"
/*
 * Per-(object, event-type) registration count, kept in an rbtree on the
 * owning adf_obj (looked up via adf_obj_find_event_refcount()).
 */
struct adf_event_refcount {
	struct rb_node node;		/* link in the object's refcount tree */
	enum adf_event_type type;	/* event type this count tracks */
	int refcount;			/* number of active registrations */
};

/* Internal helpers shared between the ADF core translation units. */
void adf_buffer_cleanup(struct adf_buffer *buf);
void adf_buffer_mapping_cleanup(struct adf_buffer_mapping *mapping,
		struct adf_buffer *buf);
void adf_post_cleanup(struct adf_device *dev, struct adf_pending_post *post);

/* Attachment list management (list is dev->attached or dev->attach_allowed). */
struct adf_attachment_list *adf_attachment_find(struct list_head *list,
		struct adf_overlay_engine *eng, struct adf_interface *intf);
int adf_attachment_validate(struct adf_device *dev,
		struct adf_overlay_engine *eng, struct adf_interface *intf);
void adf_attachment_free(struct adf_attachment_list *attachment);

struct adf_event_refcount *adf_obj_find_event_refcount(struct adf_obj *obj,
		enum adf_event_type type);
/*
 * Check whether @obj can report events of @type.
 *
 * Returns 0 if supported, -EOPNOTSUPP if the object has no event support
 * at all, or -EINVAL if this particular event type is not supported.
 */
static inline int adf_obj_check_supports_event(struct adf_obj *obj,
		enum adf_event_type type)
{
	if (!(obj->ops && obj->ops->supports_event))
		return -EOPNOTSUPP;

	return obj->ops->supports_event(obj, type) ? 0 : -EINVAL;
}
/*
 * Invoke the driver's optional attach hook; succeed trivially when the
 * driver does not implement one.
 */
static inline int adf_device_attach_op(struct adf_device *dev,
		struct adf_overlay_engine *eng, struct adf_interface *intf)
{
	return dev->ops->attach ? dev->ops->attach(dev, eng, intf) : 0;
}
/*
 * Invoke the driver's optional detach hook; succeed trivially when the
 * driver does not implement one.
 */
static inline int adf_device_detach_op(struct adf_device *dev,
		struct adf_overlay_engine *eng, struct adf_interface *intf)
{
	return dev->ops->detach ? dev->ops->detach(dev, eng, intf) : 0;
}
#endif /* __VIDEO_ADF_ADF_H */

View file

@ -0,0 +1,811 @@
/*
* Copyright (C) 2013 Google, Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include "sw_sync.h"
#include <video/adf.h>
#include <video/adf_client.h>
#include <video/adf_format.h>
#include "adf.h"
/* A display is considered to deliver vsync in ON and STANDBY states. */
static inline bool vsync_active(u8 state)
{
	switch (state) {
	case DRM_MODE_DPMS_ON:
	case DRM_MODE_DPMS_STANDBY:
		return true;
	default:
		return false;
	}
}
/**
 * adf_interface_blank - set interface's DPMS state
 *
 * @intf: the interface
 * @state: one of %DRM_MODE_DPMS_*
 *
 * Returns 0 on success or -errno on failure.
 */
int adf_interface_blank(struct adf_interface *intf, u8 state)
{
	struct adf_device *dev = adf_interface_parent(intf);
	u8 prev_state;
	bool disable_vsync;
	bool enable_vsync;
	int ret = 0;
	struct adf_event_refcount *vsync_refcount;

	if (!intf->ops || !intf->ops->blank)
		return -EOPNOTSUPP;
	if (state > DRM_MODE_DPMS_OFF)
		return -EINVAL;

	mutex_lock(&dev->client_lock);
	/* Drain pending flips before turning the display off/standby. */
	if (state != DRM_MODE_DPMS_ON)
		flush_kthread_worker(&dev->post_worker);
	mutex_lock(&intf->base.event_lock);

	vsync_refcount = adf_obj_find_event_refcount(&intf->base,
			ADF_EVENT_VSYNC);
	if (!vsync_refcount) {
		ret = -ENOMEM;
		goto done;
	}

	/* -EBUSY signals a no-op transition to the caller. */
	prev_state = intf->dpms_state;
	if (prev_state == state) {
		ret = -EBUSY;
		goto done;
	}

	/*
	 * Vsync delivery must track the DPMS state: turn the hardware event
	 * off before blanking, and back on after unblanking, but only while
	 * clients actually hold vsync registrations.
	 */
	disable_vsync = vsync_active(prev_state) &&
			!vsync_active(state) &&
			vsync_refcount->refcount;
	enable_vsync = !vsync_active(prev_state) &&
			vsync_active(state) &&
			vsync_refcount->refcount;

	if (disable_vsync)
		intf->base.ops->set_event(&intf->base, ADF_EVENT_VSYNC,
				false);

	ret = intf->ops->blank(intf, state);
	if (ret < 0) {
		/* Blank failed: restore the vsync event we turned off. */
		if (disable_vsync)
			intf->base.ops->set_event(&intf->base, ADF_EVENT_VSYNC,
					true);
		goto done;
	}

	if (enable_vsync)
		intf->base.ops->set_event(&intf->base, ADF_EVENT_VSYNC,
				true);

	intf->dpms_state = state;
done:
	mutex_unlock(&intf->base.event_lock);
	mutex_unlock(&dev->client_lock);
	return ret;
}
EXPORT_SYMBOL(adf_interface_blank);
/**
 * adf_interface_dpms_state - get interface's current DPMS state
 *
 * @intf: the interface
 *
 * Returns one of %DRM_MODE_DPMS_*.
 */
u8 adf_interface_dpms_state(struct adf_interface *intf)
{
	struct adf_device *dev = adf_interface_parent(intf);
	u8 dpms_state;

	/* dpms_state is protected by the device-wide client lock. */
	mutex_lock(&dev->client_lock);
	dpms_state = intf->dpms_state;
	mutex_unlock(&dev->client_lock);

	return dpms_state;
}
EXPORT_SYMBOL(adf_interface_dpms_state);
/**
* adf_interface_current_mode - get interface's current display mode
*
* @intf: the interface
* @mode: returns the current mode
*/
void adf_interface_current_mode(struct adf_interface *intf,
struct drm_mode_modeinfo *mode)
{
struct adf_device *dev = adf_interface_parent(intf);
mutex_lock(&dev->client_lock);
memcpy(mode, &intf->current_mode, sizeof(*mode));
mutex_unlock(&dev->client_lock);
}
EXPORT_SYMBOL(adf_interface_current_mode);
/**
 * adf_interface_modelist - get interface's modelist
 *
 * @intf: the interface
 * @modelist: storage for the modelist (optional)
 * @n_modes: length of @modelist
 *
 * If @modelist is not NULL, adf_interface_modelist() will copy up to @n_modes
 * modelist entries into @modelist.
 *
 * Returns the length of the modelist.
 */
size_t adf_interface_modelist(struct adf_interface *intf,
		struct drm_mode_modeinfo *modelist, size_t n_modes)
{
	unsigned long flags;
	size_t retval;

	/*
	 * The modelist can be replaced from hotplug context, so it is
	 * guarded by an irq-safe rwlock rather than the client mutex.
	 */
	read_lock_irqsave(&intf->hotplug_modelist_lock, flags);
	if (modelist)
		memcpy(modelist, intf->modelist, sizeof(modelist[0]) *
				min(n_modes, intf->n_modes));
	retval = intf->n_modes;
	read_unlock_irqrestore(&intf->hotplug_modelist_lock, flags);

	return retval;
}
EXPORT_SYMBOL(adf_interface_modelist);
/**
 * adf_interface_set_mode - set interface's display mode
 *
 * @intf: the interface
 * @mode: the new mode
 *
 * Returns 0 on success or -errno on failure.
 */
int adf_interface_set_mode(struct adf_interface *intf,
		struct drm_mode_modeinfo *mode)
{
	struct adf_device *dev = adf_interface_parent(intf);
	int ret = 0;

	if (!intf->ops || !intf->ops->modeset)
		return -EOPNOTSUPP;

	mutex_lock(&dev->client_lock);
	/* Let any queued flips complete before changing the mode. */
	flush_kthread_worker(&dev->post_worker);

	ret = intf->ops->modeset(intf, mode);
	if (ret < 0)
		goto done;

	/* Cache the mode only after the driver accepted it. */
	memcpy(&intf->current_mode, mode, sizeof(*mode));
done:
	mutex_unlock(&dev->client_lock);
	return ret;
}
EXPORT_SYMBOL(adf_interface_set_mode);
/**
 * adf_interface_get_screen_size - get size of screen connected to interface
 *
 * @intf: the interface
 * @width_mm: returns the screen width in mm
 * @height_mm: returns the screen height in mm
 *
 * Returns 0 on success or -errno on failure.
 */
int adf_interface_get_screen_size(struct adf_interface *intf, u16 *width_mm,
		u16 *height_mm)
{
	struct adf_device *dev = adf_interface_parent(intf);
	int ret;

	if (!intf->ops || !intf->ops->screen_size)
		return -EOPNOTSUPP;

	mutex_lock(&dev->client_lock);
	ret = intf->ops->screen_size(intf, width_mm, height_mm);
	mutex_unlock(&dev->client_lock);

	return ret;
}
EXPORT_SYMBOL(adf_interface_get_screen_size);
/**
 * adf_overlay_engine_supports_format - returns whether a format is in an
 * overlay engine's supported list
 *
 * @eng: the overlay engine
 * @format: format fourcc
 */
bool adf_overlay_engine_supports_format(struct adf_overlay_engine *eng,
		u32 format)
{
	size_t idx;

	/* Linear scan; supported-format lists are short in practice. */
	for (idx = 0; idx < eng->ops->n_supported_formats; idx++) {
		if (eng->ops->supported_formats[idx] == format)
			return true;
	}

	return false;
}
EXPORT_SYMBOL(adf_overlay_engine_supports_format);
/*
 * Validate a client-supplied buffer against its target overlay engine:
 * the format must be advertised by the engine, and either pass the
 * generic YUV/RGB layout checks (standard formats) or the driver's
 * custom-format validator.
 */
static int adf_buffer_validate(struct adf_buffer *buf)
{
	struct adf_overlay_engine *eng = buf->overlay_engine;
	struct device *dev = &eng->base.dev;
	struct adf_device *parent = adf_overlay_engine_parent(eng);
	u8 hsub, vsub, num_planes, cpp[ADF_MAX_PLANES], i;

	if (!adf_overlay_engine_supports_format(eng, buf->format)) {
		char format_str[ADF_FORMAT_STR_SIZE];

		adf_format_str(buf->format, format_str);
		dev_err(dev, "unsupported format %s\n", format_str);
		return -EINVAL;
	}

	/* Non-standard fourccs are entirely the driver's business. */
	if (!adf_format_is_standard(buf->format))
		return parent->ops->validate_custom_format(parent, buf);

	hsub = adf_format_horz_chroma_subsampling(buf->format);
	vsub = adf_format_vert_chroma_subsampling(buf->format);
	num_planes = adf_format_num_planes(buf->format);
	for (i = 0; i < num_planes; i++)
		cpp[i] = adf_format_plane_cpp(buf->format, i);

	return adf_format_validate_yuv(parent, buf, num_planes, hsub, vsub,
			cpp);
}
/*
 * Attach and map each plane's dma-buf for device access, recording the
 * attachments and sg_tables in @mapping. On any failure the partially
 * filled @mapping is unwound via adf_buffer_mapping_cleanup(), so the
 * caller never has to clean up a half-mapped buffer.
 */
static int adf_buffer_map(struct adf_device *dev, struct adf_buffer *buf,
		struct adf_buffer_mapping *mapping)
{
	int ret = 0;
	size_t i;

	for (i = 0; i < buf->n_planes; i++) {
		struct dma_buf_attachment *attachment;
		struct sg_table *sg_table;

		attachment = dma_buf_attach(buf->dma_bufs[i], dev->dev);
		if (IS_ERR(attachment)) {
			ret = PTR_ERR(attachment);
			dev_err(&dev->base.dev, "attaching plane %zu failed: %d\n",
					i, ret);
			goto done;
		}
		/* Store immediately so cleanup sees every live attachment. */
		mapping->attachments[i] = attachment;

		sg_table = dma_buf_map_attachment(attachment, DMA_TO_DEVICE);
		if (IS_ERR(sg_table)) {
			ret = PTR_ERR(sg_table);
			dev_err(&dev->base.dev, "mapping plane %zu failed: %d",
					i, ret);
			goto done;
		} else if (!sg_table) {
			/* Some exporters return NULL rather than ERR_PTR. */
			ret = -ENOMEM;
			dev_err(&dev->base.dev, "mapping plane %zu failed\n",
					i);
			goto done;
		}
		mapping->sg_tables[i] = sg_table;
	}

done:
	if (ret < 0)
		adf_buffer_mapping_cleanup(mapping, buf);

	return ret;
}
/*
 * Create a software-signaled completion fence on the device's sw_sync
 * timeline, lazily creating the timeline on first use. The fence will
 * signal when the timeline advances past dev->timeline_max.
 *
 * NOTE(review): the error paths return -ENOSYS even though the failures
 * are allocation failures; -ENOMEM would seem more natural — confirm
 * whether callers depend on this before changing it.
 */
static struct sync_fence *adf_sw_complete_fence(struct adf_device *dev)
{
	struct sync_pt *pt;
	struct sync_fence *complete_fence;

	if (!dev->timeline) {
		dev->timeline = sw_sync_timeline_create(dev->base.name);
		if (!dev->timeline)
			return ERR_PTR(-ENOMEM);
		/* Start at 1 so the first post's point is value 2. */
		dev->timeline_max = 1;
	}

	dev->timeline_max++;
	pt = sw_sync_pt_create(dev->timeline, dev->timeline_max);
	if (!pt)
		goto err_pt_create;
	complete_fence = sync_fence_create(dev->base.name, pt);
	if (!complete_fence)
		goto err_fence_create;

	return complete_fence;

err_fence_create:
	sync_pt_free(pt);
err_pt_create:
	dev->timeline_max--;
	return ERR_PTR(-ENOSYS);
}
/**
 * adf_device_post - flip to a new set of buffers
 *
 * @dev: device targeted by the flip
 * @intfs: interfaces targeted by the flip
 * @n_intfs: number of targeted interfaces
 * @bufs: description of buffers displayed
 * @n_bufs: number of buffers displayed
 * @custom_data: driver-private data
 * @custom_data_size: size of driver-private data
 *
 * adf_device_post() will copy @intfs, @bufs, and @custom_data, so they may
 * point to variables on the stack. adf_device_post() also takes its own
 * reference on each of the dma-bufs in @bufs. The adf_device_post_nocopy()
 * variant transfers ownership of these resources to ADF instead.
 *
 * On success, returns a sync fence which signals when the buffers are removed
 * from the screen. On failure, returns ERR_PTR(-errno).
 */
struct sync_fence *adf_device_post(struct adf_device *dev,
		struct adf_interface **intfs, size_t n_intfs,
		struct adf_buffer *bufs, size_t n_bufs, void *custom_data,
		size_t custom_data_size)
{
	struct adf_interface **intfs_copy = NULL;
	struct adf_buffer *bufs_copy = NULL;
	void *custom_data_copy = NULL;
	struct sync_fence *ret;
	size_t i;

	/*
	 * kcalloc() (rather than kzalloc(n * size)) checks the element-count
	 * multiplication for overflow, since n_intfs/n_bufs come from
	 * callers that may pass userspace-derived counts.
	 */
	intfs_copy = kcalloc(n_intfs, sizeof(intfs_copy[0]), GFP_KERNEL);
	if (!intfs_copy)
		return ERR_PTR(-ENOMEM);

	bufs_copy = kcalloc(n_bufs, sizeof(bufs_copy[0]), GFP_KERNEL);
	if (!bufs_copy) {
		ret = ERR_PTR(-ENOMEM);
		goto err_alloc;
	}

	custom_data_copy = kzalloc(custom_data_size, GFP_KERNEL);
	if (!custom_data_copy) {
		ret = ERR_PTR(-ENOMEM);
		goto err_alloc;
	}

	/* Take our own reference on every plane's dma-buf. */
	for (i = 0; i < n_bufs; i++) {
		size_t j;

		for (j = 0; j < bufs[i].n_planes; j++)
			get_dma_buf(bufs[i].dma_bufs[j]);
	}

	memcpy(intfs_copy, intfs, sizeof(intfs_copy[0]) * n_intfs);
	memcpy(bufs_copy, bufs, sizeof(bufs_copy[0]) * n_bufs);
	memcpy(custom_data_copy, custom_data, custom_data_size);

	/* On success the nocopy variant owns the copies and the dma-buf refs. */
	ret = adf_device_post_nocopy(dev, intfs_copy, n_intfs, bufs_copy,
			n_bufs, custom_data_copy, custom_data_size);
	if (IS_ERR(ret))
		goto err_post;

	return ret;

err_post:
	for (i = 0; i < n_bufs; i++) {
		size_t j;

		for (j = 0; j < bufs[i].n_planes; j++)
			dma_buf_put(bufs[i].dma_bufs[j]);
	}
err_alloc:
	kfree(custom_data_copy);
	kfree(bufs_copy);
	kfree(intfs_copy);
	return ret;
}
EXPORT_SYMBOL(adf_device_post);
/**
 * adf_device_post_nocopy - flip to a new set of buffers
 *
 * adf_device_post_nocopy() has the same behavior as adf_device_post(),
 * except ADF does not copy @intfs, @bufs, or @custom_data, and it does
 * not take an extra reference on the dma-bufs in @bufs.
 *
 * @intfs, @bufs, and @custom_data must point to buffers allocated by
 * kmalloc(). On success, ADF takes ownership of these buffers and the dma-bufs
 * in @bufs, and will kfree()/dma_buf_put() them when they are no longer needed.
 * On failure, adf_device_post_nocopy() does NOT take ownership of these
 * buffers or the dma-bufs, and the caller must clean them up.
 *
 * adf_device_post_nocopy() is mainly intended for implementing ADF's ioctls.
 * Clients may find the nocopy variant useful in limited cases, but most should
 * call adf_device_post() instead.
 */
struct sync_fence *adf_device_post_nocopy(struct adf_device *dev,
		struct adf_interface **intfs, size_t n_intfs,
		struct adf_buffer *bufs, size_t n_bufs,
		void *custom_data, size_t custom_data_size)
{
	struct adf_pending_post *cfg;
	struct adf_buffer_mapping *mappings;
	struct sync_fence *ret;
	size_t i;
	int err;

	cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
	if (!cfg)
		return ERR_PTR(-ENOMEM);

	mappings = kzalloc(sizeof(mappings[0]) * n_bufs, GFP_KERNEL);
	if (!mappings) {
		ret = ERR_PTR(-ENOMEM);
		goto err_alloc;
	}

	mutex_lock(&dev->client_lock);

	/* Validate and DMA-map every buffer before touching device state. */
	for (i = 0; i < n_bufs; i++) {
		err = adf_buffer_validate(&bufs[i]);
		if (err < 0) {
			ret = ERR_PTR(err);
			goto err_buf;
		}

		err = adf_buffer_map(dev, &bufs[i], &mappings[i]);
		if (err < 0) {
			ret = ERR_PTR(err);
			goto err_buf;
		}
	}

	INIT_LIST_HEAD(&cfg->head);
	cfg->config.n_bufs = n_bufs;
	cfg->config.bufs = bufs;
	cfg->config.mappings = mappings;
	cfg->config.custom_data = custom_data;
	cfg->config.custom_data_size = custom_data_size;

	/* Driver checks the whole configuration and may emit driver state. */
	err = dev->ops->validate(dev, &cfg->config, &cfg->state);
	if (err < 0) {
		ret = ERR_PTR(err);
		goto err_buf;
	}

	mutex_lock(&dev->post_lock);

	/* Prefer a driver-provided completion fence; else use sw_sync. */
	if (dev->ops->complete_fence)
		ret = dev->ops->complete_fence(dev, &cfg->config,
				cfg->state);
	else
		ret = adf_sw_complete_fence(dev);

	if (IS_ERR(ret))
		goto err_fence;

	/* Hand the post off to the worker thread for the actual flip. */
	list_add_tail(&cfg->head, &dev->post_list);
	queue_kthread_work(&dev->post_worker, &dev->post_work);
	mutex_unlock(&dev->post_lock);
	mutex_unlock(&dev->client_lock);
	/* bufs/custom_data now live in cfg; only the intfs array is done. */
	kfree(intfs);
	return ret;

err_fence:
	mutex_unlock(&dev->post_lock);

err_buf:
	for (i = 0; i < n_bufs; i++)
		adf_buffer_mapping_cleanup(&mappings[i], &bufs[i]);

	mutex_unlock(&dev->client_lock);
	kfree(mappings);

err_alloc:
	kfree(cfg);
	return ret;
}
EXPORT_SYMBOL(adf_device_post_nocopy);
/*
 * Copy up to @size attachments from list @src into array @dst.
 * A NULL @dst means the caller only wants the list length elsewhere.
 */
static void adf_attachment_list_to_array(struct adf_device *dev,
		struct list_head *src, struct adf_attachment *dst, size_t size)
{
	struct adf_attachment_list *pos;
	size_t n = 0;

	if (!dst)
		return;

	list_for_each_entry(pos, src, head) {
		if (n >= size)
			break;
		dst[n++] = pos->attachment;
	}
}
/**
 * adf_device_attachments - get device's list of active attachments
 *
 * @dev: the device
 * @attachments: storage for the attachment list (optional)
 * @n_attachments: length of @attachments
 *
 * If @attachments is not NULL, adf_device_attachments() will copy up to
 * @n_attachments entries into @attachments.
 *
 * Returns the length of the active attachment list.
 */
size_t adf_device_attachments(struct adf_device *dev,
		struct adf_attachment *attachments, size_t n_attachments)
{
	size_t retval;

	mutex_lock(&dev->client_lock);
	adf_attachment_list_to_array(dev, &dev->attached, attachments,
			n_attachments);
	retval = dev->n_attached;
	mutex_unlock(&dev->client_lock);

	return retval;
}
EXPORT_SYMBOL(adf_device_attachments);
/**
 * adf_device_attachments_allowed - get device's list of allowed attachments
 *
 * @dev: the device
 * @attachments: storage for the attachment list (optional)
 * @n_attachments: length of @attachments
 *
 * If @attachments is not NULL, adf_device_attachments_allowed() will copy up to
 * @n_attachments entries into @attachments.
 *
 * Returns the length of the allowed attachment list.
 */
size_t adf_device_attachments_allowed(struct adf_device *dev,
		struct adf_attachment *attachments, size_t n_attachments)
{
	size_t n_allowed;

	/* Both the list and its length are guarded by the client lock. */
	mutex_lock(&dev->client_lock);
	adf_attachment_list_to_array(dev, &dev->attach_allowed, attachments,
			n_attachments);
	n_allowed = dev->n_attach_allowed;
	mutex_unlock(&dev->client_lock);

	return n_allowed;
}
EXPORT_SYMBOL(adf_device_attachments_allowed);
/**
 * adf_device_attached - return whether an overlay engine and interface are
 * attached
 *
 * @dev: the parent device
 * @eng: the overlay engine
 * @intf: the interface
 */
bool adf_device_attached(struct adf_device *dev, struct adf_overlay_engine *eng,
		struct adf_interface *intf)
{
	bool found;

	mutex_lock(&dev->client_lock);
	found = adf_attachment_find(&dev->attached, eng, intf) != NULL;
	mutex_unlock(&dev->client_lock);

	return found;
}
EXPORT_SYMBOL(adf_device_attached);
/**
 * adf_device_attach_allowed - return whether the ADF device supports attaching
 * an overlay engine and interface
 *
 * @dev: the parent device
 * @eng: the overlay engine
 * @intf: the interface
 */
bool adf_device_attach_allowed(struct adf_device *dev,
		struct adf_overlay_engine *eng, struct adf_interface *intf)
{
	bool allowed;

	mutex_lock(&dev->client_lock);
	allowed = adf_attachment_find(&dev->attach_allowed, eng, intf) != NULL;
	mutex_unlock(&dev->client_lock);

	return allowed;
}
EXPORT_SYMBOL(adf_device_attach_allowed);
/**
 * adf_device_attach - attach an overlay engine to an interface
 *
 * @dev: the parent device
 * @eng: the overlay engine
 * @intf: the interface
 *
 * Returns 0 on success, -%EINVAL if attaching @intf and @eng is not allowed,
 * -%EALREADY if @intf and @eng are already attached, or -errno on any other
 * failure.
 */
int adf_device_attach(struct adf_device *dev, struct adf_overlay_engine *eng,
		struct adf_interface *intf)
{
	int ret;
	struct adf_attachment_list *attachment = NULL;

	ret = adf_attachment_validate(dev, eng, intf);
	if (ret < 0)
		return ret;

	mutex_lock(&dev->client_lock);

	if (dev->n_attached == ADF_MAX_ATTACHMENTS) {
		ret = -ENOMEM;
		goto done;
	}

	/* The pair must be whitelisted and not yet attached. */
	if (!adf_attachment_find(&dev->attach_allowed, eng, intf)) {
		ret = -EINVAL;
		goto done;
	}

	if (adf_attachment_find(&dev->attached, eng, intf)) {
		ret = -EALREADY;
		goto done;
	}

	/* Give the driver a chance to program/refuse the attachment. */
	ret = adf_device_attach_op(dev, eng, intf);
	if (ret < 0)
		goto done;

	attachment = kzalloc(sizeof(*attachment), GFP_KERNEL);
	if (!attachment) {
		ret = -ENOMEM;
		goto done;
	}

	attachment->attachment.interface = intf;
	attachment->attachment.overlay_engine = eng;
	list_add_tail(&attachment->head, &dev->attached);
	dev->n_attached++;

done:
	mutex_unlock(&dev->client_lock);
	/* kfree(NULL) is a no-op, so this only frees on failure. */
	if (ret < 0)
		kfree(attachment);

	return ret;
}
EXPORT_SYMBOL(adf_device_attach);
/**
 * adf_device_detach - detach an overlay engine from an interface
 *
 * @dev: the parent device
 * @eng: the overlay engine
 * @intf: the interface
 *
 * Returns 0 on success, -%EINVAL if @intf and @eng are not attached,
 * or -errno on any other failure.
 */
int adf_device_detach(struct adf_device *dev, struct adf_overlay_engine *eng,
		struct adf_interface *intf)
{
	int ret;
	struct adf_attachment_list *attachment;

	ret = adf_attachment_validate(dev, eng, intf);
	if (ret < 0)
		return ret;

	mutex_lock(&dev->client_lock);

	attachment = adf_attachment_find(&dev->attached, eng, intf);
	if (!attachment) {
		ret = -EINVAL;
		goto done;
	}

	/* Let the driver veto the detach before mutating our bookkeeping. */
	ret = adf_device_detach_op(dev, eng, intf);
	if (ret < 0)
		goto done;

	adf_attachment_free(attachment);
	dev->n_attached--;

done:
	mutex_unlock(&dev->client_lock);
	return ret;
}
EXPORT_SYMBOL(adf_device_detach);
/**
 * adf_interface_simple_buffer_alloc - allocate a simple buffer
 *
 * @intf: target interface
 * @w: width in pixels
 * @h: height in pixels
 * @format: format fourcc
 * @dma_buf: returns the allocated buffer
 * @offset: returns the byte offset of the allocated buffer's first pixel
 * @pitch: returns the allocated buffer's pitch
 *
 * See &struct adf_simple_buffer_alloc for a description of simple buffers and
 * their limitations.
 *
 * Returns 0 on success or -errno on failure.
 */
int adf_interface_simple_buffer_alloc(struct adf_interface *intf, u16 w, u16 h,
		u32 format, struct dma_buf **dma_buf, u32 *offset, u32 *pitch)
{
	if (!(intf->ops && intf->ops->alloc_simple_buffer))
		return -EOPNOTSUPP;

	/* Simple buffers are restricted to RGB formats. */
	if (!adf_format_is_rgb(format))
		return -EINVAL;

	return intf->ops->alloc_simple_buffer(intf, w, h, format, dma_buf,
			offset, pitch);
}
EXPORT_SYMBOL(adf_interface_simple_buffer_alloc);
/**
 * adf_interface_simple_post - flip to a single buffer
 *
 * @intf: interface targeted by the flip
 * @buf: buffer to display
 *
 * adf_interface_simple_post() can be used generically for simple display
 * configurations, since the client does not need to provide any driver-private
 * configuration data.
 *
 * adf_interface_simple_post() has the same copying semantics as
 * adf_device_post().
 *
 * On success, returns a sync fence which signals when the buffer is removed
 * from the screen. On failure, returns ERR_PTR(-errno).
 */
struct sync_fence *adf_interface_simple_post(struct adf_interface *intf,
		struct adf_buffer *buf)
{
	size_t custom_data_size = 0;
	void *custom_data = NULL;
	struct sync_fence *ret;

	/* Optionally let the driver attach private data to the post. */
	if (intf->ops && intf->ops->describe_simple_post) {
		int err;

		custom_data = kzalloc(ADF_MAX_CUSTOM_DATA_SIZE, GFP_KERNEL);
		if (!custom_data) {
			ret = ERR_PTR(-ENOMEM);
			goto done;
		}

		err = intf->ops->describe_simple_post(intf, buf, custom_data,
				&custom_data_size);
		if (err < 0) {
			ret = ERR_PTR(err);
			goto done;
		}
	}

	/* adf_device_post() copies custom_data, so we free ours below. */
	ret = adf_device_post(adf_interface_parent(intf), &intf, 1, buf, 1,
			custom_data, custom_data_size);
done:
	kfree(custom_data);
	return ret;
}
EXPORT_SYMBOL(adf_interface_simple_post);

View file

@ -0,0 +1,665 @@
/*
* Copyright (C) 2013 Google, Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/vmalloc.h>
#include <video/adf.h>
#include <video/adf_client.h>
#include <video/adf_fbdev.h>
#include <video/adf_format.h>
#include "adf.h"
/*
 * Mapping between a DRM fourcc and the equivalent fbdev bitfield layout
 * (per-channel bit lengths and offsets within a pixel of @bpp bits).
 */
struct adf_fbdev_format {
	u32 fourcc;	/* DRM_FORMAT_* code */
	u32 bpp;	/* total bits per pixel */
	u32 r_length;	/* red channel bit length */
	u32 g_length;	/* green channel bit length */
	u32 b_length;	/* blue channel bit length */
	u32 a_length;	/* alpha channel bit length (0 = no alpha) */
	u32 r_offset;	/* red channel bit offset */
	u32 g_offset;	/* green channel bit offset */
	u32 b_offset;	/* blue channel bit offset */
	u32 a_offset;	/* alpha channel bit offset */
};
/*
 * RGB formats supported by the fbdev helper, ordered roughly by bpp.
 * Columns: fourcc, bpp, r/g/b/a lengths, then r/g/b/a offsets.
 */
static const struct adf_fbdev_format format_table[] = {
	{DRM_FORMAT_RGB332, 8, 3, 3, 2, 0, 5, 2, 0, 0},
	{DRM_FORMAT_BGR233, 8, 3, 3, 2, 0, 0, 3, 5, 0},

	{DRM_FORMAT_XRGB4444, 16, 4, 4, 4, 0, 8, 4, 0, 0},
	{DRM_FORMAT_XBGR4444, 16, 4, 4, 4, 0, 0, 4, 8, 0},
	{DRM_FORMAT_RGBX4444, 16, 4, 4, 4, 0, 12, 8, 4, 0},
	{DRM_FORMAT_BGRX4444, 16, 4, 4, 4, 0, 0, 4, 8, 0},

	{DRM_FORMAT_ARGB4444, 16, 4, 4, 4, 4, 8, 4, 0, 12},
	{DRM_FORMAT_ABGR4444, 16, 4, 4, 4, 4, 0, 4, 8, 12},
	{DRM_FORMAT_RGBA4444, 16, 4, 4, 4, 4, 12, 8, 4, 0},
	{DRM_FORMAT_BGRA4444, 16, 4, 4, 4, 4, 0, 4, 8, 0},

	{DRM_FORMAT_XRGB1555, 16, 5, 5, 5, 0, 10, 5, 0, 0},
	{DRM_FORMAT_XBGR1555, 16, 5, 5, 5, 0, 0, 5, 10, 0},
	{DRM_FORMAT_RGBX5551, 16, 5, 5, 5, 0, 11, 6, 1, 0},
	{DRM_FORMAT_BGRX5551, 16, 5, 5, 5, 0, 1, 6, 11, 0},

	{DRM_FORMAT_ARGB1555, 16, 5, 5, 5, 1, 10, 5, 0, 15},
	{DRM_FORMAT_ABGR1555, 16, 5, 5, 5, 1, 0, 5, 10, 15},
	{DRM_FORMAT_RGBA5551, 16, 5, 5, 5, 1, 11, 6, 1, 0},
	{DRM_FORMAT_BGRA5551, 16, 5, 5, 5, 1, 1, 6, 11, 0},

	{DRM_FORMAT_RGB565, 16, 5, 6, 5, 0, 11, 5, 0, 0},
	{DRM_FORMAT_BGR565, 16, 5, 6, 5, 0, 0, 5, 11, 0},

	{DRM_FORMAT_RGB888, 24, 8, 8, 8, 0, 16, 8, 0, 0},
	{DRM_FORMAT_BGR888, 24, 8, 8, 8, 0, 0, 8, 16, 0},

	{DRM_FORMAT_XRGB8888, 32, 8, 8, 8, 0, 16, 8, 0, 0},
	{DRM_FORMAT_XBGR8888, 32, 8, 8, 8, 0, 0, 8, 16, 0},
	{DRM_FORMAT_RGBX8888, 32, 8, 8, 8, 0, 24, 16, 8, 0},
	{DRM_FORMAT_BGRX8888, 32, 8, 8, 8, 0, 8, 16, 24, 0},

	{DRM_FORMAT_ARGB8888, 32, 8, 8, 8, 8, 16, 8, 0, 24},
	{DRM_FORMAT_ABGR8888, 32, 8, 8, 8, 8, 0, 8, 16, 24},
	{DRM_FORMAT_RGBA8888, 32, 8, 8, 8, 8, 24, 16, 8, 0},
	{DRM_FORMAT_BGRA8888, 32, 8, 8, 8, 8, 8, 16, 24, 0},

	{DRM_FORMAT_XRGB2101010, 32, 10, 10, 10, 0, 20, 10, 0, 0},
	{DRM_FORMAT_XBGR2101010, 32, 10, 10, 10, 0, 0, 10, 20, 0},
	{DRM_FORMAT_RGBX1010102, 32, 10, 10, 10, 0, 22, 12, 2, 0},
	{DRM_FORMAT_BGRX1010102, 32, 10, 10, 10, 0, 2, 12, 22, 0},

	{DRM_FORMAT_ARGB2101010, 32, 10, 10, 10, 2, 20, 10, 0, 30},
	{DRM_FORMAT_ABGR2101010, 32, 10, 10, 10, 2, 0, 10, 20, 30},
	{DRM_FORMAT_RGBA1010102, 32, 10, 10, 10, 2, 22, 12, 2, 0},
	{DRM_FORMAT_BGRA1010102, 32, 10, 10, 10, 2, 2, 12, 22, 0},
};
static u32 drm_fourcc_from_fb_var(struct fb_var_screeninfo *var)
{
size_t i;
for (i = 0; i < ARRAY_SIZE(format_table); i++) {
const struct adf_fbdev_format *f = &format_table[i];
if (var->red.length == f->r_length &&
var->red.offset == f->r_offset &&
var->green.length == f->g_length &&
var->green.offset == f->g_offset &&
var->blue.length == f->b_length &&
var->blue.offset == f->b_offset &&
var->transp.length == f->a_length &&
(var->transp.length == 0 ||
var->transp.offset == f->a_offset))
return f->fourcc;
}
return 0;
}
/*
 * Look up the table entry for @format. Callers only pass formats that
 * already passed validation against the table, so a miss is a bug.
 */
static const struct adf_fbdev_format *fbdev_format_info(u32 format)
{
	size_t idx;

	for (idx = 0; idx < ARRAY_SIZE(format_table); idx++)
		if (format_table[idx].fourcc == format)
			return &format_table[idx];

	BUG();
}
void adf_modeinfo_to_fb_videomode(const struct drm_mode_modeinfo *mode,
struct fb_videomode *vmode)
{
memset(vmode, 0, sizeof(*vmode));
vmode->refresh = mode->vrefresh;
vmode->xres = mode->hdisplay;
vmode->yres = mode->vdisplay;
vmode->pixclock = mode->clock ? KHZ2PICOS(mode->clock) : 0;
vmode->left_margin = mode->htotal - mode->hsync_end;
vmode->right_margin = mode->hsync_start - mode->hdisplay;
vmode->upper_margin = mode->vtotal - mode->vsync_end;
vmode->lower_margin = mode->vsync_start - mode->vdisplay;
vmode->hsync_len = mode->hsync_end - mode->hsync_start;
vmode->vsync_len = mode->vsync_end - mode->vsync_start;
vmode->sync = 0;
if (mode->flags & DRM_MODE_FLAG_PHSYNC)
vmode->sync |= FB_SYNC_HOR_HIGH_ACT;
if (mode->flags & DRM_MODE_FLAG_PVSYNC)
vmode->sync |= FB_SYNC_VERT_HIGH_ACT;
if (mode->flags & DRM_MODE_FLAG_PCSYNC)
vmode->sync |= FB_SYNC_COMP_HIGH_ACT;
if (mode->flags & DRM_MODE_FLAG_BCAST)
vmode->sync |= FB_SYNC_BROADCAST;
vmode->vmode = 0;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
vmode->vmode |= FB_VMODE_INTERLACED;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
vmode->vmode |= FB_VMODE_DOUBLE;
}
EXPORT_SYMBOL(adf_modeinfo_to_fb_videomode);
/* Translate an fbdev videomode into the equivalent DRM-style modeinfo. */
void adf_modeinfo_from_fb_videomode(const struct fb_videomode *vmode,
		struct drm_mode_modeinfo *mode)
{
	memset(mode, 0, sizeof(*mode));

	/* Rebuild the absolute horizontal timings from margins/lengths. */
	mode->hdisplay = vmode->xres;
	mode->hsync_start = mode->hdisplay + vmode->right_margin;
	mode->hsync_end = mode->hsync_start + vmode->hsync_len;
	mode->htotal = mode->hsync_end + vmode->left_margin;

	/* Likewise for the vertical timings. */
	mode->vdisplay = vmode->yres;
	mode->vsync_start = mode->vdisplay + vmode->lower_margin;
	mode->vsync_end = mode->vsync_start + vmode->vsync_len;
	mode->vtotal = mode->vsync_end + vmode->upper_margin;

	/* DRM uses a kHz pixel clock; fbdev uses picoseconds. */
	mode->clock = vmode->pixclock ? PICOS2KHZ(vmode->pixclock) : 0;

	mode->flags = (vmode->sync & FB_SYNC_HOR_HIGH_ACT ? DRM_MODE_FLAG_PHSYNC : 0) |
			(vmode->sync & FB_SYNC_VERT_HIGH_ACT ? DRM_MODE_FLAG_PVSYNC : 0) |
			(vmode->sync & FB_SYNC_COMP_HIGH_ACT ? DRM_MODE_FLAG_PCSYNC : 0) |
			(vmode->sync & FB_SYNC_BROADCAST ? DRM_MODE_FLAG_BCAST : 0) |
			(vmode->vmode & FB_VMODE_INTERLACED ? DRM_MODE_FLAG_INTERLACE : 0) |
			(vmode->vmode & FB_VMODE_DOUBLE ? DRM_MODE_FLAG_DBLSCAN : 0);

	/* Derive refresh/name when the fbdev mode doesn't supply them. */
	if (vmode->refresh)
		mode->vrefresh = vmode->refresh;
	else
		adf_modeinfo_set_vrefresh(mode);

	if (vmode->name)
		strlcpy(mode->name, vmode->name, sizeof(mode->name));
	else
		adf_modeinfo_set_name(mode);
}
EXPORT_SYMBOL(adf_modeinfo_from_fb_videomode);
/*
 * Flip the fbdev framebuffer onto the display: build a single-plane
 * adf_buffer describing the current pan position and submit it via
 * adf_interface_simple_post(). The completion fence is dropped
 * immediately — fbdev has no way to hand it to the client.
 */
static int adf_fbdev_post(struct adf_fbdev *fbdev)
{
	struct adf_buffer buf;
	struct sync_fence *complete_fence;
	int ret = 0;

	memset(&buf, 0, sizeof(buf));
	buf.overlay_engine = fbdev->eng;
	buf.w = fbdev->info->var.xres;
	buf.h = fbdev->info->var.yres;
	buf.format = fbdev->format;
	buf.dma_bufs[0] = fbdev->dma_buf;
	/* Account for the pan offset (xoffset/yoffset) within the buffer. */
	buf.offset[0] = fbdev->offset +
			fbdev->info->var.yoffset * fbdev->pitch +
			fbdev->info->var.xoffset *
			(fbdev->info->var.bits_per_pixel / 8);
	buf.pitch[0] = fbdev->pitch;
	buf.n_planes = 1;

	complete_fence = adf_interface_simple_post(fbdev->intf, &buf);
	if (IS_ERR(complete_fence)) {
		ret = PTR_ERR(complete_fence);
		goto done;
	}

	sync_fence_put(complete_fence);
done:
	return ret;
}
/* Standard 16-color VGA palette, 16 bits per channel (R, G, B). */
static const u16 vga_palette[][3] = {
	{0x0000, 0x0000, 0x0000},	/* black */
	{0x0000, 0x0000, 0xAAAA},	/* blue */
	{0x0000, 0xAAAA, 0x0000},	/* green */
	{0x0000, 0xAAAA, 0xAAAA},	/* cyan */
	{0xAAAA, 0x0000, 0x0000},	/* red */
	{0xAAAA, 0x0000, 0xAAAA},	/* magenta */
	{0xAAAA, 0x5555, 0x0000},	/* brown */
	{0xAAAA, 0xAAAA, 0xAAAA},	/* light gray */
	{0x5555, 0x5555, 0x5555},	/* dark gray */
	{0x5555, 0x5555, 0xFFFF},	/* bright blue */
	{0x5555, 0xFFFF, 0x5555},	/* bright green */
	{0x5555, 0xFFFF, 0xFFFF},	/* bright cyan */
	{0xFFFF, 0x5555, 0x5555},	/* bright red */
	{0xFFFF, 0x5555, 0xFFFF},	/* bright magenta */
	{0xFFFF, 0xFFFF, 0x5555},	/* yellow */
	{0xFFFF, 0xFFFF, 0xFFFF},	/* white */
};
/*
 * Allocate the framebuffer dma-buf through the interface's simple-buffer
 * hook, kernel-map it, and record the geometry in the fb_info. Paired
 * with adf_fb_destroy().
 */
static int adf_fb_alloc(struct adf_fbdev *fbdev)
{
	int ret;

	ret = adf_interface_simple_buffer_alloc(fbdev->intf,
			fbdev->default_xres_virtual,
			fbdev->default_yres_virtual,
			fbdev->default_format,
			&fbdev->dma_buf, &fbdev->offset, &fbdev->pitch);
	if (ret < 0) {
		dev_err(fbdev->info->dev, "allocating fb failed: %d\n", ret);
		return ret;
	}

	/* CPU mapping used by the fbdev read/write/mmap paths. */
	fbdev->vaddr = dma_buf_vmap(fbdev->dma_buf);
	if (!fbdev->vaddr) {
		ret = -ENOMEM;
		dev_err(fbdev->info->dev, "vmapping fb failed\n");
		goto err_vmap;
	}

	fbdev->info->fix.line_length = fbdev->pitch;
	fbdev->info->var.xres_virtual = fbdev->default_xres_virtual;
	fbdev->info->var.yres_virtual = fbdev->default_yres_virtual;
	fbdev->info->fix.smem_len = fbdev->dma_buf->size;
	fbdev->info->screen_base = fbdev->vaddr;

	return 0;

err_vmap:
	dma_buf_put(fbdev->dma_buf);
	return ret;
}
/* Undo adf_fb_alloc(): unmap and release the framebuffer dma-buf. */
static void adf_fb_destroy(struct adf_fbdev *fbdev)
{
	dma_buf_vunmap(fbdev->dma_buf, fbdev->vaddr);
	dma_buf_put(fbdev->dma_buf);
}
/*
 * Switch the fbdev emulation to @format: rebuild the 16-entry pseudo
 * palette from the VGA colors and fill in the var bitfield description.
 * @format must be present in format_table (fbdev_format_info() BUGs
 * otherwise).
 */
static void adf_fbdev_set_format(struct adf_fbdev *fbdev, u32 format)
{
	size_t i;
	const struct adf_fbdev_format *info = fbdev_format_info(format);

	for (i = 0; i < ARRAY_SIZE(vga_palette); i++) {
		u16 r = vga_palette[i][0];
		u16 g = vga_palette[i][1];
		u16 b = vga_palette[i][2];

		/* Truncate the 16-bit channels to the format's bit widths. */
		r >>= (16 - info->r_length);
		g >>= (16 - info->g_length);
		b >>= (16 - info->b_length);

		fbdev->pseudo_palette[i] =
				(r << info->r_offset) |
				(g << info->g_offset) |
				(b << info->b_offset);

		if (info->a_length) {
			/* Palette entries are fully opaque. */
			u16 a = BIT(info->a_length) - 1;

			fbdev->pseudo_palette[i] |= (a << info->a_offset);
		}
	}

	fbdev->info->var.bits_per_pixel = adf_format_bpp(format);
	fbdev->info->var.red.length = info->r_length;
	fbdev->info->var.red.offset = info->r_offset;
	fbdev->info->var.green.length = info->g_length;
	fbdev->info->var.green.offset = info->g_offset;
	fbdev->info->var.blue.length = info->b_length;
	fbdev->info->var.blue.offset = info->b_offset;
	fbdev->info->var.transp.length = info->a_length;
	fbdev->info->var.transp.offset = info->a_offset;
	fbdev->format = format;
}
/*
 * Rebuild the fb_info modelist from the interface's current ADF
 * modelist. On allocation failure the existing modelist is kept.
 */
static void adf_fbdev_fill_modelist(struct adf_fbdev *fbdev)
{
	struct drm_mode_modeinfo *modelist;
	struct fb_videomode fbmode;
	size_t n_modes, i;
	int ret = 0;

	n_modes = adf_interface_modelist(fbdev->intf, NULL, 0);
	/* kcalloc checks the n_modes * sizeof() product for overflow,
	 * unlike the open-coded kzalloc(a * b) form */
	modelist = kcalloc(n_modes, sizeof(modelist[0]), GFP_KERNEL);
	if (!modelist) {
		dev_warn(fbdev->info->dev, "allocating new modelist failed; keeping old modelist\n");
		return;
	}
	adf_interface_modelist(fbdev->intf, modelist, n_modes);

	fb_destroy_modelist(&fbdev->info->modelist);
	for (i = 0; i < n_modes; i++) {
		adf_modeinfo_to_fb_videomode(&modelist[i], &fbmode);
		ret = fb_add_videomode(&fbmode, &fbdev->info->modelist);
		if (ret < 0)
			dev_warn(fbdev->info->dev, "adding mode %s to modelist failed: %d\n",
					modelist[i].name, ret);
	}

	kfree(modelist);
}
/**
* adf_fbdev_open - default implementation of fbdev open op
*/
int adf_fbdev_open(struct fb_info *info, int user)
{
	struct adf_fbdev *fbdev = info->par;
	int ret;
	mutex_lock(&fbdev->refcount_lock);
	/* guard against refcount wrap on a pathological number of opens */
	if (unlikely(fbdev->refcount == UINT_MAX)) {
		ret = -EMFILE;
		goto done;
	}
	/* first opener: attach the pipe, allocate the framebuffer, and seed
	   var/modelist from the interface's current mode */
	if (!fbdev->refcount) {
		struct drm_mode_modeinfo mode;
		struct fb_videomode fbmode;
		struct adf_device *dev = adf_interface_parent(fbdev->intf);
		ret = adf_device_attach(dev, fbdev->eng, fbdev->intf);
		if (ret < 0 && ret != -EALREADY)
			goto done;
		ret = adf_fb_alloc(fbdev);
		if (ret < 0)
			goto done;
		adf_interface_current_mode(fbdev->intf, &mode);
		adf_modeinfo_to_fb_videomode(&mode, &fbmode);
		fb_videomode_to_var(&fbdev->info->var, &fbmode);
		adf_fbdev_set_format(fbdev, fbdev->default_format);
		adf_fbdev_fill_modelist(fbdev);
	}
	/* display the buffer; tear the allocation back down if the very
	   first post fails */
	ret = adf_fbdev_post(fbdev);
	if (ret < 0) {
		if (!fbdev->refcount)
			adf_fb_destroy(fbdev);
		goto done;
	}
	fbdev->refcount++;
done:
	mutex_unlock(&fbdev->refcount_lock);
	return ret;
}
EXPORT_SYMBOL(adf_fbdev_open);
/**
* adf_fbdev_release - default implementation of fbdev release op
*/
int adf_fbdev_release(struct fb_info *info, int user)
{
struct adf_fbdev *fbdev = info->par;
mutex_lock(&fbdev->refcount_lock);
BUG_ON(!fbdev->refcount);
fbdev->refcount--;
if (!fbdev->refcount)
adf_fb_destroy(fbdev);
mutex_unlock(&fbdev->refcount_lock);
return 0;
}
EXPORT_SYMBOL(adf_fbdev_release);
/**
* adf_fbdev_check_var - default implementation of fbdev check_var op
*/
int adf_fbdev_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
{
	struct adf_fbdev *fbdev = info->par;
	bool valid_format = true;
	u32 format = drm_fourcc_from_fb_var(var);
	u32 pitch = var->xres_virtual * var->bits_per_pixel / 8;
	/* reject formats that don't map to a fourcc at all */
	if (!format) {
		dev_dbg(info->dev, "%s: unrecognized format\n", __func__);
		valid_format = false;
	}
	if (valid_format && var->grayscale) {
		dev_dbg(info->dev, "%s: grayscale modes not supported\n",
				__func__);
		valid_format = false;
	}
	if (valid_format && var->nonstd) {
		dev_dbg(info->dev, "%s: nonstandard formats not supported\n",
				__func__);
		valid_format = false;
	}
	/* the overlay engine must be able to scan out this format */
	if (valid_format && !adf_overlay_engine_supports_format(fbdev->eng,
			format)) {
		char format_str[ADF_FORMAT_STR_SIZE];
		adf_format_str(format, format_str);
		dev_dbg(info->dev, "%s: format %s not supported by overlay engine %s\n",
				__func__, format_str, fbdev->eng->base.name);
		valid_format = false;
	}
	/* the fixed allocation must be big enough for the requested var */
	if (valid_format && pitch > fbdev->pitch) {
		dev_dbg(info->dev, "%s: fb pitch too small for var (pitch = %u, xres_virtual = %u, bits_per_pixel = %u)\n",
				__func__, fbdev->pitch, var->xres_virtual,
				var->bits_per_pixel);
		valid_format = false;
	}
	if (valid_format && var->yres_virtual > fbdev->default_yres_virtual) {
		dev_dbg(info->dev, "%s: fb height too small for var (h = %u, yres_virtual = %u)\n",
				__func__, fbdev->default_yres_virtual,
				var->yres_virtual);
		valid_format = false;
	}
	if (valid_format) {
		var->activate = info->var.activate;
		var->height = info->var.height;
		var->width = info->var.width;
		var->accel_flags = info->var.accel_flags;
		var->rotate = info->var.rotate;
		var->colorspace = info->var.colorspace;
		/* userspace can't change these */
	} else {
		/* if any part of the format is invalid then fixing it up is
		   impractical, so save just the modesetting bits and
		   overwrite everything else */
		struct fb_videomode mode;
		fb_var_to_videomode(&mode, var);
		memcpy(var, &info->var, sizeof(*var));
		fb_videomode_to_var(var, &mode);
	}
	return 0;
}
EXPORT_SYMBOL(adf_fbdev_check_var);
/**
* adf_fbdev_set_par - default implementation of fbdev set_par op
*/
int adf_fbdev_set_par(struct fb_info *info)
{
	struct adf_fbdev *fbdev = info->par;
	struct adf_interface *intf = fbdev->intf;
	struct fb_videomode vmode;
	struct drm_mode_modeinfo mode;
	int ret;
	u32 format = drm_fourcc_from_fb_var(&info->var);
	/* translate the (already-validated) var into an ADF modeset */
	fb_var_to_videomode(&vmode, &info->var);
	adf_modeinfo_from_fb_videomode(&vmode, &mode);
	ret = adf_interface_set_mode(intf, &mode);
	if (ret < 0)
		return ret;
	/* repost the buffer so the new mode takes effect on screen */
	ret = adf_fbdev_post(fbdev);
	if (ret < 0)
		return ret;
	/* refresh palette/bitfield bookkeeping only on a format change */
	if (format != fbdev->format)
		adf_fbdev_set_format(fbdev, format);
	return 0;
}
EXPORT_SYMBOL(adf_fbdev_set_par);
/**
* adf_fbdev_blank - default implementation of fbdev blank op
*/
int adf_fbdev_blank(int blank, struct fb_info *info)
{
	struct adf_fbdev *fbdev = info->par;
	u8 state;

	/* Map the fbdev blank level onto the nearest DRM DPMS state. */
	if (blank == FB_BLANK_UNBLANK)
		state = DRM_MODE_DPMS_ON;
	else if (blank == FB_BLANK_NORMAL || blank == FB_BLANK_HSYNC_SUSPEND)
		state = DRM_MODE_DPMS_STANDBY;
	else if (blank == FB_BLANK_VSYNC_SUSPEND)
		state = DRM_MODE_DPMS_SUSPEND;
	else if (blank == FB_BLANK_POWERDOWN)
		state = DRM_MODE_DPMS_OFF;
	else
		return -EINVAL;

	return adf_interface_blank(fbdev->intf, state);
}
EXPORT_SYMBOL(adf_fbdev_blank);
/**
* adf_fbdev_pan_display - default implementation of fbdev pan_display op
*/
int adf_fbdev_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
{
	struct adf_fbdev *helper = info->par;

	/* Panning is applied by reposting the current buffer. */
	return adf_fbdev_post(helper);
}
EXPORT_SYMBOL(adf_fbdev_pan_display);
/**
* adf_fbdev_mmap - default implementation of fbdev mmap op
*/
int adf_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	struct adf_fbdev *helper = info->par;

	/* Hand the whole backing dma-buf to userspace, uncached. */
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return dma_buf_mmap(helper->dma_buf, vma, 0);
}
EXPORT_SYMBOL(adf_fbdev_mmap);
/**
* adf_fbdev_init - initialize helper to wrap ADF device in fbdev API
*
* @fbdev: the fbdev helper
* @interface: the ADF interface that will display the framebuffer
* @eng: the ADF overlay engine that will scan out the framebuffer
* @xres_virtual: the virtual width of the framebuffer
* @yres_virtual: the virtual height of the framebuffer
* @format: the format of the framebuffer
* @fbops: the device's fbdev ops
* @fmt: formatting for the framebuffer identification string
* @...: variable arguments
*
* @format must be a standard, non-indexed RGB format, i.e.,
* adf_format_is_rgb(@format) && @format != @DRM_FORMAT_C8.
*
* Returns 0 on success or -errno on failure.
*/
int adf_fbdev_init(struct adf_fbdev *fbdev, struct adf_interface *interface,
		struct adf_overlay_engine *eng,
		u16 xres_virtual, u16 yres_virtual, u32 format,
		struct fb_ops *fbops, const char *fmt, ...)
{
	struct adf_device *parent = adf_interface_parent(interface);
	struct device *dev = &parent->base.dev;
	u16 width_mm, height_mm;
	va_list args;
	int ret;

	/* only non-indexed RGB formats map cleanly onto fb bitfields */
	if (!adf_format_is_rgb(format) ||
			format == DRM_FORMAT_C8) {
		dev_err(dev, "fbdev helper does not support format %u\n",
				format);
		return -EINVAL;
	}

	memset(fbdev, 0, sizeof(*fbdev));
	fbdev->intf = interface;
	fbdev->eng = eng;
	fbdev->info = framebuffer_alloc(0, dev);
	if (!fbdev->info) {
		dev_err(dev, "allocating framebuffer device failed\n");
		return -ENOMEM;
	}
	mutex_init(&fbdev->refcount_lock);
	fbdev->default_xres_virtual = xres_virtual;
	fbdev->default_yres_virtual = yres_virtual;
	fbdev->default_format = format;

	fbdev->info->flags = FBINFO_FLAG_DEFAULT;
	/* physical size is advisory; fall back to 0x0 if unavailable */
	ret = adf_interface_get_screen_size(interface, &width_mm, &height_mm);
	if (ret < 0) {
		width_mm = 0;
		height_mm = 0;
	}
	fbdev->info->var.width = width_mm;
	fbdev->info->var.height = height_mm;
	fbdev->info->var.activate = FB_ACTIVATE_VBL;
	va_start(args, fmt);
	vsnprintf(fbdev->info->fix.id, sizeof(fbdev->info->fix.id), fmt, args);
	va_end(args);
	fbdev->info->fix.type = FB_TYPE_PACKED_PIXELS;
	fbdev->info->fix.visual = FB_VISUAL_TRUECOLOR;
	fbdev->info->fix.xpanstep = 1;
	fbdev->info->fix.ypanstep = 1;
	INIT_LIST_HEAD(&fbdev->info->modelist);
	fbdev->info->fbops = fbops;
	fbdev->info->pseudo_palette = fbdev->pseudo_palette;
	fbdev->info->par = fbdev;

	ret = register_framebuffer(fbdev->info);
	if (ret < 0) {
		dev_err(dev, "registering framebuffer failed: %d\n", ret);
		/* don't leak the fb_info (and its mutex) on failure */
		mutex_destroy(&fbdev->refcount_lock);
		framebuffer_release(fbdev->info);
		fbdev->info = NULL;
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL(adf_fbdev_init);
/**
* adf_fbdev_destroy - destroy helper to wrap ADF device in fbdev API
*
* @fbdev: the fbdev helper
*/
void adf_fbdev_destroy(struct adf_fbdev *fbdev)
{
	unregister_framebuffer(fbdev->info);
	/* every adf_fbdev_open() must have been balanced by a release */
	BUG_ON(fbdev->refcount);
	mutex_destroy(&fbdev->refcount_lock);
	framebuffer_release(fbdev->info);
}
EXPORT_SYMBOL(adf_fbdev_destroy);

View file

@ -0,0 +1,957 @@
/*
* Copyright (C) 2013 Google, Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/bitops.h>
#include <linux/circ_buf.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <video/adf_client.h>
#include <video/adf_format.h>
#include "sw_sync.h"
#include "sync.h"
#include "adf.h"
#include "adf_fops.h"
#include "adf_sysfs.h"
#ifdef CONFIG_COMPAT
#include "adf_fops32.h"
#endif
/*
 * ADF_SET_EVENT handler: subscribe or unsubscribe @file from events of
 * the requested type on @obj, keeping the object-wide event refcount in
 * step so drivers can en/disable delivery on first/last listener.
 * Returns -EALREADY if the subscription is already in the desired state.
 */
static int adf_obj_set_event(struct adf_obj *obj, struct adf_file *file,
		struct adf_set_event __user *arg)
{
	struct adf_set_event data;
	bool enabled;
	unsigned long flags;
	int err;
	if (copy_from_user(&data, arg, sizeof(data)))
		return -EFAULT;
	err = adf_obj_check_supports_event(obj, data.type);
	if (err < 0)
		return err;
	/* flip the per-file subscription bit under the file lock */
	spin_lock_irqsave(&obj->file_lock, flags);
	if (data.enabled)
		enabled = test_and_set_bit(data.type,
				file->event_subscriptions);
	else
		enabled = test_and_clear_bit(data.type,
				file->event_subscriptions);
	spin_unlock_irqrestore(&obj->file_lock, flags);
	if (data.enabled == enabled)
		return -EALREADY;
	/* adjust the object-wide refcount outside the spinlock */
	if (data.enabled)
		adf_event_get(obj, data.type);
	else
		adf_event_put(obj, data.type);
	return 0;
}
/*
 * Fetch up to ADF_MAX_CUSTOM_DATA_SIZE bytes of driver-private data via
 * the object's custom_data op and copy it to @dst. On entry *dst_size
 * is the userspace buffer capacity; on success it is set to the size
 * the driver reported. Succeeds trivially if there is no custom_data op.
 */
static int adf_obj_copy_custom_data_to_user(struct adf_obj *obj,
		void __user *dst, size_t *dst_size)
{
	void *custom_data;
	size_t custom_data_size;
	int ret;
	if (!obj->ops || !obj->ops->custom_data) {
		dev_dbg(&obj->dev, "%s: no custom_data op\n", __func__);
		return 0;
	}
	custom_data = kzalloc(ADF_MAX_CUSTOM_DATA_SIZE, GFP_KERNEL);
	if (!custom_data)
		return -ENOMEM;
	ret = obj->ops->custom_data(obj, custom_data, &custom_data_size);
	if (ret < 0)
		goto done;
	/* never copy more than the userspace buffer can hold */
	if (copy_to_user(dst, custom_data, min(*dst_size, custom_data_size))) {
		ret = -EFAULT;
		goto done;
	}
	*dst_size = custom_data_size;
done:
	kfree(custom_data);
	return ret;
}
/*
 * ADF_GET_OVERLAY_ENGINE_DATA handler: report the engine's name, its
 * supported fourcc formats (truncated to the caller's buffer), and any
 * driver-private custom data.
 */
static int adf_eng_get_data(struct adf_overlay_engine *eng,
		struct adf_overlay_engine_data __user *arg)
{
	struct adf_device *dev = adf_overlay_engine_parent(eng);
	struct adf_overlay_engine_data data;
	size_t n_supported_formats;
	u32 *supported_formats = NULL;
	int ret = 0;
	if (copy_from_user(&data, arg, sizeof(data)))
		return -EFAULT;
	strlcpy(data.name, eng->base.name, sizeof(data.name));
	if (data.n_supported_formats > ADF_MAX_SUPPORTED_FORMATS)
		return -EINVAL;
	/* remember the caller's capacity, then report the true count */
	n_supported_formats = data.n_supported_formats;
	data.n_supported_formats = eng->ops->n_supported_formats;
	if (n_supported_formats) {
		supported_formats = kzalloc(n_supported_formats *
				sizeof(supported_formats[0]), GFP_KERNEL);
		if (!supported_formats)
			return -ENOMEM;
	}
	/* copy no more formats than both sides can handle */
	memcpy(supported_formats, eng->ops->supported_formats,
			sizeof(u32) * min(n_supported_formats,
					eng->ops->n_supported_formats));
	mutex_lock(&dev->client_lock);
	ret = adf_obj_copy_custom_data_to_user(&eng->base, arg->custom_data,
			&data.custom_data_size);
	mutex_unlock(&dev->client_lock);
	if (ret < 0)
		goto done;
	if (copy_to_user(arg, &data, sizeof(data))) {
		ret = -EFAULT;
		goto done;
	}
	if (supported_formats && copy_to_user(arg->supported_formats,
			supported_formats,
			n_supported_formats * sizeof(supported_formats[0])))
		ret = -EFAULT;
done:
	kfree(supported_formats);
	return ret;
}
/*
 * Translate a userspace adf_buffer_config into a kernel adf_buffer,
 * taking a reference on each plane's dma-buf and on the optional
 * acquire fence. On failure every reference taken so far is dropped
 * via adf_buffer_cleanup().
 */
static int adf_buffer_import(struct adf_device *dev,
		struct adf_buffer_config __user *cfg, struct adf_buffer *buf)
{
	struct adf_buffer_config user_buf;
	size_t i;
	int ret = 0;
	if (copy_from_user(&user_buf, cfg, sizeof(user_buf)))
		return -EFAULT;
	memset(buf, 0, sizeof(*buf));
	if (user_buf.n_planes > ADF_MAX_PLANES) {
		dev_err(&dev->base.dev, "invalid plane count %u\n",
				user_buf.n_planes);
		return -EINVAL;
	}
	buf->overlay_engine = idr_find(&dev->overlay_engines,
			user_buf.overlay_engine);
	if (!buf->overlay_engine) {
		dev_err(&dev->base.dev, "invalid overlay engine id %u\n",
				user_buf.overlay_engine);
		return -ENOENT;
	}
	buf->w = user_buf.w;
	buf->h = user_buf.h;
	buf->format = user_buf.format;
	for (i = 0; i < user_buf.n_planes; i++) {
		buf->dma_bufs[i] = dma_buf_get(user_buf.fd[i]);
		if (IS_ERR(buf->dma_bufs[i])) {
			ret = PTR_ERR(buf->dma_bufs[i]);
			dev_err(&dev->base.dev, "importing dma_buf fd %d failed: %d\n",
					user_buf.fd[i], ret);
			/* clear so cleanup skips this slot */
			buf->dma_bufs[i] = NULL;
			goto done;
		}
		buf->offset[i] = user_buf.offset[i];
		buf->pitch[i] = user_buf.pitch[i];
	}
	buf->n_planes = user_buf.n_planes;
	/* a negative fd means the buffer is ready immediately */
	if (user_buf.acquire_fence >= 0) {
		buf->acquire_fence = sync_fence_fdget(user_buf.acquire_fence);
		if (!buf->acquire_fence) {
			dev_err(&dev->base.dev, "getting fence fd %d failed\n",
					user_buf.acquire_fence);
			ret = -EINVAL;
			goto done;
		}
	}
done:
	if (ret < 0)
		adf_buffer_cleanup(buf);
	return ret;
}
/*
 * ADF_POST_CONFIG handler: validate and import a full flip request
 * (interfaces, buffers, driver-private data) from userspace, post it to
 * the device, and hand a completion sync-fence fd back to the caller.
 *
 * Returns 0 on success or a negative errno; on failure the reserved fd
 * and all imported buffer references are released.
 */
static int adf_device_post_config(struct adf_device *dev,
		struct adf_post_config __user *arg)
{
	struct sync_fence *complete_fence;
	int complete_fence_fd;
	struct adf_buffer *bufs = NULL;
	struct adf_interface **intfs = NULL;
	size_t n_intfs, n_bufs, i;
	void *custom_data = NULL;
	size_t custom_data_size;
	int ret = 0;
	/* reserve the completion fence's fd before doing any real work */
	complete_fence_fd = get_unused_fd();
	if (complete_fence_fd < 0)
		return complete_fence_fd;
	/* bounds-check all userspace-supplied counts before allocating */
	if (get_user(n_intfs, &arg->n_interfaces)) {
		ret = -EFAULT;
		goto err_get_user;
	}
	if (n_intfs > ADF_MAX_INTERFACES) {
		ret = -EINVAL;
		goto err_get_user;
	}
	if (get_user(n_bufs, &arg->n_bufs)) {
		ret = -EFAULT;
		goto err_get_user;
	}
	if (n_bufs > ADF_MAX_BUFFERS) {
		ret = -EINVAL;
		goto err_get_user;
	}
	if (get_user(custom_data_size, &arg->custom_data_size)) {
		ret = -EFAULT;
		goto err_get_user;
	}
	if (custom_data_size > ADF_MAX_CUSTOM_DATA_SIZE) {
		ret = -EINVAL;
		goto err_get_user;
	}
	/* resolve interface ids into adf_interface pointers */
	if (n_intfs) {
		intfs = kmalloc(sizeof(intfs[0]) * n_intfs, GFP_KERNEL);
		if (!intfs) {
			ret = -ENOMEM;
			goto err_get_user;
		}
	}
	for (i = 0; i < n_intfs; i++) {
		u32 intf_id;
		if (get_user(intf_id, &arg->interfaces[i])) {
			ret = -EFAULT;
			goto err_get_user;
		}
		intfs[i] = idr_find(&dev->interfaces, intf_id);
		if (!intfs[i]) {
			ret = -EINVAL;
			goto err_get_user;
		}
	}
	/* import each buffer; kzalloc keeps unfilled slots safe to clean */
	if (n_bufs) {
		bufs = kzalloc(sizeof(bufs[0]) * n_bufs, GFP_KERNEL);
		if (!bufs) {
			ret = -ENOMEM;
			goto err_get_user;
		}
	}
	for (i = 0; i < n_bufs; i++) {
		ret = adf_buffer_import(dev, &arg->bufs[i], &bufs[i]);
		if (ret < 0) {
			memset(&bufs[i], 0, sizeof(bufs[i]));
			goto err_import;
		}
	}
	if (custom_data_size) {
		custom_data = kzalloc(custom_data_size, GFP_KERNEL);
		if (!custom_data) {
			ret = -ENOMEM;
			goto err_import;
		}
		if (copy_from_user(custom_data, arg->custom_data,
				custom_data_size)) {
			ret = -EFAULT;
			goto err_import;
		}
	}
	/* publish the fd before posting; after a successful post the
	   _nocopy call owns intfs/bufs/custom_data */
	if (put_user(complete_fence_fd, &arg->complete_fence)) {
		ret = -EFAULT;
		goto err_import;
	}
	complete_fence = adf_device_post_nocopy(dev, intfs, n_intfs, bufs,
			n_bufs, custom_data, custom_data_size);
	if (IS_ERR(complete_fence)) {
		ret = PTR_ERR(complete_fence);
		goto err_import;
	}
	sync_fence_install(complete_fence, complete_fence_fd);
	return 0;
err_import:
	for (i = 0; i < n_bufs; i++)
		adf_buffer_cleanup(&bufs[i]);
err_get_user:
	kfree(custom_data);
	kfree(bufs);
	kfree(intfs);
	put_unused_fd(complete_fence_fd);
	return ret;
}
/*
 * ADF_SIMPLE_POST_CONFIG handler: import a single buffer, post it to
 * @intf, and return a completion sync-fence fd to userspace. On failure
 * the reserved fd and the imported buffer reference are released.
 */
static int adf_intf_simple_post_config(struct adf_interface *intf,
		struct adf_simple_post_config __user *arg)
{
	struct adf_device *dev = intf->base.parent;
	struct sync_fence *complete_fence;
	int complete_fence_fd;
	struct adf_buffer buf;
	int ret = 0;
	/* reserve the fence fd up front so put_user can't fail late */
	complete_fence_fd = get_unused_fd();
	if (complete_fence_fd < 0)
		return complete_fence_fd;
	ret = adf_buffer_import(dev, &arg->buf, &buf);
	if (ret < 0)
		goto err_import;
	if (put_user(complete_fence_fd, &arg->complete_fence)) {
		ret = -EFAULT;
		goto err_put_user;
	}
	complete_fence = adf_interface_simple_post(intf, &buf);
	if (IS_ERR(complete_fence)) {
		ret = PTR_ERR(complete_fence);
		goto err_put_user;
	}
	sync_fence_install(complete_fence, complete_fence_fd);
	return 0;
err_put_user:
	adf_buffer_cleanup(&buf);
err_import:
	put_unused_fd(complete_fence_fd);
	return ret;
}
/*
 * ADF_SIMPLE_BUFFER_ALLOC handler: allocate a display buffer of the
 * requested size/format through the interface and return it to
 * userspace as a dma-buf fd plus its offset and pitch.
 */
static int adf_intf_simple_buffer_alloc(struct adf_interface *intf,
		struct adf_simple_buffer_alloc __user *arg)
{
	struct adf_simple_buffer_alloc data;
	struct dma_buf *dma_buf;
	int ret = 0;
	if (copy_from_user(&data, arg, sizeof(data)))
		return -EFAULT;
	/* reserve the fd first; install only after everything succeeds */
	data.fd = get_unused_fd_flags(O_CLOEXEC);
	if (data.fd < 0)
		return data.fd;
	ret = adf_interface_simple_buffer_alloc(intf, data.w, data.h,
			data.format, &dma_buf, &data.offset, &data.pitch);
	if (ret < 0)
		goto err_alloc;
	if (copy_to_user(arg, &data, sizeof(*arg))) {
		ret = -EFAULT;
		goto err_copy;
	}
	fd_install(data.fd, dma_buf->file);
	return 0;
err_copy:
	dma_buf_put(dma_buf);
err_alloc:
	put_unused_fd(data.fd);
	return ret;
}
/*
 * Copy min(n_to, n_from) attachments to userspace as
 * (interface id, overlay engine id) pairs staged through a temporary
 * kernel array. Returns 0 on success or a negative errno.
 */
static int adf_copy_attachment_list_to_user(
		struct adf_attachment_config __user *to, size_t n_to,
		struct adf_attachment *from, size_t n_from)
{
	struct adf_attachment_config *temp;
	size_t n = min(n_to, n_from);
	size_t i;
	int ret = 0;

	if (!n)
		return 0;

	/* kcalloc validates the n * sizeof() product against overflow */
	temp = kcalloc(n, sizeof(temp[0]), GFP_KERNEL);
	if (!temp)
		return -ENOMEM;

	for (i = 0; i < n; i++) {
		temp[i].interface = from[i].interface->base.id;
		temp[i].overlay_engine = from[i].overlay_engine->base.id;
	}

	if (copy_to_user(to, temp, n * sizeof(to[0])))
		ret = -EFAULT;

	kfree(temp);
	return ret;
}
/*
 * ADF_GET_DEVICE_DATA handler: report the device's name, its current
 * and allowed attachment lists (each truncated to the caller's buffer),
 * and any driver-private custom data.
 */
static int adf_device_get_data(struct adf_device *dev,
		struct adf_device_data __user *arg)
{
	struct adf_device_data data;
	size_t n_attach;
	struct adf_attachment *attach = NULL;
	size_t n_allowed_attach;
	struct adf_attachment *allowed_attach = NULL;
	int ret = 0;
	if (copy_from_user(&data, arg, sizeof(data)))
		return -EFAULT;
	/* bound userspace counts before sizing any allocation */
	if (data.n_attachments > ADF_MAX_ATTACHMENTS ||
			data.n_allowed_attachments > ADF_MAX_ATTACHMENTS)
		return -EINVAL;
	strlcpy(data.name, dev->base.name, sizeof(data.name));
	if (data.n_attachments) {
		attach = kzalloc(data.n_attachments * sizeof(attach[0]),
				GFP_KERNEL);
		if (!attach)
			return -ENOMEM;
	}
	n_attach = adf_device_attachments(dev, attach, data.n_attachments);
	if (data.n_allowed_attachments) {
		allowed_attach = kzalloc(data.n_allowed_attachments *
				sizeof(allowed_attach[0]), GFP_KERNEL);
		if (!allowed_attach) {
			ret = -ENOMEM;
			goto done;
		}
	}
	n_allowed_attach = adf_device_attachments_allowed(dev, allowed_attach,
			data.n_allowed_attachments);
	mutex_lock(&dev->client_lock);
	ret = adf_obj_copy_custom_data_to_user(&dev->base, arg->custom_data,
			&data.custom_data_size);
	mutex_unlock(&dev->client_lock);
	if (ret < 0)
		goto done;
	ret = adf_copy_attachment_list_to_user(arg->attachments,
			data.n_attachments, attach, n_attach);
	if (ret < 0)
		goto done;
	ret = adf_copy_attachment_list_to_user(arg->allowed_attachments,
			data.n_allowed_attachments, allowed_attach,
			n_allowed_attach);
	if (ret < 0)
		goto done;
	/* report the true counts back, which may exceed what was copied */
	data.n_attachments = n_attach;
	data.n_allowed_attachments = n_allowed_attach;
	if (copy_to_user(arg, &data, sizeof(data)))
		ret = -EFAULT;
done:
	kfree(allowed_attach);
	kfree(attach);
	return ret;
}
/* Shared ADF_ATTACH/ADF_DETACH handler: resolve the user-supplied ids
 * and forward to the client attach or detach call. */
static int adf_device_handle_attachment(struct adf_device *dev,
		struct adf_attachment_config __user *arg, bool attach)
{
	struct adf_attachment_config data;
	struct adf_interface *intf;
	struct adf_overlay_engine *eng;

	if (copy_from_user(&data, arg, sizeof(data)))
		return -EFAULT;

	eng = idr_find(&dev->overlay_engines, data.overlay_engine);
	if (!eng) {
		dev_err(&dev->base.dev, "invalid overlay engine id %u\n",
				data.overlay_engine);
		return -EINVAL;
	}

	intf = idr_find(&dev->interfaces, data.interface);
	if (!intf) {
		dev_err(&dev->base.dev, "invalid interface id %u\n",
				data.interface);
		return -EINVAL;
	}

	return attach ? adf_device_attach(dev, eng, intf) :
			adf_device_detach(dev, eng, intf);
}
/* ADF_SET_MODE handler: snapshot the userspace mode, then apply it. */
static int adf_intf_set_mode(struct adf_interface *intf,
		struct drm_mode_modeinfo __user *arg)
{
	struct drm_mode_modeinfo kmode;

	if (copy_from_user(&kmode, arg, sizeof(kmode)))
		return -EFAULT;

	return adf_interface_set_mode(intf, &kmode);
}
/*
 * ADF_GET_INTERFACE_DATA handler: report the interface's identity,
 * screen size, hotplug state, modelist (truncated to the caller's
 * buffer), current mode, DPMS state, and driver-private custom data.
 */
static int adf_intf_get_data(struct adf_interface *intf,
		struct adf_interface_data __user *arg)
{
	struct adf_device *dev = adf_interface_parent(intf);
	struct adf_interface_data data;
	struct drm_mode_modeinfo *modelist;
	size_t modelist_size;
	int err;
	int ret = 0;
	unsigned long flags;
	if (copy_from_user(&data, arg, sizeof(data)))
		return -EFAULT;
	strlcpy(data.name, intf->base.name, sizeof(data.name));
	data.type = intf->type;
	data.id = intf->idx;
	data.flags = intf->flags;
	/* screen size is advisory; report 0x0 when unavailable */
	err = adf_interface_get_screen_size(intf, &data.width_mm,
			&data.height_mm);
	if (err < 0) {
		data.width_mm = 0;
		data.height_mm = 0;
	}
	modelist = kmalloc(sizeof(modelist[0]) * ADF_MAX_MODES, GFP_KERNEL);
	if (!modelist)
		return -ENOMEM;
	/* snapshot the hotplug state and modelist under the rwlock, then
	   copy to userspace outside of it */
	mutex_lock(&dev->client_lock);
	read_lock_irqsave(&intf->hotplug_modelist_lock, flags);
	data.hotplug_detect = intf->hotplug_detect;
	modelist_size = min(data.n_available_modes, intf->n_modes) *
			sizeof(intf->modelist[0]);
	memcpy(modelist, intf->modelist, modelist_size);
	data.n_available_modes = intf->n_modes;
	read_unlock_irqrestore(&intf->hotplug_modelist_lock, flags);
	if (copy_to_user(arg->available_modes, modelist, modelist_size)) {
		ret = -EFAULT;
		goto done;
	}
	data.dpms_state = intf->dpms_state;
	memcpy(&data.current_mode, &intf->current_mode,
			sizeof(intf->current_mode));
	ret = adf_obj_copy_custom_data_to_user(&intf->base, arg->custom_data,
			&data.custom_data_size);
done:
	mutex_unlock(&dev->client_lock);
	kfree(modelist);
	if (ret < 0)
		return ret;
	if (copy_to_user(arg, &data, sizeof(data)))
		ret = -EFAULT;
	return ret;
}
static inline long adf_obj_custom_ioctl(struct adf_obj *obj, unsigned int cmd,
unsigned long arg)
{
if (obj->ops && obj->ops->ioctl)
return obj->ops->ioctl(obj, cmd, arg);
return -ENOTTY;
}
/*
 * Dispatch ioctls issued on an overlay engine node. Commands that only
 * make sense on device or interface nodes are rejected with -EINVAL;
 * anything unknown is passed to the driver's custom handler.
 */
static long adf_overlay_engine_ioctl(struct adf_overlay_engine *eng,
		struct adf_file *file, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case ADF_SET_EVENT:
		return adf_obj_set_event(&eng->base, file,
				(struct adf_set_event __user *)arg);
	case ADF_GET_OVERLAY_ENGINE_DATA:
		return adf_eng_get_data(eng,
				(struct adf_overlay_engine_data __user *)arg);
	/* valid ADF ioctls that target a different object type */
	case ADF_BLANK:
	case ADF_POST_CONFIG:
	case ADF_SET_MODE:
	case ADF_GET_DEVICE_DATA:
	case ADF_GET_INTERFACE_DATA:
	case ADF_SIMPLE_POST_CONFIG:
	case ADF_SIMPLE_BUFFER_ALLOC:
	case ADF_ATTACH:
	case ADF_DETACH:
		return -EINVAL;
	default:
		return adf_obj_custom_ioctl(&eng->base, cmd, arg);
	}
}
/*
 * Dispatch ioctls issued on an interface node. Device- and engine-only
 * commands are rejected with -EINVAL; unknown commands are forwarded to
 * the driver's custom handler.
 */
static long adf_interface_ioctl(struct adf_interface *intf,
		struct adf_file *file, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case ADF_SET_EVENT:
		return adf_obj_set_event(&intf->base, file,
				(struct adf_set_event __user *)arg);
	case ADF_BLANK:
		/* arg is the DPMS state itself, not a pointer */
		return adf_interface_blank(intf, arg);
	case ADF_SET_MODE:
		return adf_intf_set_mode(intf,
				(struct drm_mode_modeinfo __user *)arg);
	case ADF_GET_INTERFACE_DATA:
		return adf_intf_get_data(intf,
				(struct adf_interface_data __user *)arg);
	case ADF_SIMPLE_POST_CONFIG:
		return adf_intf_simple_post_config(intf,
				(struct adf_simple_post_config __user *)arg);
	case ADF_SIMPLE_BUFFER_ALLOC:
		return adf_intf_simple_buffer_alloc(intf,
				(struct adf_simple_buffer_alloc __user *)arg);
	/* valid ADF ioctls that target a different object type */
	case ADF_POST_CONFIG:
	case ADF_GET_DEVICE_DATA:
	case ADF_GET_OVERLAY_ENGINE_DATA:
	case ADF_ATTACH:
	case ADF_DETACH:
		return -EINVAL;
	default:
		return adf_obj_custom_ioctl(&intf->base, cmd, arg);
	}
}
/*
 * Dispatch ioctls issued on a device node. Interface- and engine-only
 * commands are rejected with -EINVAL; unknown commands are forwarded to
 * the driver's custom handler.
 */
static long adf_device_ioctl(struct adf_device *dev, struct adf_file *file,
		unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case ADF_SET_EVENT:
		return adf_obj_set_event(&dev->base, file,
				(struct adf_set_event __user *)arg);
	case ADF_POST_CONFIG:
		return adf_device_post_config(dev,
				(struct adf_post_config __user *)arg);
	case ADF_GET_DEVICE_DATA:
		return adf_device_get_data(dev,
				(struct adf_device_data __user *)arg);
	case ADF_ATTACH:
		return adf_device_handle_attachment(dev,
				(struct adf_attachment_config __user *)arg,
				true);
	case ADF_DETACH:
		return adf_device_handle_attachment(dev,
				(struct adf_attachment_config __user *)arg,
				false);
	/* valid ADF ioctls that target a different object type */
	case ADF_BLANK:
	case ADF_SET_MODE:
	case ADF_GET_INTERFACE_DATA:
	case ADF_GET_OVERLAY_ENGINE_DATA:
	case ADF_SIMPLE_POST_CONFIG:
	case ADF_SIMPLE_BUFFER_ALLOC:
		return -EINVAL;
	default:
		return adf_obj_custom_ioctl(&dev->base, cmd, arg);
	}
}
/*
 * File-open handler for all ADF character devices: resolve the object
 * from the minor number, pin the owning driver module, allocate the
 * per-file state, run the driver's optional open hook, and link the
 * file into the object's listener list.
 */
static int adf_file_open(struct inode *inode, struct file *file)
{
	struct adf_obj *obj;
	struct adf_file *fpriv = NULL;
	unsigned long flags;
	int ret = 0;
	obj = adf_obj_sysfs_find(iminor(inode));
	if (!obj)
		return -ENODEV;
	dev_dbg(&obj->dev, "opening %s\n", dev_name(&obj->dev));
	/* keep the driver module loaded while the file is open */
	if (!try_module_get(obj->parent->ops->owner)) {
		dev_err(&obj->dev, "getting owner module failed\n");
		return -ENODEV;
	}
	fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
	if (!fpriv) {
		ret = -ENOMEM;
		goto done;
	}
	INIT_LIST_HEAD(&fpriv->head);
	fpriv->obj = obj;
	init_waitqueue_head(&fpriv->event_wait);
	file->private_data = fpriv;
	/* give the driver a chance to veto or augment the open */
	if (obj->ops && obj->ops->open) {
		ret = obj->ops->open(obj, inode, file);
		if (ret < 0)
			goto done;
	}
	spin_lock_irqsave(&obj->file_lock, flags);
	list_add_tail(&fpriv->head, &obj->file_list);
	spin_unlock_irqrestore(&obj->file_lock, flags);
done:
	if (ret < 0) {
		kfree(fpriv);
		module_put(obj->parent->ops->owner);
	}
	return ret;
}
/*
 * File-release handler: run the driver's optional release hook, unlink
 * the file from the object's listener list, drop any remaining event
 * subscriptions, and release the per-file state and module reference.
 */
static int adf_file_release(struct inode *inode, struct file *file)
{
	struct adf_file *fpriv = file->private_data;
	struct adf_obj *obj = fpriv->obj;
	enum adf_event_type event_type;
	unsigned long flags;
	if (obj->ops && obj->ops->release)
		obj->ops->release(obj, inode, file);
	spin_lock_irqsave(&obj->file_lock, flags);
	list_del(&fpriv->head);
	spin_unlock_irqrestore(&obj->file_lock, flags);
	/* balance every adf_event_get taken via ADF_SET_EVENT */
	for_each_set_bit(event_type, fpriv->event_subscriptions,
			ADF_EVENT_TYPE_MAX) {
		adf_event_put(obj, event_type);
	}
	kfree(fpriv);
	module_put(obj->parent->ops->owner);
	dev_dbg(&obj->dev, "released %s\n", dev_name(&obj->dev));
	return 0;
}
/* Top-level ioctl entry: route the command to the handler matching the
 * node's object type (device, interface, or overlay engine). */
long adf_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct adf_file *fpriv = file->private_data;
	struct adf_obj *obj = fpriv->obj;
	long ret = -EINVAL;

	dev_dbg(&obj->dev, "%s ioctl %u\n", dev_name(&obj->dev), _IOC_NR(cmd));

	if (obj->type == ADF_OBJ_OVERLAY_ENGINE)
		ret = adf_overlay_engine_ioctl(adf_obj_to_overlay_engine(obj),
				fpriv, cmd, arg);
	else if (obj->type == ADF_OBJ_INTERFACE)
		ret = adf_interface_ioctl(adf_obj_to_interface(obj), fpriv,
				cmd, arg);
	else if (obj->type == ADF_OBJ_DEVICE)
		ret = adf_device_ioctl(adf_obj_to_device(obj), fpriv, cmd,
				arg);

	return ret;
}
/* True when the per-file circular event buffer holds queued bytes. */
static inline bool adf_file_event_available(struct adf_file *fpriv)
{
	return CIRC_CNT(fpriv->event_head, fpriv->event_tail,
			sizeof(fpriv->event_buf)) != 0;
}
/*
 * Append @event (header + payload, event->length bytes total) to the
 * file's circular event buffer and wake any readers. Events that don't
 * fit are dropped with a debug message rather than blocking.
 *
 * NOTE(review): head/tail are read without synchronization here —
 * presumably the caller holds obj->file_lock; confirm at the call sites.
 */
void adf_file_queue_event(struct adf_file *fpriv, struct adf_event *event)
{
	int head = fpriv->event_head;
	int tail = fpriv->event_tail;
	size_t space = CIRC_SPACE(head, tail, sizeof(fpriv->event_buf));
	size_t space_to_end =
			CIRC_SPACE_TO_END(head, tail, sizeof(fpriv->event_buf));
	if (space < event->length) {
		dev_dbg(&fpriv->obj->dev,
				"insufficient buffer space for event %u\n",
				event->type);
		return;
	}
	/* split the copy if the event wraps past the buffer end */
	if (space_to_end >= event->length) {
		memcpy(fpriv->event_buf + head, event, event->length);
	} else {
		memcpy(fpriv->event_buf + head, event, space_to_end);
		memcpy(fpriv->event_buf, (u8 *)event + space_to_end,
				event->length - space_to_end);
	}
	/* publish the data before advancing head for the reader side */
	smp_wmb();
	fpriv->event_head = (fpriv->event_head + event->length) &
			(sizeof(fpriv->event_buf) - 1);
	wake_up_interruptible_all(&fpriv->event_wait);
}
/*
 * Drain up to @buffer_size bytes of queued events into @buffer. The
 * circular buffer is consumed into a kernel bounce buffer under the
 * file lock, then copied to userspace outside of it (copy_to_user may
 * sleep). Returns the byte count copied, 0 if nothing was queued, or a
 * negative errno.
 */
static ssize_t adf_file_copy_to_user(struct adf_file *fpriv,
		char __user *buffer, size_t buffer_size)
{
	int head, tail;
	u8 *event_buf;
	size_t cnt, cnt_to_end, copy_size = 0;
	ssize_t ret = 0;
	unsigned long flags;
	event_buf = kmalloc(min(buffer_size, sizeof(fpriv->event_buf)),
			GFP_KERNEL);
	if (!event_buf)
		return -ENOMEM;
	spin_lock_irqsave(&fpriv->obj->file_lock, flags);
	if (!adf_file_event_available(fpriv))
		goto out;
	head = fpriv->event_head;
	tail = fpriv->event_tail;
	cnt = CIRC_CNT(head, tail, sizeof(fpriv->event_buf));
	cnt_to_end = CIRC_CNT_TO_END(head, tail, sizeof(fpriv->event_buf));
	copy_size = min(buffer_size, cnt);
	/* split the copy if the queued data wraps past the buffer end */
	if (cnt_to_end >= copy_size) {
		memcpy(event_buf, fpriv->event_buf + tail, copy_size);
	} else {
		memcpy(event_buf, fpriv->event_buf + tail, cnt_to_end);
		memcpy(event_buf + cnt_to_end, fpriv->event_buf,
				copy_size - cnt_to_end);
	}
	fpriv->event_tail = (fpriv->event_tail + copy_size) &
			(sizeof(fpriv->event_buf) - 1);
out:
	spin_unlock_irqrestore(&fpriv->obj->file_lock, flags);
	if (copy_size) {
		if (copy_to_user(buffer, event_buf, copy_size))
			ret = -EFAULT;
		else
			ret = copy_size;
	}
	kfree(event_buf);
	return ret;
}
/* Blocking read: wait until an event is queued, then drain it. */
ssize_t adf_file_read(struct file *filp, char __user *buffer,
	size_t count, loff_t *offset)
{
	struct adf_file *fpriv = filp->private_data;
	int ret;

	ret = wait_event_interruptible(fpriv->event_wait,
			adf_file_event_available(fpriv));
	if (ret < 0)
		return ret;

	return adf_file_copy_to_user(fpriv, buffer, count);
}
/* poll(): readable whenever the event ring buffer is non-empty. */
unsigned int adf_file_poll(struct file *filp, struct poll_table_struct *wait)
{
	struct adf_file *fpriv = filp->private_data;

	poll_wait(filp, &fpriv->event_wait, wait);

	return adf_file_event_available(fpriv) ? (POLLIN | POLLRDNORM) : 0;
}
/* File operations shared by every ADF device, interface, and overlay
 * engine node; per-object-type dispatch happens inside the handlers. */
const struct file_operations adf_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = adf_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = adf_file_compat_ioctl,
#endif
	.open = adf_file_open,
	.release = adf_file_release,
	.llseek = default_llseek,
	.read = adf_file_read,
	.poll = adf_file_poll,
};

View file

@ -0,0 +1,37 @@
/*
* Copyright (C) 2013 Google, Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef __VIDEO_ADF_ADF_FOPS_H
#define __VIDEO_ADF_ADF_FOPS_H
#include <linux/bitmap.h>
#include <linux/fs.h>
extern const struct file_operations adf_fops;
struct adf_file {
struct list_head head;
struct adf_obj *obj;
DECLARE_BITMAP(event_subscriptions, ADF_EVENT_TYPE_MAX);
u8 event_buf[4096];
int event_head;
int event_tail;
wait_queue_head_t event_wait;
};
void adf_file_queue_event(struct adf_file *file, struct adf_event *event);
long adf_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
#endif /* __VIDEO_ADF_ADF_FOPS_H */

View file

@ -0,0 +1,217 @@
/*
* Copyright (C) 2013 Google, Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/uaccess.h>
#include <video/adf.h>
#include "adf_fops.h"
#include "adf_fops32.h"
/*
 * 32-bit compat shim for ADF_POST_CONFIG: repack the 32-bit struct
 * (compat pointers widened via compat_ptr) into a native struct in
 * compat_alloc_user_space, run the native ioctl, and copy the returned
 * completion fence fd back into the caller's 32-bit struct.
 */
long adf_compat_post_config(struct file *file,
		struct adf_post_config32 __user *arg)
{
	struct adf_post_config32 cfg32;
	struct adf_post_config __user *cfg;
	int ret;
	if (copy_from_user(&cfg32, arg, sizeof(cfg32)))
		return -EFAULT;
	cfg = compat_alloc_user_space(sizeof(*cfg));
	if (!access_ok(VERIFY_WRITE, cfg, sizeof(*cfg)))
		return -EFAULT;
	if (put_user(cfg32.n_interfaces, &cfg->n_interfaces) ||
			put_user(compat_ptr(cfg32.interfaces),
					&cfg->interfaces) ||
			put_user(cfg32.n_bufs, &cfg->n_bufs) ||
			put_user(compat_ptr(cfg32.bufs), &cfg->bufs) ||
			put_user(cfg32.custom_data_size,
					&cfg->custom_data_size) ||
			put_user(compat_ptr(cfg32.custom_data),
					&cfg->custom_data))
		return -EFAULT;
	ret = adf_file_ioctl(file, ADF_POST_CONFIG, (unsigned long)cfg);
	if (ret < 0)
		return ret;
	/* the only output field is the completion fence fd */
	if (copy_in_user(&arg->complete_fence, &cfg->complete_fence,
			sizeof(cfg->complete_fence)))
		return -EFAULT;
	return 0;
}
/*
 * 32-bit compat handler for ADF_GET_DEVICE_DATA.
 *
 * Widens the compat struct's pointers/sizes into a native
 * struct adf_device_data in compat-allocated user space, calls the
 * native ioctl (which fills the caller's buffers directly through the
 * widened pointers), then copies the scalar outputs back to the 32-bit
 * layout.
 */
long adf_compat_get_device_data(struct file *file,
		struct adf_device_data32 __user *arg)
{
	struct adf_device_data32 data32;
	struct adf_device_data __user *data;
	int ret;

	if (copy_from_user(&data32, arg, sizeof(data32)))
		return -EFAULT;

	data = compat_alloc_user_space(sizeof(*data));
	if (!access_ok(VERIFY_WRITE, data, sizeof(*data)))
		return -EFAULT;

	/* inputs: caller-supplied buffer pointers and capacities */
	if (put_user(data32.n_attachments, &data->n_attachments) ||
			put_user(compat_ptr(data32.attachments),
					&data->attachments) ||
			put_user(data32.n_allowed_attachments,
					&data->n_allowed_attachments) ||
			put_user(compat_ptr(data32.allowed_attachments),
					&data->allowed_attachments) ||
			put_user(data32.custom_data_size,
					&data->custom_data_size) ||
			put_user(compat_ptr(data32.custom_data),
					&data->custom_data))
		return -EFAULT;

	ret = adf_file_ioctl(file, ADF_GET_DEVICE_DATA, (unsigned long)data);
	if (ret < 0)
		return ret;

	/* outputs: name and element counts copied back to the 32-bit struct */
	if (copy_in_user(arg->name, data->name, sizeof(arg->name)) ||
			copy_in_user(&arg->n_attachments, &data->n_attachments,
					sizeof(arg->n_attachments)) ||
			copy_in_user(&arg->n_allowed_attachments,
					&data->n_allowed_attachments,
					sizeof(arg->n_allowed_attachments)) ||
			copy_in_user(&arg->custom_data_size,
					&data->custom_data_size,
					sizeof(arg->custom_data_size)))
		return -EFAULT;

	return 0;
}
/*
 * 32-bit compat handler for ADF_GET_INTERFACE_DATA.
 *
 * Same pattern as adf_compat_get_device_data(): widen the pointer-sized
 * inputs into a native struct, call the native ioctl, copy the scalar
 * outputs back into the 32-bit layout.
 */
long adf_compat_get_interface_data(struct file *file,
		struct adf_interface_data32 __user *arg)
{
	struct adf_interface_data32 data32;
	struct adf_interface_data __user *data;
	int ret;

	if (copy_from_user(&data32, arg, sizeof(data32)))
		return -EFAULT;

	data = compat_alloc_user_space(sizeof(*data));
	if (!access_ok(VERIFY_WRITE, data, sizeof(*data)))
		return -EFAULT;

	/* inputs: mode-list buffer and custom-data buffer */
	if (put_user(data32.n_available_modes, &data->n_available_modes) ||
			put_user(compat_ptr(data32.available_modes),
					&data->available_modes) ||
			put_user(data32.custom_data_size,
					&data->custom_data_size) ||
			put_user(compat_ptr(data32.custom_data),
					&data->custom_data))
		return -EFAULT;

	ret = adf_file_ioctl(file, ADF_GET_INTERFACE_DATA, (unsigned long)data);
	if (ret < 0)
		return ret;

	/* outputs: every fixed-size field of the interface description */
	if (copy_in_user(arg->name, data->name, sizeof(arg->name)) ||
			copy_in_user(&arg->type, &data->type,
					sizeof(arg->type)) ||
			copy_in_user(&arg->id, &data->id, sizeof(arg->id)) ||
			copy_in_user(&arg->flags, &data->flags,
					sizeof(arg->flags)) ||
			copy_in_user(&arg->dpms_state, &data->dpms_state,
					sizeof(arg->dpms_state)) ||
			copy_in_user(&arg->hotplug_detect,
					&data->hotplug_detect,
					sizeof(arg->hotplug_detect)) ||
			copy_in_user(&arg->width_mm, &data->width_mm,
					sizeof(arg->width_mm)) ||
			copy_in_user(&arg->height_mm, &data->height_mm,
					sizeof(arg->height_mm)) ||
			copy_in_user(&arg->current_mode, &data->current_mode,
					sizeof(arg->current_mode)) ||
			copy_in_user(&arg->n_available_modes,
					&data->n_available_modes,
					sizeof(arg->n_available_modes)) ||
			copy_in_user(&arg->custom_data_size,
					&data->custom_data_size,
					sizeof(arg->custom_data_size)))
		return -EFAULT;

	return 0;
}
/*
 * 32-bit compat handler for ADF_GET_OVERLAY_ENGINE_DATA; same
 * widen / forward / copy-back pattern as the handlers above.
 */
long adf_compat_get_overlay_engine_data(struct file *file,
		struct adf_overlay_engine_data32 __user *arg)
{
	struct adf_overlay_engine_data32 data32;
	struct adf_overlay_engine_data __user *data;
	int ret;

	if (copy_from_user(&data32, arg, sizeof(data32)))
		return -EFAULT;

	data = compat_alloc_user_space(sizeof(*data));
	if (!access_ok(VERIFY_WRITE, data, sizeof(*data)))
		return -EFAULT;

	/* inputs: format-list buffer and custom-data buffer */
	if (put_user(data32.n_supported_formats, &data->n_supported_formats) ||
			put_user(compat_ptr(data32.supported_formats),
					&data->supported_formats) ||
			put_user(data32.custom_data_size,
					&data->custom_data_size) ||
			put_user(compat_ptr(data32.custom_data),
					&data->custom_data))
		return -EFAULT;

	ret = adf_file_ioctl(file, ADF_GET_OVERLAY_ENGINE_DATA,
			(unsigned long)data);
	if (ret < 0)
		return ret;

	/* outputs: name and counts back to the 32-bit struct */
	if (copy_in_user(arg->name, data->name, sizeof(arg->name)) ||
			copy_in_user(&arg->n_supported_formats,
					&data->n_supported_formats,
					sizeof(arg->n_supported_formats)) ||
			copy_in_user(&arg->custom_data_size,
					&data->custom_data_size,
					sizeof(arg->custom_data_size)))
		return -EFAULT;

	return 0;
}
/*
 * compat_ioctl entry point: route the four 32-bit ioctl numbers to
 * their translation helpers; every other command has an identical
 * layout in 32- and 64-bit userspace and goes straight to the native
 * handler.
 */
long adf_file_compat_ioctl(struct file *file, unsigned int cmd,
		unsigned long arg)
{
	if (cmd == ADF_POST_CONFIG32)
		return adf_compat_post_config(file, compat_ptr(arg));
	if (cmd == ADF_GET_DEVICE_DATA32)
		return adf_compat_get_device_data(file, compat_ptr(arg));
	if (cmd == ADF_GET_INTERFACE_DATA32)
		return adf_compat_get_interface_data(file, compat_ptr(arg));
	if (cmd == ADF_GET_OVERLAY_ENGINE_DATA32)
		return adf_compat_get_overlay_engine_data(file,
				compat_ptr(arg));

	return adf_file_ioctl(file, cmd, arg);
}

View file

@ -0,0 +1,78 @@
#ifndef __VIDEO_ADF_ADF_FOPS32_H
#define __VIDEO_ADF_ADF_FOPS32_H

#include <linux/compat.h>
#include <linux/ioctl.h>

#include <video/adf.h>

/*
 * 32-bit (compat) counterparts of the ADF ioctls whose argument structs
 * contain pointers or size_t and therefore have a different layout in
 * 32-bit userspace: pointers become compat_uptr_t and sizes become
 * compat_size_t.  The nr values (2, 4, 5, 6) mirror the native ioctl
 * definitions -- presumably those in <video/adf.h>; keep them in sync.
 */
#define ADF_POST_CONFIG32 \
	_IOW(ADF_IOCTL_TYPE, 2, struct adf_post_config32)
#define ADF_GET_DEVICE_DATA32 \
	_IOR(ADF_IOCTL_TYPE, 4, struct adf_device_data32)
#define ADF_GET_INTERFACE_DATA32 \
	_IOR(ADF_IOCTL_TYPE, 5, struct adf_interface_data32)
#define ADF_GET_OVERLAY_ENGINE_DATA32 \
	_IOR(ADF_IOCTL_TYPE, 6, struct adf_overlay_engine_data32)

/* 32-bit layout of struct adf_post_config */
struct adf_post_config32 {
	compat_size_t n_interfaces;
	compat_uptr_t interfaces;

	compat_size_t n_bufs;
	compat_uptr_t bufs;

	compat_size_t custom_data_size;
	compat_uptr_t custom_data;

	__s32 complete_fence;		/* out: sync fence fd for this post */
};

/* 32-bit layout of struct adf_device_data */
struct adf_device_data32 {
	char name[ADF_NAME_LEN];

	compat_size_t n_attachments;
	compat_uptr_t attachments;

	compat_size_t n_allowed_attachments;
	compat_uptr_t allowed_attachments;

	compat_size_t custom_data_size;
	compat_uptr_t custom_data;
};

/* 32-bit layout of struct adf_interface_data */
struct adf_interface_data32 {
	char name[ADF_NAME_LEN];

	__u8 type;
	__u32 id;
	/* e.g. type=ADF_INTF_TYPE_DSI, id=1 => DSI.1 */
	__u32 flags;

	__u8 dpms_state;
	__u8 hotplug_detect;
	__u16 width_mm;
	__u16 height_mm;

	struct drm_mode_modeinfo current_mode;
	compat_size_t n_available_modes;
	compat_uptr_t available_modes;

	compat_size_t custom_data_size;
	compat_uptr_t custom_data;
};

/* 32-bit layout of struct adf_overlay_engine_data */
struct adf_overlay_engine_data32 {
	char name[ADF_NAME_LEN];

	compat_size_t n_supported_formats;
	compat_uptr_t supported_formats;

	compat_size_t custom_data_size;
	compat_uptr_t custom_data;
};

long adf_file_compat_ioctl(struct file *file, unsigned int cmd,
		unsigned long arg);

#endif /* __VIDEO_ADF_ADF_FOPS32_H */

View file

@ -0,0 +1,280 @@
/*
* Copyright (C) 2013 Google, Inc.
* modified from drivers/gpu/drm/drm_crtc.c
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/export.h>
#include <linux/kernel.h>
#include <drm/drm_fourcc.h>
#include <video/adf_format.h>
/**
 * adf_format_is_standard - test whether @format is one of the DRM
 * fourcc formats ADF can validate generically
 * @format: DRM fourcc code (DRM_FORMAT_*)
 *
 * Drivers using vendor-specific formats must validate those themselves.
 */
bool adf_format_is_standard(u32 format)
{
	switch (format) {
	/* paletted / packed RGB */
	case DRM_FORMAT_C8:
	case DRM_FORMAT_RGB332:
	case DRM_FORMAT_BGR233:
	case DRM_FORMAT_XRGB4444:
	case DRM_FORMAT_XBGR4444:
	case DRM_FORMAT_RGBX4444:
	case DRM_FORMAT_BGRX4444:
	case DRM_FORMAT_ARGB4444:
	case DRM_FORMAT_ABGR4444:
	case DRM_FORMAT_RGBA4444:
	case DRM_FORMAT_BGRA4444:
	case DRM_FORMAT_XRGB1555:
	case DRM_FORMAT_XBGR1555:
	case DRM_FORMAT_RGBX5551:
	case DRM_FORMAT_BGRX5551:
	case DRM_FORMAT_ARGB1555:
	case DRM_FORMAT_ABGR1555:
	case DRM_FORMAT_RGBA5551:
	case DRM_FORMAT_BGRA5551:
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_BGR565:
	case DRM_FORMAT_RGB888:
	case DRM_FORMAT_BGR888:
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_RGBX8888:
	case DRM_FORMAT_BGRX8888:
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_ABGR8888:
	case DRM_FORMAT_RGBA8888:
	case DRM_FORMAT_BGRA8888:
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_RGBX1010102:
	case DRM_FORMAT_BGRX1010102:
	case DRM_FORMAT_ARGB2101010:
	case DRM_FORMAT_ABGR2101010:
	case DRM_FORMAT_RGBA1010102:
	case DRM_FORMAT_BGRA1010102:
	/* packed YUV */
	case DRM_FORMAT_YUYV:
	case DRM_FORMAT_YVYU:
	case DRM_FORMAT_UYVY:
	case DRM_FORMAT_VYUY:
	case DRM_FORMAT_AYUV:
	/* semiplanar YUV */
	case DRM_FORMAT_NV12:
	case DRM_FORMAT_NV21:
	case DRM_FORMAT_NV16:
	case DRM_FORMAT_NV61:
	/* triplanar YUV */
	case DRM_FORMAT_YUV410:
	case DRM_FORMAT_YVU410:
	case DRM_FORMAT_YUV411:
	case DRM_FORMAT_YVU411:
	case DRM_FORMAT_YUV420:
	case DRM_FORMAT_YVU420:
	case DRM_FORMAT_YUV422:
	case DRM_FORMAT_YVU422:
	case DRM_FORMAT_YUV444:
	case DRM_FORMAT_YVU444:
		return true;
	default:
		return false;
	}
}
EXPORT_SYMBOL(adf_format_is_standard);
/**
 * adf_format_is_rgb - test whether @format is an RGB (as opposed to
 * YUV) DRM fourcc format
 * @format: DRM fourcc code
 *
 * NOTE(review): the 4444 RGB family accepted by adf_format_is_standard()
 * is absent here -- confirm whether that omission is intentional.
 */
bool adf_format_is_rgb(u32 format)
{
	switch (format) {
	case DRM_FORMAT_C8:
	case DRM_FORMAT_RGB332:
	case DRM_FORMAT_BGR233:
	case DRM_FORMAT_XRGB1555:
	case DRM_FORMAT_XBGR1555:
	case DRM_FORMAT_RGBX5551:
	case DRM_FORMAT_BGRX5551:
	case DRM_FORMAT_ARGB1555:
	case DRM_FORMAT_ABGR1555:
	case DRM_FORMAT_RGBA5551:
	case DRM_FORMAT_BGRA5551:
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_BGR565:
	case DRM_FORMAT_RGB888:
	case DRM_FORMAT_BGR888:
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_RGBX8888:
	case DRM_FORMAT_BGRX8888:
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_RGBX1010102:
	case DRM_FORMAT_BGRX1010102:
	case DRM_FORMAT_ARGB2101010:
	case DRM_FORMAT_ABGR2101010:
	case DRM_FORMAT_RGBA1010102:
	case DRM_FORMAT_BGRA1010102:
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_ABGR8888:
	case DRM_FORMAT_RGBA8888:
	case DRM_FORMAT_BGRA8888:
		return true;
	default:
		return false;
	}
}
EXPORT_SYMBOL(adf_format_is_rgb);
/**
 * adf_format_num_planes - number of memory planes used by @format
 * @format: DRM fourcc code
 *
 * Triplanar YUV formats use 3 planes, the NV-style semiplanar formats
 * use 2, and everything else is treated as a single plane.
 */
u8 adf_format_num_planes(u32 format)
{
	if (format == DRM_FORMAT_YUV410 || format == DRM_FORMAT_YVU410 ||
			format == DRM_FORMAT_YUV411 ||
			format == DRM_FORMAT_YVU411 ||
			format == DRM_FORMAT_YUV420 ||
			format == DRM_FORMAT_YVU420 ||
			format == DRM_FORMAT_YUV422 ||
			format == DRM_FORMAT_YVU422 ||
			format == DRM_FORMAT_YUV444 ||
			format == DRM_FORMAT_YVU444)
		return 3;

	if (format == DRM_FORMAT_NV12 || format == DRM_FORMAT_NV21 ||
			format == DRM_FORMAT_NV16 || format == DRM_FORMAT_NV61)
		return 2;

	return 1;
}
EXPORT_SYMBOL(adf_format_num_planes);
/**
 * adf_format_bpp - total bits per pixel of a single-plane format
 * @format: DRM fourcc code
 *
 * Returns 0 (and logs at debug level) for formats it does not know,
 * e.g. the multiplanar YUV formats whose per-plane sizes are reported
 * by adf_format_plane_cpp() instead.
 *
 * Fix: the 4444 RGB family is accepted by adf_format_is_standard() but
 * was missing here, so those standard formats reported 0 bpp; they are
 * 16-bit formats per the DRM fourcc definitions.
 */
u8 adf_format_bpp(u32 format)
{
	switch (format) {
	case DRM_FORMAT_C8:
	case DRM_FORMAT_RGB332:
	case DRM_FORMAT_BGR233:
		return 8;

	case DRM_FORMAT_XRGB4444:
	case DRM_FORMAT_XBGR4444:
	case DRM_FORMAT_RGBX4444:
	case DRM_FORMAT_BGRX4444:
	case DRM_FORMAT_ARGB4444:
	case DRM_FORMAT_ABGR4444:
	case DRM_FORMAT_RGBA4444:
	case DRM_FORMAT_BGRA4444:
	case DRM_FORMAT_XRGB1555:
	case DRM_FORMAT_XBGR1555:
	case DRM_FORMAT_RGBX5551:
	case DRM_FORMAT_BGRX5551:
	case DRM_FORMAT_ARGB1555:
	case DRM_FORMAT_ABGR1555:
	case DRM_FORMAT_RGBA5551:
	case DRM_FORMAT_BGRA5551:
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_BGR565:
		return 16;

	case DRM_FORMAT_RGB888:
	case DRM_FORMAT_BGR888:
		return 24;

	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_RGBX8888:
	case DRM_FORMAT_BGRX8888:
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_RGBX1010102:
	case DRM_FORMAT_BGRX1010102:
	case DRM_FORMAT_ARGB2101010:
	case DRM_FORMAT_ABGR2101010:
	case DRM_FORMAT_RGBA1010102:
	case DRM_FORMAT_BGRA1010102:
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_ABGR8888:
	case DRM_FORMAT_RGBA8888:
	case DRM_FORMAT_BGRA8888:
		return 32;

	default:
		pr_debug("%s: unsupported pixel format %u\n", __func__, format);
		return 0;
	}
}
EXPORT_SYMBOL(adf_format_bpp);
/**
 * adf_format_plane_cpp - bytes ("chars") per pixel of one plane
 * @format: DRM fourcc code
 * @plane: 0-based plane index
 *
 * Returns 0 when @plane is out of range for @format.
 */
u8 adf_format_plane_cpp(u32 format, int plane)
{
	if (plane >= adf_format_num_planes(format))
		return 0;

	/* packed 4:2:2 YUV: two bytes per pixel in the single plane */
	if (format == DRM_FORMAT_YUYV || format == DRM_FORMAT_YVYU ||
			format == DRM_FORMAT_UYVY || format == DRM_FORMAT_VYUY)
		return 2;

	/* semiplanar: 1-byte Y plane, 2-byte interleaved chroma plane */
	if (format == DRM_FORMAT_NV12 || format == DRM_FORMAT_NV21 ||
			format == DRM_FORMAT_NV16 || format == DRM_FORMAT_NV61)
		return plane ? 2 : 1;

	/* triplanar YUV: one byte per sample in every plane */
	if (format == DRM_FORMAT_YUV410 || format == DRM_FORMAT_YVU410 ||
			format == DRM_FORMAT_YUV411 ||
			format == DRM_FORMAT_YVU411 ||
			format == DRM_FORMAT_YUV420 ||
			format == DRM_FORMAT_YVU420 ||
			format == DRM_FORMAT_YUV422 ||
			format == DRM_FORMAT_YVU422 ||
			format == DRM_FORMAT_YUV444 ||
			format == DRM_FORMAT_YVU444)
		return 1;

	/* single-plane RGB: derive from the total bits per pixel */
	return adf_format_bpp(format) / 8;
}
EXPORT_SYMBOL(adf_format_plane_cpp);
/**
 * adf_format_horz_chroma_subsampling - horizontal chroma subsampling
 * factor (luma samples per chroma sample) of @format
 * @format: DRM fourcc code
 */
u8 adf_format_horz_chroma_subsampling(u32 format)
{
	if (format == DRM_FORMAT_YUV411 || format == DRM_FORMAT_YVU411 ||
			format == DRM_FORMAT_YUV410 ||
			format == DRM_FORMAT_YVU410)
		return 4;

	if (format == DRM_FORMAT_YUYV || format == DRM_FORMAT_YVYU ||
			format == DRM_FORMAT_UYVY ||
			format == DRM_FORMAT_VYUY ||
			format == DRM_FORMAT_NV12 ||
			format == DRM_FORMAT_NV21 ||
			format == DRM_FORMAT_NV16 ||
			format == DRM_FORMAT_NV61 ||
			format == DRM_FORMAT_YUV422 ||
			format == DRM_FORMAT_YVU422 ||
			format == DRM_FORMAT_YUV420 ||
			format == DRM_FORMAT_YVU420)
		return 2;

	/* 4:4:4 and RGB formats carry full-resolution chroma */
	return 1;
}
EXPORT_SYMBOL(adf_format_horz_chroma_subsampling);
/**
 * adf_format_vert_chroma_subsampling - vertical chroma subsampling
 * factor (luma rows per chroma row) of @format
 * @format: DRM fourcc code
 */
u8 adf_format_vert_chroma_subsampling(u32 format)
{
	if (format == DRM_FORMAT_YUV410 || format == DRM_FORMAT_YVU410)
		return 4;

	if (format == DRM_FORMAT_YUV420 || format == DRM_FORMAT_YVU420 ||
			format == DRM_FORMAT_NV12 || format == DRM_FORMAT_NV21)
		return 2;

	/* everything else keeps chroma at full vertical resolution */
	return 1;
}
EXPORT_SYMBOL(adf_format_vert_chroma_subsampling);

View file

@ -0,0 +1,160 @@
/*
* Copyright (C) 2013 Google, Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/dma-buf.h>
#include <linux/highmem.h>
#include <linux/memblock.h>
#include <linux/slab.h>
/* private data of each exported dma-buf: where the reserved region starts */
struct adf_memblock_pdata {
	phys_addr_t base;	/* page-aligned physical base of the memblock */
};
/*
 * dma_buf map_dma_buf op: wrap the physically contiguous memblock
 * region in a single-entry scatterlist and DMA-map it for @attach->dev.
 * Returns the mapped sg_table or an ERR_PTR.
 */
static struct sg_table *adf_memblock_map(struct dma_buf_attachment *attach,
		enum dma_data_direction direction)
{
	struct adf_memblock_pdata *pdata = attach->dmabuf->priv;
	unsigned long pfn = PFN_DOWN(pdata->base);
	struct page *page = pfn_to_page(pfn);
	struct sg_table *table;
	int nents, ret;

	table = kzalloc(sizeof(*table), GFP_KERNEL);
	if (!table)
		return ERR_PTR(-ENOMEM);

	/* one segment covers the whole contiguous buffer */
	ret = sg_alloc_table(table, 1, GFP_KERNEL);
	if (ret < 0)
		goto err_alloc;

	sg_set_page(table->sgl, page, attach->dmabuf->size, 0);

	nents = dma_map_sg(attach->dev, table->sgl, 1, direction);
	if (!nents) {
		ret = -EINVAL;
		goto err_map;
	}

	return table;

err_map:
	sg_free_table(table);
err_alloc:
	kfree(table);
	return ERR_PTR(ret);
}
/*
 * dma_buf unmap_dma_buf op: undo adf_memblock_map().
 *
 * Fix: the sg_table kzalloc'd in adf_memblock_map() was never freed --
 * sg_free_table() only releases the internal entry array, not the
 * table struct itself -- leaking one sg_table per map/unmap cycle.
 */
static void adf_memblock_unmap(struct dma_buf_attachment *attach,
		struct sg_table *table, enum dma_data_direction direction)
{
	dma_unmap_sg(attach->dev, table->sgl, 1, direction);
	sg_free_table(table);
	kfree(table);
}
/*
 * dma_buf release op: return the reserved pages to memblock and free
 * the private data.  The __init_memblock annotation matches
 * memblock_free()'s section placement -- presumably needed when
 * memblock code is discarded after init; confirm against memblock.h.
 */
static void __init_memblock adf_memblock_release(struct dma_buf *buf)
{
	struct adf_memblock_pdata *pdata = buf->priv;
	int err = memblock_free(pdata->base, buf->size);

	if (err < 0)
		pr_warn("%s: freeing memblock failed: %d\n", __func__, err);
	kfree(pdata);
}
/*
 * Shared kmap/kmap_atomic implementation: resolve the page backing
 * @pgoffset within the memblock region and map it into kernel space.
 */
static void *adf_memblock_do_kmap(struct dma_buf *buf, unsigned long pgoffset,
		bool atomic)
{
	struct adf_memblock_pdata *pdata = buf->priv;
	struct page *pg = pfn_to_page(PFN_DOWN(pdata->base) + pgoffset);

	return atomic ? kmap_atomic(pg) : kmap(pg);
}
/* dma_buf kmap_atomic op: non-sleeping per-page mapping */
static void *adf_memblock_kmap_atomic(struct dma_buf *buf,
		unsigned long pgoffset)
{
	return adf_memblock_do_kmap(buf, pgoffset, true);
}

/* dma_buf kunmap_atomic op: atomic mappings are keyed by virtual address */
static void adf_memblock_kunmap_atomic(struct dma_buf *buf,
		unsigned long pgoffset, void *vaddr)
{
	kunmap_atomic(vaddr);
}

/* dma_buf kmap op: sleeping per-page mapping */
static void *adf_memblock_kmap(struct dma_buf *buf, unsigned long pgoffset)
{
	return adf_memblock_do_kmap(buf, pgoffset, false);
}
static void adf_memblock_kunmap(struct dma_buf *buf, unsigned long pgoffset,
void *vaddr)
{
kunmap(vaddr);
}
/*
 * dma_buf mmap op: map the contiguous region into userspace.
 * NOTE(review): vma->vm_pgoff is ignored and the requested length is
 * not checked against buf->size -- confirm the dma-buf core validates
 * these before invoking this op.
 */
static int adf_memblock_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
	struct adf_memblock_pdata *pdata = buf->priv;

	return remap_pfn_range(vma, vma->vm_start, PFN_DOWN(pdata->base),
			vma->vm_end - vma->vm_start, vma->vm_page_prot);
}
/* dma_buf exporter ops; wired up by adf_memblock_export() below */
struct dma_buf_ops adf_memblock_ops = {
	.map_dma_buf = adf_memblock_map,
	.unmap_dma_buf = adf_memblock_unmap,
	.release = adf_memblock_release,
	.kmap_atomic = adf_memblock_kmap_atomic,
	.kunmap_atomic = adf_memblock_kunmap_atomic,
	.kmap = adf_memblock_kmap,
	.kunmap = adf_memblock_kunmap,
	.mmap = adf_memblock_mmap,
};
/**
 * adf_memblock_export - export a memblock reserved area as a dma-buf
 *
 * @base: base physical address
 * @size: memblock size
 * @flags: mode flags for the dma-buf's file
 *
 * @base and @size must be page-aligned.
 *
 * Returns a dma-buf on success or ERR_PTR(-errno) on failure.
 */
struct dma_buf *adf_memblock_export(phys_addr_t base, size_t size, int flags)
{
	struct adf_memblock_pdata *priv;
	struct dma_buf *dmabuf;

	/* the mmap/kmap ops assume whole pages */
	if (PAGE_ALIGN(base) != base || PAGE_ALIGN(size) != size)
		return ERR_PTR(-EINVAL);

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (priv == NULL)
		return ERR_PTR(-ENOMEM);

	priv->base = base;
	dmabuf = dma_buf_export(priv, &adf_memblock_ops, size, flags, NULL);
	if (IS_ERR(dmabuf))
		kfree(priv);

	return dmabuf;
}
EXPORT_SYMBOL(adf_memblock_export);

View file

@ -0,0 +1,296 @@
/*
* Copyright (C) 2013 Google, Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <video/adf_client.h>
#include "adf.h"
#include "adf_fops.h"
#include "adf_sysfs.h"
/* device class and dynamically-allocated chardev major shared by all ADF nodes */
static struct class *adf_class;
static int adf_major;
/* maps chardev minor number -> struct adf_obj; see adf_obj_sysfs_find() */
static DEFINE_IDR(adf_minors);

/* sysfs callbacks receive the embedded struct device; recover the interface */
#define dev_to_adf_interface(p) \
	adf_obj_to_interface(container_of(p, struct adf_obj, dev))
/* sysfs "dpms_state" read: current DPMS state as a decimal number */
static ssize_t dpms_state_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct adf_interface *interface = dev_to_adf_interface(dev);
	unsigned int state = adf_interface_dpms_state(interface);

	return scnprintf(buf, PAGE_SIZE, "%u\n", state);
}
/* sysfs "dpms_state" write: parse a DPMS state and blank/unblank */
static ssize_t dpms_state_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct adf_interface *interface = dev_to_adf_interface(dev);
	u8 state;
	int err;

	err = kstrtou8(buf, 0, &state);
	if (err < 0)
		return err;

	err = adf_interface_blank(interface, state);
	return (err < 0) ? err : count;
}
/*
 * sysfs "current_mode" read: the mode's name when it has one, otherwise
 * a synthesized "<hdisplay>x<vdisplay>[i]" string.
 */
static ssize_t current_mode_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct adf_interface *interface = dev_to_adf_interface(dev);
	struct drm_mode_modeinfo mode;

	adf_interface_current_mode(interface, &mode);

	if (mode.name[0] != '\0')
		return scnprintf(buf, PAGE_SIZE, "%s\n", mode.name);

	return scnprintf(buf, PAGE_SIZE, "%ux%u%s\n", mode.hdisplay,
			mode.vdisplay,
			(mode.flags & DRM_MODE_FLAG_INTERLACE) ? "i" : "");
}
/* sysfs "type" read: human-readable interface type */
static ssize_t type_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct adf_interface *interface = dev_to_adf_interface(dev);

	return scnprintf(buf, PAGE_SIZE, "%s\n",
			adf_interface_type_str(interface));
}
/* sysfs "vsync_timestamp" read: last vsync time in ns, taken under vsync_lock */
static ssize_t vsync_timestamp_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct adf_interface *interface = dev_to_adf_interface(dev);
	unsigned long flags;
	ktime_t stamp;

	read_lock_irqsave(&interface->vsync_lock, flags);
	memcpy(&stamp, &interface->vsync_timestamp, sizeof(stamp));
	read_unlock_irqrestore(&interface->vsync_lock, flags);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", ktime_to_ns(stamp));
}
/* sysfs "hotplug_detect" read: current hotplug-detect state */
static ssize_t hotplug_detect_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct adf_interface *interface = dev_to_adf_interface(dev);

	return scnprintf(buf, PAGE_SIZE, "%u\n", interface->hotplug_detect);
}
/* per-interface sysfs attributes; installed by adf_interface_sysfs_init() */
static struct device_attribute adf_interface_attrs[] = {
	/* dpms_state is the only writable attribute (owner-only writes) */
	__ATTR(dpms_state, S_IRUGO|S_IWUSR, dpms_state_show, dpms_state_store),
	__ATTR_RO(current_mode),
	__ATTR_RO(hotplug_detect),
	__ATTR_RO(type),
	__ATTR_RO(vsync_timestamp),
};
/*
 * Allocate a chardev minor for @obj and register its embedded device.
 * On success the object is visible in sysfs and resolvable via
 * adf_obj_sysfs_find().  Returns 0 or a negative errno; on failure the
 * minor is released again.
 */
int adf_obj_sysfs_init(struct adf_obj *obj, struct device *parent)
{
	int ret = idr_alloc(&adf_minors, obj, 0, 0, GFP_KERNEL);
	if (ret < 0) {
		pr_err("%s: allocating adf minor failed: %d\n", __func__,
				ret);
		return ret;
	}

	obj->minor = ret;
	obj->dev.parent = parent;
	obj->dev.class = adf_class;
	obj->dev.devt = MKDEV(adf_major, obj->minor);

	ret = device_register(&obj->dev);
	if (ret < 0) {
		pr_err("%s: registering adf object failed: %d\n", __func__,
				ret);
		goto err_device_register;
	}

	return 0;

err_device_register:
	idr_remove(&adf_minors, obj->minor);
	return ret;
}
/* /dev node name for a device object: "adf<id>" */
static char *adf_device_devnode(struct device *dev, umode_t *mode,
		kuid_t *uid, kgid_t *gid)
{
	struct adf_obj *adf_obj = container_of(dev, struct adf_obj, dev);

	return kasprintf(GFP_KERNEL, "adf%d", adf_obj->id);
}
/* /dev node name for an interface: "adf-interface<devid>.<intfid>" */
static char *adf_interface_devnode(struct device *dev, umode_t *mode,
		kuid_t *uid, kgid_t *gid)
{
	struct adf_obj *adf_obj = container_of(dev, struct adf_obj, dev);
	struct adf_interface *interface = adf_obj_to_interface(adf_obj);
	struct adf_device *adf_dev = adf_interface_parent(interface);

	return kasprintf(GFP_KERNEL, "adf-interface%d.%d",
			adf_dev->base.id, interface->base.id);
}
/* /dev node name for an overlay engine: "adf-overlay-engine<devid>.<engid>" */
static char *adf_overlay_engine_devnode(struct device *dev, umode_t *mode,
		kuid_t *uid, kgid_t *gid)
{
	struct adf_obj *adf_obj = container_of(dev, struct adf_obj, dev);
	struct adf_overlay_engine *engine = adf_obj_to_overlay_engine(adf_obj);
	struct adf_device *adf_dev = adf_overlay_engine_parent(engine);

	return kasprintf(GFP_KERNEL, "adf-overlay-engine%d.%d",
			adf_dev->base.id, engine->base.id);
}
/* the devices are embedded in longer-lived ADF objects; nothing to free here */
static void adf_noop_release(struct device *dev)
{
}

static struct device_type adf_device_type = {
	.name = "adf_device",
	.devnode = adf_device_devnode,
	.release = adf_noop_release,
};

static struct device_type adf_interface_type = {
	.name = "adf_interface",
	.devnode = adf_interface_devnode,
	.release = adf_noop_release,
};

static struct device_type adf_overlay_engine_type = {
	.name = "adf_overlay_engine",
	.devnode = adf_overlay_engine_devnode,
	.release = adf_noop_release,
};
int adf_device_sysfs_init(struct adf_device *dev)
{
dev->base.dev.type = &adf_device_type;
dev_set_name(&dev->base.dev, "%s", dev->base.name);
return adf_obj_sysfs_init(&dev->base, dev->dev);
}
/*
 * Register an interface's sysfs node under its parent device and create
 * the adf_interface_attrs files.  On attribute failure, every file
 * created so far is removed again before returning the error.
 */
int adf_interface_sysfs_init(struct adf_interface *intf)
{
	struct adf_device *parent = adf_interface_parent(intf);
	size_t i;
	int ret;

	intf->base.dev.type = &adf_interface_type;
	dev_set_name(&intf->base.dev, "%s-interface%d", parent->base.name,
			intf->base.id);

	ret = adf_obj_sysfs_init(&intf->base, &parent->base.dev);
	if (ret < 0)
		return ret;

	for (i = 0; i < ARRAY_SIZE(adf_interface_attrs); i++) {
		ret = device_create_file(&intf->base.dev,
				&adf_interface_attrs[i]);
		if (ret < 0) {
			dev_err(&intf->base.dev, "creating sysfs attribute %s failed: %d\n",
					adf_interface_attrs[i].attr.name, ret);
			/* unwind the attributes created before the failure */
			while (i--)
				device_remove_file(&intf->base.dev,
						&adf_interface_attrs[i]);
			return ret;
		}
	}

	return 0;
}
int adf_overlay_engine_sysfs_init(struct adf_overlay_engine *eng)
{
struct adf_device *parent = adf_overlay_engine_parent(eng);
eng->base.dev.type = &adf_overlay_engine_type;
dev_set_name(&eng->base.dev, "%s-overlay-engine%d", parent->base.name,
eng->base.id);
return adf_obj_sysfs_init(&eng->base, &parent->base.dev);
}
/* resolve a chardev minor back to its adf_obj; NULL if not registered */
struct adf_obj *adf_obj_sysfs_find(int minor)
{
	return idr_find(&adf_minors, minor);
}
/* drop the minor mapping and unregister the object's device */
void adf_obj_sysfs_destroy(struct adf_obj *obj)
{
	idr_remove(&adf_minors, obj->minor);
	device_unregister(&obj->dev);
}
/* tear down a device node created by adf_device_sysfs_init() */
void adf_device_sysfs_destroy(struct adf_device *dev)
{
	adf_obj_sysfs_destroy(&dev->base);
}
/* remove the interface's attribute files, then its underlying obj node */
void adf_interface_sysfs_destroy(struct adf_interface *intf)
{
	size_t idx;

	for (idx = 0; idx != ARRAY_SIZE(adf_interface_attrs); idx++)
		device_remove_file(&intf->base.dev, &adf_interface_attrs[idx]);

	adf_obj_sysfs_destroy(&intf->base);
}
/* tear down an overlay-engine node created by adf_overlay_engine_sysfs_init() */
void adf_overlay_engine_sysfs_destroy(struct adf_overlay_engine *eng)
{
	adf_obj_sysfs_destroy(&eng->base);
}
/*
 * Register the "adf" device class and character-device region.
 * Returns 0 on success or a negative errno; on failure nothing remains
 * registered.
 *
 * Fix: the error path called class_destroy(adf_class), but the global
 * adf_class is only assigned after both steps succeed, so a
 * register_chrdev() failure leaked the freshly created class (the call
 * was a no-op on the still-NULL global).  Destroy the local handle
 * instead.
 */
int adf_sysfs_init(void)
{
	struct class *class;
	int ret;

	class = class_create(THIS_MODULE, "adf");
	if (IS_ERR(class)) {
		ret = PTR_ERR(class);
		pr_err("%s: creating class failed: %d\n", __func__, ret);
		return ret;
	}

	/* 0 => dynamically allocate a major; positive return is the major */
	ret = register_chrdev(0, "adf", &adf_fops);
	if (ret < 0) {
		pr_err("%s: registering device failed: %d\n", __func__, ret);
		goto err_chrdev;
	}

	adf_class = class;
	adf_major = ret;
	return 0;

err_chrdev:
	class_destroy(class);
	return ret;
}
void adf_sysfs_destroy(void)
{
idr_destroy(&adf_minors);
class_destroy(adf_class);
}

View file

@ -0,0 +1,33 @@
/*
* Copyright (C) 2013 Google, Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef __VIDEO_ADF_ADF_SYSFS_H
#define __VIDEO_ADF_ADF_SYSFS_H

struct adf_device;
struct adf_interface;
struct adf_overlay_engine;

/* per-object sysfs/chardev registration; each *_init returns 0 or -errno */
int adf_device_sysfs_init(struct adf_device *dev);
void adf_device_sysfs_destroy(struct adf_device *dev);
int adf_interface_sysfs_init(struct adf_interface *intf);
void adf_interface_sysfs_destroy(struct adf_interface *intf);
int adf_overlay_engine_sysfs_init(struct adf_overlay_engine *eng);
void adf_overlay_engine_sysfs_destroy(struct adf_overlay_engine *eng);
/* resolve a chardev minor to its registered object, or NULL */
struct adf_obj *adf_obj_sysfs_find(int minor);

/* module-wide class/chardev setup and teardown */
int adf_sysfs_init(void);
void adf_sysfs_destroy(void);

#endif /* __VIDEO_ADF_ADF_SYSFS_H */

View file

@ -0,0 +1,93 @@
/*
* Copyright (C) 2013 Google, Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#undef TRACE_SYSTEM
#define TRACE_SYSTEM adf
#if !defined(__VIDEO_ADF_ADF_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
#define __VIDEO_ADF_ADF_TRACE_H
#include <linux/tracepoint.h>
#include <video/adf.h>
/*
 * adf_event, adf_event_enable, and adf_event_disable record identical
 * information (object name, numeric event type, and its human-readable
 * string).  Deduplicate the three copy-pasted TRACE_EVENT() bodies with
 * one DECLARE_EVENT_CLASS() plus three DEFINE_EVENT()s -- the standard
 * tracepoint idiom, which also shrinks the generated code.  The trace
 * point names and formats are unchanged.
 */
DECLARE_EVENT_CLASS(adf_event_template,
	TP_PROTO(struct adf_obj *obj, enum adf_event_type type),
	TP_ARGS(obj, type),
	TP_STRUCT__entry(
		__string(name, obj->name)
		__field(enum adf_event_type, type)
		__array(char, type_str, 32)
	),
	TP_fast_assign(
		__assign_str(name, obj->name);
		__entry->type = type;
		strlcpy(__entry->type_str, adf_event_type_str(obj, type),
				sizeof(__entry->type_str));
	),
	TP_printk("obj=%s type=%u (%s)",
			__get_str(name),
			__entry->type,
			__entry->type_str)
);

DEFINE_EVENT(adf_event_template, adf_event,
	TP_PROTO(struct adf_obj *obj, enum adf_event_type type),
	TP_ARGS(obj, type)
);

DEFINE_EVENT(adf_event_template, adf_event_enable,
	TP_PROTO(struct adf_obj *obj, enum adf_event_type type),
	TP_ARGS(obj, type)
);

DEFINE_EVENT(adf_event_template, adf_event_disable,
	TP_PROTO(struct adf_obj *obj, enum adf_event_type type),
	TP_ARGS(obj, type)
);
#endif /* __VIDEO_ADF_ADF_TRACE_H */
#undef TRACE_INCLUDE_PATH
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE adf_trace
#include <trace/define_trace.h>