mirror of
https://github.com/AetherDroid/android_kernel_samsung_on5xelte.git
synced 2025-10-30 07:38:52 +01:00
Fixed MTP to work with TWRP
This commit is contained in:
commit
f6dfaef42e
50820 changed files with 20846062 additions and 0 deletions
26
drivers/gpu/drm/omapdrm/Kconfig
Normal file
26
drivers/gpu/drm/omapdrm/Kconfig
Normal file
|
|
@ -0,0 +1,26 @@
|
|||
|
||||
config DRM_OMAP
|
||||
tristate "OMAP DRM"
|
||||
depends on DRM
|
||||
depends on ARCH_OMAP2PLUS || ARCH_MULTIPLATFORM
|
||||
depends on OMAP2_DSS
|
||||
select DRM_KMS_HELPER
|
||||
select DRM_KMS_FB_HELPER
|
||||
select FB_SYS_FILLRECT
|
||||
select FB_SYS_COPYAREA
|
||||
select FB_SYS_IMAGEBLIT
|
||||
select FB_SYS_FOPS
|
||||
default n
|
||||
help
|
||||
DRM display driver for OMAP2/3/4 based boards.
|
||||
|
||||
config DRM_OMAP_NUM_CRTCS
|
||||
int "Number of CRTCs"
|
||||
range 1 10
|
||||
default 1 if ARCH_OMAP2 || ARCH_OMAP3
|
||||
default 2 if ARCH_OMAP4
|
||||
depends on DRM_OMAP
|
||||
help
|
||||
Select the number of video overlays which can be used as framebuffers.
|
||||
The remaining overlays are reserved for video.
|
||||
|
||||
21
drivers/gpu/drm/omapdrm/Makefile
Normal file
21
drivers/gpu/drm/omapdrm/Makefile
Normal file
|
|
@ -0,0 +1,21 @@
|
|||
#
|
||||
# Makefile for the drm device driver. This driver provides support for the
|
||||
# Direct Rendering Infrastructure (DRI)
|
||||
#
|
||||
|
||||
ccflags-y := -Iinclude/drm -Werror
|
||||
omapdrm-y := omap_drv.o \
|
||||
omap_irq.o \
|
||||
omap_debugfs.o \
|
||||
omap_crtc.o \
|
||||
omap_plane.o \
|
||||
omap_encoder.o \
|
||||
omap_connector.o \
|
||||
omap_fb.o \
|
||||
omap_fbdev.o \
|
||||
omap_gem.o \
|
||||
omap_gem_dmabuf.o \
|
||||
omap_dmm_tiler.o \
|
||||
tcm-sita.o
|
||||
|
||||
obj-$(CONFIG_DRM_OMAP) += omapdrm.o
|
||||
23
drivers/gpu/drm/omapdrm/TODO
Normal file
23
drivers/gpu/drm/omapdrm/TODO
Normal file
|
|
@ -0,0 +1,23 @@
|
|||
TODO
|
||||
. Where should we do eviction (detatch_pages())? We aren't necessarily
|
||||
accessing the pages via a GART, so maybe we need some other threshold
|
||||
to put a cap on the # of pages that can be pin'd.
|
||||
. Use mm_shrinker to trigger unpinning pages.
|
||||
. This is mainly theoretical since most of these devices don't actually
|
||||
have swap or harddrive.
|
||||
. GEM/shmem backed pages can have existing mappings (kernel linear map,
|
||||
etc..), which isn't really ideal.
|
||||
. Revisit GEM sync object infrastructure.. TTM has some framework for this
|
||||
already. Possibly this could be refactored out and made more common?
|
||||
There should be some way to do this with less wheel-reinvention.
|
||||
. This can be handled by the dma-buf fence/reservation stuff when it
|
||||
lands
|
||||
|
||||
Userspace:
|
||||
. git://anongit.freedesktop.org/xorg/driver/xf86-video-omap
|
||||
|
||||
Currently tested on
|
||||
. OMAP3530 beagleboard
|
||||
. OMAP4430 pandaboard
|
||||
. OMAP4460 pandaboard
|
||||
. OMAP5432 uEVM
|
||||
331
drivers/gpu/drm/omapdrm/omap_connector.c
Normal file
331
drivers/gpu/drm/omapdrm/omap_connector.c
Normal file
|
|
@ -0,0 +1,331 @@
|
|||
/*
|
||||
* drivers/gpu/drm/omapdrm/omap_connector.c
|
||||
*
|
||||
* Copyright (C) 2011 Texas Instruments
|
||||
* Author: Rob Clark <rob@ti.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 as published by
|
||||
* the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License along with
|
||||
* this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include "omap_drv.h"
|
||||
|
||||
#include "drm_crtc.h"
|
||||
#include "drm_crtc_helper.h"
|
||||
|
||||
/*
|
||||
* connector funcs
|
||||
*/
|
||||
|
||||
#define to_omap_connector(x) container_of(x, struct omap_connector, base)
|
||||
|
||||
struct omap_connector {
|
||||
struct drm_connector base;
|
||||
struct omap_dss_device *dssdev;
|
||||
struct drm_encoder *encoder;
|
||||
bool hdmi_mode;
|
||||
};
|
||||
|
||||
bool omap_connector_get_hdmi_mode(struct drm_connector *connector)
|
||||
{
|
||||
struct omap_connector *omap_connector = to_omap_connector(connector);
|
||||
|
||||
return omap_connector->hdmi_mode;
|
||||
}
|
||||
|
||||
void copy_timings_omap_to_drm(struct drm_display_mode *mode,
|
||||
struct omap_video_timings *timings)
|
||||
{
|
||||
mode->clock = timings->pixelclock / 1000;
|
||||
|
||||
mode->hdisplay = timings->x_res;
|
||||
mode->hsync_start = mode->hdisplay + timings->hfp;
|
||||
mode->hsync_end = mode->hsync_start + timings->hsw;
|
||||
mode->htotal = mode->hsync_end + timings->hbp;
|
||||
|
||||
mode->vdisplay = timings->y_res;
|
||||
mode->vsync_start = mode->vdisplay + timings->vfp;
|
||||
mode->vsync_end = mode->vsync_start + timings->vsw;
|
||||
mode->vtotal = mode->vsync_end + timings->vbp;
|
||||
|
||||
mode->flags = 0;
|
||||
|
||||
if (timings->interlace)
|
||||
mode->flags |= DRM_MODE_FLAG_INTERLACE;
|
||||
|
||||
if (timings->hsync_level == OMAPDSS_SIG_ACTIVE_HIGH)
|
||||
mode->flags |= DRM_MODE_FLAG_PHSYNC;
|
||||
else
|
||||
mode->flags |= DRM_MODE_FLAG_NHSYNC;
|
||||
|
||||
if (timings->vsync_level == OMAPDSS_SIG_ACTIVE_HIGH)
|
||||
mode->flags |= DRM_MODE_FLAG_PVSYNC;
|
||||
else
|
||||
mode->flags |= DRM_MODE_FLAG_NVSYNC;
|
||||
}
|
||||
|
||||
void copy_timings_drm_to_omap(struct omap_video_timings *timings,
|
||||
struct drm_display_mode *mode)
|
||||
{
|
||||
timings->pixelclock = mode->clock * 1000;
|
||||
|
||||
timings->x_res = mode->hdisplay;
|
||||
timings->hfp = mode->hsync_start - mode->hdisplay;
|
||||
timings->hsw = mode->hsync_end - mode->hsync_start;
|
||||
timings->hbp = mode->htotal - mode->hsync_end;
|
||||
|
||||
timings->y_res = mode->vdisplay;
|
||||
timings->vfp = mode->vsync_start - mode->vdisplay;
|
||||
timings->vsw = mode->vsync_end - mode->vsync_start;
|
||||
timings->vbp = mode->vtotal - mode->vsync_end;
|
||||
|
||||
timings->interlace = !!(mode->flags & DRM_MODE_FLAG_INTERLACE);
|
||||
|
||||
if (mode->flags & DRM_MODE_FLAG_PHSYNC)
|
||||
timings->hsync_level = OMAPDSS_SIG_ACTIVE_HIGH;
|
||||
else
|
||||
timings->hsync_level = OMAPDSS_SIG_ACTIVE_LOW;
|
||||
|
||||
if (mode->flags & DRM_MODE_FLAG_PVSYNC)
|
||||
timings->vsync_level = OMAPDSS_SIG_ACTIVE_HIGH;
|
||||
else
|
||||
timings->vsync_level = OMAPDSS_SIG_ACTIVE_LOW;
|
||||
|
||||
timings->data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE;
|
||||
timings->de_level = OMAPDSS_SIG_ACTIVE_HIGH;
|
||||
timings->sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES;
|
||||
}
|
||||
|
||||
static enum drm_connector_status omap_connector_detect(
|
||||
struct drm_connector *connector, bool force)
|
||||
{
|
||||
struct omap_connector *omap_connector = to_omap_connector(connector);
|
||||
struct omap_dss_device *dssdev = omap_connector->dssdev;
|
||||
struct omap_dss_driver *dssdrv = dssdev->driver;
|
||||
enum drm_connector_status ret;
|
||||
|
||||
if (dssdrv->detect) {
|
||||
if (dssdrv->detect(dssdev))
|
||||
ret = connector_status_connected;
|
||||
else
|
||||
ret = connector_status_disconnected;
|
||||
} else if (dssdev->type == OMAP_DISPLAY_TYPE_DPI ||
|
||||
dssdev->type == OMAP_DISPLAY_TYPE_DBI ||
|
||||
dssdev->type == OMAP_DISPLAY_TYPE_SDI ||
|
||||
dssdev->type == OMAP_DISPLAY_TYPE_DSI) {
|
||||
ret = connector_status_connected;
|
||||
} else {
|
||||
ret = connector_status_unknown;
|
||||
}
|
||||
|
||||
VERB("%s: %d (force=%d)", omap_connector->dssdev->name, ret, force);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void omap_connector_destroy(struct drm_connector *connector)
|
||||
{
|
||||
struct omap_connector *omap_connector = to_omap_connector(connector);
|
||||
struct omap_dss_device *dssdev = omap_connector->dssdev;
|
||||
|
||||
DBG("%s", omap_connector->dssdev->name);
|
||||
drm_connector_unregister(connector);
|
||||
drm_connector_cleanup(connector);
|
||||
kfree(omap_connector);
|
||||
|
||||
omap_dss_put_device(dssdev);
|
||||
}
|
||||
|
||||
#define MAX_EDID 512
|
||||
|
||||
static int omap_connector_get_modes(struct drm_connector *connector)
|
||||
{
|
||||
struct omap_connector *omap_connector = to_omap_connector(connector);
|
||||
struct omap_dss_device *dssdev = omap_connector->dssdev;
|
||||
struct omap_dss_driver *dssdrv = dssdev->driver;
|
||||
struct drm_device *dev = connector->dev;
|
||||
int n = 0;
|
||||
|
||||
DBG("%s", omap_connector->dssdev->name);
|
||||
|
||||
/* if display exposes EDID, then we parse that in the normal way to
|
||||
* build table of supported modes.. otherwise (ie. fixed resolution
|
||||
* LCD panels) we just return a single mode corresponding to the
|
||||
* currently configured timings:
|
||||
*/
|
||||
if (dssdrv->read_edid) {
|
||||
void *edid = kzalloc(MAX_EDID, GFP_KERNEL);
|
||||
|
||||
if ((dssdrv->read_edid(dssdev, edid, MAX_EDID) > 0) &&
|
||||
drm_edid_is_valid(edid)) {
|
||||
drm_mode_connector_update_edid_property(
|
||||
connector, edid);
|
||||
n = drm_add_edid_modes(connector, edid);
|
||||
|
||||
omap_connector->hdmi_mode =
|
||||
drm_detect_hdmi_monitor(edid);
|
||||
} else {
|
||||
drm_mode_connector_update_edid_property(
|
||||
connector, NULL);
|
||||
}
|
||||
|
||||
kfree(edid);
|
||||
} else {
|
||||
struct drm_display_mode *mode = drm_mode_create(dev);
|
||||
struct omap_video_timings timings = {0};
|
||||
|
||||
dssdrv->get_timings(dssdev, &timings);
|
||||
|
||||
copy_timings_omap_to_drm(mode, &timings);
|
||||
|
||||
mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
|
||||
drm_mode_set_name(mode);
|
||||
drm_mode_probed_add(connector, mode);
|
||||
|
||||
n = 1;
|
||||
}
|
||||
|
||||
return n;
|
||||
}
|
||||
|
||||
static int omap_connector_mode_valid(struct drm_connector *connector,
|
||||
struct drm_display_mode *mode)
|
||||
{
|
||||
struct omap_connector *omap_connector = to_omap_connector(connector);
|
||||
struct omap_dss_device *dssdev = omap_connector->dssdev;
|
||||
struct omap_dss_driver *dssdrv = dssdev->driver;
|
||||
struct omap_video_timings timings = {0};
|
||||
struct drm_device *dev = connector->dev;
|
||||
struct drm_display_mode *new_mode;
|
||||
int r, ret = MODE_BAD;
|
||||
|
||||
copy_timings_drm_to_omap(&timings, mode);
|
||||
mode->vrefresh = drm_mode_vrefresh(mode);
|
||||
|
||||
/*
|
||||
* if the panel driver doesn't have a check_timings, it's most likely
|
||||
* a fixed resolution panel, check if the timings match with the
|
||||
* panel's timings
|
||||
*/
|
||||
if (dssdrv->check_timings) {
|
||||
r = dssdrv->check_timings(dssdev, &timings);
|
||||
} else {
|
||||
struct omap_video_timings t = {0};
|
||||
|
||||
dssdrv->get_timings(dssdev, &t);
|
||||
|
||||
if (memcmp(&timings, &t, sizeof(struct omap_video_timings)))
|
||||
r = -EINVAL;
|
||||
else
|
||||
r = 0;
|
||||
}
|
||||
|
||||
if (!r) {
|
||||
/* check if vrefresh is still valid */
|
||||
new_mode = drm_mode_duplicate(dev, mode);
|
||||
new_mode->clock = timings.pixelclock / 1000;
|
||||
new_mode->vrefresh = 0;
|
||||
if (mode->vrefresh == drm_mode_vrefresh(new_mode))
|
||||
ret = MODE_OK;
|
||||
drm_mode_destroy(dev, new_mode);
|
||||
}
|
||||
|
||||
DBG("connector: mode %s: "
|
||||
"%d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
|
||||
(ret == MODE_OK) ? "valid" : "invalid",
|
||||
mode->base.id, mode->name, mode->vrefresh, mode->clock,
|
||||
mode->hdisplay, mode->hsync_start,
|
||||
mode->hsync_end, mode->htotal,
|
||||
mode->vdisplay, mode->vsync_start,
|
||||
mode->vsync_end, mode->vtotal, mode->type, mode->flags);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
struct drm_encoder *omap_connector_attached_encoder(
|
||||
struct drm_connector *connector)
|
||||
{
|
||||
struct omap_connector *omap_connector = to_omap_connector(connector);
|
||||
return omap_connector->encoder;
|
||||
}
|
||||
|
||||
static const struct drm_connector_funcs omap_connector_funcs = {
|
||||
.dpms = drm_helper_connector_dpms,
|
||||
.detect = omap_connector_detect,
|
||||
.fill_modes = drm_helper_probe_single_connector_modes,
|
||||
.destroy = omap_connector_destroy,
|
||||
};
|
||||
|
||||
static const struct drm_connector_helper_funcs omap_connector_helper_funcs = {
|
||||
.get_modes = omap_connector_get_modes,
|
||||
.mode_valid = omap_connector_mode_valid,
|
||||
.best_encoder = omap_connector_attached_encoder,
|
||||
};
|
||||
|
||||
/* flush an area of the framebuffer (in case of manual update display that
|
||||
* is not automatically flushed)
|
||||
*/
|
||||
void omap_connector_flush(struct drm_connector *connector,
|
||||
int x, int y, int w, int h)
|
||||
{
|
||||
struct omap_connector *omap_connector = to_omap_connector(connector);
|
||||
|
||||
/* TODO: enable when supported in dss */
|
||||
VERB("%s: %d,%d, %dx%d", omap_connector->dssdev->name, x, y, w, h);
|
||||
}
|
||||
|
||||
/* initialize connector */
|
||||
struct drm_connector *omap_connector_init(struct drm_device *dev,
|
||||
int connector_type, struct omap_dss_device *dssdev,
|
||||
struct drm_encoder *encoder)
|
||||
{
|
||||
struct drm_connector *connector = NULL;
|
||||
struct omap_connector *omap_connector;
|
||||
|
||||
DBG("%s", dssdev->name);
|
||||
|
||||
omap_dss_get_device(dssdev);
|
||||
|
||||
omap_connector = kzalloc(sizeof(struct omap_connector), GFP_KERNEL);
|
||||
if (!omap_connector)
|
||||
goto fail;
|
||||
|
||||
omap_connector->dssdev = dssdev;
|
||||
omap_connector->encoder = encoder;
|
||||
|
||||
connector = &omap_connector->base;
|
||||
|
||||
drm_connector_init(dev, connector, &omap_connector_funcs,
|
||||
connector_type);
|
||||
drm_connector_helper_add(connector, &omap_connector_helper_funcs);
|
||||
|
||||
#if 0 /* enable when dss2 supports hotplug */
|
||||
if (dssdev->caps & OMAP_DSS_DISPLAY_CAP_HPD)
|
||||
connector->polled = 0;
|
||||
else
|
||||
#endif
|
||||
connector->polled = DRM_CONNECTOR_POLL_CONNECT |
|
||||
DRM_CONNECTOR_POLL_DISCONNECT;
|
||||
|
||||
connector->interlace_allowed = 1;
|
||||
connector->doublescan_allowed = 0;
|
||||
|
||||
drm_connector_register(connector);
|
||||
|
||||
return connector;
|
||||
|
||||
fail:
|
||||
if (connector)
|
||||
omap_connector_destroy(connector);
|
||||
|
||||
return NULL;
|
||||
}
|
||||
739
drivers/gpu/drm/omapdrm/omap_crtc.c
Normal file
739
drivers/gpu/drm/omapdrm/omap_crtc.c
Normal file
|
|
@ -0,0 +1,739 @@
|
|||
/*
|
||||
* drivers/gpu/drm/omapdrm/omap_crtc.c
|
||||
*
|
||||
* Copyright (C) 2011 Texas Instruments
|
||||
* Author: Rob Clark <rob@ti.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 as published by
|
||||
* the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License along with
|
||||
* this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include "omap_drv.h"
|
||||
|
||||
#include <drm/drm_mode.h>
|
||||
#include "drm_crtc.h"
|
||||
#include "drm_crtc_helper.h"
|
||||
|
||||
#define to_omap_crtc(x) container_of(x, struct omap_crtc, base)
|
||||
|
||||
struct omap_crtc {
|
||||
struct drm_crtc base;
|
||||
struct drm_plane *plane;
|
||||
|
||||
const char *name;
|
||||
int pipe;
|
||||
enum omap_channel channel;
|
||||
struct omap_overlay_manager_info info;
|
||||
struct drm_encoder *current_encoder;
|
||||
|
||||
/*
|
||||
* Temporary: eventually this will go away, but it is needed
|
||||
* for now to keep the output's happy. (They only need
|
||||
* mgr->id.) Eventually this will be replaced w/ something
|
||||
* more common-panel-framework-y
|
||||
*/
|
||||
struct omap_overlay_manager *mgr;
|
||||
|
||||
struct omap_video_timings timings;
|
||||
bool enabled;
|
||||
bool full_update;
|
||||
|
||||
struct omap_drm_apply apply;
|
||||
|
||||
struct omap_drm_irq apply_irq;
|
||||
struct omap_drm_irq error_irq;
|
||||
|
||||
/* list of in-progress apply's: */
|
||||
struct list_head pending_applies;
|
||||
|
||||
/* list of queued apply's: */
|
||||
struct list_head queued_applies;
|
||||
|
||||
/* for handling queued and in-progress applies: */
|
||||
struct work_struct apply_work;
|
||||
|
||||
/* if there is a pending flip, these will be non-null: */
|
||||
struct drm_pending_vblank_event *event;
|
||||
struct drm_framebuffer *old_fb;
|
||||
|
||||
/* for handling page flips without caring about what
|
||||
* the callback is called from. Possibly we should just
|
||||
* make omap_gem always call the cb from the worker so
|
||||
* we don't have to care about this..
|
||||
*
|
||||
* XXX maybe fold into apply_work??
|
||||
*/
|
||||
struct work_struct page_flip_work;
|
||||
};
|
||||
|
||||
uint32_t pipe2vbl(struct drm_crtc *crtc)
|
||||
{
|
||||
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
|
||||
|
||||
return dispc_mgr_get_vsync_irq(omap_crtc->channel);
|
||||
}
|
||||
|
||||
/*
|
||||
* Manager-ops, callbacks from output when they need to configure
|
||||
* the upstream part of the video pipe.
|
||||
*
|
||||
* Most of these we can ignore until we add support for command-mode
|
||||
* panels.. for video-mode the crtc-helpers already do an adequate
|
||||
* job of sequencing the setup of the video pipe in the proper order
|
||||
*/
|
||||
|
||||
/* ovl-mgr-id -> crtc */
|
||||
static struct omap_crtc *omap_crtcs[8];
|
||||
|
||||
/* we can probably ignore these until we support command-mode panels: */
|
||||
static int omap_crtc_connect(struct omap_overlay_manager *mgr,
|
||||
struct omap_dss_device *dst)
|
||||
{
|
||||
if (mgr->output)
|
||||
return -EINVAL;
|
||||
|
||||
if ((mgr->supported_outputs & dst->id) == 0)
|
||||
return -EINVAL;
|
||||
|
||||
dst->manager = mgr;
|
||||
mgr->output = dst;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void omap_crtc_disconnect(struct omap_overlay_manager *mgr,
|
||||
struct omap_dss_device *dst)
|
||||
{
|
||||
mgr->output->manager = NULL;
|
||||
mgr->output = NULL;
|
||||
}
|
||||
|
||||
static void omap_crtc_start_update(struct omap_overlay_manager *mgr)
|
||||
{
|
||||
}
|
||||
|
||||
static void set_enabled(struct drm_crtc *crtc, bool enable);
|
||||
|
||||
static int omap_crtc_enable(struct omap_overlay_manager *mgr)
|
||||
{
|
||||
struct omap_crtc *omap_crtc = omap_crtcs[mgr->id];
|
||||
|
||||
dispc_mgr_setup(omap_crtc->channel, &omap_crtc->info);
|
||||
dispc_mgr_set_timings(omap_crtc->channel,
|
||||
&omap_crtc->timings);
|
||||
set_enabled(&omap_crtc->base, true);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void omap_crtc_disable(struct omap_overlay_manager *mgr)
|
||||
{
|
||||
struct omap_crtc *omap_crtc = omap_crtcs[mgr->id];
|
||||
|
||||
set_enabled(&omap_crtc->base, false);
|
||||
}
|
||||
|
||||
static void omap_crtc_set_timings(struct omap_overlay_manager *mgr,
|
||||
const struct omap_video_timings *timings)
|
||||
{
|
||||
struct omap_crtc *omap_crtc = omap_crtcs[mgr->id];
|
||||
DBG("%s", omap_crtc->name);
|
||||
omap_crtc->timings = *timings;
|
||||
omap_crtc->full_update = true;
|
||||
}
|
||||
|
||||
static void omap_crtc_set_lcd_config(struct omap_overlay_manager *mgr,
|
||||
const struct dss_lcd_mgr_config *config)
|
||||
{
|
||||
struct omap_crtc *omap_crtc = omap_crtcs[mgr->id];
|
||||
DBG("%s", omap_crtc->name);
|
||||
dispc_mgr_set_lcd_config(omap_crtc->channel, config);
|
||||
}
|
||||
|
||||
static int omap_crtc_register_framedone_handler(
|
||||
struct omap_overlay_manager *mgr,
|
||||
void (*handler)(void *), void *data)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void omap_crtc_unregister_framedone_handler(
|
||||
struct omap_overlay_manager *mgr,
|
||||
void (*handler)(void *), void *data)
|
||||
{
|
||||
}
|
||||
|
||||
static const struct dss_mgr_ops mgr_ops = {
|
||||
.connect = omap_crtc_connect,
|
||||
.disconnect = omap_crtc_disconnect,
|
||||
.start_update = omap_crtc_start_update,
|
||||
.enable = omap_crtc_enable,
|
||||
.disable = omap_crtc_disable,
|
||||
.set_timings = omap_crtc_set_timings,
|
||||
.set_lcd_config = omap_crtc_set_lcd_config,
|
||||
.register_framedone_handler = omap_crtc_register_framedone_handler,
|
||||
.unregister_framedone_handler = omap_crtc_unregister_framedone_handler,
|
||||
};
|
||||
|
||||
/*
|
||||
* CRTC funcs:
|
||||
*/
|
||||
|
||||
static void omap_crtc_destroy(struct drm_crtc *crtc)
|
||||
{
|
||||
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
|
||||
|
||||
DBG("%s", omap_crtc->name);
|
||||
|
||||
WARN_ON(omap_crtc->apply_irq.registered);
|
||||
omap_irq_unregister(crtc->dev, &omap_crtc->error_irq);
|
||||
|
||||
drm_crtc_cleanup(crtc);
|
||||
|
||||
kfree(omap_crtc);
|
||||
}
|
||||
|
||||
static void omap_crtc_dpms(struct drm_crtc *crtc, int mode)
|
||||
{
|
||||
struct omap_drm_private *priv = crtc->dev->dev_private;
|
||||
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
|
||||
bool enabled = (mode == DRM_MODE_DPMS_ON);
|
||||
int i;
|
||||
|
||||
DBG("%s: %d", omap_crtc->name, mode);
|
||||
|
||||
if (enabled != omap_crtc->enabled) {
|
||||
omap_crtc->enabled = enabled;
|
||||
omap_crtc->full_update = true;
|
||||
omap_crtc_apply(crtc, &omap_crtc->apply);
|
||||
|
||||
/* also enable our private plane: */
|
||||
WARN_ON(omap_plane_dpms(omap_crtc->plane, mode));
|
||||
|
||||
/* and any attached overlay planes: */
|
||||
for (i = 0; i < priv->num_planes; i++) {
|
||||
struct drm_plane *plane = priv->planes[i];
|
||||
if (plane->crtc == crtc)
|
||||
WARN_ON(omap_plane_dpms(plane, mode));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static bool omap_crtc_mode_fixup(struct drm_crtc *crtc,
|
||||
const struct drm_display_mode *mode,
|
||||
struct drm_display_mode *adjusted_mode)
|
||||
{
|
||||
return true;
|
||||
}
|
||||
|
||||
static int omap_crtc_mode_set(struct drm_crtc *crtc,
|
||||
struct drm_display_mode *mode,
|
||||
struct drm_display_mode *adjusted_mode,
|
||||
int x, int y,
|
||||
struct drm_framebuffer *old_fb)
|
||||
{
|
||||
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
|
||||
|
||||
mode = adjusted_mode;
|
||||
|
||||
DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
|
||||
omap_crtc->name, mode->base.id, mode->name,
|
||||
mode->vrefresh, mode->clock,
|
||||
mode->hdisplay, mode->hsync_start,
|
||||
mode->hsync_end, mode->htotal,
|
||||
mode->vdisplay, mode->vsync_start,
|
||||
mode->vsync_end, mode->vtotal,
|
||||
mode->type, mode->flags);
|
||||
|
||||
copy_timings_drm_to_omap(&omap_crtc->timings, mode);
|
||||
omap_crtc->full_update = true;
|
||||
|
||||
return omap_plane_mode_set(omap_crtc->plane, crtc, crtc->primary->fb,
|
||||
0, 0, mode->hdisplay, mode->vdisplay,
|
||||
x << 16, y << 16,
|
||||
mode->hdisplay << 16, mode->vdisplay << 16,
|
||||
NULL, NULL);
|
||||
}
|
||||
|
||||
static void omap_crtc_prepare(struct drm_crtc *crtc)
|
||||
{
|
||||
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
|
||||
DBG("%s", omap_crtc->name);
|
||||
omap_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
|
||||
}
|
||||
|
||||
static void omap_crtc_commit(struct drm_crtc *crtc)
|
||||
{
|
||||
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
|
||||
DBG("%s", omap_crtc->name);
|
||||
omap_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
|
||||
}
|
||||
|
||||
static int omap_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
|
||||
struct drm_framebuffer *old_fb)
|
||||
{
|
||||
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
|
||||
struct drm_plane *plane = omap_crtc->plane;
|
||||
struct drm_display_mode *mode = &crtc->mode;
|
||||
|
||||
return omap_plane_mode_set(plane, crtc, crtc->primary->fb,
|
||||
0, 0, mode->hdisplay, mode->vdisplay,
|
||||
x << 16, y << 16,
|
||||
mode->hdisplay << 16, mode->vdisplay << 16,
|
||||
NULL, NULL);
|
||||
}
|
||||
|
||||
static void vblank_cb(void *arg)
|
||||
{
|
||||
struct drm_crtc *crtc = arg;
|
||||
struct drm_device *dev = crtc->dev;
|
||||
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&dev->event_lock, flags);
|
||||
|
||||
/* wakeup userspace */
|
||||
if (omap_crtc->event)
|
||||
drm_send_vblank_event(dev, omap_crtc->pipe, omap_crtc->event);
|
||||
|
||||
omap_crtc->event = NULL;
|
||||
omap_crtc->old_fb = NULL;
|
||||
|
||||
spin_unlock_irqrestore(&dev->event_lock, flags);
|
||||
}
|
||||
|
||||
static void page_flip_worker(struct work_struct *work)
|
||||
{
|
||||
struct omap_crtc *omap_crtc =
|
||||
container_of(work, struct omap_crtc, page_flip_work);
|
||||
struct drm_crtc *crtc = &omap_crtc->base;
|
||||
struct drm_display_mode *mode = &crtc->mode;
|
||||
struct drm_gem_object *bo;
|
||||
|
||||
drm_modeset_lock(&crtc->mutex, NULL);
|
||||
omap_plane_mode_set(omap_crtc->plane, crtc, crtc->primary->fb,
|
||||
0, 0, mode->hdisplay, mode->vdisplay,
|
||||
crtc->x << 16, crtc->y << 16,
|
||||
mode->hdisplay << 16, mode->vdisplay << 16,
|
||||
vblank_cb, crtc);
|
||||
drm_modeset_unlock(&crtc->mutex);
|
||||
|
||||
bo = omap_framebuffer_bo(crtc->primary->fb, 0);
|
||||
drm_gem_object_unreference_unlocked(bo);
|
||||
}
|
||||
|
||||
static void page_flip_cb(void *arg)
|
||||
{
|
||||
struct drm_crtc *crtc = arg;
|
||||
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
|
||||
struct omap_drm_private *priv = crtc->dev->dev_private;
|
||||
|
||||
/* avoid assumptions about what ctxt we are called from: */
|
||||
queue_work(priv->wq, &omap_crtc->page_flip_work);
|
||||
}
|
||||
|
||||
static int omap_crtc_page_flip_locked(struct drm_crtc *crtc,
|
||||
struct drm_framebuffer *fb,
|
||||
struct drm_pending_vblank_event *event,
|
||||
uint32_t page_flip_flags)
|
||||
{
|
||||
struct drm_device *dev = crtc->dev;
|
||||
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
|
||||
struct drm_plane *primary = crtc->primary;
|
||||
struct drm_gem_object *bo;
|
||||
unsigned long flags;
|
||||
|
||||
DBG("%d -> %d (event=%p)", primary->fb ? primary->fb->base.id : -1,
|
||||
fb->base.id, event);
|
||||
|
||||
spin_lock_irqsave(&dev->event_lock, flags);
|
||||
|
||||
if (omap_crtc->old_fb) {
|
||||
spin_unlock_irqrestore(&dev->event_lock, flags);
|
||||
dev_err(dev->dev, "already a pending flip\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
omap_crtc->event = event;
|
||||
omap_crtc->old_fb = primary->fb = fb;
|
||||
|
||||
spin_unlock_irqrestore(&dev->event_lock, flags);
|
||||
|
||||
/*
|
||||
* Hold a reference temporarily until the crtc is updated
|
||||
* and takes the reference to the bo. This avoids it
|
||||
* getting freed from under us:
|
||||
*/
|
||||
bo = omap_framebuffer_bo(fb, 0);
|
||||
drm_gem_object_reference(bo);
|
||||
|
||||
omap_gem_op_async(bo, OMAP_GEM_READ, page_flip_cb, crtc);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int omap_crtc_set_property(struct drm_crtc *crtc,
|
||||
struct drm_property *property, uint64_t val)
|
||||
{
|
||||
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
|
||||
struct omap_drm_private *priv = crtc->dev->dev_private;
|
||||
|
||||
if (property == priv->rotation_prop) {
|
||||
crtc->invert_dimensions =
|
||||
!!(val & ((1LL << DRM_ROTATE_90) | (1LL << DRM_ROTATE_270)));
|
||||
}
|
||||
|
||||
return omap_plane_set_property(omap_crtc->plane, property, val);
|
||||
}
|
||||
|
||||
static const struct drm_crtc_funcs omap_crtc_funcs = {
|
||||
.set_config = drm_crtc_helper_set_config,
|
||||
.destroy = omap_crtc_destroy,
|
||||
.page_flip = omap_crtc_page_flip_locked,
|
||||
.set_property = omap_crtc_set_property,
|
||||
};
|
||||
|
||||
static const struct drm_crtc_helper_funcs omap_crtc_helper_funcs = {
|
||||
.dpms = omap_crtc_dpms,
|
||||
.mode_fixup = omap_crtc_mode_fixup,
|
||||
.mode_set = omap_crtc_mode_set,
|
||||
.prepare = omap_crtc_prepare,
|
||||
.commit = omap_crtc_commit,
|
||||
.mode_set_base = omap_crtc_mode_set_base,
|
||||
};
|
||||
|
||||
const struct omap_video_timings *omap_crtc_timings(struct drm_crtc *crtc)
|
||||
{
|
||||
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
|
||||
return &omap_crtc->timings;
|
||||
}
|
||||
|
||||
enum omap_channel omap_crtc_channel(struct drm_crtc *crtc)
|
||||
{
|
||||
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
|
||||
return omap_crtc->channel;
|
||||
}
|
||||
|
||||
static void omap_crtc_error_irq(struct omap_drm_irq *irq, uint32_t irqstatus)
|
||||
{
|
||||
struct omap_crtc *omap_crtc =
|
||||
container_of(irq, struct omap_crtc, error_irq);
|
||||
struct drm_crtc *crtc = &omap_crtc->base;
|
||||
DRM_ERROR("%s: errors: %08x\n", omap_crtc->name, irqstatus);
|
||||
/* avoid getting in a flood, unregister the irq until next vblank */
|
||||
__omap_irq_unregister(crtc->dev, &omap_crtc->error_irq);
|
||||
}
|
||||
|
||||
static void omap_crtc_apply_irq(struct omap_drm_irq *irq, uint32_t irqstatus)
|
||||
{
|
||||
struct omap_crtc *omap_crtc =
|
||||
container_of(irq, struct omap_crtc, apply_irq);
|
||||
struct drm_crtc *crtc = &omap_crtc->base;
|
||||
|
||||
if (!omap_crtc->error_irq.registered)
|
||||
__omap_irq_register(crtc->dev, &omap_crtc->error_irq);
|
||||
|
||||
if (!dispc_mgr_go_busy(omap_crtc->channel)) {
|
||||
struct omap_drm_private *priv =
|
||||
crtc->dev->dev_private;
|
||||
DBG("%s: apply done", omap_crtc->name);
|
||||
__omap_irq_unregister(crtc->dev, &omap_crtc->apply_irq);
|
||||
queue_work(priv->wq, &omap_crtc->apply_work);
|
||||
}
|
||||
}
|
||||
|
||||
static void apply_worker(struct work_struct *work)
|
||||
{
|
||||
struct omap_crtc *omap_crtc =
|
||||
container_of(work, struct omap_crtc, apply_work);
|
||||
struct drm_crtc *crtc = &omap_crtc->base;
|
||||
struct drm_device *dev = crtc->dev;
|
||||
struct omap_drm_apply *apply, *n;
|
||||
bool need_apply;
|
||||
|
||||
/*
|
||||
* Synchronize everything on mode_config.mutex, to keep
|
||||
* the callbacks and list modification all serialized
|
||||
* with respect to modesetting ioctls from userspace.
|
||||
*/
|
||||
drm_modeset_lock(&crtc->mutex, NULL);
|
||||
dispc_runtime_get();
|
||||
|
||||
/*
|
||||
* If we are still pending a previous update, wait.. when the
|
||||
* pending update completes, we get kicked again.
|
||||
*/
|
||||
if (omap_crtc->apply_irq.registered)
|
||||
goto out;
|
||||
|
||||
/* finish up previous apply's: */
|
||||
list_for_each_entry_safe(apply, n,
|
||||
&omap_crtc->pending_applies, pending_node) {
|
||||
apply->post_apply(apply);
|
||||
list_del(&apply->pending_node);
|
||||
}
|
||||
|
||||
need_apply = !list_empty(&omap_crtc->queued_applies);
|
||||
|
||||
/* then handle the next round of of queued apply's: */
|
||||
list_for_each_entry_safe(apply, n,
|
||||
&omap_crtc->queued_applies, queued_node) {
|
||||
apply->pre_apply(apply);
|
||||
list_del(&apply->queued_node);
|
||||
apply->queued = false;
|
||||
list_add_tail(&apply->pending_node,
|
||||
&omap_crtc->pending_applies);
|
||||
}
|
||||
|
||||
if (need_apply) {
|
||||
enum omap_channel channel = omap_crtc->channel;
|
||||
|
||||
DBG("%s: GO", omap_crtc->name);
|
||||
|
||||
if (dispc_mgr_is_enabled(channel)) {
|
||||
omap_irq_register(dev, &omap_crtc->apply_irq);
|
||||
dispc_mgr_go(channel);
|
||||
} else {
|
||||
struct omap_drm_private *priv = dev->dev_private;
|
||||
queue_work(priv->wq, &omap_crtc->apply_work);
|
||||
}
|
||||
}
|
||||
|
||||
out:
|
||||
dispc_runtime_put();
|
||||
drm_modeset_unlock(&crtc->mutex);
|
||||
}
|
||||
|
||||
int omap_crtc_apply(struct drm_crtc *crtc,
|
||||
struct omap_drm_apply *apply)
|
||||
{
|
||||
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
|
||||
|
||||
WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
|
||||
|
||||
/* no need to queue it again if it is already queued: */
|
||||
if (apply->queued)
|
||||
return 0;
|
||||
|
||||
apply->queued = true;
|
||||
list_add_tail(&apply->queued_node, &omap_crtc->queued_applies);
|
||||
|
||||
/*
|
||||
* If there are no currently pending updates, then go ahead and
|
||||
* kick the worker immediately, otherwise it will run again when
|
||||
* the current update finishes.
|
||||
*/
|
||||
if (list_empty(&omap_crtc->pending_applies)) {
|
||||
struct omap_drm_private *priv = crtc->dev->dev_private;
|
||||
queue_work(priv->wq, &omap_crtc->apply_work);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* called only from apply */
|
||||
static void set_enabled(struct drm_crtc *crtc, bool enable)
|
||||
{
|
||||
struct drm_device *dev = crtc->dev;
|
||||
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
|
||||
enum omap_channel channel = omap_crtc->channel;
|
||||
struct omap_irq_wait *wait;
|
||||
u32 framedone_irq, vsync_irq;
|
||||
int ret;
|
||||
|
||||
if (dispc_mgr_is_enabled(channel) == enable)
|
||||
return;
|
||||
|
||||
/*
|
||||
* Digit output produces some sync lost interrupts during the first
|
||||
* frame when enabling, so we need to ignore those.
|
||||
*/
|
||||
omap_irq_unregister(crtc->dev, &omap_crtc->error_irq);
|
||||
|
||||
framedone_irq = dispc_mgr_get_framedone_irq(channel);
|
||||
vsync_irq = dispc_mgr_get_vsync_irq(channel);
|
||||
|
||||
if (enable) {
|
||||
wait = omap_irq_wait_init(dev, vsync_irq, 1);
|
||||
} else {
|
||||
/*
|
||||
* When we disable the digit output, we need to wait for
|
||||
* FRAMEDONE to know that DISPC has finished with the output.
|
||||
*
|
||||
* OMAP2/3 does not have FRAMEDONE irq for digit output, and in
|
||||
* that case we need to use vsync interrupt, and wait for both
|
||||
* even and odd frames.
|
||||
*/
|
||||
|
||||
if (framedone_irq)
|
||||
wait = omap_irq_wait_init(dev, framedone_irq, 1);
|
||||
else
|
||||
wait = omap_irq_wait_init(dev, vsync_irq, 2);
|
||||
}
|
||||
|
||||
dispc_mgr_enable(channel, enable);
|
||||
|
||||
ret = omap_irq_wait(dev, wait, msecs_to_jiffies(100));
|
||||
if (ret) {
|
||||
dev_err(dev->dev, "%s: timeout waiting for %s\n",
|
||||
omap_crtc->name, enable ? "enable" : "disable");
|
||||
}
|
||||
|
||||
omap_irq_register(crtc->dev, &omap_crtc->error_irq);
|
||||
}
|
||||
|
||||
static void omap_crtc_pre_apply(struct omap_drm_apply *apply)
|
||||
{
|
||||
struct omap_crtc *omap_crtc =
|
||||
container_of(apply, struct omap_crtc, apply);
|
||||
struct drm_crtc *crtc = &omap_crtc->base;
|
||||
struct drm_encoder *encoder = NULL;
|
||||
|
||||
DBG("%s: enabled=%d, full=%d", omap_crtc->name,
|
||||
omap_crtc->enabled, omap_crtc->full_update);
|
||||
|
||||
if (omap_crtc->full_update) {
|
||||
struct omap_drm_private *priv = crtc->dev->dev_private;
|
||||
int i;
|
||||
for (i = 0; i < priv->num_encoders; i++) {
|
||||
if (priv->encoders[i]->crtc == crtc) {
|
||||
encoder = priv->encoders[i];
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (omap_crtc->current_encoder && encoder != omap_crtc->current_encoder)
|
||||
omap_encoder_set_enabled(omap_crtc->current_encoder, false);
|
||||
|
||||
omap_crtc->current_encoder = encoder;
|
||||
|
||||
if (!omap_crtc->enabled) {
|
||||
if (encoder)
|
||||
omap_encoder_set_enabled(encoder, false);
|
||||
} else {
|
||||
if (encoder) {
|
||||
omap_encoder_set_enabled(encoder, false);
|
||||
omap_encoder_update(encoder, omap_crtc->mgr,
|
||||
&omap_crtc->timings);
|
||||
omap_encoder_set_enabled(encoder, true);
|
||||
}
|
||||
}
|
||||
|
||||
omap_crtc->full_update = false;
|
||||
}
|
||||
|
||||
static void omap_crtc_post_apply(struct omap_drm_apply *apply)
|
||||
{
|
||||
/* nothing needed for post-apply */
|
||||
}
|
||||
|
||||
void omap_crtc_flush(struct drm_crtc *crtc)
|
||||
{
|
||||
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
|
||||
int loops = 0;
|
||||
|
||||
while (!list_empty(&omap_crtc->pending_applies) ||
|
||||
!list_empty(&omap_crtc->queued_applies) ||
|
||||
omap_crtc->event || omap_crtc->old_fb) {
|
||||
|
||||
if (++loops > 10) {
|
||||
dev_err(crtc->dev->dev,
|
||||
"omap_crtc_flush() timeout\n");
|
||||
break;
|
||||
}
|
||||
|
||||
schedule_timeout_uninterruptible(msecs_to_jiffies(20));
|
||||
}
|
||||
}
|
||||
|
||||
static const char *channel_names[] = {
|
||||
[OMAP_DSS_CHANNEL_LCD] = "lcd",
|
||||
[OMAP_DSS_CHANNEL_DIGIT] = "tv",
|
||||
[OMAP_DSS_CHANNEL_LCD2] = "lcd2",
|
||||
[OMAP_DSS_CHANNEL_LCD3] = "lcd3",
|
||||
};
|
||||
|
||||
void omap_crtc_pre_init(void)
|
||||
{
|
||||
dss_install_mgr_ops(&mgr_ops);
|
||||
}
|
||||
|
||||
void omap_crtc_pre_uninit(void)
|
||||
{
|
||||
dss_uninstall_mgr_ops();
|
||||
}
|
||||
|
||||
/* initialize crtc */
|
||||
struct drm_crtc *omap_crtc_init(struct drm_device *dev,
|
||||
struct drm_plane *plane, enum omap_channel channel, int id)
|
||||
{
|
||||
struct drm_crtc *crtc = NULL;
|
||||
struct omap_crtc *omap_crtc;
|
||||
struct omap_overlay_manager_info *info;
|
||||
|
||||
DBG("%s", channel_names[channel]);
|
||||
|
||||
omap_crtc = kzalloc(sizeof(*omap_crtc), GFP_KERNEL);
|
||||
if (!omap_crtc)
|
||||
goto fail;
|
||||
|
||||
crtc = &omap_crtc->base;
|
||||
|
||||
INIT_WORK(&omap_crtc->page_flip_work, page_flip_worker);
|
||||
INIT_WORK(&omap_crtc->apply_work, apply_worker);
|
||||
|
||||
INIT_LIST_HEAD(&omap_crtc->pending_applies);
|
||||
INIT_LIST_HEAD(&omap_crtc->queued_applies);
|
||||
|
||||
omap_crtc->apply.pre_apply = omap_crtc_pre_apply;
|
||||
omap_crtc->apply.post_apply = omap_crtc_post_apply;
|
||||
|
||||
omap_crtc->channel = channel;
|
||||
omap_crtc->plane = plane;
|
||||
omap_crtc->plane->crtc = crtc;
|
||||
omap_crtc->name = channel_names[channel];
|
||||
omap_crtc->pipe = id;
|
||||
|
||||
omap_crtc->apply_irq.irqmask = pipe2vbl(crtc);
|
||||
omap_crtc->apply_irq.irq = omap_crtc_apply_irq;
|
||||
|
||||
omap_crtc->error_irq.irqmask =
|
||||
dispc_mgr_get_sync_lost_irq(channel);
|
||||
omap_crtc->error_irq.irq = omap_crtc_error_irq;
|
||||
omap_irq_register(dev, &omap_crtc->error_irq);
|
||||
|
||||
/* temporary: */
|
||||
omap_crtc->mgr = omap_dss_get_overlay_manager(channel);
|
||||
|
||||
/* TODO: fix hard-coded setup.. add properties! */
|
||||
info = &omap_crtc->info;
|
||||
info->default_color = 0x00000000;
|
||||
info->trans_key = 0x00000000;
|
||||
info->trans_key_type = OMAP_DSS_COLOR_KEY_GFX_DST;
|
||||
info->trans_enabled = false;
|
||||
|
||||
drm_crtc_init(dev, crtc, &omap_crtc_funcs);
|
||||
drm_crtc_helper_add(crtc, &omap_crtc_helper_funcs);
|
||||
|
||||
omap_plane_install_properties(omap_crtc->plane, &crtc->base);
|
||||
|
||||
omap_crtcs[channel] = omap_crtc;
|
||||
|
||||
return crtc;
|
||||
|
||||
fail:
|
||||
if (crtc)
|
||||
omap_crtc_destroy(crtc);
|
||||
|
||||
return NULL;
|
||||
}
|
||||
125
drivers/gpu/drm/omapdrm/omap_debugfs.c
Normal file
125
drivers/gpu/drm/omapdrm/omap_debugfs.c
Normal file
|
|
@ -0,0 +1,125 @@
|
|||
/*
|
||||
* drivers/gpu/drm/omapdrm/omap_debugfs.c
|
||||
*
|
||||
* Copyright (C) 2011 Texas Instruments
|
||||
* Author: Rob Clark <rob.clark@linaro.org>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 as published by
|
||||
* the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License along with
|
||||
* this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include "omap_drv.h"
|
||||
#include "omap_dmm_tiler.h"
|
||||
|
||||
#include "drm_fb_helper.h"
|
||||
|
||||
|
||||
#ifdef CONFIG_DEBUG_FS
|
||||
|
||||
static int gem_show(struct seq_file *m, void *arg)
|
||||
{
|
||||
struct drm_info_node *node = (struct drm_info_node *) m->private;
|
||||
struct drm_device *dev = node->minor->dev;
|
||||
struct omap_drm_private *priv = dev->dev_private;
|
||||
int ret;
|
||||
|
||||
ret = mutex_lock_interruptible(&dev->struct_mutex);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
seq_printf(m, "All Objects:\n");
|
||||
omap_gem_describe_objects(&priv->obj_list, m);
|
||||
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int mm_show(struct seq_file *m, void *arg)
|
||||
{
|
||||
struct drm_info_node *node = (struct drm_info_node *) m->private;
|
||||
struct drm_device *dev = node->minor->dev;
|
||||
return drm_mm_dump_table(m, &dev->vma_offset_manager->vm_addr_space_mm);
|
||||
}
|
||||
|
||||
static int fb_show(struct seq_file *m, void *arg)
|
||||
{
|
||||
struct drm_info_node *node = (struct drm_info_node *) m->private;
|
||||
struct drm_device *dev = node->minor->dev;
|
||||
struct omap_drm_private *priv = dev->dev_private;
|
||||
struct drm_framebuffer *fb;
|
||||
|
||||
seq_printf(m, "fbcon ");
|
||||
omap_framebuffer_describe(priv->fbdev->fb, m);
|
||||
|
||||
mutex_lock(&dev->mode_config.fb_lock);
|
||||
list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
|
||||
if (fb == priv->fbdev->fb)
|
||||
continue;
|
||||
|
||||
seq_printf(m, "user ");
|
||||
omap_framebuffer_describe(fb, m);
|
||||
}
|
||||
mutex_unlock(&dev->mode_config.fb_lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* list of debufs files that are applicable to all devices */
|
||||
static struct drm_info_list omap_debugfs_list[] = {
|
||||
{"gem", gem_show, 0},
|
||||
{"mm", mm_show, 0},
|
||||
{"fb", fb_show, 0},
|
||||
};
|
||||
|
||||
/* list of debugfs files that are specific to devices with dmm/tiler */
|
||||
static struct drm_info_list omap_dmm_debugfs_list[] = {
|
||||
{"tiler_map", tiler_map_show, 0},
|
||||
};
|
||||
|
||||
int omap_debugfs_init(struct drm_minor *minor)
|
||||
{
|
||||
struct drm_device *dev = minor->dev;
|
||||
int ret;
|
||||
|
||||
ret = drm_debugfs_create_files(omap_debugfs_list,
|
||||
ARRAY_SIZE(omap_debugfs_list),
|
||||
minor->debugfs_root, minor);
|
||||
|
||||
if (ret) {
|
||||
dev_err(dev->dev, "could not install omap_debugfs_list\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (dmm_is_available())
|
||||
ret = drm_debugfs_create_files(omap_dmm_debugfs_list,
|
||||
ARRAY_SIZE(omap_dmm_debugfs_list),
|
||||
minor->debugfs_root, minor);
|
||||
|
||||
if (ret) {
|
||||
dev_err(dev->dev, "could not install omap_dmm_debugfs_list\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
void omap_debugfs_cleanup(struct drm_minor *minor)
|
||||
{
|
||||
drm_debugfs_remove_files(omap_debugfs_list,
|
||||
ARRAY_SIZE(omap_debugfs_list), minor);
|
||||
if (dmm_is_available())
|
||||
drm_debugfs_remove_files(omap_dmm_debugfs_list,
|
||||
ARRAY_SIZE(omap_dmm_debugfs_list), minor);
|
||||
}
|
||||
|
||||
#endif
|
||||
188
drivers/gpu/drm/omapdrm/omap_dmm_priv.h
Normal file
188
drivers/gpu/drm/omapdrm/omap_dmm_priv.h
Normal file
|
|
@ -0,0 +1,188 @@
|
|||
/*
|
||||
*
|
||||
* Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
|
||||
* Author: Rob Clark <rob@ti.com>
|
||||
* Andy Gross <andy.gross@ti.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License as
|
||||
* published by the Free Software Foundation version 2.
|
||||
*
|
||||
* This program is distributed "as is" WITHOUT ANY WARRANTY of any
|
||||
* kind, whether express or implied; without even the implied warranty
|
||||
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*/
|
||||
#ifndef OMAP_DMM_PRIV_H
|
||||
#define OMAP_DMM_PRIV_H
|
||||
|
||||
#define DMM_REVISION 0x000
|
||||
#define DMM_HWINFO 0x004
|
||||
#define DMM_LISA_HWINFO 0x008
|
||||
#define DMM_DMM_SYSCONFIG 0x010
|
||||
#define DMM_LISA_LOCK 0x01C
|
||||
#define DMM_LISA_MAP__0 0x040
|
||||
#define DMM_LISA_MAP__1 0x044
|
||||
#define DMM_TILER_HWINFO 0x208
|
||||
#define DMM_TILER_OR__0 0x220
|
||||
#define DMM_TILER_OR__1 0x224
|
||||
#define DMM_PAT_HWINFO 0x408
|
||||
#define DMM_PAT_GEOMETRY 0x40C
|
||||
#define DMM_PAT_CONFIG 0x410
|
||||
#define DMM_PAT_VIEW__0 0x420
|
||||
#define DMM_PAT_VIEW__1 0x424
|
||||
#define DMM_PAT_VIEW_MAP__0 0x440
|
||||
#define DMM_PAT_VIEW_MAP_BASE 0x460
|
||||
#define DMM_PAT_IRQ_EOI 0x478
|
||||
#define DMM_PAT_IRQSTATUS_RAW 0x480
|
||||
#define DMM_PAT_IRQSTATUS 0x490
|
||||
#define DMM_PAT_IRQENABLE_SET 0x4A0
|
||||
#define DMM_PAT_IRQENABLE_CLR 0x4B0
|
||||
#define DMM_PAT_STATUS__0 0x4C0
|
||||
#define DMM_PAT_STATUS__1 0x4C4
|
||||
#define DMM_PAT_STATUS__2 0x4C8
|
||||
#define DMM_PAT_STATUS__3 0x4CC
|
||||
#define DMM_PAT_DESCR__0 0x500
|
||||
#define DMM_PAT_DESCR__1 0x510
|
||||
#define DMM_PAT_DESCR__2 0x520
|
||||
#define DMM_PAT_DESCR__3 0x530
|
||||
#define DMM_PEG_HWINFO 0x608
|
||||
#define DMM_PEG_PRIO 0x620
|
||||
#define DMM_PEG_PRIO_PAT 0x640
|
||||
|
||||
#define DMM_IRQSTAT_DST (1<<0)
|
||||
#define DMM_IRQSTAT_LST (1<<1)
|
||||
#define DMM_IRQSTAT_ERR_INV_DSC (1<<2)
|
||||
#define DMM_IRQSTAT_ERR_INV_DATA (1<<3)
|
||||
#define DMM_IRQSTAT_ERR_UPD_AREA (1<<4)
|
||||
#define DMM_IRQSTAT_ERR_UPD_CTRL (1<<5)
|
||||
#define DMM_IRQSTAT_ERR_UPD_DATA (1<<6)
|
||||
#define DMM_IRQSTAT_ERR_LUT_MISS (1<<7)
|
||||
|
||||
#define DMM_IRQSTAT_ERR_MASK (DMM_IRQ_STAT_ERR_INV_DSC | \
|
||||
DMM_IRQ_STAT_ERR_INV_DATA | \
|
||||
DMM_IRQ_STAT_ERR_UPD_AREA | \
|
||||
DMM_IRQ_STAT_ERR_UPD_CTRL | \
|
||||
DMM_IRQ_STAT_ERR_UPD_DATA | \
|
||||
DMM_IRQ_STAT_ERR_LUT_MISS)
|
||||
|
||||
#define DMM_PATSTATUS_READY (1<<0)
|
||||
#define DMM_PATSTATUS_VALID (1<<1)
|
||||
#define DMM_PATSTATUS_RUN (1<<2)
|
||||
#define DMM_PATSTATUS_DONE (1<<3)
|
||||
#define DMM_PATSTATUS_LINKED (1<<4)
|
||||
#define DMM_PATSTATUS_BYPASSED (1<<7)
|
||||
#define DMM_PATSTATUS_ERR_INV_DESCR (1<<10)
|
||||
#define DMM_PATSTATUS_ERR_INV_DATA (1<<11)
|
||||
#define DMM_PATSTATUS_ERR_UPD_AREA (1<<12)
|
||||
#define DMM_PATSTATUS_ERR_UPD_CTRL (1<<13)
|
||||
#define DMM_PATSTATUS_ERR_UPD_DATA (1<<14)
|
||||
#define DMM_PATSTATUS_ERR_ACCESS (1<<15)
|
||||
|
||||
/* note: don't treat DMM_PATSTATUS_ERR_ACCESS as an error */
|
||||
#define DMM_PATSTATUS_ERR (DMM_PATSTATUS_ERR_INV_DESCR | \
|
||||
DMM_PATSTATUS_ERR_INV_DATA | \
|
||||
DMM_PATSTATUS_ERR_UPD_AREA | \
|
||||
DMM_PATSTATUS_ERR_UPD_CTRL | \
|
||||
DMM_PATSTATUS_ERR_UPD_DATA)
|
||||
|
||||
|
||||
|
||||
enum {
|
||||
PAT_STATUS,
|
||||
PAT_DESCR
|
||||
};
|
||||
|
||||
struct pat_ctrl {
|
||||
u32 start:4;
|
||||
u32 dir:4;
|
||||
u32 lut_id:8;
|
||||
u32 sync:12;
|
||||
u32 ini:4;
|
||||
};
|
||||
|
||||
struct pat {
|
||||
uint32_t next_pa;
|
||||
struct pat_area area;
|
||||
struct pat_ctrl ctrl;
|
||||
uint32_t data_pa;
|
||||
};
|
||||
|
||||
#define DMM_FIXED_RETRY_COUNT 1000
|
||||
|
||||
/* create refill buffer big enough to refill all slots, plus 3 descriptors..
|
||||
* 3 descriptors is probably the worst-case for # of 2d-slices in a 1d area,
|
||||
* but I guess you don't hit that worst case at the same time as full area
|
||||
* refill
|
||||
*/
|
||||
#define DESCR_SIZE 128
|
||||
#define REFILL_BUFFER_SIZE ((4 * 128 * 256) + (3 * DESCR_SIZE))
|
||||
|
||||
/* For OMAP5, a fixed offset is added to all Y coordinates for 1D buffers.
|
||||
* This is used in programming to address the upper portion of the LUT
|
||||
*/
|
||||
#define OMAP5_LUT_OFFSET 128
|
||||
|
||||
struct dmm;
|
||||
|
||||
struct dmm_txn {
|
||||
void *engine_handle;
|
||||
struct tcm *tcm;
|
||||
|
||||
uint8_t *current_va;
|
||||
dma_addr_t current_pa;
|
||||
|
||||
struct pat *last_pat;
|
||||
};
|
||||
|
||||
struct refill_engine {
|
||||
int id;
|
||||
struct dmm *dmm;
|
||||
struct tcm *tcm;
|
||||
|
||||
uint8_t *refill_va;
|
||||
dma_addr_t refill_pa;
|
||||
|
||||
/* only one trans per engine for now */
|
||||
struct dmm_txn txn;
|
||||
|
||||
bool async;
|
||||
|
||||
wait_queue_head_t wait_for_refill;
|
||||
|
||||
struct list_head idle_node;
|
||||
};
|
||||
|
||||
struct dmm {
|
||||
struct device *dev;
|
||||
void __iomem *base;
|
||||
int irq;
|
||||
|
||||
struct page *dummy_page;
|
||||
dma_addr_t dummy_pa;
|
||||
|
||||
void *refill_va;
|
||||
dma_addr_t refill_pa;
|
||||
|
||||
/* refill engines */
|
||||
wait_queue_head_t engine_queue;
|
||||
struct list_head idle_head;
|
||||
struct refill_engine *engines;
|
||||
int num_engines;
|
||||
atomic_t engine_counter;
|
||||
|
||||
/* container information */
|
||||
int container_width;
|
||||
int container_height;
|
||||
int lut_width;
|
||||
int lut_height;
|
||||
int num_lut;
|
||||
|
||||
/* array of LUT - TCM containers */
|
||||
struct tcm **tcm;
|
||||
|
||||
/* allocation list and lock */
|
||||
struct list_head alloc_head;
|
||||
};
|
||||
|
||||
#endif
|
||||
998
drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
Normal file
998
drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
Normal file
|
|
@ -0,0 +1,998 @@
|
|||
/*
|
||||
* DMM IOMMU driver support functions for TI OMAP processors.
|
||||
*
|
||||
* Author: Rob Clark <rob@ti.com>
|
||||
* Andy Gross <andy.gross@ti.com>
|
||||
*
|
||||
* Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License as
|
||||
* published by the Free Software Foundation version 2.
|
||||
*
|
||||
* This program is distributed "as is" WITHOUT ANY WARRANTY of any
|
||||
* kind, whether express or implied; without even the implied warranty
|
||||
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*/
|
||||
#include <linux/init.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/platform_device.h> /* platform_device() */
|
||||
#include <linux/errno.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/wait.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/vmalloc.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/time.h>
|
||||
#include <linux/list.h>
|
||||
|
||||
#include "omap_dmm_tiler.h"
|
||||
#include "omap_dmm_priv.h"
|
||||
|
||||
#define DMM_DRIVER_NAME "dmm"
|
||||
|
||||
/* mappings for associating views to luts */
|
||||
static struct tcm *containers[TILFMT_NFORMATS];
|
||||
static struct dmm *omap_dmm;
|
||||
|
||||
/* global spinlock for protecting lists */
|
||||
static DEFINE_SPINLOCK(list_lock);
|
||||
|
||||
/* Geometry table */
|
||||
#define GEOM(xshift, yshift, bytes_per_pixel) { \
|
||||
.x_shft = (xshift), \
|
||||
.y_shft = (yshift), \
|
||||
.cpp = (bytes_per_pixel), \
|
||||
.slot_w = 1 << (SLOT_WIDTH_BITS - (xshift)), \
|
||||
.slot_h = 1 << (SLOT_HEIGHT_BITS - (yshift)), \
|
||||
}
|
||||
|
||||
static const struct {
|
||||
uint32_t x_shft; /* unused X-bits (as part of bpp) */
|
||||
uint32_t y_shft; /* unused Y-bits (as part of bpp) */
|
||||
uint32_t cpp; /* bytes/chars per pixel */
|
||||
uint32_t slot_w; /* width of each slot (in pixels) */
|
||||
uint32_t slot_h; /* height of each slot (in pixels) */
|
||||
} geom[TILFMT_NFORMATS] = {
|
||||
[TILFMT_8BIT] = GEOM(0, 0, 1),
|
||||
[TILFMT_16BIT] = GEOM(0, 1, 2),
|
||||
[TILFMT_32BIT] = GEOM(1, 1, 4),
|
||||
[TILFMT_PAGE] = GEOM(SLOT_WIDTH_BITS, SLOT_HEIGHT_BITS, 1),
|
||||
};
|
||||
|
||||
|
||||
/* lookup table for registers w/ per-engine instances */
|
||||
static const uint32_t reg[][4] = {
|
||||
[PAT_STATUS] = {DMM_PAT_STATUS__0, DMM_PAT_STATUS__1,
|
||||
DMM_PAT_STATUS__2, DMM_PAT_STATUS__3},
|
||||
[PAT_DESCR] = {DMM_PAT_DESCR__0, DMM_PAT_DESCR__1,
|
||||
DMM_PAT_DESCR__2, DMM_PAT_DESCR__3},
|
||||
};
|
||||
|
||||
/* simple allocator to grab next 16 byte aligned memory from txn */
|
||||
static void *alloc_dma(struct dmm_txn *txn, size_t sz, dma_addr_t *pa)
|
||||
{
|
||||
void *ptr;
|
||||
struct refill_engine *engine = txn->engine_handle;
|
||||
|
||||
/* dmm programming requires 16 byte aligned addresses */
|
||||
txn->current_pa = round_up(txn->current_pa, 16);
|
||||
txn->current_va = (void *)round_up((long)txn->current_va, 16);
|
||||
|
||||
ptr = txn->current_va;
|
||||
*pa = txn->current_pa;
|
||||
|
||||
txn->current_pa += sz;
|
||||
txn->current_va += sz;
|
||||
|
||||
BUG_ON((txn->current_va - engine->refill_va) > REFILL_BUFFER_SIZE);
|
||||
|
||||
return ptr;
|
||||
}
|
||||
|
||||
/* check status and spin until wait_mask comes true */
|
||||
static int wait_status(struct refill_engine *engine, uint32_t wait_mask)
|
||||
{
|
||||
struct dmm *dmm = engine->dmm;
|
||||
uint32_t r = 0, err, i;
|
||||
|
||||
i = DMM_FIXED_RETRY_COUNT;
|
||||
while (true) {
|
||||
r = readl(dmm->base + reg[PAT_STATUS][engine->id]);
|
||||
err = r & DMM_PATSTATUS_ERR;
|
||||
if (err)
|
||||
return -EFAULT;
|
||||
|
||||
if ((r & wait_mask) == wait_mask)
|
||||
break;
|
||||
|
||||
if (--i == 0)
|
||||
return -ETIMEDOUT;
|
||||
|
||||
udelay(1);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void release_engine(struct refill_engine *engine)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&list_lock, flags);
|
||||
list_add(&engine->idle_node, &omap_dmm->idle_head);
|
||||
spin_unlock_irqrestore(&list_lock, flags);
|
||||
|
||||
atomic_inc(&omap_dmm->engine_counter);
|
||||
wake_up_interruptible(&omap_dmm->engine_queue);
|
||||
}
|
||||
|
||||
static irqreturn_t omap_dmm_irq_handler(int irq, void *arg)
|
||||
{
|
||||
struct dmm *dmm = arg;
|
||||
uint32_t status = readl(dmm->base + DMM_PAT_IRQSTATUS);
|
||||
int i;
|
||||
|
||||
/* ack IRQ */
|
||||
writel(status, dmm->base + DMM_PAT_IRQSTATUS);
|
||||
|
||||
for (i = 0; i < dmm->num_engines; i++) {
|
||||
if (status & DMM_IRQSTAT_LST) {
|
||||
wake_up_interruptible(&dmm->engines[i].wait_for_refill);
|
||||
|
||||
if (dmm->engines[i].async)
|
||||
release_engine(&dmm->engines[i]);
|
||||
}
|
||||
|
||||
status >>= 8;
|
||||
}
|
||||
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get a handle for a DMM transaction
|
||||
*/
|
||||
static struct dmm_txn *dmm_txn_init(struct dmm *dmm, struct tcm *tcm)
|
||||
{
|
||||
struct dmm_txn *txn = NULL;
|
||||
struct refill_engine *engine = NULL;
|
||||
int ret;
|
||||
unsigned long flags;
|
||||
|
||||
|
||||
/* wait until an engine is available */
|
||||
ret = wait_event_interruptible(omap_dmm->engine_queue,
|
||||
atomic_add_unless(&omap_dmm->engine_counter, -1, 0));
|
||||
if (ret)
|
||||
return ERR_PTR(ret);
|
||||
|
||||
/* grab an idle engine */
|
||||
spin_lock_irqsave(&list_lock, flags);
|
||||
if (!list_empty(&dmm->idle_head)) {
|
||||
engine = list_entry(dmm->idle_head.next, struct refill_engine,
|
||||
idle_node);
|
||||
list_del(&engine->idle_node);
|
||||
}
|
||||
spin_unlock_irqrestore(&list_lock, flags);
|
||||
|
||||
BUG_ON(!engine);
|
||||
|
||||
txn = &engine->txn;
|
||||
engine->tcm = tcm;
|
||||
txn->engine_handle = engine;
|
||||
txn->last_pat = NULL;
|
||||
txn->current_va = engine->refill_va;
|
||||
txn->current_pa = engine->refill_pa;
|
||||
|
||||
return txn;
|
||||
}
|
||||
|
||||
/**
|
||||
* Add region to DMM transaction. If pages or pages[i] is NULL, then the
|
||||
* corresponding slot is cleared (ie. dummy_pa is programmed)
|
||||
*/
|
||||
static void dmm_txn_append(struct dmm_txn *txn, struct pat_area *area,
|
||||
struct page **pages, uint32_t npages, uint32_t roll)
|
||||
{
|
||||
dma_addr_t pat_pa = 0, data_pa = 0;
|
||||
uint32_t *data;
|
||||
struct pat *pat;
|
||||
struct refill_engine *engine = txn->engine_handle;
|
||||
int columns = (1 + area->x1 - area->x0);
|
||||
int rows = (1 + area->y1 - area->y0);
|
||||
int i = columns*rows;
|
||||
|
||||
pat = alloc_dma(txn, sizeof(struct pat), &pat_pa);
|
||||
|
||||
if (txn->last_pat)
|
||||
txn->last_pat->next_pa = (uint32_t)pat_pa;
|
||||
|
||||
pat->area = *area;
|
||||
|
||||
/* adjust Y coordinates based off of container parameters */
|
||||
pat->area.y0 += engine->tcm->y_offset;
|
||||
pat->area.y1 += engine->tcm->y_offset;
|
||||
|
||||
pat->ctrl = (struct pat_ctrl){
|
||||
.start = 1,
|
||||
.lut_id = engine->tcm->lut_id,
|
||||
};
|
||||
|
||||
data = alloc_dma(txn, 4*i, &data_pa);
|
||||
/* FIXME: what if data_pa is more than 32-bit ? */
|
||||
pat->data_pa = data_pa;
|
||||
|
||||
while (i--) {
|
||||
int n = i + roll;
|
||||
if (n >= npages)
|
||||
n -= npages;
|
||||
data[i] = (pages && pages[n]) ?
|
||||
page_to_phys(pages[n]) : engine->dmm->dummy_pa;
|
||||
}
|
||||
|
||||
txn->last_pat = pat;
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
/**
|
||||
* Commit the DMM transaction.
|
||||
*/
|
||||
static int dmm_txn_commit(struct dmm_txn *txn, bool wait)
|
||||
{
|
||||
int ret = 0;
|
||||
struct refill_engine *engine = txn->engine_handle;
|
||||
struct dmm *dmm = engine->dmm;
|
||||
|
||||
if (!txn->last_pat) {
|
||||
dev_err(engine->dmm->dev, "need at least one txn\n");
|
||||
ret = -EINVAL;
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
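/* a zero next_pa marks the end of the PAT descriptor chain */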
txn->last_pat->next_pa = 0;
|
||||
|
||||
/* write to PAT_DESCR to clear out any pending transaction */
|
||||
writel(0x0, dmm->base + reg[PAT_DESCR][engine->id]);
|
||||
|
||||
/* wait for engine ready: */
|
||||
ret = wait_status(engine, DMM_PATSTATUS_READY);
|
||||
if (ret) {
|
||||
ret = -EFAULT;
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
/* mark whether it is async to denote list management in IRQ handler */
|
||||
engine->async = wait ? false : true;
|
||||
|
||||
/* kick reload */
|
||||
writel(engine->refill_pa,
|
||||
dmm->base + reg[PAT_DESCR][engine->id]);
|
||||
|
||||
if (wait) {
|
||||
if (wait_event_interruptible_timeout(engine->wait_for_refill,
|
||||
wait_status(engine, DMM_PATSTATUS_READY) == 0,
|
||||
msecs_to_jiffies(1)) <= 0) {
|
||||
dev_err(dmm->dev, "timed out waiting for done\n");
|
||||
ret = -ETIMEDOUT;
|
||||
}
|
||||
}
|
||||
|
||||
cleanup:
|
||||
/* only place engine back on list if we are done with it */
|
||||
if (ret || wait)
|
||||
release_engine(engine);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* DMM programming
|
||||
*/
|
||||
static int fill(struct tcm_area *area, struct page **pages,
|
||||
uint32_t npages, uint32_t roll, bool wait)
|
||||
{
|
||||
int ret = 0;
|
||||
struct tcm_area slice, area_s;
|
||||
struct dmm_txn *txn;
|
||||
|
||||
txn = dmm_txn_init(omap_dmm, area->tcm);
|
||||
if (IS_ERR_OR_NULL(txn))
|
||||
return -ENOMEM;
|
||||
|
||||
tcm_for_each_slice(slice, *area, area_s) {
|
||||
struct pat_area p_area = {
|
||||
.x0 = slice.p0.x, .y0 = slice.p0.y,
|
||||
.x1 = slice.p1.x, .y1 = slice.p1.y,
|
||||
};
|
||||
|
||||
dmm_txn_append(txn, &p_area, pages, npages, roll);
|
||||
|
||||
roll += tcm_sizeof(slice);
|
||||
}
|
||||
|
||||
ret = dmm_txn_commit(txn, wait);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Pin/unpin
|
||||
*/
|
||||
|
||||
/* note: slots for which pages[i] == NULL are filled w/ dummy page
|
||||
*/
|
||||
int tiler_pin(struct tiler_block *block, struct page **pages,
|
||||
uint32_t npages, uint32_t roll, bool wait)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = fill(&block->area, pages, npages, roll, wait);
|
||||
|
||||
if (ret)
|
||||
tiler_unpin(block);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int tiler_unpin(struct tiler_block *block)
|
||||
{
|
||||
return fill(&block->area, NULL, 0, 0, false);
|
||||
}
|
||||
|
||||
/*
|
||||
* Reserve/release
|
||||
*/
|
||||
struct tiler_block *tiler_reserve_2d(enum tiler_fmt fmt, uint16_t w,
|
||||
uint16_t h, uint16_t align)
|
||||
{
|
||||
struct tiler_block *block = kzalloc(sizeof(*block), GFP_KERNEL);
|
||||
u32 min_align = 128;
|
||||
int ret;
|
||||
unsigned long flags;
|
||||
|
||||
BUG_ON(!validfmt(fmt));
|
||||
|
||||
/* convert width/height to slots */
|
||||
w = DIV_ROUND_UP(w, geom[fmt].slot_w);
|
||||
h = DIV_ROUND_UP(h, geom[fmt].slot_h);
|
||||
|
||||
/* convert alignment to slots */
|
||||
min_align = max(min_align, (geom[fmt].slot_w * geom[fmt].cpp));
|
||||
align = ALIGN(align, min_align);
|
||||
align /= geom[fmt].slot_w * geom[fmt].cpp;
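/* align is now expressed in slots rather than bytes */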
|
||||
|
||||
block->fmt = fmt;
|
||||
|
||||
ret = tcm_reserve_2d(containers[fmt], w, h, align, &block->area);
|
||||
if (ret) {
|
||||
kfree(block);
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
/* add to allocation list */
|
||||
spin_lock_irqsave(&list_lock, flags);
|
||||
list_add(&block->alloc_node, &omap_dmm->alloc_head);
|
||||
spin_unlock_irqrestore(&list_lock, flags);
|
||||
|
||||
return block;
|
||||
}
|
||||
|
||||
struct tiler_block *tiler_reserve_1d(size_t size)
|
||||
{
|
||||
struct tiler_block *block = kzalloc(sizeof(*block), GFP_KERNEL);
|
||||
int num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
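/* a 1d (page mode) block is sized in whole pages */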
|
||||
unsigned long flags;
|
||||
|
||||
if (!block)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
block->fmt = TILFMT_PAGE;
|
||||
|
||||
if (tcm_reserve_1d(containers[TILFMT_PAGE], num_pages,
|
||||
&block->area)) {
|
||||
kfree(block);
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
spin_lock_irqsave(&list_lock, flags);
|
||||
list_add(&block->alloc_node, &omap_dmm->alloc_head);
|
||||
spin_unlock_irqrestore(&list_lock, flags);
|
||||
|
||||
return block;
|
||||
}
|
||||
|
||||
/* note: if you have pin'd pages, you should have already unpin'd first! */
|
||||
int tiler_release(struct tiler_block *block)
|
||||
{
|
||||
int ret = tcm_free(&block->area);
|
||||
unsigned long flags;
|
||||
|
||||
if (block->area.tcm)
|
||||
dev_err(omap_dmm->dev, "failed to release block\n");
|
||||
|
||||
spin_lock_irqsave(&list_lock, flags);
|
||||
list_del(&block->alloc_node);
|
||||
spin_unlock_irqrestore(&list_lock, flags);
|
||||
|
||||
kfree(block);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Utils
|
||||
*/
|
||||
|
||||
/* calculate the tiler space address of a pixel in a view orientation...
|
||||
* below description copied from the display subsystem section of TRM:
|
||||
*
|
||||
* When the TILER is addressed, the bits:
|
||||
* [28:27] = 0x0 for 8-bit tiled
|
||||
* 0x1 for 16-bit tiled
|
||||
* 0x2 for 32-bit tiled
|
||||
* 0x3 for page mode
|
||||
* [31:29] = 0x0 for 0-degree view
|
||||
* 0x1 for 180-degree view + mirroring
|
||||
* 0x2 for 0-degree view + mirroring
|
||||
* 0x3 for 180-degree view
|
||||
* 0x4 for 270-degree view + mirroring
|
||||
* 0x5 for 270-degree view
|
||||
* 0x6 for 90-degree view
|
||||
* 0x7 for 90-degree view + mirroring
|
||||
* Otherwise the bits indicated the corresponding bit address to access
|
||||
* the SDRAM.
|
||||
*/
|
||||
static u32 tiler_get_address(enum tiler_fmt fmt, u32 orient, u32 x, u32 y)
|
||||
{
|
||||
u32 x_bits, y_bits, tmp, x_mask, y_mask, alignment;
|
||||
|
||||
x_bits = CONT_WIDTH_BITS - geom[fmt].x_shft;
|
||||
y_bits = CONT_HEIGHT_BITS - geom[fmt].y_shft;
|
||||
alignment = geom[fmt].x_shft + geom[fmt].y_shft;
|
||||
|
||||
/* validate coordinate */
|
||||
x_mask = MASK(x_bits);
|
||||
y_mask = MASK(y_bits);
|
||||
|
||||
if (x < 0 || x > x_mask || y < 0 || y > y_mask) {
|
||||
DBG("invalid coords: %u < 0 || %u > %u || %u < 0 || %u > %u",
|
||||
x, x, x_mask, y, y, y_mask);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* account for mirroring */
|
||||
if (orient & MASK_X_INVERT)
|
||||
x ^= x_mask;
|
||||
if (orient & MASK_Y_INVERT)
|
||||
y ^= y_mask;
|
||||
|
||||
/* get coordinate address */
|
||||
if (orient & MASK_XY_FLIP)
|
||||
tmp = ((x << y_bits) + y);
|
||||
else
|
||||
tmp = ((y << x_bits) + x);
|
||||
|
||||
return TIL_ADDR((tmp << alignment), orient, fmt);
|
||||
}
|
||||
|
||||
dma_addr_t tiler_ssptr(struct tiler_block *block)
|
||||
{
|
||||
BUG_ON(!validfmt(block->fmt));
|
||||
|
||||
return TILVIEW_8BIT + tiler_get_address(block->fmt, 0,
|
||||
block->area.p0.x * geom[block->fmt].slot_w,
|
||||
block->area.p0.y * geom[block->fmt].slot_h);
|
||||
}
|
||||
|
||||
dma_addr_t tiler_tsptr(struct tiler_block *block, uint32_t orient,
|
||||
uint32_t x, uint32_t y)
|
||||
{
|
||||
struct tcm_pt *p = &block->area.p0;
|
||||
BUG_ON(!validfmt(block->fmt));
|
||||
|
||||
return tiler_get_address(block->fmt, orient,
|
||||
(p->x * geom[block->fmt].slot_w) + x,
|
||||
(p->y * geom[block->fmt].slot_h) + y);
|
||||
}
|
||||
|
||||
void tiler_align(enum tiler_fmt fmt, uint16_t *w, uint16_t *h)
|
||||
{
|
||||
BUG_ON(!validfmt(fmt));
|
||||
*w = round_up(*w, geom[fmt].slot_w);
|
||||
*h = round_up(*h, geom[fmt].slot_h);
|
||||
}
|
||||
|
||||
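/* stride, in bytes, between successive lines of a tiler view; this is fixed
 * by the container geometry and does not depend on the buffer width */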
uint32_t tiler_stride(enum tiler_fmt fmt, uint32_t orient)
|
||||
{
|
||||
BUG_ON(!validfmt(fmt));
|
||||
|
||||
if (orient & MASK_XY_FLIP)
|
||||
return 1 << (CONT_HEIGHT_BITS + geom[fmt].x_shft);
|
||||
else
|
||||
return 1 << (CONT_WIDTH_BITS + geom[fmt].y_shft);
|
||||
}
|
||||
|
||||
size_t tiler_size(enum tiler_fmt fmt, uint16_t w, uint16_t h)
|
||||
{
|
||||
tiler_align(fmt, &w, &h);
|
||||
return geom[fmt].cpp * w * h;
|
||||
}
|
||||
|
||||
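/* size of the virtual (mmap) view: each row is padded to a full page,
 * unlike tiler_size() which gives the slot-aligned physical footprint */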
size_t tiler_vsize(enum tiler_fmt fmt, uint16_t w, uint16_t h)
|
||||
{
|
||||
BUG_ON(!validfmt(fmt));
|
||||
return round_up(geom[fmt].cpp * w, PAGE_SIZE) * h;
|
||||
}
|
||||
|
||||
bool dmm_is_available(void)
|
||||
{
|
||||
return omap_dmm ? true : false;
|
||||
}
|
||||
|
||||
static int omap_dmm_remove(struct platform_device *dev)
|
||||
{
|
||||
struct tiler_block *block, *_block;
|
||||
int i;
|
||||
unsigned long flags;
|
||||
|
||||
if (omap_dmm) {
|
||||
/* free all area regions */
|
||||
spin_lock_irqsave(&list_lock, flags);
|
||||
list_for_each_entry_safe(block, _block, &omap_dmm->alloc_head,
|
||||
alloc_node) {
|
||||
list_del(&block->alloc_node);
|
||||
kfree(block);
|
||||
}
|
||||
spin_unlock_irqrestore(&list_lock, flags);
|
||||
|
||||
for (i = 0; i < omap_dmm->num_lut; i++)
|
||||
if (omap_dmm->tcm && omap_dmm->tcm[i])
|
||||
omap_dmm->tcm[i]->deinit(omap_dmm->tcm[i]);
|
||||
kfree(omap_dmm->tcm);
|
||||
|
||||
kfree(omap_dmm->engines);
|
||||
if (omap_dmm->refill_va)
|
||||
dma_free_writecombine(omap_dmm->dev,
|
||||
REFILL_BUFFER_SIZE * omap_dmm->num_engines,
|
||||
omap_dmm->refill_va,
|
||||
omap_dmm->refill_pa);
|
||||
if (omap_dmm->dummy_page)
|
||||
__free_page(omap_dmm->dummy_page);
|
||||
|
||||
if (omap_dmm->irq > 0)
|
||||
free_irq(omap_dmm->irq, omap_dmm);
|
||||
|
||||
iounmap(omap_dmm->base);
|
||||
kfree(omap_dmm);
|
||||
omap_dmm = NULL;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int omap_dmm_probe(struct platform_device *dev)
|
||||
{
|
||||
int ret = -EFAULT, i;
|
||||
struct tcm_area area = {0};
|
||||
u32 hwinfo, pat_geom;
|
||||
struct resource *mem;
|
||||
|
||||
omap_dmm = kzalloc(sizeof(*omap_dmm), GFP_KERNEL);
|
||||
if (!omap_dmm)
|
||||
goto fail;
|
||||
|
||||
/* initialize lists */
|
||||
INIT_LIST_HEAD(&omap_dmm->alloc_head);
|
||||
INIT_LIST_HEAD(&omap_dmm->idle_head);
|
||||
|
||||
init_waitqueue_head(&omap_dmm->engine_queue);
|
||||
|
||||
/* lookup hwmod data - base address and irq */
|
||||
mem = platform_get_resource(dev, IORESOURCE_MEM, 0);
|
||||
if (!mem) {
|
||||
dev_err(&dev->dev, "failed to get base address resource\n");
|
||||
goto fail;
|
||||
}
|
||||
|
||||
omap_dmm->base = ioremap(mem->start, SZ_2K);
|
||||
|
||||
if (!omap_dmm->base) {
|
||||
dev_err(&dev->dev, "failed to get dmm base address\n");
|
||||
goto fail;
|
||||
}
|
||||
|
||||
omap_dmm->irq = platform_get_irq(dev, 0);
|
||||
if (omap_dmm->irq < 0) {
|
||||
dev_err(&dev->dev, "failed to get IRQ resource\n");
|
||||
goto fail;
|
||||
}
|
||||
|
||||
omap_dmm->dev = &dev->dev;
|
||||
|
||||
hwinfo = readl(omap_dmm->base + DMM_PAT_HWINFO);
|
||||
omap_dmm->num_engines = (hwinfo >> 24) & 0x1F;
|
||||
omap_dmm->num_lut = (hwinfo >> 16) & 0x1F;
|
||||
omap_dmm->container_width = 256;
|
||||
omap_dmm->container_height = 128;
|
||||
|
||||
atomic_set(&omap_dmm->engine_counter, omap_dmm->num_engines);
|
||||
|
||||
/* read out actual LUT width and height */
|
||||
pat_geom = readl(omap_dmm->base + DMM_PAT_GEOMETRY);
|
||||
omap_dmm->lut_width = ((pat_geom >> 16) & 0xF) << 5;
|
||||
omap_dmm->lut_height = ((pat_geom >> 24) & 0xF) << 5;
|
||||
|
||||
/* on OMAP5 the LUT has twice the height; the upper half is treated as a
 * separate container, so account for one more LUT */
|
||||
if (omap_dmm->lut_height != omap_dmm->container_height)
|
||||
omap_dmm->num_lut++;
|
||||
|
||||
/* initialize DMM registers */
|
||||
writel(0x88888888, omap_dmm->base + DMM_PAT_VIEW__0);
|
||||
writel(0x88888888, omap_dmm->base + DMM_PAT_VIEW__1);
|
||||
writel(0x80808080, omap_dmm->base + DMM_PAT_VIEW_MAP__0);
|
||||
writel(0x80000000, omap_dmm->base + DMM_PAT_VIEW_MAP_BASE);
|
||||
writel(0x88888888, omap_dmm->base + DMM_TILER_OR__0);
|
||||
writel(0x88888888, omap_dmm->base + DMM_TILER_OR__1);
|
||||
|
||||
ret = request_irq(omap_dmm->irq, omap_dmm_irq_handler, IRQF_SHARED,
|
||||
"omap_dmm_irq_handler", omap_dmm);
|
||||
|
||||
if (ret) {
|
||||
dev_err(&dev->dev, "couldn't register IRQ %d, error %d\n",
|
||||
omap_dmm->irq, ret);
|
||||
omap_dmm->irq = -1;
|
||||
goto fail;
|
||||
}
|
||||
|
||||
/* Enable all interrupts for each refill engine except
|
||||
* ERR_LUT_MISS<n> (which is just advisory, and we don't care
|
||||
* about because we want to be able to refill live scanout
|
||||
* buffers for accelerated pan/scroll) and FILL_DSC<n> which
|
||||
* we just generally don't care about.
|
||||
*/
|
||||
writel(0x7e7e7e7e, omap_dmm->base + DMM_PAT_IRQENABLE_SET);
|
||||
|
||||
omap_dmm->dummy_page = alloc_page(GFP_KERNEL | __GFP_DMA32);
|
||||
if (!omap_dmm->dummy_page) {
|
||||
dev_err(&dev->dev, "could not allocate dummy page\n");
|
||||
ret = -ENOMEM;
|
||||
goto fail;
|
||||
}
|
||||
|
||||
/* set dma mask for device */
|
||||
ret = dma_set_coherent_mask(&dev->dev, DMA_BIT_MASK(32));
|
||||
if (ret)
|
||||
goto fail;
|
||||
|
||||
omap_dmm->dummy_pa = page_to_phys(omap_dmm->dummy_page);
|
||||
|
||||
/* alloc refill memory */
|
||||
omap_dmm->refill_va = dma_alloc_writecombine(&dev->dev,
|
||||
REFILL_BUFFER_SIZE * omap_dmm->num_engines,
|
||||
&omap_dmm->refill_pa, GFP_KERNEL);
|
||||
if (!omap_dmm->refill_va) {
|
||||
dev_err(&dev->dev, "could not allocate refill memory\n");
|
||||
goto fail;
|
||||
}
|
||||
|
||||
/* alloc engines */
|
||||
omap_dmm->engines = kcalloc(omap_dmm->num_engines,
|
||||
sizeof(struct refill_engine), GFP_KERNEL);
|
||||
if (!omap_dmm->engines) {
|
||||
ret = -ENOMEM;
|
||||
goto fail;
|
||||
}
|
||||
|
||||
for (i = 0; i < omap_dmm->num_engines; i++) {
|
||||
omap_dmm->engines[i].id = i;
|
||||
omap_dmm->engines[i].dmm = omap_dmm;
|
||||
omap_dmm->engines[i].refill_va = omap_dmm->refill_va +
|
||||
(REFILL_BUFFER_SIZE * i);
|
||||
omap_dmm->engines[i].refill_pa = omap_dmm->refill_pa +
|
||||
(REFILL_BUFFER_SIZE * i);
|
||||
init_waitqueue_head(&omap_dmm->engines[i].wait_for_refill);
|
||||
|
||||
list_add(&omap_dmm->engines[i].idle_node, &omap_dmm->idle_head);
|
||||
}
|
||||
|
||||
omap_dmm->tcm = kcalloc(omap_dmm->num_lut, sizeof(*omap_dmm->tcm),
|
||||
GFP_KERNEL);
|
||||
if (!omap_dmm->tcm) {
|
||||
ret = -ENOMEM;
|
||||
goto fail;
|
||||
}
|
||||
|
||||
/* init containers */
|
||||
/* Each LUT is associated with a TCM (container manager). We use the
   lut_id to identify the correct LUT for programming during refill
   operations. */
|
||||
for (i = 0; i < omap_dmm->num_lut; i++) {
|
||||
omap_dmm->tcm[i] = sita_init(omap_dmm->container_width,
|
||||
omap_dmm->container_height,
|
||||
NULL);
|
||||
|
||||
if (!omap_dmm->tcm[i]) {
|
||||
dev_err(&dev->dev, "failed to allocate container\n");
|
||||
ret = -ENOMEM;
|
||||
goto fail;
|
||||
}
|
||||
|
||||
omap_dmm->tcm[i]->lut_id = i;
|
||||
}
|
||||
|
||||
/* assign access mode containers to applicable tcm container */
|
||||
/* OMAP 4 has 1 container for all 4 views */
|
||||
/* OMAP 5 has 2 containers, 1 for 2D and 1 for 1D */
|
||||
containers[TILFMT_8BIT] = omap_dmm->tcm[0];
|
||||
containers[TILFMT_16BIT] = omap_dmm->tcm[0];
|
||||
containers[TILFMT_32BIT] = omap_dmm->tcm[0];
|
||||
|
||||
if (omap_dmm->container_height != omap_dmm->lut_height) {
|
||||
/* second LUT is used for PAGE mode. Programming must use
|
||||
y offset that is added to all y coordinates. LUT id is still
|
||||
0, because it is the same LUT, just the upper 128 lines */
|
||||
containers[TILFMT_PAGE] = omap_dmm->tcm[1];
|
||||
omap_dmm->tcm[1]->y_offset = OMAP5_LUT_OFFSET;
|
||||
omap_dmm->tcm[1]->lut_id = 0;
|
||||
} else {
|
||||
containers[TILFMT_PAGE] = omap_dmm->tcm[0];
|
||||
}
|
||||
|
||||
area = (struct tcm_area) {
|
||||
.tcm = NULL,
|
||||
.p1.x = omap_dmm->container_width - 1,
|
||||
.p1.y = omap_dmm->container_height - 1,
|
||||
};
|
||||
|
||||
/* initialize all LUTs to dummy page entries */
|
||||
for (i = 0; i < omap_dmm->num_lut; i++) {
|
||||
area.tcm = omap_dmm->tcm[i];
|
||||
if (fill(&area, NULL, 0, 0, true))
|
||||
dev_err(omap_dmm->dev, "refill failed");
|
||||
}
|
||||
|
||||
dev_info(omap_dmm->dev, "initialized all PAT entries\n");
|
||||
|
||||
return 0;
|
||||
|
||||
fail:
|
||||
if (omap_dmm_remove(dev))
|
||||
dev_err(&dev->dev, "cleanup failed\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* debugfs support
|
||||
*/
|
||||
|
||||
#ifdef CONFIG_DEBUG_FS
|
||||
|
||||
static const char *alphabet = "abcdefghijklmnopqrstuvwxyz"
|
||||
"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789";
|
||||
static const char *special = ".,:;'\"`~!^-+";
|
||||
|
||||
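/* paint a tcm area into the ASCII map used by tiler_map_show(); 'ovw'
 * forces overwriting of cells that are already marked */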
static void fill_map(char **map, int xdiv, int ydiv, struct tcm_area *a,
|
||||
char c, bool ovw)
|
||||
{
|
||||
int x, y;
|
||||
for (y = a->p0.y / ydiv; y <= a->p1.y / ydiv; y++)
|
||||
for (x = a->p0.x / xdiv; x <= a->p1.x / xdiv; x++)
|
||||
if (map[y][x] == ' ' || ovw)
|
||||
map[y][x] = c;
|
||||
}
|
||||
|
||||
static void fill_map_pt(char **map, int xdiv, int ydiv, struct tcm_pt *p,
|
||||
char c)
|
||||
{
|
||||
map[p->y / ydiv][p->x / xdiv] = c;
|
||||
}
|
||||
|
||||
static char read_map_pt(char **map, int xdiv, int ydiv, struct tcm_pt *p)
|
||||
{
|
||||
return map[p->y / ydiv][p->x / xdiv];
|
||||
}
|
||||
|
||||
static int map_width(int xdiv, int x0, int x1)
|
||||
{
|
||||
return (x1 / xdiv) - (x0 / xdiv) + 1;
|
||||
}
|
||||
|
||||
static void text_map(char **map, int xdiv, char *nice, int yd, int x0, int x1)
|
||||
{
|
||||
char *p = map[yd] + (x0 / xdiv);
|
||||
int w = (map_width(xdiv, x0, x1) - strlen(nice)) / 2;
|
||||
if (w >= 0) {
|
||||
p += w;
|
||||
while (*nice)
|
||||
*p++ = *nice++;
|
||||
}
|
||||
}
|
||||
|
||||
static void map_1d_info(char **map, int xdiv, int ydiv, char *nice,
|
||||
struct tcm_area *a)
|
||||
{
|
||||
sprintf(nice, "%dK", tcm_sizeof(*a) * 4);
|
||||
if (a->p0.y + 1 < a->p1.y) {
|
||||
text_map(map, xdiv, nice, (a->p0.y + a->p1.y) / 2 / ydiv, 0,
|
||||
256 - 1);
|
||||
} else if (a->p0.y < a->p1.y) {
|
||||
if (strlen(nice) < map_width(xdiv, a->p0.x, 256 - 1))
|
||||
text_map(map, xdiv, nice, a->p0.y / ydiv,
|
||||
a->p0.x + xdiv, 256 - 1);
|
||||
else if (strlen(nice) < map_width(xdiv, 0, a->p1.x))
|
||||
text_map(map, xdiv, nice, a->p1.y / ydiv,
|
||||
0, a->p1.y - xdiv);
|
||||
} else if (strlen(nice) + 1 < map_width(xdiv, a->p0.x, a->p1.x)) {
|
||||
text_map(map, xdiv, nice, a->p0.y / ydiv, a->p0.x, a->p1.x);
|
||||
}
|
||||
}
|
||||
|
||||
static void map_2d_info(char **map, int xdiv, int ydiv, char *nice,
|
||||
struct tcm_area *a)
|
||||
{
|
||||
sprintf(nice, "(%d*%d)", tcm_awidth(*a), tcm_aheight(*a));
|
||||
if (strlen(nice) + 1 < map_width(xdiv, a->p0.x, a->p1.x))
|
||||
text_map(map, xdiv, nice, (a->p0.y + a->p1.y) / 2 / ydiv,
|
||||
a->p0.x, a->p1.x);
|
||||
}
|
||||
|
||||
int tiler_map_show(struct seq_file *s, void *arg)
|
||||
{
|
||||
int xdiv = 2, ydiv = 1;
|
||||
char **map = NULL, *global_map;
|
||||
struct tiler_block *block;
|
||||
struct tcm_area a, p;
|
||||
int i;
|
||||
const char *m2d = alphabet;
|
||||
const char *a2d = special;
|
||||
const char *m2dp = m2d, *a2dp = a2d;
|
||||
char nice[128];
|
||||
int h_adj;
|
||||
int w_adj;
|
||||
unsigned long flags;
|
||||
int lut_idx;
|
||||
|
||||
|
||||
if (!omap_dmm) {
|
||||
/* early return if dmm/tiler device is not initialized */
|
||||
return 0;
|
||||
}
|
||||
|
||||
h_adj = omap_dmm->container_height / ydiv;
|
||||
w_adj = omap_dmm->container_width / xdiv;
|
||||
|
||||
map = kmalloc(h_adj * sizeof(*map), GFP_KERNEL);
|
||||
global_map = kmalloc((w_adj + 1) * h_adj, GFP_KERNEL);
|
||||
|
||||
if (!map || !global_map)
|
||||
goto error;
|
||||
|
||||
for (lut_idx = 0; lut_idx < omap_dmm->num_lut; lut_idx++) {
|
||||
memset(map, 0, h_adj * sizeof(*map));
|
||||
memset(global_map, ' ', (w_adj + 1) * h_adj);
|
||||
|
||||
for (i = 0; i < omap_dmm->container_height; i++) {
|
||||
map[i] = global_map + i * (w_adj + 1);
|
||||
map[i][w_adj] = 0;
|
||||
}
|
||||
|
||||
spin_lock_irqsave(&list_lock, flags);
|
||||
|
||||
list_for_each_entry(block, &omap_dmm->alloc_head, alloc_node) {
|
||||
if (block->area.tcm == omap_dmm->tcm[lut_idx]) {
|
||||
if (block->fmt != TILFMT_PAGE) {
|
||||
fill_map(map, xdiv, ydiv, &block->area,
|
||||
*m2dp, true);
|
||||
if (!*++a2dp)
|
||||
a2dp = a2d;
|
||||
if (!*++m2dp)
|
||||
m2dp = m2d;
|
||||
map_2d_info(map, xdiv, ydiv, nice,
|
||||
&block->area);
|
||||
} else {
|
||||
bool start = read_map_pt(map, xdiv,
|
||||
ydiv, &block->area.p0) == ' ';
|
||||
bool end = read_map_pt(map, xdiv, ydiv,
|
||||
&block->area.p1) == ' ';
|
||||
|
||||
tcm_for_each_slice(a, block->area, p)
|
||||
fill_map(map, xdiv, ydiv, &a,
|
||||
'=', true);
|
||||
fill_map_pt(map, xdiv, ydiv,
|
||||
&block->area.p0,
|
||||
start ? '<' : 'X');
|
||||
fill_map_pt(map, xdiv, ydiv,
|
||||
&block->area.p1,
|
||||
end ? '>' : 'X');
|
||||
map_1d_info(map, xdiv, ydiv, nice,
|
||||
&block->area);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
spin_unlock_irqrestore(&list_lock, flags);
|
||||
|
||||
if (s) {
|
||||
seq_printf(s, "CONTAINER %d DUMP BEGIN\n", lut_idx);
|
||||
for (i = 0; i < 128; i++)
|
||||
seq_printf(s, "%03d:%s\n", i, map[i]);
|
||||
seq_printf(s, "CONTAINER %d DUMP END\n", lut_idx);
|
||||
} else {
|
||||
dev_dbg(omap_dmm->dev, "CONTAINER %d DUMP BEGIN\n",
|
||||
lut_idx);
|
||||
for (i = 0; i < 128; i++)
|
||||
dev_dbg(omap_dmm->dev, "%03d:%s\n", i, map[i]);
|
||||
dev_dbg(omap_dmm->dev, "CONTAINER %d DUMP END\n",
|
||||
lut_idx);
|
||||
}
|
||||
}
|
||||
|
||||
error:
|
||||
kfree(map);
|
||||
kfree(global_map);
|
||||
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_PM
|
||||
static int omap_dmm_resume(struct device *dev)
|
||||
{
|
||||
struct tcm_area area;
|
||||
int i;
|
||||
|
||||
if (!omap_dmm)
|
||||
return -ENODEV;
|
||||
|
||||
area = (struct tcm_area) {
|
||||
.tcm = NULL,
|
||||
.p1.x = omap_dmm->container_width - 1,
|
||||
.p1.y = omap_dmm->container_height - 1,
|
||||
};
|
||||
|
||||
/* initialize all LUTs to dummy page entries */
|
||||
for (i = 0; i < omap_dmm->num_lut; i++) {
|
||||
area.tcm = omap_dmm->tcm[i];
|
||||
if (fill(&area, NULL, 0, 0, true))
|
||||
dev_err(dev, "refill failed");
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct dev_pm_ops omap_dmm_pm_ops = {
|
||||
.resume = omap_dmm_resume,
|
||||
};
|
||||
#endif
|
||||
|
||||
#if defined(CONFIG_OF)
|
||||
static const struct of_device_id dmm_of_match[] = {
|
||||
{ .compatible = "ti,omap4-dmm", },
|
||||
{ .compatible = "ti,omap5-dmm", },
|
||||
{},
|
||||
};
|
||||
#endif
|
||||
|
||||
struct platform_driver omap_dmm_driver = {
|
||||
.probe = omap_dmm_probe,
|
||||
.remove = omap_dmm_remove,
|
||||
.driver = {
|
||||
.owner = THIS_MODULE,
|
||||
.name = DMM_DRIVER_NAME,
|
||||
.of_match_table = of_match_ptr(dmm_of_match),
|
||||
#ifdef CONFIG_PM
|
||||
.pm = &omap_dmm_pm_ops,
|
||||
#endif
|
||||
},
|
||||
};
|
||||
|
||||
MODULE_LICENSE("GPL v2");
|
||||
MODULE_AUTHOR("Andy Gross <andy.gross@ti.com>");
|
||||
MODULE_DESCRIPTION("OMAP DMM/Tiler Driver");
|
||||
MODULE_ALIAS("platform:" DMM_DRIVER_NAME);
|
||||
141
drivers/gpu/drm/omapdrm/omap_dmm_tiler.h
Normal file
@ -0,0 +1,141 @@
/*
|
||||
*
|
||||
* Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
|
||||
* Author: Rob Clark <rob@ti.com>
|
||||
* Andy Gross <andy.gross@ti.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License as
|
||||
* published by the Free Software Foundation version 2.
|
||||
*
|
||||
* This program is distributed "as is" WITHOUT ANY WARRANTY of any
|
||||
* kind, whether express or implied; without even the implied warranty
|
||||
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*/
|
||||
#ifndef OMAP_DMM_TILER_H
|
||||
#define OMAP_DMM_TILER_H
|
||||
|
||||
#include "omap_drv.h"
|
||||
#include "tcm.h"
|
||||
|
||||
enum tiler_fmt {
|
||||
TILFMT_8BIT = 0,
|
||||
TILFMT_16BIT,
|
||||
TILFMT_32BIT,
|
||||
TILFMT_PAGE,
|
||||
TILFMT_NFORMATS
|
||||
};
|
||||
|
||||
struct pat_area {
|
||||
u32 x0:8;
|
||||
u32 y0:8;
|
||||
u32 x1:8;
|
||||
u32 y1:8;
|
||||
};
|
||||
|
||||
struct tiler_block {
|
||||
struct list_head alloc_node; /* node for global block list */
|
||||
struct tcm_area area; /* area */
|
||||
enum tiler_fmt fmt; /* format */
|
||||
};
|
||||
|
||||
/* bits representing the same slot in DMM-TILER hw-block */
|
||||
#define SLOT_WIDTH_BITS 6
|
||||
#define SLOT_HEIGHT_BITS 6
|
||||
|
||||
/* bits reserved to describe coordinates in DMM-TILER hw-block */
|
||||
#define CONT_WIDTH_BITS 14
|
||||
#define CONT_HEIGHT_BITS 13
|
||||
|
||||
/* calculated constants */
|
||||
#define TILER_PAGE (1 << (SLOT_WIDTH_BITS + SLOT_HEIGHT_BITS))
|
||||
#define TILER_WIDTH (1 << (CONT_WIDTH_BITS - SLOT_WIDTH_BITS))
|
||||
#define TILER_HEIGHT (1 << (CONT_HEIGHT_BITS - SLOT_HEIGHT_BITS))
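/* with the bit widths above: TILER_PAGE = 4096, TILER_WIDTH = 256 and
 * TILER_HEIGHT = 128 (in slots) */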
|
||||
|
||||
/*
|
||||
Table 15-11. Coding and Description of TILER Orientations
|
||||
S Y X Description Alternate description
|
||||
0 0 0 0-degree view Natural view
|
||||
0 0 1 0-degree view with vertical mirror 180-degree view with horizontal mirror
|
||||
0 1 0 0-degree view with horizontal mirror 180-degree view with vertical mirror
|
||||
0 1 1 180-degree view
|
||||
1 0 0 90-degree view with vertical mirror 270-degree view with horizontal mirror
|
||||
1 0 1 270-degree view
|
||||
1 1 0 90-degree view
|
||||
1 1 1 90-degree view with horizontal mirror 270-degree view with vertical mirror
|
||||
*/
|
||||
#define MASK_XY_FLIP (1 << 31)
|
||||
#define MASK_Y_INVERT (1 << 30)
|
||||
#define MASK_X_INVERT (1 << 29)
|
||||
#define SHIFT_ACC_MODE 27
|
||||
#define MASK_ACC_MODE 3
|
||||
|
||||
#define MASK(bits) ((1 << (bits)) - 1)
|
||||
|
||||
#define TILVIEW_8BIT 0x60000000u
|
||||
#define TILVIEW_16BIT (TILVIEW_8BIT + VIEW_SIZE)
|
||||
#define TILVIEW_32BIT (TILVIEW_16BIT + VIEW_SIZE)
|
||||
#define TILVIEW_PAGE (TILVIEW_32BIT + VIEW_SIZE)
|
||||
#define TILVIEW_END (TILVIEW_PAGE + VIEW_SIZE)
|
||||
|
||||
/* create tsptr by adding view orientation and access mode */
|
||||
#define TIL_ADDR(x, orient, a)\
|
||||
((u32) (x) | (orient) | ((a) << SHIFT_ACC_MODE))
|
||||
|
||||
#ifdef CONFIG_DEBUG_FS
|
||||
int tiler_map_show(struct seq_file *s, void *arg);
|
||||
#endif
|
||||
|
||||
/* pin/unpin */
|
||||
int tiler_pin(struct tiler_block *block, struct page **pages,
|
||||
uint32_t npages, uint32_t roll, bool wait);
|
||||
int tiler_unpin(struct tiler_block *block);
|
||||
|
||||
/* reserve/release */
|
||||
struct tiler_block *tiler_reserve_2d(enum tiler_fmt fmt, uint16_t w, uint16_t h,
|
||||
uint16_t align);
|
||||
struct tiler_block *tiler_reserve_1d(size_t size);
|
||||
int tiler_release(struct tiler_block *block);
|
||||
|
||||
/* utilities */
|
||||
dma_addr_t tiler_ssptr(struct tiler_block *block);
|
||||
dma_addr_t tiler_tsptr(struct tiler_block *block, uint32_t orient,
|
||||
uint32_t x, uint32_t y);
|
||||
uint32_t tiler_stride(enum tiler_fmt fmt, uint32_t orient);
|
||||
size_t tiler_size(enum tiler_fmt fmt, uint16_t w, uint16_t h);
|
||||
size_t tiler_vsize(enum tiler_fmt fmt, uint16_t w, uint16_t h);
|
||||
void tiler_align(enum tiler_fmt fmt, uint16_t *w, uint16_t *h);
|
||||
bool dmm_is_available(void);
|
||||
|
||||
extern struct platform_driver omap_dmm_driver;
|
||||
|
||||
/* GEM bo flags -> tiler fmt */
|
||||
static inline enum tiler_fmt gem2fmt(uint32_t flags)
|
||||
{
|
||||
switch (flags & OMAP_BO_TILED) {
|
||||
case OMAP_BO_TILED_8:
|
||||
return TILFMT_8BIT;
|
||||
case OMAP_BO_TILED_16:
|
||||
return TILFMT_16BIT;
|
||||
case OMAP_BO_TILED_32:
|
||||
return TILFMT_32BIT;
|
||||
default:
|
||||
return TILFMT_PAGE;
|
||||
}
|
||||
}
|
||||
|
||||
static inline bool validfmt(enum tiler_fmt fmt)
|
||||
{
|
||||
switch (fmt) {
|
||||
case TILFMT_8BIT:
|
||||
case TILFMT_16BIT:
|
||||
case TILFMT_32BIT:
|
||||
case TILFMT_PAGE:
|
||||
return true;
|
||||
default:
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
#endif
|
||||
771
drivers/gpu/drm/omapdrm/omap_drv.c
Normal file
@ -0,0 +1,771 @@
/*
|
||||
* drivers/gpu/drm/omapdrm/omap_drv.c
|
||||
*
|
||||
* Copyright (C) 2011 Texas Instruments
|
||||
* Author: Rob Clark <rob@ti.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 as published by
|
||||
* the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License along with
|
||||
* this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include "omap_drv.h"
|
||||
|
||||
#include "drm_crtc_helper.h"
|
||||
#include "drm_fb_helper.h"
|
||||
#include "omap_dmm_tiler.h"
|
||||
|
||||
#define DRIVER_NAME MODULE_NAME
|
||||
#define DRIVER_DESC "OMAP DRM"
|
||||
#define DRIVER_DATE "20110917"
|
||||
#define DRIVER_MAJOR 1
|
||||
#define DRIVER_MINOR 0
|
||||
#define DRIVER_PATCHLEVEL 0
|
||||
|
||||
static int num_crtc = CONFIG_DRM_OMAP_NUM_CRTCS;
|
||||
|
||||
MODULE_PARM_DESC(num_crtc, "Number of overlays to use as CRTCs");
|
||||
module_param(num_crtc, int, 0600);
|
||||
|
||||
/*
|
||||
* mode config funcs
|
||||
*/
|
||||
|
||||
/* Notes about mapping DSS and DRM entities:
|
||||
* CRTC: overlay
|
||||
* encoder: manager.. with some extension to allow one primary CRTC
|
||||
* and zero or more video CRTC's to be mapped to one encoder?
|
||||
* connector: dssdev.. manager can be attached/detached from different
|
||||
* devices
|
||||
*/
|
||||
|
||||
static void omap_fb_output_poll_changed(struct drm_device *dev)
|
||||
{
|
||||
struct omap_drm_private *priv = dev->dev_private;
|
||||
DBG("dev=%p", dev);
|
||||
if (priv->fbdev)
|
||||
drm_fb_helper_hotplug_event(priv->fbdev);
|
||||
}
|
||||
|
||||
static const struct drm_mode_config_funcs omap_mode_config_funcs = {
|
||||
.fb_create = omap_framebuffer_create,
|
||||
.output_poll_changed = omap_fb_output_poll_changed,
|
||||
};
|
||||
|
||||
static int get_connector_type(struct omap_dss_device *dssdev)
|
||||
{
|
||||
switch (dssdev->type) {
|
||||
case OMAP_DISPLAY_TYPE_HDMI:
|
||||
return DRM_MODE_CONNECTOR_HDMIA;
|
||||
case OMAP_DISPLAY_TYPE_DVI:
|
||||
return DRM_MODE_CONNECTOR_DVID;
|
||||
default:
|
||||
return DRM_MODE_CONNECTOR_Unknown;
|
||||
}
|
||||
}
|
||||
|
||||
static bool channel_used(struct drm_device *dev, enum omap_channel channel)
|
||||
{
|
||||
struct omap_drm_private *priv = dev->dev_private;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < priv->num_crtcs; i++) {
|
||||
struct drm_crtc *crtc = priv->crtcs[i];
|
||||
|
||||
if (omap_crtc_channel(crtc) == channel)
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
static void omap_disconnect_dssdevs(void)
|
||||
{
|
||||
struct omap_dss_device *dssdev = NULL;
|
||||
|
||||
for_each_dss_dev(dssdev)
|
||||
dssdev->driver->disconnect(dssdev);
|
||||
}
|
||||
|
||||
static int omap_connect_dssdevs(void)
|
||||
{
|
||||
int r;
|
||||
struct omap_dss_device *dssdev = NULL;
|
||||
bool no_displays = true;
|
||||
|
||||
for_each_dss_dev(dssdev) {
|
||||
r = dssdev->driver->connect(dssdev);
|
||||
if (r == -EPROBE_DEFER) {
|
||||
omap_dss_put_device(dssdev);
|
||||
goto cleanup;
|
||||
} else if (r) {
|
||||
dev_warn(dssdev->dev, "could not connect display: %s\n",
|
||||
dssdev->name);
|
||||
} else {
|
||||
no_displays = false;
|
||||
}
|
||||
}
|
||||
|
||||
if (no_displays)
|
||||
return -EPROBE_DEFER;
|
||||
|
||||
return 0;
|
||||
|
||||
cleanup:
|
||||
/*
|
||||
* if we are deferring probe, we disconnect the devices we previously
|
||||
* connected
|
||||
*/
|
||||
omap_disconnect_dssdevs();
|
||||
|
||||
return r;
|
||||
}
|
||||
|
||||
static int omap_modeset_init(struct drm_device *dev)
|
||||
{
|
||||
struct omap_drm_private *priv = dev->dev_private;
|
||||
struct omap_dss_device *dssdev = NULL;
|
||||
int num_ovls = dss_feat_get_num_ovls();
|
||||
int num_mgrs = dss_feat_get_num_mgrs();
|
||||
int num_crtcs;
|
||||
int i, id = 0;
|
||||
|
||||
drm_mode_config_init(dev);
|
||||
|
||||
omap_drm_irq_install(dev);
|
||||
|
||||
/*
|
||||
* We usually don't want to create a CRTC for each manager, at least
|
||||
* not until we have a way to expose private planes to userspace.
|
||||
* Otherwise there would not be enough video pipes left for drm planes.
|
||||
* We use the num_crtc argument to limit the number of crtcs we create.
|
||||
*/
|
||||
num_crtcs = min3(num_crtc, num_mgrs, num_ovls);
|
||||
|
||||
dssdev = NULL;
|
||||
|
||||
for_each_dss_dev(dssdev) {
|
||||
struct drm_connector *connector;
|
||||
struct drm_encoder *encoder;
|
||||
enum omap_channel channel;
|
||||
struct omap_overlay_manager *mgr;
|
||||
|
||||
if (!omapdss_device_is_connected(dssdev))
|
||||
continue;
|
||||
|
||||
encoder = omap_encoder_init(dev, dssdev);
|
||||
|
||||
if (!encoder) {
|
||||
dev_err(dev->dev, "could not create encoder: %s\n",
|
||||
dssdev->name);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
connector = omap_connector_init(dev,
|
||||
get_connector_type(dssdev), dssdev, encoder);
|
||||
|
||||
if (!connector) {
|
||||
dev_err(dev->dev, "could not create connector: %s\n",
|
||||
dssdev->name);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
BUG_ON(priv->num_encoders >= ARRAY_SIZE(priv->encoders));
|
||||
BUG_ON(priv->num_connectors >= ARRAY_SIZE(priv->connectors));
|
||||
|
||||
priv->encoders[priv->num_encoders++] = encoder;
|
||||
priv->connectors[priv->num_connectors++] = connector;
|
||||
|
||||
drm_mode_connector_attach_encoder(connector, encoder);
|
||||
|
||||
/*
|
||||
* if we have reached the limit of the crtcs we are allowed to
|
||||
* create, let's not try to look for a crtc for this
|
||||
* panel/encoder and onwards, we will, of course, populate the
|
||||
* the possible_crtcs field for all the encoders with the final
|
||||
* set of crtcs we create
|
||||
*/
|
||||
if (id == num_crtcs)
|
||||
continue;
|
||||
|
||||
/*
|
||||
* get the recommended DISPC channel for this encoder. For now,
|
||||
* we only try to create a crtc out of the recommended one; the
|
||||
* other possible channels to which the encoder can connect are
|
||||
* not considered.
|
||||
*/
|
||||
|
||||
mgr = omapdss_find_mgr_from_display(dssdev);
|
||||
channel = mgr->id;
|
||||
/*
|
||||
* if this channel hasn't already been taken by a previously
|
||||
* allocated crtc, we create a new crtc for it
|
||||
*/
|
||||
if (!channel_used(dev, channel)) {
|
||||
struct drm_plane *plane;
|
||||
struct drm_crtc *crtc;
|
||||
|
||||
plane = omap_plane_init(dev, id, true);
|
||||
crtc = omap_crtc_init(dev, plane, channel, id);
|
||||
|
||||
BUG_ON(priv->num_crtcs >= ARRAY_SIZE(priv->crtcs));
|
||||
priv->crtcs[id] = crtc;
|
||||
priv->num_crtcs++;
|
||||
|
||||
priv->planes[id] = plane;
|
||||
priv->num_planes++;
|
||||
|
||||
id++;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* we have allocated crtcs according to the needs of the panels/encoders;
* add more crtcs here if needed
|
||||
*/
|
||||
for (; id < num_crtcs; id++) {
|
||||
|
||||
/* find a free manager for this crtc */
|
||||
for (i = 0; i < num_mgrs; i++) {
|
||||
if (!channel_used(dev, i)) {
|
||||
struct drm_plane *plane;
|
||||
struct drm_crtc *crtc;
|
||||
|
||||
plane = omap_plane_init(dev, id, true);
|
||||
crtc = omap_crtc_init(dev, plane, i, id);
|
||||
|
||||
BUG_ON(priv->num_crtcs >=
|
||||
ARRAY_SIZE(priv->crtcs));
|
||||
|
||||
priv->crtcs[id] = crtc;
|
||||
priv->num_crtcs++;
|
||||
|
||||
priv->planes[id] = plane;
|
||||
priv->num_planes++;
|
||||
|
||||
break;
|
||||
} else {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
if (i == num_mgrs) {
|
||||
/* this shouldn't really happen */
|
||||
dev_err(dev->dev, "no managers left for crtc\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Create normal planes for the remaining overlays:
|
||||
*/
|
||||
for (; id < num_ovls; id++) {
|
||||
struct drm_plane *plane = omap_plane_init(dev, id, false);
|
||||
|
||||
BUG_ON(priv->num_planes >= ARRAY_SIZE(priv->planes));
|
||||
priv->planes[priv->num_planes++] = plane;
|
||||
}
|
||||
|
||||
for (i = 0; i < priv->num_encoders; i++) {
|
||||
struct drm_encoder *encoder = priv->encoders[i];
|
||||
struct omap_dss_device *dssdev =
|
||||
omap_encoder_get_dssdev(encoder);
|
||||
struct omap_dss_device *output;
|
||||
|
||||
output = omapdss_find_output_from_display(dssdev);
|
||||
|
||||
/* figure out which crtc's we can connect the encoder to: */
|
||||
encoder->possible_crtcs = 0;
|
||||
for (id = 0; id < priv->num_crtcs; id++) {
|
||||
struct drm_crtc *crtc = priv->crtcs[id];
|
||||
enum omap_channel crtc_channel;
|
||||
enum omap_dss_output_id supported_outputs;
|
||||
|
||||
crtc_channel = omap_crtc_channel(crtc);
|
||||
supported_outputs =
|
||||
dss_feat_get_supported_outputs(crtc_channel);
|
||||
|
||||
if (supported_outputs & output->id)
|
||||
encoder->possible_crtcs |= (1 << id);
|
||||
}
|
||||
|
||||
omap_dss_put_device(output);
|
||||
}
|
||||
|
||||
DBG("registered %d planes, %d crtcs, %d encoders and %d connectors\n",
|
||||
priv->num_planes, priv->num_crtcs, priv->num_encoders,
|
||||
priv->num_connectors);
|
||||
|
||||
dev->mode_config.min_width = 32;
|
||||
dev->mode_config.min_height = 32;
|
||||
|
||||
/* note: eventually will need some cpu_is_omapXYZ() type stuff here
|
||||
* to fill in these limits properly on different OMAP generations..
|
||||
*/
|
||||
dev->mode_config.max_width = 2048;
|
||||
dev->mode_config.max_height = 2048;
|
||||
|
||||
dev->mode_config.funcs = &omap_mode_config_funcs;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void omap_modeset_free(struct drm_device *dev)
|
||||
{
|
||||
drm_mode_config_cleanup(dev);
|
||||
}
|
||||
|
||||
/*
|
||||
* drm ioctl funcs
|
||||
*/
|
||||
|
||||
|
||||
static int ioctl_get_param(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv)
|
||||
{
|
||||
struct omap_drm_private *priv = dev->dev_private;
|
||||
struct drm_omap_param *args = data;
|
||||
|
||||
DBG("%p: param=%llu", dev, args->param);
|
||||
|
||||
switch (args->param) {
|
||||
case OMAP_PARAM_CHIPSET_ID:
|
||||
args->value = priv->omaprev;
|
||||
break;
|
||||
default:
|
||||
DBG("unknown parameter %lld", args->param);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ioctl_set_param(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv)
|
||||
{
|
||||
struct drm_omap_param *args = data;
|
||||
|
||||
switch (args->param) {
|
||||
default:
|
||||
DBG("unknown parameter %lld", args->param);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ioctl_gem_new(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv)
|
||||
{
|
||||
struct drm_omap_gem_new *args = data;
|
||||
VERB("%p:%p: size=0x%08x, flags=%08x", dev, file_priv,
|
||||
args->size.bytes, args->flags);
|
||||
return omap_gem_new_handle(dev, file_priv, args->size,
|
||||
args->flags, &args->handle);
|
||||
}
|
||||
|
||||
static int ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv)
|
||||
{
|
||||
struct drm_omap_gem_cpu_prep *args = data;
|
||||
struct drm_gem_object *obj;
|
||||
int ret;
|
||||
|
||||
VERB("%p:%p: handle=%d, op=%x", dev, file_priv, args->handle, args->op);
|
||||
|
||||
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
|
||||
if (!obj)
|
||||
return -ENOENT;
|
||||
|
||||
ret = omap_gem_op_sync(obj, args->op);
|
||||
|
||||
if (!ret)
|
||||
ret = omap_gem_op_start(obj, args->op);
|
||||
|
||||
drm_gem_object_unreference_unlocked(obj);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv)
|
||||
{
|
||||
struct drm_omap_gem_cpu_fini *args = data;
|
||||
struct drm_gem_object *obj;
|
||||
int ret;
|
||||
|
||||
VERB("%p:%p: handle=%d", dev, file_priv, args->handle);
|
||||
|
||||
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
|
||||
if (!obj)
|
||||
return -ENOENT;
|
||||
|
||||
/* XXX flushy, flushy */
|
||||
ret = 0;
|
||||
|
||||
if (!ret)
|
||||
ret = omap_gem_op_finish(obj, args->op);
|
||||
|
||||
drm_gem_object_unreference_unlocked(obj);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int ioctl_gem_info(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv)
|
||||
{
|
||||
struct drm_omap_gem_info *args = data;
|
||||
struct drm_gem_object *obj;
|
||||
int ret = 0;
|
||||
|
||||
VERB("%p:%p: handle=%d", dev, file_priv, args->handle);
|
||||
|
||||
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
|
||||
if (!obj)
|
||||
return -ENOENT;
|
||||
|
||||
args->size = omap_gem_mmap_size(obj);
|
||||
args->offset = omap_gem_mmap_offset(obj);
|
||||
|
||||
drm_gem_object_unreference_unlocked(obj);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static const struct drm_ioctl_desc ioctls[DRM_COMMAND_END - DRM_COMMAND_BASE] = {
|
||||
DRM_IOCTL_DEF_DRV(OMAP_GET_PARAM, ioctl_get_param, DRM_UNLOCKED|DRM_AUTH),
|
||||
DRM_IOCTL_DEF_DRV(OMAP_SET_PARAM, ioctl_set_param, DRM_UNLOCKED|DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
|
||||
DRM_IOCTL_DEF_DRV(OMAP_GEM_NEW, ioctl_gem_new, DRM_UNLOCKED|DRM_AUTH),
|
||||
DRM_IOCTL_DEF_DRV(OMAP_GEM_CPU_PREP, ioctl_gem_cpu_prep, DRM_UNLOCKED|DRM_AUTH),
|
||||
DRM_IOCTL_DEF_DRV(OMAP_GEM_CPU_FINI, ioctl_gem_cpu_fini, DRM_UNLOCKED|DRM_AUTH),
|
||||
DRM_IOCTL_DEF_DRV(OMAP_GEM_INFO, ioctl_gem_info, DRM_UNLOCKED|DRM_AUTH),
|
||||
};
|
||||
|
||||
/*
|
||||
* drm driver funcs
|
||||
*/
|
||||
|
||||
/**
|
||||
* load - setup chip and create an initial config
|
||||
* @dev: DRM device
|
||||
* @flags: startup flags
|
||||
*
|
||||
* The driver load routine has to do several things:
|
||||
* - initialize the memory manager
|
||||
* - allocate initial config memory
|
||||
* - setup the DRM framebuffer with the allocated memory
|
||||
*/
|
||||
static int dev_load(struct drm_device *dev, unsigned long flags)
|
||||
{
|
||||
struct omap_drm_platform_data *pdata = dev->dev->platform_data;
|
||||
struct omap_drm_private *priv;
|
||||
int ret;
|
||||
|
||||
DBG("load: dev=%p", dev);
|
||||
|
||||
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
|
||||
if (!priv)
|
||||
return -ENOMEM;
|
||||
|
||||
priv->omaprev = pdata->omaprev;
|
||||
|
||||
dev->dev_private = priv;
|
||||
|
||||
priv->wq = alloc_ordered_workqueue("omapdrm", 0);
|
||||
|
||||
INIT_LIST_HEAD(&priv->obj_list);
|
||||
|
||||
omap_gem_init(dev);
|
||||
|
||||
ret = omap_modeset_init(dev);
|
||||
if (ret) {
|
||||
dev_err(dev->dev, "omap_modeset_init failed: ret=%d\n", ret);
|
||||
dev->dev_private = NULL;
|
||||
kfree(priv);
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = drm_vblank_init(dev, priv->num_crtcs);
|
||||
if (ret)
|
||||
dev_warn(dev->dev, "could not init vblank\n");
|
||||
|
||||
priv->fbdev = omap_fbdev_init(dev);
|
||||
if (!priv->fbdev) {
|
||||
dev_warn(dev->dev, "omap_fbdev_init failed\n");
|
||||
/* well, limp along without an fbdev.. maybe X11 will work? */
|
||||
}
|
||||
|
||||
/* store off drm_device for use in pm ops */
|
||||
dev_set_drvdata(dev->dev, dev);
|
||||
|
||||
drm_kms_helper_poll_init(dev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int dev_unload(struct drm_device *dev)
|
||||
{
|
||||
struct omap_drm_private *priv = dev->dev_private;
|
||||
int i;
|
||||
|
||||
DBG("unload: dev=%p", dev);
|
||||
|
||||
drm_kms_helper_poll_fini(dev);
|
||||
|
||||
omap_fbdev_free(dev);
|
||||
|
||||
/* flush crtcs so the fbs get released */
|
||||
for (i = 0; i < priv->num_crtcs; i++)
|
||||
omap_crtc_flush(priv->crtcs[i]);
|
||||
|
||||
omap_modeset_free(dev);
|
||||
omap_gem_deinit(dev);
|
||||
|
||||
destroy_workqueue(priv->wq);
|
||||
|
||||
drm_vblank_cleanup(dev);
|
||||
omap_drm_irq_uninstall(dev);
|
||||
|
||||
kfree(dev->dev_private);
|
||||
dev->dev_private = NULL;
|
||||
|
||||
dev_set_drvdata(dev->dev, NULL);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int dev_open(struct drm_device *dev, struct drm_file *file)
|
||||
{
|
||||
file->driver_priv = NULL;
|
||||
|
||||
DBG("open: dev=%p, file=%p", dev, file);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* lastclose - clean up after all DRM clients have exited
|
||||
* @dev: DRM device
|
||||
*
|
||||
* Take care of cleaning up after all DRM clients have exited. In the
|
||||
* mode setting case, we want to restore the kernel's initial mode (just
|
||||
* in case the last client left us in a bad state).
|
||||
*/
|
||||
static void dev_lastclose(struct drm_device *dev)
|
||||
{
|
||||
int i;
|
||||
|
||||
/* we don't support vga-switcheroo.. so just make sure the fbdev
|
||||
* mode is active
|
||||
*/
|
||||
struct omap_drm_private *priv = dev->dev_private;
|
||||
int ret;
|
||||
|
||||
DBG("lastclose: dev=%p", dev);
|
||||
|
||||
if (priv->rotation_prop) {
|
||||
/* need to restore default rotation state.. not sure
|
||||
* if there is a cleaner way to restore properties to
|
||||
* default state? Maybe a flag that properties should
|
||||
* automatically be restored to default state on
|
||||
* lastclose?
|
||||
*/
|
||||
for (i = 0; i < priv->num_crtcs; i++) {
|
||||
drm_object_property_set_value(&priv->crtcs[i]->base,
|
||||
priv->rotation_prop, 0);
|
||||
}
|
||||
|
||||
for (i = 0; i < priv->num_planes; i++) {
|
||||
drm_object_property_set_value(&priv->planes[i]->base,
|
||||
priv->rotation_prop, 0);
|
||||
}
|
||||
}
|
||||
|
||||
ret = drm_fb_helper_restore_fbdev_mode_unlocked(priv->fbdev);
|
||||
if (ret)
|
||||
DBG("failed to restore crtc mode");
|
||||
}
|
||||
|
||||
static void dev_preclose(struct drm_device *dev, struct drm_file *file)
|
||||
{
|
||||
DBG("preclose: dev=%p", dev);
|
||||
}
|
||||
|
||||
static void dev_postclose(struct drm_device *dev, struct drm_file *file)
|
||||
{
|
||||
DBG("postclose: dev=%p, file=%p", dev, file);
|
||||
}
|
||||
|
||||
static const struct vm_operations_struct omap_gem_vm_ops = {
|
||||
.fault = omap_gem_fault,
|
||||
.open = drm_gem_vm_open,
|
||||
.close = drm_gem_vm_close,
|
||||
};
|
||||
|
||||
static const struct file_operations omapdriver_fops = {
|
||||
.owner = THIS_MODULE,
|
||||
.open = drm_open,
|
||||
.unlocked_ioctl = drm_ioctl,
|
||||
.release = drm_release,
|
||||
.mmap = omap_gem_mmap,
|
||||
.poll = drm_poll,
|
||||
.read = drm_read,
|
||||
.llseek = noop_llseek,
|
||||
};
|
||||
|
||||
static struct drm_driver omap_drm_driver = {
|
||||
.driver_features =
|
||||
DRIVER_HAVE_IRQ | DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
|
||||
.load = dev_load,
|
||||
.unload = dev_unload,
|
||||
.open = dev_open,
|
||||
.lastclose = dev_lastclose,
|
||||
.preclose = dev_preclose,
|
||||
.postclose = dev_postclose,
|
||||
.set_busid = drm_platform_set_busid,
|
||||
.get_vblank_counter = drm_vblank_count,
|
||||
.enable_vblank = omap_irq_enable_vblank,
|
||||
.disable_vblank = omap_irq_disable_vblank,
|
||||
.irq_preinstall = omap_irq_preinstall,
|
||||
.irq_postinstall = omap_irq_postinstall,
|
||||
.irq_uninstall = omap_irq_uninstall,
|
||||
.irq_handler = omap_irq_handler,
|
||||
#ifdef CONFIG_DEBUG_FS
|
||||
.debugfs_init = omap_debugfs_init,
|
||||
.debugfs_cleanup = omap_debugfs_cleanup,
|
||||
#endif
|
||||
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
|
||||
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
|
||||
.gem_prime_export = omap_gem_prime_export,
|
||||
.gem_prime_import = omap_gem_prime_import,
|
||||
.gem_free_object = omap_gem_free_object,
|
||||
.gem_vm_ops = &omap_gem_vm_ops,
|
||||
.dumb_create = omap_gem_dumb_create,
|
||||
.dumb_map_offset = omap_gem_dumb_map_offset,
|
||||
.dumb_destroy = drm_gem_dumb_destroy,
|
||||
.ioctls = ioctls,
|
||||
.num_ioctls = DRM_OMAP_NUM_IOCTLS,
|
||||
.fops = &omapdriver_fops,
|
||||
.name = DRIVER_NAME,
|
||||
.desc = DRIVER_DESC,
|
||||
.date = DRIVER_DATE,
|
||||
.major = DRIVER_MAJOR,
|
||||
.minor = DRIVER_MINOR,
|
||||
.patchlevel = DRIVER_PATCHLEVEL,
|
||||
};
|
||||
|
||||
static int pdev_suspend(struct platform_device *pDevice, pm_message_t state)
|
||||
{
|
||||
DBG("");
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int pdev_resume(struct platform_device *device)
|
||||
{
|
||||
DBG("");
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void pdev_shutdown(struct platform_device *device)
|
||||
{
|
||||
DBG("");
|
||||
}
|
||||
|
||||
static int pdev_probe(struct platform_device *device)
|
||||
{
|
||||
int r;
|
||||
|
||||
if (!omapdss_is_initialized())
|
||||
return -EPROBE_DEFER;
|
||||
|
||||
omap_crtc_pre_init();
|
||||
|
||||
r = omap_connect_dssdevs();
|
||||
if (r) {
|
||||
omap_crtc_pre_uninit();
|
||||
return r;
|
||||
}
|
||||
|
||||
DBG("%s", device->name);
|
||||
return drm_platform_init(&omap_drm_driver, device);
|
||||
}
|
||||
|
||||
static int pdev_remove(struct platform_device *device)
|
||||
{
|
||||
DBG("");
|
||||
|
||||
drm_put_dev(platform_get_drvdata(device));
|
||||
|
||||
omap_disconnect_dssdevs();
|
||||
omap_crtc_pre_uninit();
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_PM
|
||||
static const struct dev_pm_ops omapdrm_pm_ops = {
|
||||
.resume = omap_gem_resume,
|
||||
};
|
||||
#endif
|
||||
|
||||
static struct platform_driver pdev = {
|
||||
.driver = {
|
||||
.name = DRIVER_NAME,
|
||||
.owner = THIS_MODULE,
|
||||
#ifdef CONFIG_PM
|
||||
.pm = &omapdrm_pm_ops,
|
||||
#endif
|
||||
},
|
||||
.probe = pdev_probe,
|
||||
.remove = pdev_remove,
|
||||
.suspend = pdev_suspend,
|
||||
.resume = pdev_resume,
|
||||
.shutdown = pdev_shutdown,
|
||||
};
|
||||
|
||||
static int __init omap_drm_init(void)
|
||||
{
|
||||
int r;
|
||||
|
||||
DBG("init");
|
||||
|
||||
r = platform_driver_register(&omap_dmm_driver);
|
||||
if (r) {
|
||||
pr_err("DMM driver registration failed\n");
|
||||
return r;
|
||||
}
|
||||
|
||||
r = platform_driver_register(&pdev);
|
||||
if (r) {
|
||||
pr_err("omapdrm driver registration failed\n");
|
||||
platform_driver_unregister(&omap_dmm_driver);
|
||||
return r;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void __exit omap_drm_fini(void)
|
||||
{
|
||||
DBG("fini");
|
||||
|
||||
platform_driver_unregister(&pdev);
|
||||
|
||||
platform_driver_unregister(&omap_dmm_driver);
|
||||
}
|
||||
|
||||
/* need late_initcall() so we load after dss_driver's are loaded */
|
||||
late_initcall(omap_drm_init);
|
||||
module_exit(omap_drm_fini);
|
||||
|
||||
MODULE_AUTHOR("Rob Clark <rob@ti.com>");
|
||||
MODULE_DESCRIPTION("OMAP DRM Display Driver");
|
||||
MODULE_ALIAS("platform:" DRIVER_NAME);
|
||||
MODULE_LICENSE("GPL v2");
|
||||
299
drivers/gpu/drm/omapdrm/omap_drv.h
Normal file
@ -0,0 +1,299 @@
/*
|
||||
* drivers/gpu/drm/omapdrm/omap_drv.h
|
||||
*
|
||||
* Copyright (C) 2011 Texas Instruments
|
||||
* Author: Rob Clark <rob@ti.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 as published by
|
||||
* the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License along with
|
||||
* this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#ifndef __OMAP_DRV_H__
|
||||
#define __OMAP_DRV_H__
|
||||
|
||||
#include <video/omapdss.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/types.h>
|
||||
#include <drm/drmP.h>
|
||||
#include <drm/drm_crtc_helper.h>
|
||||
#include <drm/omap_drm.h>
|
||||
#include <drm/drm_gem.h>
|
||||
#include <linux/platform_data/omap_drm.h>
|
||||
|
||||
|
||||
#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
|
||||
#define VERB(fmt, ...) if (0) DRM_DEBUG(fmt, ##__VA_ARGS__) /* verbose debug */
|
||||
|
||||
#define MODULE_NAME "omapdrm"
|
||||
|
||||
/* max # of mapper-id's that can be assigned.. todo, come up with a better
|
||||
* (but still inexpensive) way to store/access per-buffer mapper private
|
||||
* data..
|
||||
*/
|
||||
#define MAX_MAPPERS 2
|
||||
|
||||
/* parameters which describe (unrotated) coordinates of scanout within a fb: */
|
||||
struct omap_drm_window {
|
||||
uint32_t rotation;
|
||||
int32_t crtc_x, crtc_y; /* signed because can be offscreen */
|
||||
uint32_t crtc_w, crtc_h;
|
||||
uint32_t src_x, src_y;
|
||||
uint32_t src_w, src_h;
|
||||
};
|
||||
|
||||
/* Once GO bit is set, we can't make further updates to shadowed registers
|
||||
* until the GO bit is cleared. So various parts in the kms code that need
|
||||
* to update shadowed registers queue up a pair of callbacks, pre_apply
|
||||
* which is called before setting GO bit, and post_apply that is called
|
||||
* after GO bit is cleared. The crtc manages the queuing, and everyone
|
||||
* else goes thru omap_crtc_apply() using these callbacks so that the
|
||||
* code which has to deal w/ GO bit state is centralized.
|
||||
*/
|
||||
struct omap_drm_apply {
|
||||
struct list_head pending_node, queued_node;
|
||||
bool queued;
|
||||
void (*pre_apply)(struct omap_drm_apply *apply);
|
||||
void (*post_apply)(struct omap_drm_apply *apply);
|
||||
};
|
||||
|
||||
/* For transiently registering for different DSS irqs that various parts
|
||||
* of the KMS code need during setup/configuration. These are not
|
||||
* necessarily the same as what drm_vblank_get/put() are requesting, and
|
||||
* the hysteresis in drm_vblank_put() is not necessarily desirable for
|
||||
* internal housekeeping related irq usage.
|
||||
*/
|
||||
struct omap_drm_irq {
|
||||
struct list_head node;
|
||||
uint32_t irqmask;
|
||||
bool registered;
|
||||
void (*irq)(struct omap_drm_irq *irq, uint32_t irqstatus);
|
||||
};
|
||||
|
||||
/* For KMS code that needs to wait for a certain # of IRQs:
|
||||
*/
|
||||
struct omap_irq_wait;
|
||||
struct omap_irq_wait *omap_irq_wait_init(struct drm_device *dev,
|
||||
uint32_t irqmask, int count);
|
||||
int omap_irq_wait(struct drm_device *dev, struct omap_irq_wait *wait,
|
||||
unsigned long timeout);
|
||||
|
||||
struct omap_drm_private {
|
||||
uint32_t omaprev;
|
||||
|
||||
unsigned int num_crtcs;
|
||||
struct drm_crtc *crtcs[8];
|
||||
|
||||
unsigned int num_planes;
|
||||
struct drm_plane *planes[8];
|
||||
|
||||
unsigned int num_encoders;
|
||||
struct drm_encoder *encoders[8];
|
||||
|
||||
unsigned int num_connectors;
|
||||
struct drm_connector *connectors[8];
|
||||
|
||||
struct drm_fb_helper *fbdev;
|
||||
|
||||
struct workqueue_struct *wq;
|
||||
|
||||
/* list of GEM objects: */
|
||||
struct list_head obj_list;
|
||||
|
||||
bool has_dmm;
|
||||
|
||||
/* properties: */
|
||||
struct drm_property *rotation_prop;
|
||||
struct drm_property *zorder_prop;
|
||||
|
||||
/* irq handling: */
|
||||
struct list_head irq_list; /* list of omap_drm_irq */
|
||||
uint32_t vblank_mask; /* irq bits set for userspace vblank */
|
||||
struct omap_drm_irq error_handler;
|
||||
};
|
||||
|
||||
|
||||
#ifdef CONFIG_DEBUG_FS
|
||||
int omap_debugfs_init(struct drm_minor *minor);
|
||||
void omap_debugfs_cleanup(struct drm_minor *minor);
|
||||
void omap_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m);
|
||||
void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m);
|
||||
void omap_gem_describe_objects(struct list_head *list, struct seq_file *m);
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_PM
|
||||
int omap_gem_resume(struct device *dev);
|
||||
#endif
|
||||
|
||||
int omap_irq_enable_vblank(struct drm_device *dev, int crtc_id);
void omap_irq_disable_vblank(struct drm_device *dev, int crtc_id);
irqreturn_t omap_irq_handler(int irq, void *arg);
void omap_irq_preinstall(struct drm_device *dev);
int omap_irq_postinstall(struct drm_device *dev);
void omap_irq_uninstall(struct drm_device *dev);
void __omap_irq_register(struct drm_device *dev, struct omap_drm_irq *irq);
void __omap_irq_unregister(struct drm_device *dev, struct omap_drm_irq *irq);
void omap_irq_register(struct drm_device *dev, struct omap_drm_irq *irq);
void omap_irq_unregister(struct drm_device *dev, struct omap_drm_irq *irq);
int omap_drm_irq_uninstall(struct drm_device *dev);
int omap_drm_irq_install(struct drm_device *dev);

struct drm_fb_helper *omap_fbdev_init(struct drm_device *dev);
void omap_fbdev_free(struct drm_device *dev);

const struct omap_video_timings *omap_crtc_timings(struct drm_crtc *crtc);
enum omap_channel omap_crtc_channel(struct drm_crtc *crtc);
int omap_crtc_apply(struct drm_crtc *crtc,
		struct omap_drm_apply *apply);
void omap_crtc_pre_init(void);
void omap_crtc_pre_uninit(void);
struct drm_crtc *omap_crtc_init(struct drm_device *dev,
		struct drm_plane *plane, enum omap_channel channel, int id);
void omap_crtc_flush(struct drm_crtc *crtc);

struct drm_plane *omap_plane_init(struct drm_device *dev,
		int plane_id, bool private_plane);
int omap_plane_dpms(struct drm_plane *plane, int mode);
int omap_plane_mode_set(struct drm_plane *plane,
		struct drm_crtc *crtc, struct drm_framebuffer *fb,
		int crtc_x, int crtc_y,
		unsigned int crtc_w, unsigned int crtc_h,
		uint32_t src_x, uint32_t src_y,
		uint32_t src_w, uint32_t src_h,
		void (*fxn)(void *), void *arg);
void omap_plane_install_properties(struct drm_plane *plane,
		struct drm_mode_object *obj);
int omap_plane_set_property(struct drm_plane *plane,
		struct drm_property *property, uint64_t val);

struct drm_encoder *omap_encoder_init(struct drm_device *dev,
		struct omap_dss_device *dssdev);
int omap_encoder_set_enabled(struct drm_encoder *encoder, bool enabled);
int omap_encoder_update(struct drm_encoder *encoder,
		struct omap_overlay_manager *mgr,
		struct omap_video_timings *timings);

struct drm_connector *omap_connector_init(struct drm_device *dev,
		int connector_type, struct omap_dss_device *dssdev,
		struct drm_encoder *encoder);
struct drm_encoder *omap_connector_attached_encoder(
		struct drm_connector *connector);
void omap_connector_flush(struct drm_connector *connector,
		int x, int y, int w, int h);
bool omap_connector_get_hdmi_mode(struct drm_connector *connector);

void copy_timings_omap_to_drm(struct drm_display_mode *mode,
		struct omap_video_timings *timings);
void copy_timings_drm_to_omap(struct omap_video_timings *timings,
		struct drm_display_mode *mode);

uint32_t omap_framebuffer_get_formats(uint32_t *pixel_formats,
		uint32_t max_formats, enum omap_color_mode supported_modes);
struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev,
		struct drm_file *file, struct drm_mode_fb_cmd2 *mode_cmd);
struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
		struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos);
struct drm_gem_object *omap_framebuffer_bo(struct drm_framebuffer *fb, int p);
int omap_framebuffer_pin(struct drm_framebuffer *fb);
int omap_framebuffer_unpin(struct drm_framebuffer *fb);
void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
		struct omap_drm_window *win, struct omap_overlay_info *info);
struct drm_connector *omap_framebuffer_get_next_connector(
		struct drm_framebuffer *fb, struct drm_connector *from);
void omap_framebuffer_flush(struct drm_framebuffer *fb,
		int x, int y, int w, int h);

void omap_gem_init(struct drm_device *dev);
void omap_gem_deinit(struct drm_device *dev);

struct drm_gem_object *omap_gem_new(struct drm_device *dev,
		union omap_gem_size gsize, uint32_t flags);
int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		union omap_gem_size gsize, uint32_t flags, uint32_t *handle);
void omap_gem_free_object(struct drm_gem_object *obj);
void *omap_gem_vaddr(struct drm_gem_object *obj);
int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset);
int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args);
int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma);
int omap_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma);
int omap_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
int omap_gem_op_start(struct drm_gem_object *obj, enum omap_gem_op op);
int omap_gem_op_finish(struct drm_gem_object *obj, enum omap_gem_op op);
int omap_gem_op_sync(struct drm_gem_object *obj, enum omap_gem_op op);
int omap_gem_op_async(struct drm_gem_object *obj, enum omap_gem_op op,
		void (*fxn)(void *arg), void *arg);
int omap_gem_roll(struct drm_gem_object *obj, uint32_t roll);
void omap_gem_cpu_sync(struct drm_gem_object *obj, int pgoff);
void omap_gem_dma_sync(struct drm_gem_object *obj,
		enum dma_data_direction dir);
int omap_gem_get_paddr(struct drm_gem_object *obj,
		dma_addr_t *paddr, bool remap);
int omap_gem_put_paddr(struct drm_gem_object *obj);
int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
		bool remap);
int omap_gem_put_pages(struct drm_gem_object *obj);
uint32_t omap_gem_flags(struct drm_gem_object *obj);
int omap_gem_rotated_paddr(struct drm_gem_object *obj, uint32_t orient,
		int x, int y, dma_addr_t *paddr);
uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj);
size_t omap_gem_mmap_size(struct drm_gem_object *obj);
int omap_gem_tiled_size(struct drm_gem_object *obj, uint16_t *w, uint16_t *h);
int omap_gem_tiled_stride(struct drm_gem_object *obj, uint32_t orient);

struct dma_buf *omap_gem_prime_export(struct drm_device *dev,
		struct drm_gem_object *obj, int flags);
struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
		struct dma_buf *buffer);

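/* Illustrative sketch (pitch and height are hypothetical variables):
 * allocating a write-combined scanout buffer and pinning it for DMA,
 * roughly the way the fbdev code does with the helpers declared above:
 *
 *	union omap_gem_size gsize = { .bytes = PAGE_ALIGN(pitch * height) };
 *	struct drm_gem_object *bo;
 *	dma_addr_t paddr;
 *
 *	bo = omap_gem_new(dev, gsize, OMAP_BO_SCANOUT | OMAP_BO_WC);
 *	if (bo && !omap_gem_get_paddr(bo, &paddr, true)) {
 *		... program the overlay with paddr ...
 *		omap_gem_put_paddr(bo);
 *	}
 */
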
static inline int align_pitch(int pitch, int width, int bpp)
{
	int bytespp = (bpp + 7) / 8;
	/* in case someone tries to feed us a completely bogus stride: */
	pitch = max(pitch, width * bytespp);
	/* PVR needs alignment to 8 pixels.. right now that is the most
	 * restrictive stride requirement..
	 */
	return ALIGN(pitch, 8 * bytespp);
}

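/* Worked example (the numbers are assumptions, not from the driver):
 * align_pitch(0, 1366, 32) gives bytespp = 4, pitch = max(0, 1366 * 4) = 5464,
 * and ALIGN(5464, 32) = 5472, i.e. the stride is padded out to an 8-pixel
 * multiple (5472 / 4 = 1368 pixels).
 */
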
/* map crtc to vblank mask */
uint32_t pipe2vbl(struct drm_crtc *crtc);
struct omap_dss_device *omap_encoder_get_dssdev(struct drm_encoder *encoder);

/* should these be made into common util helpers?
 */

static inline int objects_lookup(struct drm_device *dev,
		struct drm_file *filp, uint32_t pixel_format,
		struct drm_gem_object **bos, uint32_t *handles)
{
	int i, n = drm_format_num_planes(pixel_format);

	for (i = 0; i < n; i++) {
		bos[i] = drm_gem_object_lookup(dev, filp, handles[i]);
		if (!bos[i])
			goto fail;
	}

	return 0;

fail:
	while (--i >= 0)
		drm_gem_object_unreference_unlocked(bos[i]);

	return -ENOENT;
}

#endif /* __OMAP_DRV_H__ */

218
drivers/gpu/drm/omapdrm/omap_encoder.c
Normal file
@@ -0,0 +1,218 @@
/*
|
||||
* drivers/gpu/drm/omapdrm/omap_encoder.c
|
||||
*
|
||||
* Copyright (C) 2011 Texas Instruments
|
||||
* Author: Rob Clark <rob@ti.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 as published by
|
||||
* the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License along with
|
||||
* this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include <drm/drm_edid.h>
|
||||
|
||||
#include "omap_drv.h"
|
||||
|
||||
#include "drm_crtc.h"
|
||||
#include "drm_crtc_helper.h"
|
||||
|
||||
#include <linux/list.h>
|
||||
|
||||
|
||||
/*
|
||||
* encoder funcs
|
||||
*/
|
||||
|
||||
#define to_omap_encoder(x) container_of(x, struct omap_encoder, base)
|
||||
|
||||
/* The encoder and connector both map to same dssdev.. the encoder
|
||||
* handles the 'active' parts, ie. anything that modifies the state
|
||||
* of the hw, and the connector handles the 'read-only' parts, like
|
||||
* detecting connection and reading edid.
|
||||
*/
|
||||
struct omap_encoder {
|
||||
struct drm_encoder base;
|
||||
struct omap_dss_device *dssdev;
|
||||
};
|
||||
|
||||
struct omap_dss_device *omap_encoder_get_dssdev(struct drm_encoder *encoder)
|
||||
{
|
||||
struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
|
||||
|
||||
return omap_encoder->dssdev;
|
||||
}
|
||||
|
||||
static void omap_encoder_destroy(struct drm_encoder *encoder)
|
||||
{
|
||||
struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
|
||||
|
||||
omap_encoder_set_enabled(encoder, false);
|
||||
|
||||
drm_encoder_cleanup(encoder);
|
||||
kfree(omap_encoder);
|
||||
}
|
||||
|
||||
static const struct drm_encoder_funcs omap_encoder_funcs = {
|
||||
.destroy = omap_encoder_destroy,
|
||||
};
|
||||
|
||||
/*
|
||||
* The CRTC drm_crtc_helper_set_mode() doesn't really give us the right
|
||||
* order.. the easiest way to work around this for now is to make all
|
||||
* the encoder-helper's no-op's and have the omap_crtc code take care
|
||||
* of the sequencing and call us in the right points.
|
||||
*
|
||||
* Eventually to handle connecting CRTCs to different encoders properly,
|
||||
* either the CRTC helpers need to change or we need to replace
|
||||
* drm_crtc_helper_set_mode(), but lets wait until atomic-modeset for
|
||||
* that.
|
||||
*/
|
||||
|
||||
static void omap_encoder_dpms(struct drm_encoder *encoder, int mode)
|
||||
{
|
||||
}
|
||||
|
||||
static bool omap_encoder_mode_fixup(struct drm_encoder *encoder,
|
||||
const struct drm_display_mode *mode,
|
||||
struct drm_display_mode *adjusted_mode)
|
||||
{
|
||||
return true;
|
||||
}
|
||||
|
||||
static void omap_encoder_mode_set(struct drm_encoder *encoder,
|
||||
struct drm_display_mode *mode,
|
||||
struct drm_display_mode *adjusted_mode)
|
||||
{
|
||||
struct drm_device *dev = encoder->dev;
|
||||
struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
|
||||
struct omap_dss_device *dssdev = omap_encoder->dssdev;
|
||||
struct drm_connector *connector;
|
||||
bool hdmi_mode;
|
||||
int r;
|
||||
|
||||
hdmi_mode = false;
|
||||
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
|
||||
if (connector->encoder == encoder) {
|
||||
hdmi_mode = omap_connector_get_hdmi_mode(connector);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (dssdev->driver->set_hdmi_mode)
|
||||
dssdev->driver->set_hdmi_mode(dssdev, hdmi_mode);
|
||||
|
||||
if (hdmi_mode && dssdev->driver->set_hdmi_infoframe) {
|
||||
struct hdmi_avi_infoframe avi;
|
||||
|
||||
r = drm_hdmi_avi_infoframe_from_display_mode(&avi, adjusted_mode);
|
||||
if (r == 0)
|
||||
dssdev->driver->set_hdmi_infoframe(dssdev, &avi);
|
||||
}
|
||||
}
|
||||
|
||||
static void omap_encoder_prepare(struct drm_encoder *encoder)
|
||||
{
|
||||
}
|
||||
|
||||
static void omap_encoder_commit(struct drm_encoder *encoder)
|
||||
{
|
||||
}
|
||||
|
||||
static const struct drm_encoder_helper_funcs omap_encoder_helper_funcs = {
|
||||
.dpms = omap_encoder_dpms,
|
||||
.mode_fixup = omap_encoder_mode_fixup,
|
||||
.mode_set = omap_encoder_mode_set,
|
||||
.prepare = omap_encoder_prepare,
|
||||
.commit = omap_encoder_commit,
|
||||
};
|
||||
|
||||
/*
|
||||
* Instead of relying on the helpers for modeset, the omap_crtc code
|
||||
* calls these functions in the proper sequence.
|
||||
*/
|
||||
|
||||
int omap_encoder_set_enabled(struct drm_encoder *encoder, bool enabled)
|
||||
{
|
||||
struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
|
||||
struct omap_dss_device *dssdev = omap_encoder->dssdev;
|
||||
struct omap_dss_driver *dssdrv = dssdev->driver;
|
||||
|
||||
if (enabled) {
|
||||
return dssdrv->enable(dssdev);
|
||||
} else {
|
||||
dssdrv->disable(dssdev);
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
int omap_encoder_update(struct drm_encoder *encoder,
|
||||
struct omap_overlay_manager *mgr,
|
||||
struct omap_video_timings *timings)
|
||||
{
|
||||
struct drm_device *dev = encoder->dev;
|
||||
struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
|
||||
struct omap_dss_device *dssdev = omap_encoder->dssdev;
|
||||
struct omap_dss_driver *dssdrv = dssdev->driver;
|
||||
int ret;
|
||||
|
||||
dssdev->src->manager = mgr;
|
||||
|
||||
if (dssdrv->check_timings) {
|
||||
ret = dssdrv->check_timings(dssdev, timings);
|
||||
} else {
|
||||
struct omap_video_timings t = {0};
|
||||
|
||||
dssdrv->get_timings(dssdev, &t);
|
||||
|
||||
if (memcmp(timings, &t, sizeof(struct omap_video_timings)))
|
||||
ret = -EINVAL;
|
||||
else
|
||||
ret = 0;
|
||||
}
|
||||
|
||||
if (ret) {
|
||||
dev_err(dev->dev, "could not set timings: %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (dssdrv->set_timings)
|
||||
dssdrv->set_timings(dssdev, timings);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* initialize encoder */
|
||||
struct drm_encoder *omap_encoder_init(struct drm_device *dev,
|
||||
struct omap_dss_device *dssdev)
|
||||
{
|
||||
struct drm_encoder *encoder = NULL;
|
||||
struct omap_encoder *omap_encoder;
|
||||
|
||||
omap_encoder = kzalloc(sizeof(*omap_encoder), GFP_KERNEL);
|
||||
if (!omap_encoder)
|
||||
goto fail;
|
||||
|
||||
omap_encoder->dssdev = dssdev;
|
||||
|
||||
encoder = &omap_encoder->base;
|
||||
|
||||
drm_encoder_init(dev, encoder, &omap_encoder_funcs,
|
||||
DRM_MODE_ENCODER_TMDS);
|
||||
drm_encoder_helper_add(encoder, &omap_encoder_helper_funcs);
|
||||
|
||||
return encoder;
|
||||
|
||||
fail:
|
||||
if (encoder)
|
||||
omap_encoder_destroy(encoder);
|
||||
|
||||
return NULL;
|
||||
}
|
||||
485
drivers/gpu/drm/omapdrm/omap_fb.c
Normal file
@@ -0,0 +1,485 @@
/*
|
||||
* drivers/gpu/drm/omapdrm/omap_fb.c
|
||||
*
|
||||
* Copyright (C) 2011 Texas Instruments
|
||||
* Author: Rob Clark <rob@ti.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 as published by
|
||||
* the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License along with
|
||||
* this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include "omap_drv.h"
|
||||
#include "omap_dmm_tiler.h"
|
||||
|
||||
#include "drm_crtc.h"
|
||||
#include "drm_crtc_helper.h"
|
||||
|
||||
/*
|
||||
* framebuffer funcs
|
||||
*/
|
||||
|
||||
/* per-format info: */
|
||||
struct format {
|
||||
enum omap_color_mode dss_format;
|
||||
uint32_t pixel_format;
|
||||
struct {
|
||||
int stride_bpp; /* this times width is stride */
|
||||
int sub_y; /* sub-sample in y dimension */
|
||||
} planes[4];
|
||||
bool yuv;
|
||||
};
|
||||
|
||||
static const struct format formats[] = {
|
||||
/* 16bpp [A]RGB: */
|
||||
{ OMAP_DSS_COLOR_RGB16, DRM_FORMAT_RGB565, {{2, 1}}, false }, /* RGB16-565 */
|
||||
{ OMAP_DSS_COLOR_RGB12U, DRM_FORMAT_RGBX4444, {{2, 1}}, false }, /* RGB12x-4444 */
|
||||
{ OMAP_DSS_COLOR_RGBX16, DRM_FORMAT_XRGB4444, {{2, 1}}, false }, /* xRGB12-4444 */
|
||||
{ OMAP_DSS_COLOR_RGBA16, DRM_FORMAT_RGBA4444, {{2, 1}}, false }, /* RGBA12-4444 */
|
||||
{ OMAP_DSS_COLOR_ARGB16, DRM_FORMAT_ARGB4444, {{2, 1}}, false }, /* ARGB16-4444 */
|
||||
{ OMAP_DSS_COLOR_XRGB16_1555, DRM_FORMAT_XRGB1555, {{2, 1}}, false }, /* xRGB15-1555 */
|
||||
{ OMAP_DSS_COLOR_ARGB16_1555, DRM_FORMAT_ARGB1555, {{2, 1}}, false }, /* ARGB16-1555 */
|
||||
/* 24bpp RGB: */
|
||||
{ OMAP_DSS_COLOR_RGB24P, DRM_FORMAT_RGB888, {{3, 1}}, false }, /* RGB24-888 */
|
||||
/* 32bpp [A]RGB: */
|
||||
{ OMAP_DSS_COLOR_RGBX32, DRM_FORMAT_RGBX8888, {{4, 1}}, false }, /* RGBx24-8888 */
|
||||
{ OMAP_DSS_COLOR_RGB24U, DRM_FORMAT_XRGB8888, {{4, 1}}, false }, /* xRGB24-8888 */
|
||||
{ OMAP_DSS_COLOR_RGBA32, DRM_FORMAT_RGBA8888, {{4, 1}}, false }, /* RGBA32-8888 */
|
||||
{ OMAP_DSS_COLOR_ARGB32, DRM_FORMAT_ARGB8888, {{4, 1}}, false }, /* ARGB32-8888 */
|
||||
/* YUV: */
|
||||
{ OMAP_DSS_COLOR_NV12, DRM_FORMAT_NV12, {{1, 1}, {1, 2}}, true },
|
||||
{ OMAP_DSS_COLOR_YUV2, DRM_FORMAT_YUYV, {{2, 1}}, true },
|
||||
{ OMAP_DSS_COLOR_UYVY, DRM_FORMAT_UYVY, {{2, 1}}, true },
|
||||
};
|
||||
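/* Illustrative sketch (the array size and the dss_feat_get_supported_color_modes()
 * call are assumptions for the example): a plane init path could convert the
 * DSS capability bitmask into a fourcc list with the helper defined just below:
 *
 *	uint32_t formats[32];
 *	uint32_t nformats;
 *
 *	nformats = omap_framebuffer_get_formats(formats, ARRAY_SIZE(formats),
 *			dss_feat_get_supported_color_modes(OMAP_DSS_GFX));
 */
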
|
||||
/* convert from overlay's pixel formats bitmask to an array of fourcc's */
|
||||
uint32_t omap_framebuffer_get_formats(uint32_t *pixel_formats,
|
||||
uint32_t max_formats, enum omap_color_mode supported_modes)
|
||||
{
|
||||
uint32_t nformats = 0;
|
||||
int i = 0;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(formats) && nformats < max_formats; i++)
|
||||
if (formats[i].dss_format & supported_modes)
|
||||
pixel_formats[nformats++] = formats[i].pixel_format;
|
||||
|
||||
return nformats;
|
||||
}
|
||||
|
||||
/* per-plane info for the fb: */
|
||||
struct plane {
|
||||
struct drm_gem_object *bo;
|
||||
uint32_t pitch;
|
||||
uint32_t offset;
|
||||
dma_addr_t paddr;
|
||||
};
|
||||
|
||||
#define to_omap_framebuffer(x) container_of(x, struct omap_framebuffer, base)
|
||||
|
||||
struct omap_framebuffer {
|
||||
struct drm_framebuffer base;
|
||||
const struct format *format;
|
||||
struct plane planes[4];
|
||||
};
|
||||
|
||||
static int omap_framebuffer_create_handle(struct drm_framebuffer *fb,
|
||||
struct drm_file *file_priv,
|
||||
unsigned int *handle)
|
||||
{
|
||||
struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
|
||||
return drm_gem_handle_create(file_priv,
|
||||
omap_fb->planes[0].bo, handle);
|
||||
}
|
||||
|
||||
static void omap_framebuffer_destroy(struct drm_framebuffer *fb)
|
||||
{
|
||||
struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
|
||||
int i, n = drm_format_num_planes(fb->pixel_format);
|
||||
|
||||
DBG("destroy: FB ID: %d (%p)", fb->base.id, fb);
|
||||
|
||||
drm_framebuffer_cleanup(fb);
|
||||
|
||||
for (i = 0; i < n; i++) {
|
||||
struct plane *plane = &omap_fb->planes[i];
|
||||
if (plane->bo)
|
||||
drm_gem_object_unreference_unlocked(plane->bo);
|
||||
}
|
||||
|
||||
kfree(omap_fb);
|
||||
}
|
||||
|
||||
static int omap_framebuffer_dirty(struct drm_framebuffer *fb,
|
||||
struct drm_file *file_priv, unsigned flags, unsigned color,
|
||||
struct drm_clip_rect *clips, unsigned num_clips)
|
||||
{
|
||||
int i;
|
||||
|
||||
drm_modeset_lock_all(fb->dev);
|
||||
|
||||
for (i = 0; i < num_clips; i++) {
|
||||
omap_framebuffer_flush(fb, clips[i].x1, clips[i].y1,
|
||||
clips[i].x2 - clips[i].x1,
|
||||
clips[i].y2 - clips[i].y1);
|
||||
}
|
||||
|
||||
drm_modeset_unlock_all(fb->dev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct drm_framebuffer_funcs omap_framebuffer_funcs = {
|
||||
.create_handle = omap_framebuffer_create_handle,
|
||||
.destroy = omap_framebuffer_destroy,
|
||||
.dirty = omap_framebuffer_dirty,
|
||||
};
|
||||
|
||||
static uint32_t get_linear_addr(struct plane *plane,
|
||||
const struct format *format, int n, int x, int y)
|
||||
{
|
||||
uint32_t offset;
|
||||
|
||||
offset = plane->offset +
|
||||
(x * format->planes[n].stride_bpp) +
|
||||
(y * plane->pitch / format->planes[n].sub_y);
|
||||
|
||||
return plane->paddr + offset;
|
||||
}
|
||||
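/* Worked example (values are assumptions, not from the driver): for an
 * XRGB8888 plane (stride_bpp = 4, sub_y = 1) with pitch = 5120 bytes,
 * plane->offset = 0, x = 100, y = 10:
 *
 *	offset = 0 + (100 * 4) + (10 * 5120 / 1) = 51600
 *
 * so the scanout address returned is plane->paddr + 51600.
 */
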
|
||||
/* update ovl info for scanout, handles cases of multi-planar fb's, etc.
|
||||
*/
|
||||
void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
|
||||
struct omap_drm_window *win, struct omap_overlay_info *info)
|
||||
{
|
||||
struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
|
||||
const struct format *format = omap_fb->format;
|
||||
struct plane *plane = &omap_fb->planes[0];
|
||||
uint32_t x, y, orient = 0;
|
||||
|
||||
info->color_mode = format->dss_format;
|
||||
|
||||
info->pos_x = win->crtc_x;
|
||||
info->pos_y = win->crtc_y;
|
||||
info->out_width = win->crtc_w;
|
||||
info->out_height = win->crtc_h;
|
||||
info->width = win->src_w;
|
||||
info->height = win->src_h;
|
||||
|
||||
x = win->src_x;
|
||||
y = win->src_y;
|
||||
|
||||
if (omap_gem_flags(plane->bo) & OMAP_BO_TILED) {
|
||||
uint32_t w = win->src_w;
|
||||
uint32_t h = win->src_h;
|
||||
|
||||
switch (win->rotation & 0xf) {
|
||||
default:
|
||||
dev_err(fb->dev->dev, "invalid rotation: %02x",
|
||||
(uint32_t)win->rotation);
|
||||
/* fallthru to default to no rotation */
|
||||
case 0:
|
||||
case BIT(DRM_ROTATE_0):
|
||||
orient = 0;
|
||||
break;
|
||||
case BIT(DRM_ROTATE_90):
|
||||
orient = MASK_XY_FLIP | MASK_X_INVERT;
|
||||
break;
|
||||
case BIT(DRM_ROTATE_180):
|
||||
orient = MASK_X_INVERT | MASK_Y_INVERT;
|
||||
break;
|
||||
case BIT(DRM_ROTATE_270):
|
||||
orient = MASK_XY_FLIP | MASK_Y_INVERT;
|
||||
break;
|
||||
}
|
||||
|
||||
if (win->rotation & BIT(DRM_REFLECT_X))
|
||||
orient ^= MASK_X_INVERT;
|
||||
|
||||
if (win->rotation & BIT(DRM_REFLECT_Y))
|
||||
orient ^= MASK_Y_INVERT;
|
||||
|
||||
/* adjust x,y offset for flip/invert: */
|
||||
if (orient & MASK_XY_FLIP)
|
||||
swap(w, h);
|
||||
if (orient & MASK_Y_INVERT)
|
||||
y += h - 1;
|
||||
if (orient & MASK_X_INVERT)
|
||||
x += w - 1;
|
||||
|
||||
omap_gem_rotated_paddr(plane->bo, orient, x, y, &info->paddr);
|
||||
info->rotation_type = OMAP_DSS_ROT_TILER;
|
||||
info->screen_width = omap_gem_tiled_stride(plane->bo, orient);
|
||||
} else {
|
||||
switch (win->rotation & 0xf) {
|
||||
case 0:
|
||||
case BIT(DRM_ROTATE_0):
|
||||
/* OK */
|
||||
break;
|
||||
|
||||
default:
|
||||
dev_warn(fb->dev->dev,
|
||||
"rotation '%d' ignored for non-tiled fb\n",
|
||||
win->rotation);
|
||||
win->rotation = 0;
|
||||
break;
|
||||
}
|
||||
|
||||
info->paddr = get_linear_addr(plane, format, 0, x, y);
|
||||
info->rotation_type = OMAP_DSS_ROT_DMA;
|
||||
info->screen_width = plane->pitch;
|
||||
}
|
||||
|
||||
/* convert to pixels: */
|
||||
info->screen_width /= format->planes[0].stride_bpp;
|
||||
|
||||
if (format->dss_format == OMAP_DSS_COLOR_NV12) {
|
||||
plane = &omap_fb->planes[1];
|
||||
|
||||
if (info->rotation_type == OMAP_DSS_ROT_TILER) {
|
||||
WARN_ON(!(omap_gem_flags(plane->bo) & OMAP_BO_TILED));
|
||||
omap_gem_rotated_paddr(plane->bo, orient,
|
||||
x/2, y/2, &info->p_uv_addr);
|
||||
} else {
|
||||
info->p_uv_addr = get_linear_addr(plane, format, 1, x, y);
|
||||
}
|
||||
} else {
|
||||
info->p_uv_addr = 0;
|
||||
}
|
||||
}
|
||||
|
||||
/* pin, prepare for scanout: */
|
||||
int omap_framebuffer_pin(struct drm_framebuffer *fb)
|
||||
{
|
||||
struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
|
||||
int ret, i, n = drm_format_num_planes(fb->pixel_format);
|
||||
|
||||
for (i = 0; i < n; i++) {
|
||||
struct plane *plane = &omap_fb->planes[i];
|
||||
ret = omap_gem_get_paddr(plane->bo, &plane->paddr, true);
|
||||
if (ret)
|
||||
goto fail;
|
||||
omap_gem_dma_sync(plane->bo, DMA_TO_DEVICE);
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
fail:
|
||||
for (i--; i >= 0; i--) {
|
||||
struct plane *plane = &omap_fb->planes[i];
|
||||
omap_gem_put_paddr(plane->bo);
|
||||
plane->paddr = 0;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* unpin, no longer being scanned out: */
|
||||
int omap_framebuffer_unpin(struct drm_framebuffer *fb)
|
||||
{
|
||||
struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
|
||||
int ret, i, n = drm_format_num_planes(fb->pixel_format);
|
||||
|
||||
for (i = 0; i < n; i++) {
|
||||
struct plane *plane = &omap_fb->planes[i];
|
||||
ret = omap_gem_put_paddr(plane->bo);
|
||||
if (ret)
|
||||
goto fail;
|
||||
plane->paddr = 0;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
fail:
|
||||
return ret;
|
||||
}
|
||||
|
||||
struct drm_gem_object *omap_framebuffer_bo(struct drm_framebuffer *fb, int p)
|
||||
{
|
||||
struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
|
||||
if (p >= drm_format_num_planes(fb->pixel_format))
|
||||
return NULL;
|
||||
return omap_fb->planes[p].bo;
|
||||
}
|
||||
|
||||
/* iterate thru all the connectors, returning ones that are attached
|
||||
* to the same fb..
|
||||
*/
|
||||
struct drm_connector *omap_framebuffer_get_next_connector(
|
||||
struct drm_framebuffer *fb, struct drm_connector *from)
|
||||
{
|
||||
struct drm_device *dev = fb->dev;
|
||||
struct list_head *connector_list = &dev->mode_config.connector_list;
|
||||
struct drm_connector *connector = from;
|
||||
|
||||
if (!from)
|
||||
return list_first_entry_or_null(connector_list, typeof(*from),
|
||||
head);
|
||||
|
||||
list_for_each_entry_from(connector, connector_list, head) {
|
||||
if (connector != from) {
|
||||
struct drm_encoder *encoder = connector->encoder;
|
||||
struct drm_crtc *crtc = encoder ? encoder->crtc : NULL;
|
||||
if (crtc && crtc->primary->fb == fb)
|
||||
return connector;
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/* flush an area of the framebuffer (in case of manual update display that
|
||||
* is not automatically flushed)
|
||||
*/
|
||||
void omap_framebuffer_flush(struct drm_framebuffer *fb,
|
||||
int x, int y, int w, int h)
|
||||
{
|
||||
struct drm_connector *connector = NULL;
|
||||
|
||||
VERB("flush: %d,%d %dx%d, fb=%p", x, y, w, h, fb);
|
||||
|
||||
/* FIXME: This is racy - no protection against modeset config changes. */
|
||||
while ((connector = omap_framebuffer_get_next_connector(fb, connector))) {
|
||||
/* only consider connectors that are part of a chain */
|
||||
if (connector->encoder && connector->encoder->crtc) {
|
||||
/* TODO: maybe this should propagate thru the crtc who
|
||||
* could do the coordinate translation..
|
||||
*/
|
||||
struct drm_crtc *crtc = connector->encoder->crtc;
|
||||
int cx = max(0, x - crtc->x);
|
||||
int cy = max(0, y - crtc->y);
|
||||
int cw = w + (x - crtc->x) - cx;
|
||||
int ch = h + (y - crtc->y) - cy;
|
||||
|
||||
omap_connector_flush(connector, cx, cy, cw, ch);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef CONFIG_DEBUG_FS
|
||||
void omap_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
|
||||
{
|
||||
struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
|
||||
int i, n = drm_format_num_planes(fb->pixel_format);
|
||||
|
||||
seq_printf(m, "fb: %dx%d@%4.4s\n", fb->width, fb->height,
|
||||
(char *)&fb->pixel_format);
|
||||
|
||||
for (i = 0; i < n; i++) {
|
||||
struct plane *plane = &omap_fb->planes[i];
|
||||
seq_printf(m, " %d: offset=%d pitch=%d, obj: ",
|
||||
i, plane->offset, plane->pitch);
|
||||
omap_gem_describe(plane->bo, m);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev,
|
||||
struct drm_file *file, struct drm_mode_fb_cmd2 *mode_cmd)
|
||||
{
|
||||
struct drm_gem_object *bos[4];
|
||||
struct drm_framebuffer *fb;
|
||||
int ret;
|
||||
|
||||
ret = objects_lookup(dev, file, mode_cmd->pixel_format,
|
||||
bos, mode_cmd->handles);
|
||||
if (ret)
|
||||
return ERR_PTR(ret);
|
||||
|
||||
fb = omap_framebuffer_init(dev, mode_cmd, bos);
|
||||
if (IS_ERR(fb)) {
|
||||
int i, n = drm_format_num_planes(mode_cmd->pixel_format);
|
||||
for (i = 0; i < n; i++)
|
||||
drm_gem_object_unreference_unlocked(bos[i]);
|
||||
return fb;
|
||||
}
|
||||
return fb;
|
||||
}
|
||||
|
||||
struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
|
||||
struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos)
|
||||
{
|
||||
struct omap_framebuffer *omap_fb;
|
||||
struct drm_framebuffer *fb = NULL;
|
||||
const struct format *format = NULL;
|
||||
int ret, i, n = drm_format_num_planes(mode_cmd->pixel_format);
|
||||
|
||||
DBG("create framebuffer: dev=%p, mode_cmd=%p (%dx%d@%4.4s)",
|
||||
dev, mode_cmd, mode_cmd->width, mode_cmd->height,
|
||||
(char *)&mode_cmd->pixel_format);
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(formats); i++) {
|
||||
if (formats[i].pixel_format == mode_cmd->pixel_format) {
|
||||
format = &formats[i];
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (!format) {
|
||||
dev_err(dev->dev, "unsupported pixel format: %4.4s\n",
|
||||
(char *)&mode_cmd->pixel_format);
|
||||
ret = -EINVAL;
|
||||
goto fail;
|
||||
}
|
||||
|
||||
omap_fb = kzalloc(sizeof(*omap_fb), GFP_KERNEL);
|
||||
if (!omap_fb) {
|
||||
ret = -ENOMEM;
|
||||
goto fail;
|
||||
}
|
||||
|
||||
fb = &omap_fb->base;
|
||||
omap_fb->format = format;
|
||||
|
||||
for (i = 0; i < n; i++) {
|
||||
struct plane *plane = &omap_fb->planes[i];
|
||||
int size, pitch = mode_cmd->pitches[i];
|
||||
|
||||
if (pitch < (mode_cmd->width * format->planes[i].stride_bpp)) {
|
||||
dev_err(dev->dev, "provided buffer pitch is too small! %d < %d\n",
|
||||
pitch, mode_cmd->width * format->planes[i].stride_bpp);
|
||||
ret = -EINVAL;
|
||||
goto fail;
|
||||
}
|
||||
|
||||
size = pitch * mode_cmd->height / format->planes[i].sub_y;
|
||||
|
||||
if (size > (omap_gem_mmap_size(bos[i]) - mode_cmd->offsets[i])) {
|
||||
dev_err(dev->dev, "provided buffer object is too small! %d < %d\n",
|
||||
bos[i]->size - mode_cmd->offsets[i], size);
|
||||
ret = -EINVAL;
|
||||
goto fail;
|
||||
}
|
||||
|
||||
plane->bo = bos[i];
|
||||
plane->offset = mode_cmd->offsets[i];
|
||||
plane->pitch = pitch;
|
||||
plane->paddr = 0;
|
||||
}
|
||||
|
||||
drm_helper_mode_fill_fb_struct(fb, mode_cmd);
|
||||
|
||||
ret = drm_framebuffer_init(dev, fb, &omap_framebuffer_funcs);
|
||||
if (ret) {
|
||||
dev_err(dev->dev, "framebuffer init failed: %d\n", ret);
|
||||
goto fail;
|
||||
}
|
||||
|
||||
DBG("create: FB ID: %d (%p)", fb->base.id, fb);
|
||||
|
||||
return fb;
|
||||
|
||||
fail:
|
||||
if (fb)
|
||||
omap_framebuffer_destroy(fb);
|
||||
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
386
drivers/gpu/drm/omapdrm/omap_fbdev.c
Normal file
@@ -0,0 +1,386 @@
/*
|
||||
* drivers/gpu/drm/omapdrm/omap_fbdev.c
|
||||
*
|
||||
* Copyright (C) 2011 Texas Instruments
|
||||
* Author: Rob Clark <rob@ti.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 as published by
|
||||
* the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License along with
|
||||
* this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include "omap_drv.h"
|
||||
|
||||
#include "drm_crtc.h"
|
||||
#include "drm_fb_helper.h"
|
||||
|
||||
MODULE_PARM_DESC(ywrap, "Enable ywrap scrolling (omap44xx and later, default 'y')");
|
||||
static bool ywrap_enabled = true;
|
||||
module_param_named(ywrap, ywrap_enabled, bool, 0644);
|
||||
|
||||
/*
|
||||
* fbdev funcs, to implement legacy fbdev interface on top of drm driver
|
||||
*/
|
||||
|
||||
#define to_omap_fbdev(x) container_of(x, struct omap_fbdev, base)
|
||||
|
||||
struct omap_fbdev {
|
||||
struct drm_fb_helper base;
|
||||
struct drm_framebuffer *fb;
|
||||
struct drm_gem_object *bo;
|
||||
bool ywrap_enabled;
|
||||
|
||||
/* for deferred dmm roll when getting called in atomic ctx */
|
||||
struct work_struct work;
|
||||
};
|
||||
|
||||
static void omap_fbdev_flush(struct fb_info *fbi, int x, int y, int w, int h);
|
||||
static struct drm_fb_helper *get_fb(struct fb_info *fbi);
|
||||
|
||||
static ssize_t omap_fbdev_write(struct fb_info *fbi, const char __user *buf,
|
||||
size_t count, loff_t *ppos)
|
||||
{
|
||||
ssize_t res;
|
||||
|
||||
res = fb_sys_write(fbi, buf, count, ppos);
|
||||
omap_fbdev_flush(fbi, 0, 0, fbi->var.xres, fbi->var.yres);
|
||||
|
||||
return res;
|
||||
}
|
||||
|
||||
static void omap_fbdev_fillrect(struct fb_info *fbi,
|
||||
const struct fb_fillrect *rect)
|
||||
{
|
||||
sys_fillrect(fbi, rect);
|
||||
omap_fbdev_flush(fbi, rect->dx, rect->dy, rect->width, rect->height);
|
||||
}
|
||||
|
||||
static void omap_fbdev_copyarea(struct fb_info *fbi,
|
||||
const struct fb_copyarea *area)
|
||||
{
|
||||
sys_copyarea(fbi, area);
|
||||
omap_fbdev_flush(fbi, area->dx, area->dy, area->width, area->height);
|
||||
}
|
||||
|
||||
static void omap_fbdev_imageblit(struct fb_info *fbi,
|
||||
const struct fb_image *image)
|
||||
{
|
||||
sys_imageblit(fbi, image);
|
||||
omap_fbdev_flush(fbi, image->dx, image->dy,
|
||||
image->width, image->height);
|
||||
}
|
||||
|
||||
static void pan_worker(struct work_struct *work)
|
||||
{
|
||||
struct omap_fbdev *fbdev = container_of(work, struct omap_fbdev, work);
|
||||
struct fb_info *fbi = fbdev->base.fbdev;
|
||||
int npages;
|
||||
|
||||
/* DMM roll shifts in 4K pages: */
|
||||
npages = fbi->fix.line_length >> PAGE_SHIFT;
|
||||
omap_gem_roll(fbdev->bo, fbi->var.yoffset * npages);
|
||||
}
|
||||
|
||||
static int omap_fbdev_pan_display(struct fb_var_screeninfo *var,
|
||||
struct fb_info *fbi)
|
||||
{
|
||||
struct drm_fb_helper *helper = get_fb(fbi);
|
||||
struct omap_fbdev *fbdev = to_omap_fbdev(helper);
|
||||
|
||||
if (!helper)
|
||||
goto fallback;
|
||||
|
||||
if (!fbdev->ywrap_enabled)
|
||||
goto fallback;
|
||||
|
||||
if (drm_can_sleep()) {
|
||||
pan_worker(&fbdev->work);
|
||||
} else {
|
||||
struct omap_drm_private *priv = helper->dev->dev_private;
|
||||
queue_work(priv->wq, &fbdev->work);
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
fallback:
|
||||
return drm_fb_helper_pan_display(var, fbi);
|
||||
}
|
||||
|
||||
static struct fb_ops omap_fb_ops = {
|
||||
.owner = THIS_MODULE,
|
||||
|
||||
/* Note: to properly handle manual update displays, we wrap the
|
||||
* basic fbdev ops which write to the framebuffer
|
||||
*/
|
||||
.fb_read = fb_sys_read,
|
||||
.fb_write = omap_fbdev_write,
|
||||
.fb_fillrect = omap_fbdev_fillrect,
|
||||
.fb_copyarea = omap_fbdev_copyarea,
|
||||
.fb_imageblit = omap_fbdev_imageblit,
|
||||
|
||||
.fb_check_var = drm_fb_helper_check_var,
|
||||
.fb_set_par = drm_fb_helper_set_par,
|
||||
.fb_pan_display = omap_fbdev_pan_display,
|
||||
.fb_blank = drm_fb_helper_blank,
|
||||
.fb_setcmap = drm_fb_helper_setcmap,
|
||||
};
|
||||
|
||||
static int omap_fbdev_create(struct drm_fb_helper *helper,
|
||||
struct drm_fb_helper_surface_size *sizes)
|
||||
{
|
||||
struct omap_fbdev *fbdev = to_omap_fbdev(helper);
|
||||
struct drm_device *dev = helper->dev;
|
||||
struct omap_drm_private *priv = dev->dev_private;
|
||||
struct drm_framebuffer *fb = NULL;
|
||||
union omap_gem_size gsize;
|
||||
struct fb_info *fbi = NULL;
|
||||
struct drm_mode_fb_cmd2 mode_cmd = {0};
|
||||
dma_addr_t paddr;
|
||||
int ret;
|
||||
|
||||
/* only doing ARGB32 since this is what is needed to alpha-blend
|
||||
* with video overlays:
|
||||
*/
|
||||
sizes->surface_bpp = 32;
|
||||
sizes->surface_depth = 32;
|
||||
|
||||
DBG("create fbdev: %dx%d@%d (%dx%d)", sizes->surface_width,
|
||||
sizes->surface_height, sizes->surface_bpp,
|
||||
sizes->fb_width, sizes->fb_height);
|
||||
|
||||
mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
|
||||
sizes->surface_depth);
|
||||
|
||||
mode_cmd.width = sizes->surface_width;
|
||||
mode_cmd.height = sizes->surface_height;
|
||||
|
||||
mode_cmd.pitches[0] = align_pitch(
|
||||
mode_cmd.width * ((sizes->surface_bpp + 7) / 8),
|
||||
mode_cmd.width, sizes->surface_bpp);
|
||||
|
||||
fbdev->ywrap_enabled = priv->has_dmm && ywrap_enabled;
|
||||
if (fbdev->ywrap_enabled) {
|
||||
/* need to align pitch to page size if using DMM scrolling */
|
||||
mode_cmd.pitches[0] = ALIGN(mode_cmd.pitches[0], PAGE_SIZE);
|
||||
}
|
||||
|
||||
/* allocate backing bo */
|
||||
gsize = (union omap_gem_size){
|
||||
.bytes = PAGE_ALIGN(mode_cmd.pitches[0] * mode_cmd.height),
|
||||
};
|
||||
DBG("allocating %d bytes for fb %d", gsize.bytes, dev->primary->index);
|
||||
fbdev->bo = omap_gem_new(dev, gsize, OMAP_BO_SCANOUT | OMAP_BO_WC);
|
||||
if (!fbdev->bo) {
|
||||
dev_err(dev->dev, "failed to allocate buffer object\n");
|
||||
ret = -ENOMEM;
|
||||
goto fail;
|
||||
}
|
||||
|
||||
fb = omap_framebuffer_init(dev, &mode_cmd, &fbdev->bo);
|
||||
if (IS_ERR(fb)) {
|
||||
dev_err(dev->dev, "failed to allocate fb\n");
|
||||
/* note: if fb creation failed, we can't rely on fb destroy
|
||||
* to unref the bo:
|
||||
*/
|
||||
drm_gem_object_unreference(fbdev->bo);
|
||||
ret = PTR_ERR(fb);
|
||||
goto fail;
|
||||
}
|
||||
|
||||
/* note: this keeps the bo pinned.. which is perhaps not ideal,
|
||||
* but is needed as long as we use fb_mmap() to mmap to userspace
|
||||
* (since this happens using fix.smem_start). Possibly we could
|
||||
* implement our own mmap using GEM mmap support to avoid this
|
||||
* (non-tiled buffer doesn't need to be pinned for fbcon to write
|
||||
* to it). Then we just need to be sure that we are able to re-
|
||||
* pin it in case of an oops.
|
||||
*/
|
||||
ret = omap_gem_get_paddr(fbdev->bo, &paddr, true);
|
||||
if (ret) {
|
||||
dev_err(dev->dev,
|
||||
"could not map (paddr)! Skipping framebuffer alloc\n");
|
||||
ret = -ENOMEM;
|
||||
goto fail;
|
||||
}
|
||||
|
||||
mutex_lock(&dev->struct_mutex);
|
||||
|
||||
fbi = framebuffer_alloc(0, dev->dev);
|
||||
if (!fbi) {
|
||||
dev_err(dev->dev, "failed to allocate fb info\n");
|
||||
ret = -ENOMEM;
|
||||
goto fail_unlock;
|
||||
}
|
||||
|
||||
DBG("fbi=%p, dev=%p", fbi, dev);
|
||||
|
||||
fbdev->fb = fb;
|
||||
helper->fb = fb;
|
||||
helper->fbdev = fbi;
|
||||
|
||||
fbi->par = helper;
|
||||
fbi->flags = FBINFO_DEFAULT;
|
||||
fbi->fbops = &omap_fb_ops;
|
||||
|
||||
strcpy(fbi->fix.id, MODULE_NAME);
|
||||
|
||||
ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
|
||||
if (ret) {
|
||||
ret = -ENOMEM;
|
||||
goto fail_unlock;
|
||||
}
|
||||
|
||||
drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
|
||||
drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
|
||||
|
||||
dev->mode_config.fb_base = paddr;
|
||||
|
||||
fbi->screen_base = omap_gem_vaddr(fbdev->bo);
|
||||
fbi->screen_size = fbdev->bo->size;
|
||||
fbi->fix.smem_start = paddr;
|
||||
fbi->fix.smem_len = fbdev->bo->size;
|
||||
|
||||
/* if we have DMM, then we can use it for scrolling by just
|
||||
* shuffling pages around in DMM rather than doing sw blit.
|
||||
*/
|
||||
if (fbdev->ywrap_enabled) {
|
||||
DRM_INFO("Enabling DMM ywrap scrolling\n");
|
||||
fbi->flags |= FBINFO_HWACCEL_YWRAP | FBINFO_READS_FAST;
|
||||
fbi->fix.ywrapstep = 1;
|
||||
}
|
||||
|
||||
|
||||
DBG("par=%p, %dx%d", fbi->par, fbi->var.xres, fbi->var.yres);
|
||||
DBG("allocated %dx%d fb", fbdev->fb->width, fbdev->fb->height);
|
||||
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
|
||||
return 0;
|
||||
|
||||
fail_unlock:
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
fail:
|
||||
|
||||
if (ret) {
|
||||
if (fbi)
|
||||
framebuffer_release(fbi);
|
||||
if (fb) {
|
||||
drm_framebuffer_unregister_private(fb);
|
||||
drm_framebuffer_remove(fb);
|
||||
}
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static const struct drm_fb_helper_funcs omap_fb_helper_funcs = {
|
||||
.fb_probe = omap_fbdev_create,
|
||||
};
|
||||
|
||||
static struct drm_fb_helper *get_fb(struct fb_info *fbi)
|
||||
{
|
||||
if (!fbi || strcmp(fbi->fix.id, MODULE_NAME)) {
|
||||
/* these are not the fb's you're looking for */
|
||||
return NULL;
|
||||
}
|
||||
return fbi->par;
|
||||
}
|
||||
|
||||
/* flush an area of the framebuffer (in case of manual update display that
|
||||
* is not automatically flushed)
|
||||
*/
|
||||
static void omap_fbdev_flush(struct fb_info *fbi, int x, int y, int w, int h)
|
||||
{
|
||||
struct drm_fb_helper *helper = get_fb(fbi);
|
||||
|
||||
if (!helper)
|
||||
return;
|
||||
|
||||
VERB("flush fbdev: %d,%d %dx%d, fbi=%p", x, y, w, h, fbi);
|
||||
|
||||
omap_framebuffer_flush(helper->fb, x, y, w, h);
|
||||
}
|
||||
|
||||
/* initialize fbdev helper */
|
||||
struct drm_fb_helper *omap_fbdev_init(struct drm_device *dev)
|
||||
{
|
||||
struct omap_drm_private *priv = dev->dev_private;
|
||||
struct omap_fbdev *fbdev = NULL;
|
||||
struct drm_fb_helper *helper;
|
||||
int ret = 0;
|
||||
|
||||
fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
|
||||
if (!fbdev)
|
||||
goto fail;
|
||||
|
||||
INIT_WORK(&fbdev->work, pan_worker);
|
||||
|
||||
helper = &fbdev->base;
|
||||
|
||||
drm_fb_helper_prepare(dev, helper, &omap_fb_helper_funcs);
|
||||
|
||||
ret = drm_fb_helper_init(dev, helper,
|
||||
priv->num_crtcs, priv->num_connectors);
|
||||
if (ret) {
|
||||
dev_err(dev->dev, "could not init fbdev: ret=%d\n", ret);
|
||||
goto fail;
|
||||
}
|
||||
|
||||
drm_fb_helper_single_add_all_connectors(helper);
|
||||
|
||||
/* disable all the possible outputs/crtcs before entering KMS mode */
|
||||
drm_helper_disable_unused_functions(dev);
|
||||
|
||||
drm_fb_helper_initial_config(helper, 32);
|
||||
|
||||
priv->fbdev = helper;
|
||||
|
||||
return helper;
|
||||
|
||||
fail:
|
||||
kfree(fbdev);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
void omap_fbdev_free(struct drm_device *dev)
|
||||
{
|
||||
struct omap_drm_private *priv = dev->dev_private;
|
||||
struct drm_fb_helper *helper = priv->fbdev;
|
||||
struct omap_fbdev *fbdev;
|
||||
struct fb_info *fbi;
|
||||
|
||||
DBG();
|
||||
|
||||
fbi = helper->fbdev;
|
||||
|
||||
/* only cleanup framebuffer if it is present */
|
||||
if (fbi) {
|
||||
unregister_framebuffer(fbi);
|
||||
framebuffer_release(fbi);
|
||||
}
|
||||
|
||||
drm_fb_helper_fini(helper);
|
||||
|
||||
fbdev = to_omap_fbdev(priv->fbdev);
|
||||
|
||||
/* release the ref taken in omap_fbdev_create() */
|
||||
omap_gem_put_paddr(fbdev->bo);
|
||||
|
||||
/* this will free the backing object */
|
||||
if (fbdev->fb) {
|
||||
drm_framebuffer_unregister_private(fbdev->fb);
|
||||
drm_framebuffer_remove(fbdev->fb);
|
||||
}
|
||||
|
||||
kfree(fbdev);
|
||||
|
||||
priv->fbdev = NULL;
|
||||
}
|
||||
1482
drivers/gpu/drm/omapdrm/omap_gem.c
Normal file
File diff suppressed because it is too large
201
drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
Normal file
@@ -0,0 +1,201 @@
/*
|
||||
* drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
|
||||
*
|
||||
* Copyright (C) 2011 Texas Instruments
|
||||
* Author: Rob Clark <rob.clark@linaro.org>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 as published by
|
||||
* the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License along with
|
||||
* this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include "omap_drv.h"
|
||||
|
||||
#include <linux/dma-buf.h>
|
||||
|
||||
static struct sg_table *omap_gem_map_dma_buf(
|
||||
struct dma_buf_attachment *attachment,
|
||||
enum dma_data_direction dir)
|
||||
{
|
||||
struct drm_gem_object *obj = attachment->dmabuf->priv;
|
||||
struct sg_table *sg;
|
||||
dma_addr_t paddr;
|
||||
int ret;
|
||||
|
||||
sg = kzalloc(sizeof(*sg), GFP_KERNEL);
|
||||
if (!sg)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
/* camera, etc, need physically contiguous.. but we need a
|
||||
* better way to know this..
|
||||
*/
|
||||
ret = omap_gem_get_paddr(obj, &paddr, true);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
ret = sg_alloc_table(sg, 1, GFP_KERNEL);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
sg_init_table(sg->sgl, 1);
|
||||
sg_dma_len(sg->sgl) = obj->size;
|
||||
sg_set_page(sg->sgl, pfn_to_page(PFN_DOWN(paddr)), obj->size, 0);
|
||||
sg_dma_address(sg->sgl) = paddr;
|
||||
|
||||
/* this should be after _get_paddr() to ensure we have pages attached */
|
||||
omap_gem_dma_sync(obj, dir);
|
||||
|
||||
return sg;
|
||||
out:
|
||||
kfree(sg);
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
|
||||
static void omap_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
|
||||
struct sg_table *sg, enum dma_data_direction dir)
|
||||
{
|
||||
struct drm_gem_object *obj = attachment->dmabuf->priv;
|
||||
omap_gem_put_paddr(obj);
|
||||
sg_free_table(sg);
|
||||
kfree(sg);
|
||||
}
|
||||
|
||||
static void omap_gem_dmabuf_release(struct dma_buf *buffer)
|
||||
{
|
||||
struct drm_gem_object *obj = buffer->priv;
|
||||
/* release reference that was taken when dmabuf was exported
|
||||
* in omap_gem_prime_set()..
|
||||
*/
|
||||
drm_gem_object_unreference_unlocked(obj);
|
||||
}
|
||||
|
||||
|
||||
static int omap_gem_dmabuf_begin_cpu_access(struct dma_buf *buffer,
|
||||
size_t start, size_t len, enum dma_data_direction dir)
|
||||
{
|
||||
struct drm_gem_object *obj = buffer->priv;
|
||||
struct page **pages;
|
||||
if (omap_gem_flags(obj) & OMAP_BO_TILED) {
|
||||
/* TODO we would need to pin at least part of the buffer to
|
||||
* get de-tiled view. For now just reject it.
|
||||
*/
|
||||
return -ENOMEM;
|
||||
}
|
||||
/* make sure we have the pages: */
|
||||
return omap_gem_get_pages(obj, &pages, true);
|
||||
}
|
||||
|
||||
static void omap_gem_dmabuf_end_cpu_access(struct dma_buf *buffer,
|
||||
size_t start, size_t len, enum dma_data_direction dir)
|
||||
{
|
||||
struct drm_gem_object *obj = buffer->priv;
|
||||
omap_gem_put_pages(obj);
|
||||
}
|
||||
|
||||
|
||||
static void *omap_gem_dmabuf_kmap_atomic(struct dma_buf *buffer,
|
||||
unsigned long page_num)
|
||||
{
|
||||
struct drm_gem_object *obj = buffer->priv;
|
||||
struct page **pages;
|
||||
omap_gem_get_pages(obj, &pages, false);
|
||||
omap_gem_cpu_sync(obj, page_num);
|
||||
return kmap_atomic(pages[page_num]);
|
||||
}
|
||||
|
||||
static void omap_gem_dmabuf_kunmap_atomic(struct dma_buf *buffer,
|
||||
unsigned long page_num, void *addr)
|
||||
{
|
||||
kunmap_atomic(addr);
|
||||
}
|
||||
|
||||
static void *omap_gem_dmabuf_kmap(struct dma_buf *buffer,
|
||||
unsigned long page_num)
|
||||
{
|
||||
struct drm_gem_object *obj = buffer->priv;
|
||||
struct page **pages;
|
||||
omap_gem_get_pages(obj, &pages, false);
|
||||
omap_gem_cpu_sync(obj, page_num);
|
||||
return kmap(pages[page_num]);
|
||||
}
|
||||
|
||||
static void omap_gem_dmabuf_kunmap(struct dma_buf *buffer,
|
||||
unsigned long page_num, void *addr)
|
||||
{
|
||||
struct drm_gem_object *obj = buffer->priv;
|
||||
struct page **pages;
|
||||
omap_gem_get_pages(obj, &pages, false);
|
||||
kunmap(pages[page_num]);
|
||||
}
|
||||
|
||||
static int omap_gem_dmabuf_mmap(struct dma_buf *buffer,
|
||||
struct vm_area_struct *vma)
|
||||
{
|
||||
struct drm_gem_object *obj = buffer->priv;
|
||||
struct drm_device *dev = obj->dev;
|
||||
int ret = 0;
|
||||
|
||||
if (WARN_ON(!obj->filp))
|
||||
return -EINVAL;
|
||||
|
||||
mutex_lock(&dev->struct_mutex);
|
||||
ret = drm_gem_mmap_obj(obj, omap_gem_mmap_size(obj), vma);
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
return omap_gem_mmap_obj(obj, vma);
|
||||
}
|
||||
|
||||
static struct dma_buf_ops omap_dmabuf_ops = {
|
||||
.map_dma_buf = omap_gem_map_dma_buf,
|
||||
.unmap_dma_buf = omap_gem_unmap_dma_buf,
|
||||
.release = omap_gem_dmabuf_release,
|
||||
.begin_cpu_access = omap_gem_dmabuf_begin_cpu_access,
|
||||
.end_cpu_access = omap_gem_dmabuf_end_cpu_access,
|
||||
.kmap_atomic = omap_gem_dmabuf_kmap_atomic,
|
||||
.kunmap_atomic = omap_gem_dmabuf_kunmap_atomic,
|
||||
.kmap = omap_gem_dmabuf_kmap,
|
||||
.kunmap = omap_gem_dmabuf_kunmap,
|
||||
.mmap = omap_gem_dmabuf_mmap,
|
||||
};
|
||||
|
||||
struct dma_buf *omap_gem_prime_export(struct drm_device *dev,
|
||||
struct drm_gem_object *obj, int flags)
|
||||
{
|
||||
return dma_buf_export(obj, &omap_dmabuf_ops, obj->size, flags, NULL);
|
||||
}
|
||||
|
||||
struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
|
||||
struct dma_buf *buffer)
|
||||
{
|
||||
struct drm_gem_object *obj;
|
||||
|
||||
/* is this one of our own objects? */
|
||||
if (buffer->ops == &omap_dmabuf_ops) {
|
||||
obj = buffer->priv;
|
||||
/* is it from our device? */
|
||||
if (obj->dev == dev) {
|
||||
/*
|
||||
* Importing dmabuf exported from our own gem increases
|
||||
* refcount on gem itself instead of f_count of dmabuf.
|
||||
*/
|
||||
drm_gem_object_reference(obj);
|
||||
return obj;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* TODO add support for importing buffers from other devices..
|
||||
* for now we don't need this but would be nice to add eventually
|
||||
*/
|
||||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
342
drivers/gpu/drm/omapdrm/omap_irq.c
Normal file
@@ -0,0 +1,342 @@
/*
|
||||
* drivers/gpu/drm/omapdrm/omap_irq.c
|
||||
*
|
||||
* Copyright (C) 2012 Texas Instruments
|
||||
* Author: Rob Clark <rob.clark@linaro.org>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 as published by
|
||||
* the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License along with
|
||||
* this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include "omap_drv.h"
|
||||
|
||||
static DEFINE_SPINLOCK(list_lock);
|
||||
|
||||
static void omap_irq_error_handler(struct omap_drm_irq *irq,
|
||||
uint32_t irqstatus)
|
||||
{
|
||||
DRM_ERROR("errors: %08x\n", irqstatus);
|
||||
}
|
||||
|
||||
/* call with list_lock and dispc runtime held */
|
||||
static void omap_irq_update(struct drm_device *dev)
|
||||
{
|
||||
struct omap_drm_private *priv = dev->dev_private;
|
||||
struct omap_drm_irq *irq;
|
||||
uint32_t irqmask = priv->vblank_mask;
|
||||
|
||||
BUG_ON(!spin_is_locked(&list_lock));
|
||||
|
||||
list_for_each_entry(irq, &priv->irq_list, node)
|
||||
irqmask |= irq->irqmask;
|
||||
|
||||
DBG("irqmask=%08x", irqmask);
|
||||
|
||||
dispc_write_irqenable(irqmask);
|
||||
dispc_read_irqenable(); /* flush posted write */
|
||||
}
|
||||
|
||||
void __omap_irq_register(struct drm_device *dev, struct omap_drm_irq *irq)
|
||||
{
|
||||
struct omap_drm_private *priv = dev->dev_private;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&list_lock, flags);
|
||||
|
||||
if (!WARN_ON(irq->registered)) {
|
||||
irq->registered = true;
|
||||
list_add(&irq->node, &priv->irq_list);
|
||||
omap_irq_update(dev);
|
||||
}
|
||||
|
||||
spin_unlock_irqrestore(&list_lock, flags);
|
||||
}
|
||||
|
||||
void omap_irq_register(struct drm_device *dev, struct omap_drm_irq *irq)
|
||||
{
|
||||
dispc_runtime_get();
|
||||
|
||||
__omap_irq_register(dev, irq);
|
||||
|
||||
dispc_runtime_put();
|
||||
}
|
||||
|
||||
void __omap_irq_unregister(struct drm_device *dev, struct omap_drm_irq *irq)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&list_lock, flags);
|
||||
|
||||
if (!WARN_ON(!irq->registered)) {
|
||||
irq->registered = false;
|
||||
list_del(&irq->node);
|
||||
omap_irq_update(dev);
|
||||
}
|
||||
|
||||
spin_unlock_irqrestore(&list_lock, flags);
|
||||
}
|
||||
|
||||
void omap_irq_unregister(struct drm_device *dev, struct omap_drm_irq *irq)
|
||||
{
|
||||
dispc_runtime_get();
|
||||
|
||||
__omap_irq_unregister(dev, irq);
|
||||
|
||||
dispc_runtime_put();
|
||||
}
|
||||
|
||||
struct omap_irq_wait {
|
||||
struct omap_drm_irq irq;
|
||||
int count;
|
||||
};
|
||||
|
||||
static DECLARE_WAIT_QUEUE_HEAD(wait_event);
|
||||
|
||||
static void wait_irq(struct omap_drm_irq *irq, uint32_t irqstatus)
|
||||
{
|
||||
struct omap_irq_wait *wait =
|
||||
container_of(irq, struct omap_irq_wait, irq);
|
||||
wait->count--;
|
||||
wake_up_all(&wait_event);
|
||||
}
|
||||
|
||||
struct omap_irq_wait * omap_irq_wait_init(struct drm_device *dev,
|
||||
uint32_t irqmask, int count)
|
||||
{
|
||||
struct omap_irq_wait *wait = kzalloc(sizeof(*wait), GFP_KERNEL);
|
||||
wait->irq.irq = wait_irq;
|
||||
wait->irq.irqmask = irqmask;
|
||||
wait->count = count;
|
||||
omap_irq_register(dev, &wait->irq);
|
||||
return wait;
|
||||
}
|
||||
|
||||
int omap_irq_wait(struct drm_device *dev, struct omap_irq_wait *wait,
|
||||
unsigned long timeout)
|
||||
{
|
||||
int ret = wait_event_timeout(wait_event, (wait->count <= 0), timeout);
|
||||
omap_irq_unregister(dev, &wait->irq);
|
||||
kfree(wait);
|
||||
if (ret == 0)
|
||||
return -1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* enable_vblank - enable vblank interrupt events
|
||||
* @dev: DRM device
|
||||
* @crtc: which irq to enable
|
||||
*
|
||||
* Enable vblank interrupts for @crtc. If the device doesn't have
|
||||
* a hardware vblank counter, this routine should be a no-op, since
|
||||
* interrupts will have to stay on to keep the count accurate.
|
||||
*
|
||||
* RETURNS
|
||||
* Zero on success, appropriate errno if the given @crtc's vblank
|
||||
* interrupt cannot be enabled.
|
||||
*/
|
||||
int omap_irq_enable_vblank(struct drm_device *dev, int crtc_id)
|
||||
{
|
||||
struct omap_drm_private *priv = dev->dev_private;
|
||||
struct drm_crtc *crtc = priv->crtcs[crtc_id];
|
||||
unsigned long flags;
|
||||
|
||||
DBG("dev=%p, crtc=%d", dev, crtc_id);
|
||||
|
||||
dispc_runtime_get();
|
||||
spin_lock_irqsave(&list_lock, flags);
|
||||
priv->vblank_mask |= pipe2vbl(crtc);
|
||||
omap_irq_update(dev);
|
||||
spin_unlock_irqrestore(&list_lock, flags);
|
||||
dispc_runtime_put();
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* disable_vblank - disable vblank interrupt events
|
||||
* @dev: DRM device
|
||||
* @crtc: which irq to enable
|
||||
*
|
||||
* Disable vblank interrupts for @crtc. If the device doesn't have
|
||||
* a hardware vblank counter, this routine should be a no-op, since
|
||||
* interrupts will have to stay on to keep the count accurate.
|
||||
*/
|
||||
void omap_irq_disable_vblank(struct drm_device *dev, int crtc_id)
|
||||
{
|
||||
struct omap_drm_private *priv = dev->dev_private;
|
||||
struct drm_crtc *crtc = priv->crtcs[crtc_id];
|
||||
unsigned long flags;
|
||||
|
||||
DBG("dev=%p, crtc=%d", dev, crtc_id);
|
||||
|
||||
dispc_runtime_get();
|
||||
spin_lock_irqsave(&list_lock, flags);
|
||||
priv->vblank_mask &= ~pipe2vbl(crtc);
|
||||
omap_irq_update(dev);
|
||||
spin_unlock_irqrestore(&list_lock, flags);
|
||||
dispc_runtime_put();
|
||||
}
|
||||
|
||||
irqreturn_t omap_irq_handler(int irq, void *arg)
|
||||
{
|
||||
struct drm_device *dev = (struct drm_device *) arg;
|
||||
struct omap_drm_private *priv = dev->dev_private;
|
||||
struct omap_drm_irq *handler, *n;
|
||||
unsigned long flags;
|
||||
unsigned int id;
|
||||
u32 irqstatus;
|
||||
|
||||
irqstatus = dispc_read_irqstatus();
|
||||
dispc_clear_irqstatus(irqstatus);
|
||||
dispc_read_irqstatus(); /* flush posted write */
|
||||
|
||||
VERB("irqs: %08x", irqstatus);
|
||||
|
||||
for (id = 0; id < priv->num_crtcs; id++) {
|
||||
struct drm_crtc *crtc = priv->crtcs[id];
|
||||
|
||||
if (irqstatus & pipe2vbl(crtc))
|
||||
drm_handle_vblank(dev, id);
|
||||
}
|
||||
|
||||
spin_lock_irqsave(&list_lock, flags);
|
||||
list_for_each_entry_safe(handler, n, &priv->irq_list, node) {
|
||||
if (handler->irqmask & irqstatus) {
|
||||
spin_unlock_irqrestore(&list_lock, flags);
|
||||
handler->irq(handler, handler->irqmask & irqstatus);
|
||||
spin_lock_irqsave(&list_lock, flags);
|
||||
}
|
||||
}
|
||||
spin_unlock_irqrestore(&list_lock, flags);
|
||||
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
void omap_irq_preinstall(struct drm_device *dev)
|
||||
{
|
||||
DBG("dev=%p", dev);
|
||||
dispc_runtime_get();
|
||||
dispc_clear_irqstatus(0xffffffff);
|
||||
dispc_runtime_put();
|
||||
}
|
||||
|
||||
int omap_irq_postinstall(struct drm_device *dev)
|
||||
{
|
||||
struct omap_drm_private *priv = dev->dev_private;
|
||||
struct omap_drm_irq *error_handler = &priv->error_handler;
|
||||
|
||||
DBG("dev=%p", dev);
|
||||
|
||||
INIT_LIST_HEAD(&priv->irq_list);
|
||||
|
||||
error_handler->irq = omap_irq_error_handler;
|
||||
error_handler->irqmask = DISPC_IRQ_OCP_ERR;
|
||||
|
||||
/* for now ignore DISPC_IRQ_SYNC_LOST_DIGIT.. really I think
|
||||
* we just need to ignore it while enabling tv-out
|
||||
*/
|
||||
error_handler->irqmask &= ~DISPC_IRQ_SYNC_LOST_DIGIT;
|
||||
|
||||
omap_irq_register(dev, error_handler);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void omap_irq_uninstall(struct drm_device *dev)
|
||||
{
|
||||
DBG("dev=%p", dev);
|
||||
// TODO we probably need to call drm_irq_uninstall() somewhere too
|
||||
}
|
||||
|
||||
/*
|
||||
* We need a special version, instead of just using drm_irq_install(),
|
||||
* because we need to register the irq via omapdss. Once omapdss and
|
||||
* omapdrm are merged together we can assign the dispc hwmod data to
|
||||
* ourselves and drop these and just use drm_irq_{install,uninstall}()
|
||||
*/
|
||||
|
||||
int omap_drm_irq_install(struct drm_device *dev)
|
||||
{
|
||||
int ret;
|
||||
|
||||
mutex_lock(&dev->struct_mutex);
|
||||
|
||||
if (dev->irq_enabled) {
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
return -EBUSY;
|
||||
}
|
||||
dev->irq_enabled = true;
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
|
||||
/* Before installing handler */
|
||||
if (dev->driver->irq_preinstall)
|
||||
dev->driver->irq_preinstall(dev);
|
||||
|
||||
ret = dispc_request_irq(dev->driver->irq_handler, dev);
|
||||
|
||||
if (ret < 0) {
|
||||
mutex_lock(&dev->struct_mutex);
|
||||
dev->irq_enabled = false;
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* After installing handler */
|
||||
if (dev->driver->irq_postinstall)
|
||||
ret = dev->driver->irq_postinstall(dev);
|
||||
|
||||
if (ret < 0) {
|
||||
mutex_lock(&dev->struct_mutex);
|
||||
dev->irq_enabled = false;
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
dispc_free_irq(dev);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int omap_drm_irq_uninstall(struct drm_device *dev)
|
||||
{
|
||||
unsigned long irqflags;
|
||||
bool irq_enabled;
|
||||
int i;
|
||||
|
||||
mutex_lock(&dev->struct_mutex);
|
||||
irq_enabled = dev->irq_enabled;
|
||||
dev->irq_enabled = false;
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
|
||||
/*
|
||||
* Wake up any waiters so they don't hang.
|
||||
*/
|
||||
if (dev->num_crtcs) {
|
||||
spin_lock_irqsave(&dev->vbl_lock, irqflags);
|
||||
for (i = 0; i < dev->num_crtcs; i++) {
|
||||
wake_up(&dev->vblank[i].queue);
|
||||
dev->vblank[i].enabled = false;
|
||||
dev->vblank[i].last =
|
||||
dev->driver->get_vblank_counter(dev, i);
|
||||
}
|
||||
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
|
||||
}
|
||||
|
||||
if (!irq_enabled)
|
||||
return -EINVAL;
|
||||
|
||||
if (dev->driver->irq_uninstall)
|
||||
dev->driver->irq_uninstall(dev);
|
||||
|
||||
dispc_free_irq(dev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
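For context, a minimal sketch of how the wait helpers above are typically consumed elsewhere in the driver. The exact omap_irq_wait_init() prototype is inferred from the initialization code at the top of this file, and the irq mask and timeout below are illustrative placeholders, not values taken from omapdrm.

/* Illustrative only: block until one FRAMEDONE interrupt fires or 100ms pass.
 * Assumes omap_irq_wait_init(dev, irqmask, count) returns the wait object, as
 * suggested by the code above; omap_irq_wait() unregisters and frees it.
 */
static int example_wait_for_framedone(struct drm_device *dev)
{
	struct omap_irq_wait *wait =
		omap_irq_wait_init(dev, DISPC_IRQ_FRAMEDONE, 1);

	/* ... kick off the hardware operation to wait for here ... */

	return omap_irq_wait(dev, wait, msecs_to_jiffies(100));
}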
452
drivers/gpu/drm/omapdrm/omap_plane.c
Normal file
@ -0,0 +1,452 @@
/*
|
||||
* drivers/gpu/drm/omapdrm/omap_plane.c
|
||||
*
|
||||
* Copyright (C) 2011 Texas Instruments
|
||||
* Author: Rob Clark <rob.clark@linaro.org>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 as published by
|
||||
* the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License along with
|
||||
* this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include "drm_flip_work.h"
|
||||
|
||||
#include "omap_drv.h"
|
||||
#include "omap_dmm_tiler.h"
|
||||
|
||||
/* some hackery because omapdss has an 'enum omap_plane' (which would be
|
||||
* better named omap_plane_id).. and compiler seems unhappy about having
|
||||
* both a 'struct omap_plane' and 'enum omap_plane'
|
||||
*/
|
||||
#define omap_plane _omap_plane
|
||||
|
||||
/*
|
||||
* plane funcs
|
||||
*/
|
||||
|
||||
struct callback {
|
||||
void (*fxn)(void *);
|
||||
void *arg;
|
||||
};
|
||||
|
||||
#define to_omap_plane(x) container_of(x, struct omap_plane, base)
|
||||
|
||||
struct omap_plane {
|
||||
struct drm_plane base;
|
||||
int id; /* TODO rename omap_plane -> omap_plane_id in omapdss so I can use the enum */
|
||||
const char *name;
|
||||
struct omap_overlay_info info;
|
||||
struct omap_drm_apply apply;
|
||||
|
||||
/* position/orientation of scanout within the fb: */
|
||||
struct omap_drm_window win;
|
||||
bool enabled;
|
||||
|
||||
/* last fb that we pinned: */
|
||||
struct drm_framebuffer *pinned_fb;
|
||||
|
||||
uint32_t nformats;
|
||||
uint32_t formats[32];
|
||||
|
||||
struct omap_drm_irq error_irq;
|
||||
|
||||
/* for deferring bo unpin's until next post_apply(): */
|
||||
struct drm_flip_work unpin_work;
|
||||
|
||||
// XXX maybe get rid of this and handle vblank in crtc too?
|
||||
struct callback apply_done_cb;
|
||||
};
|
||||
|
||||
static void unpin_worker(struct drm_flip_work *work, void *val)
|
||||
{
|
||||
struct omap_plane *omap_plane =
|
||||
container_of(work, struct omap_plane, unpin_work);
|
||||
struct drm_device *dev = omap_plane->base.dev;
|
||||
|
||||
omap_framebuffer_unpin(val);
|
||||
mutex_lock(&dev->mode_config.mutex);
|
||||
drm_framebuffer_unreference(val);
|
||||
mutex_unlock(&dev->mode_config.mutex);
|
||||
}
|
||||
|
||||
/* update which fb (if any) is pinned for scanout */
|
||||
static int update_pin(struct drm_plane *plane, struct drm_framebuffer *fb)
|
||||
{
|
||||
struct omap_plane *omap_plane = to_omap_plane(plane);
|
||||
struct drm_framebuffer *pinned_fb = omap_plane->pinned_fb;
|
||||
|
||||
if (pinned_fb != fb) {
|
||||
int ret = 0;
|
||||
|
||||
DBG("%p -> %p", pinned_fb, fb);
|
||||
|
||||
if (fb) {
|
||||
drm_framebuffer_reference(fb);
|
||||
ret = omap_framebuffer_pin(fb);
|
||||
}
|
||||
|
||||
if (pinned_fb)
|
||||
drm_flip_work_queue(&omap_plane->unpin_work, pinned_fb);
|
||||
|
||||
if (ret) {
|
||||
dev_err(plane->dev->dev, "could not swap %p -> %p\n",
|
||||
omap_plane->pinned_fb, fb);
|
||||
drm_framebuffer_unreference(fb);
|
||||
omap_plane->pinned_fb = NULL;
|
||||
return ret;
|
||||
}
|
||||
|
||||
omap_plane->pinned_fb = fb;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void omap_plane_pre_apply(struct omap_drm_apply *apply)
|
||||
{
|
||||
struct omap_plane *omap_plane =
|
||||
container_of(apply, struct omap_plane, apply);
|
||||
struct omap_drm_window *win = &omap_plane->win;
|
||||
struct drm_plane *plane = &omap_plane->base;
|
||||
struct drm_device *dev = plane->dev;
|
||||
struct omap_overlay_info *info = &omap_plane->info;
|
||||
struct drm_crtc *crtc = plane->crtc;
|
||||
enum omap_channel channel;
|
||||
bool enabled = omap_plane->enabled && crtc;
|
||||
bool ilace, replication;
|
||||
int ret;
|
||||
|
||||
DBG("%s, enabled=%d", omap_plane->name, enabled);
|
||||
|
||||
/* if fb has changed, pin new fb: */
|
||||
update_pin(plane, enabled ? plane->fb : NULL);
|
||||
|
||||
if (!enabled) {
|
||||
dispc_ovl_enable(omap_plane->id, false);
|
||||
return;
|
||||
}
|
||||
|
||||
channel = omap_crtc_channel(crtc);
|
||||
|
||||
/* update scanout: */
|
||||
omap_framebuffer_update_scanout(plane->fb, win, info);
|
||||
|
||||
DBG("%dx%d -> %dx%d (%d)", info->width, info->height,
|
||||
info->out_width, info->out_height,
|
||||
info->screen_width);
|
||||
DBG("%d,%d %pad %pad", info->pos_x, info->pos_y,
|
||||
&info->paddr, &info->p_uv_addr);
|
||||
|
||||
/* TODO: */
|
||||
ilace = false;
|
||||
replication = false;
|
||||
|
||||
/* and finally, update omapdss: */
|
||||
ret = dispc_ovl_setup(omap_plane->id, info,
|
||||
replication, omap_crtc_timings(crtc), false);
|
||||
if (ret) {
|
||||
dev_err(dev->dev, "dispc_ovl_setup failed: %d\n", ret);
|
||||
return;
|
||||
}
|
||||
|
||||
dispc_ovl_enable(omap_plane->id, true);
|
||||
dispc_ovl_set_channel_out(omap_plane->id, channel);
|
||||
}
|
||||
|
||||
static void omap_plane_post_apply(struct omap_drm_apply *apply)
|
||||
{
|
||||
struct omap_plane *omap_plane =
|
||||
container_of(apply, struct omap_plane, apply);
|
||||
struct drm_plane *plane = &omap_plane->base;
|
||||
struct omap_drm_private *priv = plane->dev->dev_private;
|
||||
struct omap_overlay_info *info = &omap_plane->info;
|
||||
struct callback cb;
|
||||
|
||||
cb = omap_plane->apply_done_cb;
|
||||
omap_plane->apply_done_cb.fxn = NULL;
|
||||
|
||||
drm_flip_work_commit(&omap_plane->unpin_work, priv->wq);
|
||||
|
||||
if (cb.fxn)
|
||||
cb.fxn(cb.arg);
|
||||
|
||||
if (omap_plane->enabled) {
|
||||
omap_framebuffer_flush(plane->fb, info->pos_x, info->pos_y,
|
||||
info->out_width, info->out_height);
|
||||
}
|
||||
}
|
||||
|
||||
static int apply(struct drm_plane *plane)
|
||||
{
|
||||
if (plane->crtc) {
|
||||
struct omap_plane *omap_plane = to_omap_plane(plane);
|
||||
return omap_crtc_apply(plane->crtc, &omap_plane->apply);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
int omap_plane_mode_set(struct drm_plane *plane,
|
||||
struct drm_crtc *crtc, struct drm_framebuffer *fb,
|
||||
int crtc_x, int crtc_y,
|
||||
unsigned int crtc_w, unsigned int crtc_h,
|
||||
uint32_t src_x, uint32_t src_y,
|
||||
uint32_t src_w, uint32_t src_h,
|
||||
void (*fxn)(void *), void *arg)
|
||||
{
|
||||
struct omap_plane *omap_plane = to_omap_plane(plane);
|
||||
struct omap_drm_window *win = &omap_plane->win;
|
||||
|
||||
win->crtc_x = crtc_x;
|
||||
win->crtc_y = crtc_y;
|
||||
win->crtc_w = crtc_w;
|
||||
win->crtc_h = crtc_h;
|
||||
|
||||
/* src values are in Q16 fixed point, convert to integer: */
|
||||
win->src_x = src_x >> 16;
|
||||
win->src_y = src_y >> 16;
|
||||
win->src_w = src_w >> 16;
|
||||
win->src_h = src_h >> 16;
|
||||
|
||||
if (fxn) {
|
||||
/* omap_crtc should ensure that a new page flip
|
||||
* isn't permitted while there is one pending:
|
||||
*/
|
||||
BUG_ON(omap_plane->apply_done_cb.fxn);
|
||||
|
||||
omap_plane->apply_done_cb.fxn = fxn;
|
||||
omap_plane->apply_done_cb.arg = arg;
|
||||
}
|
||||
|
||||
if (plane->fb)
|
||||
drm_framebuffer_unreference(plane->fb);
|
||||
|
||||
drm_framebuffer_reference(fb);
|
||||
|
||||
plane->fb = fb;
|
||||
plane->crtc = crtc;
|
||||
|
||||
return apply(plane);
|
||||
}
|
||||
|
||||
static int omap_plane_update(struct drm_plane *plane,
|
||||
struct drm_crtc *crtc, struct drm_framebuffer *fb,
|
||||
int crtc_x, int crtc_y,
|
||||
unsigned int crtc_w, unsigned int crtc_h,
|
||||
uint32_t src_x, uint32_t src_y,
|
||||
uint32_t src_w, uint32_t src_h)
|
||||
{
|
||||
struct omap_plane *omap_plane = to_omap_plane(plane);
|
||||
omap_plane->enabled = true;
|
||||
|
||||
/* omap_plane_mode_set() takes adjusted src */
|
||||
switch (omap_plane->win.rotation & 0xf) {
|
||||
case BIT(DRM_ROTATE_90):
|
||||
case BIT(DRM_ROTATE_270):
|
||||
swap(src_w, src_h);
|
||||
break;
|
||||
}
|
||||
|
||||
return omap_plane_mode_set(plane, crtc, fb,
|
||||
crtc_x, crtc_y, crtc_w, crtc_h,
|
||||
src_x, src_y, src_w, src_h,
|
||||
NULL, NULL);
|
||||
}
|
||||
|
||||
static int omap_plane_disable(struct drm_plane *plane)
|
||||
{
|
||||
struct omap_plane *omap_plane = to_omap_plane(plane);
|
||||
omap_plane->win.rotation = BIT(DRM_ROTATE_0);
|
||||
return omap_plane_dpms(plane, DRM_MODE_DPMS_OFF);
|
||||
}
|
||||
|
||||
static void omap_plane_destroy(struct drm_plane *plane)
|
||||
{
|
||||
struct omap_plane *omap_plane = to_omap_plane(plane);
|
||||
|
||||
DBG("%s", omap_plane->name);
|
||||
|
||||
omap_irq_unregister(plane->dev, &omap_plane->error_irq);
|
||||
|
||||
omap_plane_disable(plane);
|
||||
drm_plane_cleanup(plane);
|
||||
|
||||
drm_flip_work_cleanup(&omap_plane->unpin_work);
|
||||
|
||||
kfree(omap_plane);
|
||||
}
|
||||
|
||||
int omap_plane_dpms(struct drm_plane *plane, int mode)
|
||||
{
|
||||
struct omap_plane *omap_plane = to_omap_plane(plane);
|
||||
bool enabled = (mode == DRM_MODE_DPMS_ON);
|
||||
int ret = 0;
|
||||
|
||||
if (enabled != omap_plane->enabled) {
|
||||
omap_plane->enabled = enabled;
|
||||
ret = apply(plane);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* helper to install properties which are common to planes and crtcs */
|
||||
void omap_plane_install_properties(struct drm_plane *plane,
|
||||
struct drm_mode_object *obj)
|
||||
{
|
||||
struct drm_device *dev = plane->dev;
|
||||
struct omap_drm_private *priv = dev->dev_private;
|
||||
struct drm_property *prop;
|
||||
|
||||
if (priv->has_dmm) {
|
||||
prop = priv->rotation_prop;
|
||||
if (!prop) {
|
||||
prop = drm_mode_create_rotation_property(dev,
|
||||
BIT(DRM_ROTATE_0) |
|
||||
BIT(DRM_ROTATE_90) |
|
||||
BIT(DRM_ROTATE_180) |
|
||||
BIT(DRM_ROTATE_270) |
|
||||
BIT(DRM_REFLECT_X) |
|
||||
BIT(DRM_REFLECT_Y));
|
||||
if (prop == NULL)
|
||||
return;
|
||||
priv->rotation_prop = prop;
|
||||
}
|
||||
drm_object_attach_property(obj, prop, 0);
|
||||
}
|
||||
|
||||
prop = priv->zorder_prop;
|
||||
if (!prop) {
|
||||
prop = drm_property_create_range(dev, 0, "zorder", 0, 3);
|
||||
if (prop == NULL)
|
||||
return;
|
||||
priv->zorder_prop = prop;
|
||||
}
|
||||
drm_object_attach_property(obj, prop, 0);
|
||||
}
|
||||
|
||||
int omap_plane_set_property(struct drm_plane *plane,
|
||||
struct drm_property *property, uint64_t val)
|
||||
{
|
||||
struct omap_plane *omap_plane = to_omap_plane(plane);
|
||||
struct omap_drm_private *priv = plane->dev->dev_private;
|
||||
int ret = -EINVAL;
|
||||
|
||||
if (property == priv->rotation_prop) {
|
||||
DBG("%s: rotation: %02x", omap_plane->name, (uint32_t)val);
|
||||
omap_plane->win.rotation = val;
|
||||
ret = apply(plane);
|
||||
} else if (property == priv->zorder_prop) {
|
||||
DBG("%s: zorder: %02x", omap_plane->name, (uint32_t)val);
|
||||
omap_plane->info.zorder = val;
|
||||
ret = apply(plane);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static const struct drm_plane_funcs omap_plane_funcs = {
|
||||
.update_plane = omap_plane_update,
|
||||
.disable_plane = omap_plane_disable,
|
||||
.destroy = omap_plane_destroy,
|
||||
.set_property = omap_plane_set_property,
|
||||
};
|
||||
|
||||
static void omap_plane_error_irq(struct omap_drm_irq *irq, uint32_t irqstatus)
|
||||
{
|
||||
struct omap_plane *omap_plane =
|
||||
container_of(irq, struct omap_plane, error_irq);
|
||||
DRM_ERROR("%s: errors: %08x\n", omap_plane->name, irqstatus);
|
||||
}
|
||||
|
||||
static const char *plane_names[] = {
|
||||
[OMAP_DSS_GFX] = "gfx",
|
||||
[OMAP_DSS_VIDEO1] = "vid1",
|
||||
[OMAP_DSS_VIDEO2] = "vid2",
|
||||
[OMAP_DSS_VIDEO3] = "vid3",
|
||||
};
|
||||
|
||||
static const uint32_t error_irqs[] = {
|
||||
[OMAP_DSS_GFX] = DISPC_IRQ_GFX_FIFO_UNDERFLOW,
|
||||
[OMAP_DSS_VIDEO1] = DISPC_IRQ_VID1_FIFO_UNDERFLOW,
|
||||
[OMAP_DSS_VIDEO2] = DISPC_IRQ_VID2_FIFO_UNDERFLOW,
|
||||
[OMAP_DSS_VIDEO3] = DISPC_IRQ_VID3_FIFO_UNDERFLOW,
|
||||
};
|
||||
|
||||
/* initialize plane */
|
||||
struct drm_plane *omap_plane_init(struct drm_device *dev,
|
||||
int id, bool private_plane)
|
||||
{
|
||||
struct omap_drm_private *priv = dev->dev_private;
|
||||
struct drm_plane *plane = NULL;
|
||||
struct omap_plane *omap_plane;
|
||||
struct omap_overlay_info *info;
|
||||
int ret;
|
||||
|
||||
DBG("%s: priv=%d", plane_names[id], private_plane);
|
||||
|
||||
omap_plane = kzalloc(sizeof(*omap_plane), GFP_KERNEL);
|
||||
if (!omap_plane)
|
||||
goto fail;
|
||||
|
||||
ret = drm_flip_work_init(&omap_plane->unpin_work, 16,
|
||||
"unpin", unpin_worker);
|
||||
if (ret) {
|
||||
dev_err(dev->dev, "could not allocate unpin FIFO\n");
|
||||
goto fail;
|
||||
}
|
||||
|
||||
omap_plane->nformats = omap_framebuffer_get_formats(
|
||||
omap_plane->formats, ARRAY_SIZE(omap_plane->formats),
|
||||
dss_feat_get_supported_color_modes(id));
|
||||
omap_plane->id = id;
|
||||
omap_plane->name = plane_names[id];
|
||||
|
||||
plane = &omap_plane->base;
|
||||
|
||||
omap_plane->apply.pre_apply = omap_plane_pre_apply;
|
||||
omap_plane->apply.post_apply = omap_plane_post_apply;
|
||||
|
||||
omap_plane->error_irq.irqmask = error_irqs[id];
|
||||
omap_plane->error_irq.irq = omap_plane_error_irq;
|
||||
omap_irq_register(dev, &omap_plane->error_irq);
|
||||
|
||||
drm_plane_init(dev, plane, (1 << priv->num_crtcs) - 1, &omap_plane_funcs,
|
||||
omap_plane->formats, omap_plane->nformats, private_plane);
|
||||
|
||||
omap_plane_install_properties(plane, &plane->base);
|
||||
|
||||
/* get our starting configuration, set defaults for parameters
|
||||
* we don't currently use, etc:
|
||||
*/
|
||||
info = &omap_plane->info;
|
||||
info->rotation_type = OMAP_DSS_ROT_DMA;
|
||||
info->rotation = OMAP_DSS_ROT_0;
|
||||
info->global_alpha = 0xff;
|
||||
info->mirror = 0;
|
||||
|
||||
/* Set defaults depending on whether we are a CRTC or overlay
|
||||
* layer.
|
||||
* TODO add ioctl to give userspace an API to change this.. this
|
||||
* will come in a subsequent patch.
|
||||
*/
|
||||
if (private_plane)
|
||||
omap_plane->info.zorder = 0;
|
||||
else
|
||||
omap_plane->info.zorder = id;
|
||||
|
||||
return plane;
|
||||
|
||||
fail:
|
||||
if (plane)
|
||||
omap_plane_destroy(plane);
|
||||
|
||||
return NULL;
|
||||
}
|
||||
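A short sketch of how omap_plane_init() above would typically be called during driver setup. The overlay and private-plane counts are placeholders, not values taken from omap_drv.c.

/* Illustrative only: create 'num_overlays' planes, treating the first
 * 'num_priv' of them as private (CRTC) planes.  Counts are placeholders;
 * the real wiring lives in omap_drv.c, which is not shown here.
 */
static int example_create_planes(struct drm_device *dev,
		int num_overlays, int num_priv)
{
	int i;

	for (i = 0; i < num_overlays; i++) {
		struct drm_plane *plane =
			omap_plane_init(dev, i, i < num_priv);

		if (!plane)
			return -ENOMEM;
	}

	return 0;
}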
703
drivers/gpu/drm/omapdrm/tcm-sita.c
Normal file
@ -0,0 +1,703 @@
/*
|
||||
* tcm-sita.c
|
||||
*
|
||||
* SImple Tiler Allocator (SiTA): 2D and 1D allocation(reservation) algorithm
|
||||
*
|
||||
* Authors: Ravi Ramachandra <r.ramachandra@ti.com>,
|
||||
* Lajos Molnar <molnar@ti.com>
|
||||
*
|
||||
* Copyright (C) 2009-2010 Texas Instruments, Inc.
|
||||
*
|
||||
* This package is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
|
||||
* IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
|
||||
* WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
|
||||
*
|
||||
*/
|
||||
#include <linux/slab.h>
|
||||
#include <linux/spinlock.h>
|
||||
|
||||
#include "tcm-sita.h"
|
||||
|
||||
#define ALIGN_DOWN(value, align) ((value) & ~((align) - 1))
|
||||
|
||||
/* Individual selection criteria for different scan areas */
|
||||
static s32 CR_L2R_T2B = CR_BIAS_HORIZONTAL;
|
||||
static s32 CR_R2L_T2B = CR_DIAGONAL_BALANCE;
|
||||
|
||||
/*********************************************
|
||||
* TCM API - Sita Implementation
|
||||
*********************************************/
|
||||
static s32 sita_reserve_2d(struct tcm *tcm, u16 h, u16 w, u8 align,
|
||||
struct tcm_area *area);
|
||||
static s32 sita_reserve_1d(struct tcm *tcm, u32 slots, struct tcm_area *area);
|
||||
static s32 sita_free(struct tcm *tcm, struct tcm_area *area);
|
||||
static void sita_deinit(struct tcm *tcm);
|
||||
|
||||
/*********************************************
|
||||
* Main Scanner functions
|
||||
*********************************************/
|
||||
static s32 scan_areas_and_find_fit(struct tcm *tcm, u16 w, u16 h, u16 align,
|
||||
struct tcm_area *area);
|
||||
|
||||
static s32 scan_l2r_t2b(struct tcm *tcm, u16 w, u16 h, u16 align,
|
||||
struct tcm_area *field, struct tcm_area *area);
|
||||
|
||||
static s32 scan_r2l_t2b(struct tcm *tcm, u16 w, u16 h, u16 align,
|
||||
struct tcm_area *field, struct tcm_area *area);
|
||||
|
||||
static s32 scan_r2l_b2t_one_dim(struct tcm *tcm, u32 num_slots,
|
||||
struct tcm_area *field, struct tcm_area *area);
|
||||
|
||||
/*********************************************
|
||||
* Support Infrastructure Methods
|
||||
*********************************************/
|
||||
static s32 is_area_free(struct tcm_area ***map, u16 x0, u16 y0, u16 w, u16 h);
|
||||
|
||||
static s32 update_candidate(struct tcm *tcm, u16 x0, u16 y0, u16 w, u16 h,
|
||||
struct tcm_area *field, s32 criteria,
|
||||
struct score *best);
|
||||
|
||||
static void get_nearness_factor(struct tcm_area *field,
|
||||
struct tcm_area *candidate,
|
||||
struct nearness_factor *nf);
|
||||
|
||||
static void get_neighbor_stats(struct tcm *tcm, struct tcm_area *area,
|
||||
struct neighbor_stats *stat);
|
||||
|
||||
static void fill_area(struct tcm *tcm,
|
||||
struct tcm_area *area, struct tcm_area *parent);
|
||||
|
||||
|
||||
/*********************************************/
|
||||
|
||||
/*********************************************
|
||||
* Utility Methods
|
||||
*********************************************/
|
||||
struct tcm *sita_init(u16 width, u16 height, struct tcm_pt *attr)
|
||||
{
|
||||
struct tcm *tcm;
|
||||
struct sita_pvt *pvt;
|
||||
struct tcm_area area = {0};
|
||||
s32 i;
|
||||
|
||||
if (width == 0 || height == 0)
|
||||
return NULL;
|
||||
|
||||
tcm = kmalloc(sizeof(*tcm), GFP_KERNEL);
|
||||
pvt = kmalloc(sizeof(*pvt), GFP_KERNEL);
|
||||
if (!tcm || !pvt)
|
||||
goto error;
|
||||
|
||||
memset(tcm, 0, sizeof(*tcm));
|
||||
memset(pvt, 0, sizeof(*pvt));
|
||||
|
||||
/* Updating the pointers to SiTA implementation APIs */
|
||||
tcm->height = height;
|
||||
tcm->width = width;
|
||||
tcm->reserve_2d = sita_reserve_2d;
|
||||
tcm->reserve_1d = sita_reserve_1d;
|
||||
tcm->free = sita_free;
|
||||
tcm->deinit = sita_deinit;
|
||||
tcm->pvt = (void *)pvt;
|
||||
|
||||
spin_lock_init(&(pvt->lock));
|
||||
|
||||
/* Creating the tcm map */
|
||||
pvt->map = kmalloc(sizeof(*pvt->map) * tcm->width, GFP_KERNEL);
|
||||
if (!pvt->map)
|
||||
goto error;
|
||||
|
||||
for (i = 0; i < tcm->width; i++) {
|
||||
pvt->map[i] =
|
||||
kmalloc(sizeof(**pvt->map) * tcm->height,
|
||||
GFP_KERNEL);
|
||||
if (pvt->map[i] == NULL) {
|
||||
while (i--)
|
||||
kfree(pvt->map[i]);
|
||||
kfree(pvt->map);
|
||||
goto error;
|
||||
}
|
||||
}
|
||||
|
||||
if (attr && attr->x <= tcm->width && attr->y <= tcm->height) {
|
||||
pvt->div_pt.x = attr->x;
|
||||
pvt->div_pt.y = attr->y;
|
||||
|
||||
} else {
|
||||
/* Defaulting to 3:1 ratio on width for 2D area split */
|
||||
/* Defaulting to 3:1 ratio on height for 2D and 1D split */
|
||||
pvt->div_pt.x = (tcm->width * 3) / 4;
|
||||
pvt->div_pt.y = (tcm->height * 3) / 4;
|
||||
}
|
||||
|
||||
spin_lock(&(pvt->lock));
|
||||
assign(&area, 0, 0, width - 1, height - 1);
|
||||
fill_area(tcm, &area, NULL);
|
||||
spin_unlock(&(pvt->lock));
|
||||
return tcm;
|
||||
|
||||
error:
|
||||
kfree(tcm);
|
||||
kfree(pvt);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static void sita_deinit(struct tcm *tcm)
|
||||
{
|
||||
struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
|
||||
struct tcm_area area = {0};
|
||||
s32 i;
|
||||
|
||||
area.p1.x = tcm->width - 1;
|
||||
area.p1.y = tcm->height - 1;
|
||||
|
||||
spin_lock(&(pvt->lock));
|
||||
fill_area(tcm, &area, NULL);
|
||||
spin_unlock(&(pvt->lock));
|
||||
|
||||
for (i = 0; i < tcm->height; i++)
|
||||
kfree(pvt->map[i]);
|
||||
kfree(pvt->map);
|
||||
kfree(pvt);
|
||||
}
|
||||
|
||||
/**
|
||||
* Reserve a 1D area in the container
|
||||
*
|
||||
* @param num_slots size of 1D area
|
||||
* @param area pointer to the area that will be populated with the
|
||||
* reserved area
|
||||
*
|
||||
* @return 0 on success, non-0 error value on failure.
|
||||
*/
|
||||
static s32 sita_reserve_1d(struct tcm *tcm, u32 num_slots,
|
||||
struct tcm_area *area)
|
||||
{
|
||||
s32 ret;
|
||||
struct tcm_area field = {0};
|
||||
struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
|
||||
|
||||
spin_lock(&(pvt->lock));
|
||||
|
||||
/* Scanning entire container */
|
||||
assign(&field, tcm->width - 1, tcm->height - 1, 0, 0);
|
||||
|
||||
ret = scan_r2l_b2t_one_dim(tcm, num_slots, &field, area);
|
||||
if (!ret)
|
||||
/* update map */
|
||||
fill_area(tcm, area, area);
|
||||
|
||||
spin_unlock(&(pvt->lock));
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* Reserve a 2D area in the container
|
||||
*
|
||||
* @param w width
|
||||
* @param h height
|
||||
* @param area pointer to the area that will be populated with the reserved
|
||||
* area
|
||||
*
|
||||
* @return 0 on success, non-0 error value on failure.
|
||||
*/
|
||||
static s32 sita_reserve_2d(struct tcm *tcm, u16 h, u16 w, u8 align,
|
||||
struct tcm_area *area)
|
||||
{
|
||||
s32 ret;
|
||||
struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
|
||||
|
||||
/* not supporting more than 64 as alignment */
|
||||
if (align > 64)
|
||||
return -EINVAL;
|
||||
|
||||
/* we prefer 1, 32 and 64 as alignment */
|
||||
align = align <= 1 ? 1 : align <= 32 ? 32 : 64;
|
||||
|
||||
spin_lock(&(pvt->lock));
|
||||
ret = scan_areas_and_find_fit(tcm, w, h, align, area);
|
||||
if (!ret)
|
||||
/* update map */
|
||||
fill_area(tcm, area, area);
|
||||
|
||||
spin_unlock(&(pvt->lock));
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* Unreserve a previously allocated 2D or 1D area
|
||||
* @param area area to be freed
|
||||
* @return 0 - success
|
||||
*/
|
||||
static s32 sita_free(struct tcm *tcm, struct tcm_area *area)
|
||||
{
|
||||
struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
|
||||
|
||||
spin_lock(&(pvt->lock));
|
||||
|
||||
/* check that this is in fact an existing area */
|
||||
WARN_ON(pvt->map[area->p0.x][area->p0.y] != area ||
|
||||
pvt->map[area->p1.x][area->p1.y] != area);
|
||||
|
||||
/* Clear the contents of the associated tiles in the map */
|
||||
fill_area(tcm, area, NULL);
|
||||
|
||||
spin_unlock(&(pvt->lock));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
* Note: In general the coordinates in the scan field are relative to the
* scan sweep directions. The scan origin (e.g. top-left corner) will always
* be the p0 member of the field. Therefore, for a scan from top-left
* p0.x <= p1.x and p0.y <= p1.y; whereas, for a scan from bottom-right
* p1.x <= p0.x and p1.y <= p0.y
*/
|
||||
|
||||
/**
|
||||
* Raster scan horizontally right to left from top to bottom to find a place for
|
||||
* a 2D area of given size inside a scan field.
|
||||
*
|
||||
* @param w width of desired area
|
||||
* @param h height of desired area
|
||||
* @param align desired area alignment
|
||||
* @param area pointer to the area that will be set to the best position
|
||||
* @param field area to scan (inclusive)
|
||||
*
|
||||
* @return 0 on success, non-0 error value on failure.
|
||||
*/
|
||||
static s32 scan_r2l_t2b(struct tcm *tcm, u16 w, u16 h, u16 align,
|
||||
struct tcm_area *field, struct tcm_area *area)
|
||||
{
|
||||
s32 x, y;
|
||||
s16 start_x, end_x, start_y, end_y, found_x = -1;
|
||||
struct tcm_area ***map = ((struct sita_pvt *)tcm->pvt)->map;
|
||||
struct score best = {{0}, {0}, {0}, 0};
|
||||
|
||||
start_x = field->p0.x;
|
||||
end_x = field->p1.x;
|
||||
start_y = field->p0.y;
|
||||
end_y = field->p1.y;
|
||||
|
||||
/* check scan area co-ordinates */
|
||||
if (field->p0.x < field->p1.x ||
|
||||
field->p1.y < field->p0.y)
|
||||
return -EINVAL;
|
||||
|
||||
/* check if allocation would fit in scan area */
|
||||
if (w > LEN(start_x, end_x) || h > LEN(end_y, start_y))
|
||||
return -ENOSPC;
|
||||
|
||||
/* adjust start_x and end_y, as allocation would not fit beyond */
|
||||
start_x = ALIGN_DOWN(start_x - w + 1, align); /* - 1 to be inclusive */
|
||||
end_y = end_y - h + 1;
|
||||
|
||||
/* check if allocation would still fit in scan area */
|
||||
if (start_x < end_x)
|
||||
return -ENOSPC;
|
||||
|
||||
/* scan field top-to-bottom, right-to-left */
|
||||
for (y = start_y; y <= end_y; y++) {
|
||||
for (x = start_x; x >= end_x; x -= align) {
|
||||
if (is_area_free(map, x, y, w, h)) {
|
||||
found_x = x;
|
||||
|
||||
/* update best candidate */
|
||||
if (update_candidate(tcm, x, y, w, h, field,
|
||||
CR_R2L_T2B, &best))
|
||||
goto done;
|
||||
|
||||
/* change upper x bound */
|
||||
end_x = x + 1;
|
||||
break;
|
||||
} else if (map[x][y] && map[x][y]->is2d) {
|
||||
/* step over 2D areas */
|
||||
x = ALIGN(map[x][y]->p0.x - w + 1, align);
|
||||
}
|
||||
}
|
||||
|
||||
/* break if you find a free area shouldering the scan field */
|
||||
if (found_x == start_x)
|
||||
break;
|
||||
}
|
||||
|
||||
if (!best.a.tcm)
|
||||
return -ENOSPC;
|
||||
done:
|
||||
assign(area, best.a.p0.x, best.a.p0.y, best.a.p1.x, best.a.p1.y);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Raster scan horizontally left to right from top to bottom to find a place for
|
||||
* a 2D area of given size inside a scan field.
|
||||
*
|
||||
* @param w width of desired area
|
||||
* @param h height of desired area
|
||||
* @param align desired area alignment
|
||||
* @param area pointer to the area that will be set to the best position
|
||||
* @param field area to scan (inclusive)
|
||||
*
|
||||
* @return 0 on success, non-0 error value on failure.
|
||||
*/
|
||||
static s32 scan_l2r_t2b(struct tcm *tcm, u16 w, u16 h, u16 align,
|
||||
struct tcm_area *field, struct tcm_area *area)
|
||||
{
|
||||
s32 x, y;
|
||||
s16 start_x, end_x, start_y, end_y, found_x = -1;
|
||||
struct tcm_area ***map = ((struct sita_pvt *)tcm->pvt)->map;
|
||||
struct score best = {{0}, {0}, {0}, 0};
|
||||
|
||||
start_x = field->p0.x;
|
||||
end_x = field->p1.x;
|
||||
start_y = field->p0.y;
|
||||
end_y = field->p1.y;
|
||||
|
||||
/* check scan area co-ordinates */
|
||||
if (field->p1.x < field->p0.x ||
|
||||
field->p1.y < field->p0.y)
|
||||
return -EINVAL;
|
||||
|
||||
/* check if allocation would fit in scan area */
|
||||
if (w > LEN(end_x, start_x) || h > LEN(end_y, start_y))
|
||||
return -ENOSPC;
|
||||
|
||||
start_x = ALIGN(start_x, align);
|
||||
|
||||
/* check if allocation would still fit in scan area */
|
||||
if (w > LEN(end_x, start_x))
|
||||
return -ENOSPC;
|
||||
|
||||
/* adjust end_x and end_y, as allocation would not fit beyond */
|
||||
end_x = end_x - w + 1; /* + 1 to be inclusive */
|
||||
end_y = end_y - h + 1;
|
||||
|
||||
/* scan field top-to-bottom, left-to-right */
|
||||
for (y = start_y; y <= end_y; y++) {
|
||||
for (x = start_x; x <= end_x; x += align) {
|
||||
if (is_area_free(map, x, y, w, h)) {
|
||||
found_x = x;
|
||||
|
||||
/* update best candidate */
|
||||
if (update_candidate(tcm, x, y, w, h, field,
|
||||
CR_L2R_T2B, &best))
|
||||
goto done;
|
||||
/* change upper x bound */
|
||||
end_x = x - 1;
|
||||
|
||||
break;
|
||||
} else if (map[x][y] && map[x][y]->is2d) {
|
||||
/* step over 2D areas */
|
||||
x = ALIGN_DOWN(map[x][y]->p1.x, align);
|
||||
}
|
||||
}
|
||||
|
||||
/* break if you find a free area shouldering the scan field */
|
||||
if (found_x == start_x)
|
||||
break;
|
||||
}
|
||||
|
||||
if (!best.a.tcm)
|
||||
return -ENOSPC;
|
||||
done:
|
||||
assign(area, best.a.p0.x, best.a.p0.y, best.a.p1.x, best.a.p1.y);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Raster scan horizontally right to left from bottom to top to find a place
|
||||
* for a 1D area of given size inside a scan field.
|
||||
*
|
||||
* @param num_slots size of desired area
|
||||
* @param align desired area alignment
|
||||
* @param area pointer to the area that will be set to the best
|
||||
* position
|
||||
* @param field area to scan (inclusive)
|
||||
*
|
||||
* @return 0 on success, non-0 error value on failure.
|
||||
*/
|
||||
static s32 scan_r2l_b2t_one_dim(struct tcm *tcm, u32 num_slots,
|
||||
struct tcm_area *field, struct tcm_area *area)
|
||||
{
|
||||
s32 found = 0;
|
||||
s16 x, y;
|
||||
struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
|
||||
struct tcm_area *p;
|
||||
|
||||
/* check scan area co-ordinates */
|
||||
if (field->p0.y < field->p1.y)
|
||||
return -EINVAL;
|
||||
|
||||
/**
|
||||
* Currently we only support full width 1D scan field, which makes sense
|
||||
* since 1D slot-ordering spans the full container width.
|
||||
*/
|
||||
if (tcm->width != field->p0.x - field->p1.x + 1)
|
||||
return -EINVAL;
|
||||
|
||||
/* check if allocation would fit in scan area */
|
||||
if (num_slots > tcm->width * LEN(field->p0.y, field->p1.y))
|
||||
return -ENOSPC;
|
||||
|
||||
x = field->p0.x;
|
||||
y = field->p0.y;
|
||||
|
||||
/* find num_slots consecutive free slots to the left */
|
||||
while (found < num_slots) {
|
||||
if (y < 0)
|
||||
return -ENOSPC;
|
||||
|
||||
/* remember bottom-right corner */
|
||||
if (found == 0) {
|
||||
area->p1.x = x;
|
||||
area->p1.y = y;
|
||||
}
|
||||
|
||||
/* skip busy regions */
|
||||
p = pvt->map[x][y];
|
||||
if (p) {
|
||||
/* move to left of 2D areas, top left of 1D */
|
||||
x = p->p0.x;
|
||||
if (!p->is2d)
|
||||
y = p->p0.y;
|
||||
|
||||
/* start over */
|
||||
found = 0;
|
||||
} else {
|
||||
/* count consecutive free slots */
|
||||
found++;
|
||||
if (found == num_slots)
|
||||
break;
|
||||
}
|
||||
|
||||
/* move to the left */
|
||||
if (x == 0)
|
||||
y--;
|
||||
x = (x ? : tcm->width) - 1;
|
||||
|
||||
}
|
||||
|
||||
/* set top-left corner */
|
||||
area->p0.x = x;
|
||||
area->p0.y = y;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Find a place for a 2D area of given size inside a scan field based on its
|
||||
* alignment needs.
|
||||
*
|
||||
* @param w width of desired area
|
||||
* @param h height of desired area
|
||||
* @param align desired area alignment
|
||||
* @param area pointer to the area that will be set to the best position
|
||||
*
|
||||
* @return 0 on success, non-0 error value on failure.
|
||||
*/
|
||||
static s32 scan_areas_and_find_fit(struct tcm *tcm, u16 w, u16 h, u16 align,
|
||||
struct tcm_area *area)
|
||||
{
|
||||
s32 ret = 0;
|
||||
struct tcm_area field = {0};
|
||||
u16 boundary_x, boundary_y;
|
||||
struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
|
||||
|
||||
if (align > 1) {
|
||||
/* prefer top-left corner */
|
||||
boundary_x = pvt->div_pt.x - 1;
|
||||
boundary_y = pvt->div_pt.y - 1;
|
||||
|
||||
/* expand width and height if needed */
|
||||
if (w > pvt->div_pt.x)
|
||||
boundary_x = tcm->width - 1;
|
||||
if (h > pvt->div_pt.y)
|
||||
boundary_y = tcm->height - 1;
|
||||
|
||||
assign(&field, 0, 0, boundary_x, boundary_y);
|
||||
ret = scan_l2r_t2b(tcm, w, h, align, &field, area);
|
||||
|
||||
/* scan whole container if failed, but do not scan 2x */
|
||||
if (ret != 0 && (boundary_x != tcm->width - 1 ||
|
||||
boundary_y != tcm->height - 1)) {
|
||||
/* scan the entire container if nothing found */
|
||||
assign(&field, 0, 0, tcm->width - 1, tcm->height - 1);
|
||||
ret = scan_l2r_t2b(tcm, w, h, align, &field, area);
|
||||
}
|
||||
} else if (align == 1) {
|
||||
/* prefer top-right corner */
|
||||
boundary_x = pvt->div_pt.x;
|
||||
boundary_y = pvt->div_pt.y - 1;
|
||||
|
||||
/* expand width and height if needed */
|
||||
if (w > (tcm->width - pvt->div_pt.x))
|
||||
boundary_x = 0;
|
||||
if (h > pvt->div_pt.y)
|
||||
boundary_y = tcm->height - 1;
|
||||
|
||||
assign(&field, tcm->width - 1, 0, boundary_x, boundary_y);
|
||||
ret = scan_r2l_t2b(tcm, w, h, align, &field, area);
|
||||
|
||||
/* scan whole container if failed, but do not scan 2x */
|
||||
if (ret != 0 && (boundary_x != 0 ||
|
||||
boundary_y != tcm->height - 1)) {
|
||||
/* scan the entire container if nothing found */
|
||||
assign(&field, tcm->width - 1, 0, 0, tcm->height - 1);
|
||||
ret = scan_r2l_t2b(tcm, w, h, align, &field,
|
||||
area);
|
||||
}
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* check if an entire area is free */
|
||||
static s32 is_area_free(struct tcm_area ***map, u16 x0, u16 y0, u16 w, u16 h)
|
||||
{
|
||||
u16 x = 0, y = 0;
|
||||
for (y = y0; y < y0 + h; y++) {
|
||||
for (x = x0; x < x0 + w; x++) {
|
||||
if (map[x][y])
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
/* fills an area with a parent tcm_area */
|
||||
static void fill_area(struct tcm *tcm, struct tcm_area *area,
|
||||
struct tcm_area *parent)
|
||||
{
|
||||
s32 x, y;
|
||||
struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
|
||||
struct tcm_area a, a_;
|
||||
|
||||
/* set area's tcm; otherwise, enumerator considers it invalid */
|
||||
area->tcm = tcm;
|
||||
|
||||
tcm_for_each_slice(a, *area, a_) {
|
||||
for (x = a.p0.x; x <= a.p1.x; ++x)
|
||||
for (y = a.p0.y; y <= a.p1.y; ++y)
|
||||
pvt->map[x][y] = parent;
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Compares a candidate area to the current best area, and if it is a better
|
||||
* fit, it updates the best to this one.
|
||||
*
|
||||
* @param x0, y0, w, h left, top, width, height of candidate area
|
||||
* @param field scan field
|
||||
* @param criteria scan criteria
|
||||
* @param best best candidate and its scores
|
||||
*
|
||||
* @return 1 (true) if the candidate area is known to be the final best, so no
|
||||
* more searching should be performed
|
||||
*/
|
||||
static s32 update_candidate(struct tcm *tcm, u16 x0, u16 y0, u16 w, u16 h,
|
||||
struct tcm_area *field, s32 criteria,
|
||||
struct score *best)
|
||||
{
|
||||
struct score me; /* score for area */
|
||||
|
||||
/*
|
||||
* NOTE: For horizontal bias we always give the first found, because our
|
||||
* scan is horizontal-raster-based and the first candidate will always
|
||||
* have the horizontal bias.
|
||||
*/
|
||||
bool first = criteria & CR_BIAS_HORIZONTAL;
|
||||
|
||||
assign(&me.a, x0, y0, x0 + w - 1, y0 + h - 1);
|
||||
|
||||
/* calculate score for current candidate */
|
||||
if (!first) {
|
||||
get_neighbor_stats(tcm, &me.a, &me.n);
|
||||
me.neighs = me.n.edge + me.n.busy;
|
||||
get_nearness_factor(field, &me.a, &me.f);
|
||||
}
|
||||
|
||||
/* the 1st candidate is always the best */
|
||||
if (!best->a.tcm)
|
||||
goto better;
|
||||
|
||||
BUG_ON(first);
|
||||
|
||||
/* diagonal balance check */
|
||||
if ((criteria & CR_DIAGONAL_BALANCE) &&
|
||||
best->neighs <= me.neighs &&
|
||||
(best->neighs < me.neighs ||
|
||||
/* this implies that neighs and occupied match */
|
||||
best->n.busy < me.n.busy ||
|
||||
(best->n.busy == me.n.busy &&
|
||||
/* check the nearness factor */
|
||||
best->f.x + best->f.y > me.f.x + me.f.y)))
|
||||
goto better;
|
||||
|
||||
/* not better, keep going */
|
||||
return 0;
|
||||
|
||||
better:
|
||||
/* save current area as best */
|
||||
memcpy(best, &me, sizeof(me));
|
||||
best->a.tcm = tcm;
|
||||
return first;
|
||||
}
|
||||
|
||||
/**
|
||||
* Calculate the nearness factor of an area in a search field. The nearness
|
||||
* factor is smaller if the area is closer to the search origin.
|
||||
*/
|
||||
static void get_nearness_factor(struct tcm_area *field, struct tcm_area *area,
|
||||
struct nearness_factor *nf)
|
||||
{
|
||||
/**
|
||||
* Using signed math as field coordinates may be reversed if
|
||||
* search direction is right-to-left or bottom-to-top.
|
||||
*/
|
||||
nf->x = (s32)(area->p0.x - field->p0.x) * 1000 /
|
||||
(field->p1.x - field->p0.x);
|
||||
nf->y = (s32)(area->p0.y - field->p0.y) * 1000 /
|
||||
(field->p1.y - field->p0.y);
|
||||
}
|
||||
|
||||
/* get neighbor statistics */
|
||||
static void get_neighbor_stats(struct tcm *tcm, struct tcm_area *area,
|
||||
struct neighbor_stats *stat)
|
||||
{
|
||||
s16 x = 0, y = 0;
|
||||
struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
|
||||
|
||||
/* Clearing any existing values */
|
||||
memset(stat, 0, sizeof(*stat));
|
||||
|
||||
/* process top & bottom edges */
|
||||
for (x = area->p0.x; x <= area->p1.x; x++) {
|
||||
if (area->p0.y == 0)
|
||||
stat->edge++;
|
||||
else if (pvt->map[x][area->p0.y - 1])
|
||||
stat->busy++;
|
||||
|
||||
if (area->p1.y == tcm->height - 1)
|
||||
stat->edge++;
|
||||
else if (pvt->map[x][area->p1.y + 1])
|
||||
stat->busy++;
|
||||
}
|
||||
|
||||
/* process left & right edges */
|
||||
for (y = area->p0.y; y <= area->p1.y; ++y) {
|
||||
if (area->p0.x == 0)
|
||||
stat->edge++;
|
||||
else if (pvt->map[area->p0.x - 1][y])
|
||||
stat->busy++;
|
||||
|
||||
if (area->p1.x == tcm->width - 1)
|
||||
stat->edge++;
|
||||
else if (pvt->map[area->p1.x + 1][y])
|
||||
stat->busy++;
|
||||
}
|
||||
}
|
||||
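As a small aside on the 2D path above, sita_reserve_2d() snaps the caller's alignment to one of the three supported values before scanning. The helper below is not part of the driver; it simply restates that expression as a standalone sketch.

/* Illustrative only: requested alignments of 0 or 1 become 1, anything up
 * to 32 becomes 32, and anything up to the 64 maximum becomes 64.
 */
static inline u8 example_snap_align(u8 align)
{
	return align <= 1 ? 1 : align <= 32 ? 32 : 64;
}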
95
drivers/gpu/drm/omapdrm/tcm-sita.h
Normal file
@ -0,0 +1,95 @@
/*
|
||||
* tcm_sita.h
|
||||
*
|
||||
* SImple Tiler Allocator (SiTA) private structures.
|
||||
*
|
||||
* Author: Ravi Ramachandra <r.ramachandra@ti.com>
|
||||
*
|
||||
* Copyright (C) 2009-2011 Texas Instruments, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
*
|
||||
* * Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
*
|
||||
* * Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
*
|
||||
* * Neither the name of Texas Instruments Incorporated nor the names of
|
||||
* its contributors may be used to endorse or promote products derived
|
||||
* from this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
|
||||
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
||||
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
|
||||
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
|
||||
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
||||
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
|
||||
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
|
||||
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
|
||||
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
|
||||
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#ifndef _TCM_SITA_H
|
||||
#define _TCM_SITA_H
|
||||
|
||||
#include "tcm.h"
|
||||
|
||||
/* length between two coordinates */
|
||||
#define LEN(a, b) ((a) > (b) ? (a) - (b) + 1 : (b) - (a) + 1)
|
||||
|
||||
enum criteria {
|
||||
CR_MAX_NEIGHS = 0x01,
|
||||
CR_FIRST_FOUND = 0x10,
|
||||
CR_BIAS_HORIZONTAL = 0x20,
|
||||
CR_BIAS_VERTICAL = 0x40,
|
||||
CR_DIAGONAL_BALANCE = 0x80
|
||||
};
|
||||
|
||||
/* nearness to the beginning of the search field from 0 to 1000 */
|
||||
struct nearness_factor {
|
||||
s32 x;
|
||||
s32 y;
|
||||
};
|
||||
|
||||
/*
|
||||
* Statistics on immediately neighboring slots. Edge is the number of
|
||||
* border segments that are also border segments of the scan field. Busy
|
||||
* refers to the number of neighbors that are occupied.
|
||||
*/
|
||||
struct neighbor_stats {
|
||||
u16 edge;
|
||||
u16 busy;
|
||||
};
|
||||
|
||||
/* structure to keep the score of a potential allocation */
|
||||
struct score {
|
||||
struct nearness_factor f;
|
||||
struct neighbor_stats n;
|
||||
struct tcm_area a;
|
||||
u16 neighs; /* number of busy neighbors */
|
||||
};
|
||||
|
||||
struct sita_pvt {
|
||||
spinlock_t lock; /* spinlock to protect access */
|
||||
struct tcm_pt div_pt; /* divider point splitting container */
|
||||
struct tcm_area ***map; /* pointers to the parent area for each slot */
|
||||
};
|
||||
|
||||
/* assign coordinates to area */
|
||||
static inline
|
||||
void assign(struct tcm_area *a, u16 x0, u16 y0, u16 x1, u16 y1)
|
||||
{
|
||||
a->p0.x = x0;
|
||||
a->p0.y = y0;
|
||||
a->p1.x = x1;
|
||||
a->p1.y = y1;
|
||||
}
|
||||
|
||||
#endif
|
||||
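A quick worked example of the nearness factor declared above, with illustrative numbers: for a 256-slot-wide field scanned left to right (p0.x = 0, p1.x = 255), a candidate whose left edge sits at x = 64 gets nf.x = 64 * 1000 / 255, which is 250 with integer math, i.e. about a quarter of the way from the scan origin. Smaller values therefore mean the candidate sits closer to where the scan started.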
328
drivers/gpu/drm/omapdrm/tcm.h
Normal file
@ -0,0 +1,328 @@
/*
|
||||
* tcm.h
|
||||
*
|
||||
* TILER container manager specification and support functions for TI
|
||||
* TILER driver.
|
||||
*
|
||||
* Author: Lajos Molnar <molnar@ti.com>
|
||||
*
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
*
|
||||
* * Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
*
|
||||
* * Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
*
|
||||
* * Neither the name of Texas Instruments Incorporated nor the names of
|
||||
* its contributors may be used to endorse or promote products derived
|
||||
* from this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
|
||||
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
||||
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
|
||||
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
|
||||
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
||||
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
|
||||
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
|
||||
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
|
||||
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
|
||||
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#ifndef TCM_H
|
||||
#define TCM_H
|
||||
|
||||
struct tcm;
|
||||
|
||||
/* point */
|
||||
struct tcm_pt {
|
||||
u16 x;
|
||||
u16 y;
|
||||
};
|
||||
|
||||
/* 1d or 2d area */
|
||||
struct tcm_area {
|
||||
bool is2d; /* whether area is 1d or 2d */
|
||||
struct tcm *tcm; /* parent */
|
||||
struct tcm_pt p0;
|
||||
struct tcm_pt p1;
|
||||
};
|
||||
|
||||
struct tcm {
|
||||
u16 width, height; /* container dimensions */
|
||||
int lut_id; /* Lookup table identifier */
|
||||
|
||||
unsigned int y_offset; /* offset to use for y coordinates */
|
||||
|
||||
/* 'pvt' structure shall contain any tcm details (attr) along with
|
||||
linked list of allocated areas and mutex for mutually exclusive access
|
||||
to the list. It may also contain copies of width and height to notice
|
||||
any changes to the publicly available width and height fields. */
|
||||
void *pvt;
|
||||
|
||||
/* function table */
|
||||
s32 (*reserve_2d)(struct tcm *tcm, u16 height, u16 width, u8 align,
|
||||
struct tcm_area *area);
|
||||
s32 (*reserve_1d)(struct tcm *tcm, u32 slots, struct tcm_area *area);
|
||||
s32 (*free) (struct tcm *tcm, struct tcm_area *area);
|
||||
void (*deinit) (struct tcm *tcm);
|
||||
};
|
||||
|
||||
/*=============================================================================
|
||||
BASIC TILER CONTAINER MANAGER INTERFACE
|
||||
=============================================================================*/
|
||||
|
||||
/*
|
||||
* NOTE:
|
||||
*
|
||||
* Since some basic parameter checking is done outside the TCM algorithms,
|
||||
* TCM implementations do NOT have to check the following:
|
||||
*
|
||||
* area pointer is NULL
|
||||
* width and height fits within container
|
||||
* number of pages is more than the size of the container
|
||||
*
|
||||
*/
|
||||
|
||||
struct tcm *sita_init(u16 width, u16 height, struct tcm_pt *attr);
|
||||
|
||||
|
||||
/**
|
||||
* Deinitialize tiler container manager.
|
||||
*
|
||||
* @param tcm Pointer to container manager.
|
||||
*
|
||||
* @return 0 on success, non-0 error value on error. The call
|
||||
* should free as much memory as possible and meaningful
|
||||
* even on failure. Some error codes: -ENODEV: invalid
|
||||
* manager.
|
||||
*/
|
||||
static inline void tcm_deinit(struct tcm *tcm)
|
||||
{
|
||||
if (tcm)
|
||||
tcm->deinit(tcm);
|
||||
}
|
||||
|
||||
/**
|
||||
* Reserves a 2D area in the container.
|
||||
*
|
||||
* @param tcm Pointer to container manager.
|
||||
* @param height Height(in pages) of area to be reserved.
|
||||
* @param width Width(in pages) of area to be reserved.
|
||||
* @param align Alignment requirement for top-left corner of area. Not
|
||||
* all values may be supported by the container manager,
|
||||
* but it must support 0 (1), 32 and 64.
|
||||
* 0 value is equivalent to 1.
|
||||
* @param area Pointer to where the reserved area should be stored.
|
||||
*
|
||||
* @return 0 on success. Non-0 error code on failure. Also,
|
||||
* the tcm field of the area will be set to NULL on
|
||||
* failure. Some error codes: -ENODEV: invalid manager,
|
||||
* -EINVAL: invalid area, -ENOMEM: not enough space for
|
||||
* allocation.
|
||||
*/
|
||||
static inline s32 tcm_reserve_2d(struct tcm *tcm, u16 width, u16 height,
|
||||
u16 align, struct tcm_area *area)
|
||||
{
|
||||
/* perform rudimentary error checking */
|
||||
s32 res = tcm == NULL ? -ENODEV :
|
||||
(area == NULL || width == 0 || height == 0 ||
|
||||
/* align must be a 2 power */
|
||||
(align & (align - 1))) ? -EINVAL :
|
||||
(height > tcm->height || width > tcm->width) ? -ENOMEM : 0;
|
||||
|
||||
if (!res) {
|
||||
area->is2d = true;
|
||||
res = tcm->reserve_2d(tcm, height, width, align, area);
|
||||
area->tcm = res ? NULL : tcm;
|
||||
}
|
||||
|
||||
return res;
|
||||
}
|
||||
|
||||
/**
|
||||
* Reserves a 1D area in the container.
|
||||
*
|
||||
* @param tcm Pointer to container manager.
|
||||
* @param slots Number of (contiguous) slots to reserve.
|
||||
* @param area Pointer to where the reserved area should be stored.
|
||||
*
|
||||
* @return 0 on success. Non-0 error code on failure. Also,
|
||||
* the tcm field of the area will be set to NULL on
|
||||
* failure. Some error codes: -ENODEV: invalid manager,
|
||||
* -EINVAL: invalid area, -ENOMEM: not enough space for
|
||||
* allocation.
|
||||
*/
|
||||
static inline s32 tcm_reserve_1d(struct tcm *tcm, u32 slots,
|
||||
struct tcm_area *area)
|
||||
{
|
||||
/* perform rudimentary error checking */
|
||||
s32 res = tcm == NULL ? -ENODEV :
|
||||
(area == NULL || slots == 0) ? -EINVAL :
|
||||
slots > (tcm->width * (u32) tcm->height) ? -ENOMEM : 0;
|
||||
|
||||
if (!res) {
|
||||
area->is2d = false;
|
||||
res = tcm->reserve_1d(tcm, slots, area);
|
||||
area->tcm = res ? NULL : tcm;
|
||||
}
|
||||
|
||||
return res;
|
||||
}
|
||||
|
||||
/**
|
||||
* Free a previously reserved area from the container.
|
||||
*
|
||||
* @param area Pointer to area reserved by a prior call to
|
||||
* tcm_reserve_1d or tcm_reserve_2d call, whether
|
||||
* it was successful or not. (Note: all fields of
|
||||
* the structure must match.)
|
||||
*
|
||||
* @return 0 on success. Non-0 error code on failure. Also, the tcm
|
||||
* field of the area is set to NULL on success to avoid subsequent
|
||||
* freeing. This call will succeed even if supplying
|
||||
* the area from a failed reserved call.
|
||||
*/
|
||||
static inline s32 tcm_free(struct tcm_area *area)
|
||||
{
|
||||
s32 res = 0; /* free succeeds by default */
|
||||
|
||||
if (area && area->tcm) {
|
||||
res = area->tcm->free(area->tcm, area);
|
||||
if (res == 0)
|
||||
area->tcm = NULL;
|
||||
}
|
||||
|
||||
return res;
|
||||
}
|
||||
|
||||
/*=============================================================================
|
||||
HELPER FUNCTION FOR ANY TILER CONTAINER MANAGER
|
||||
=============================================================================*/
|
||||
|
||||
/**
|
||||
* This method slices off the topmost 2D slice from the parent area, and stores
|
||||
* it in the 'slice' parameter. The 'parent' parameter will get modified to
|
||||
* contain the remaining portion of the area. If the whole parent area can
|
||||
* fit in a 2D slice, its tcm pointer is set to NULL to mark that it is no
|
||||
* longer a valid area.
|
||||
*
|
||||
* @param parent Pointer to a VALID parent area that will get modified
|
||||
* @param slice Pointer to the slice area that will get modified
|
||||
*/
|
||||
static inline void tcm_slice(struct tcm_area *parent, struct tcm_area *slice)
|
||||
{
|
||||
*slice = *parent;
|
||||
|
||||
/* check if we need to slice */
|
||||
if (slice->tcm && !slice->is2d &&
|
||||
slice->p0.y != slice->p1.y &&
|
||||
(slice->p0.x || (slice->p1.x != slice->tcm->width - 1))) {
|
||||
/* set end point of slice (start always remains) */
|
||||
slice->p1.x = slice->tcm->width - 1;
|
||||
slice->p1.y = (slice->p0.x) ? slice->p0.y : slice->p1.y - 1;
|
||||
/* adjust remaining area */
|
||||
parent->p0.x = 0;
|
||||
parent->p0.y = slice->p1.y + 1;
|
||||
} else {
|
||||
/* mark this as the last slice */
|
||||
parent->tcm = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
/* Verify if a tcm area is logically valid */
|
||||
static inline bool tcm_area_is_valid(struct tcm_area *area)
|
||||
{
|
||||
return area && area->tcm &&
|
||||
/* coordinate bounds */
|
||||
area->p1.x < area->tcm->width &&
|
||||
area->p1.y < area->tcm->height &&
|
||||
area->p0.y <= area->p1.y &&
|
||||
/* 1D coordinate relationship + p0.x check */
|
||||
((!area->is2d &&
|
||||
area->p0.x < area->tcm->width &&
|
||||
area->p0.x + area->p0.y * area->tcm->width <=
|
||||
area->p1.x + area->p1.y * area->tcm->width) ||
|
||||
/* 2D coordinate relationship */
|
||||
(area->is2d &&
|
||||
area->p0.x <= area->p1.x));
|
||||
}
|
||||
|
||||
/* see if a coordinate is within an area */
|
||||
static inline bool __tcm_is_in(struct tcm_pt *p, struct tcm_area *a)
|
||||
{
|
||||
u16 i;
|
||||
|
||||
if (a->is2d) {
|
||||
return p->x >= a->p0.x && p->x <= a->p1.x &&
|
||||
p->y >= a->p0.y && p->y <= a->p1.y;
|
||||
} else {
|
||||
i = p->x + p->y * a->tcm->width;
|
||||
return i >= a->p0.x + a->p0.y * a->tcm->width &&
|
||||
i <= a->p1.x + a->p1.y * a->tcm->width;
|
||||
}
|
||||
}
|
||||
|
||||
/* calculate area width */
|
||||
static inline u16 __tcm_area_width(struct tcm_area *area)
|
||||
{
|
||||
return area->p1.x - area->p0.x + 1;
|
||||
}
|
||||
|
||||
/* calculate area height */
|
||||
static inline u16 __tcm_area_height(struct tcm_area *area)
|
||||
{
|
||||
return area->p1.y - area->p0.y + 1;
|
||||
}
|
||||
|
||||
/* calculate number of slots in an area */
|
||||
static inline u16 __tcm_sizeof(struct tcm_area *area)
|
||||
{
|
||||
return area->is2d ?
|
||||
__tcm_area_width(area) * __tcm_area_height(area) :
|
||||
(area->p1.x - area->p0.x + 1) + (area->p1.y - area->p0.y) *
|
||||
area->tcm->width;
|
||||
}
|
||||
#define tcm_sizeof(area) __tcm_sizeof(&(area))
|
||||
#define tcm_awidth(area) __tcm_area_width(&(area))
|
||||
#define tcm_aheight(area) __tcm_area_height(&(area))
|
||||
#define tcm_is_in(pt, area) __tcm_is_in(&(pt), &(area))
|
||||
|
||||
/* limit a 1D area to the first N pages */
|
||||
static inline s32 tcm_1d_limit(struct tcm_area *a, u32 num_pg)
|
||||
{
|
||||
if (__tcm_sizeof(a) < num_pg)
|
||||
return -ENOMEM;
|
||||
if (!num_pg)
|
||||
return -EINVAL;
|
||||
|
||||
a->p1.x = (a->p0.x + num_pg - 1) % a->tcm->width;
|
||||
a->p1.y = a->p0.y + ((a->p0.x + num_pg - 1) / a->tcm->width);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Iterate through 2D slices of a valid area. Behaves
|
||||
* syntactically as a for(;;) statement.
|
||||
*
|
||||
* @param var Name of a local variable of type 'struct
|
||||
* tcm_area *' that will get modified to
|
||||
* contain each slice.
|
||||
* @param area Pointer to the VALID parent area. This
|
||||
* structure will not get modified
|
||||
* throughout the loop.
|
||||
*
|
||||
*/
|
||||
#define tcm_for_each_slice(var, area, safe) \
|
||||
for (safe = area, \
|
||||
tcm_slice(&safe, &var); \
|
||||
var.tcm; tcm_slice(&safe, &var))
|
||||
|
||||
#endif
|
||||
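Finally, a minimal sketch of the container-manager API documented in this header, as a caller might exercise it end to end. The 256x128 container size, the 64x32 reservation and the 32-slot alignment are placeholder values, not defaults from the driver.

/* Illustrative only: create a SiTA-managed container, reserve a 2D region,
 * then release everything.  All sizes below are placeholders.
 */
static int example_tcm_usage(void)
{
	struct tcm_area area;
	struct tcm *container = sita_init(256, 128, NULL);

	if (!container)
		return -ENOMEM;

	if (tcm_reserve_2d(container, 64, 32, 32, &area) == 0)
		tcm_free(&area);	/* clears area.tcm so it can't be freed twice */

	tcm_deinit(container);
	return 0;
}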