Fixed MTP to work with TWRP

This commit is contained in:
awab228 2018-06-19 23:16:04 +02:00
commit f6dfaef42e
50820 changed files with 20846062 additions and 0 deletions

View file

@@ -0,0 +1,15 @@
config DRM_UDL
tristate "DisplayLink"
depends on DRM
depends on USB_SUPPORT
depends on USB_ARCH_HAS_HCD
select USB
select FB_SYS_FILLRECT
select FB_SYS_COPYAREA
select FB_SYS_IMAGEBLIT
select FB_DEFERRED_IO
select DRM_KMS_HELPER
select DRM_KMS_FB_HELPER
help
This is a KMS driver for USB DisplayLink video adapters.
Say M/Y to add support for these devices via drm/kms interfaces.
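For reference, a minimal .config fragment that builds the driver as a module might look like the sketch below (illustrative only; the symbols are the ones named in the entry above, and platform-level dependencies such as USB_ARCH_HAS_HCD are normally satisfied by the host-controller configuration):

CONFIG_DRM=m
CONFIG_USB_SUPPORT=y
CONFIG_DRM_UDL=m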

View file

@@ -0,0 +1,6 @@
ccflags-y := -Iinclude/drm
udl-y := udl_drv.o udl_modeset.o udl_connector.o udl_encoder.o udl_main.o udl_fb.o udl_transfer.o udl_gem.o
obj-$(CONFIG_DRM_UDL) := udl.o

View file

@@ -0,0 +1,157 @@
/*
* Copyright (C) 2012 Red Hat
* based in parts on udlfb.c:
* Copyright (C) 2009 Roberto De Ioris <roberto@unbit.it>
* Copyright (C) 2009 Jaya Kumar <jayakumar.lkml@gmail.com>
* Copyright (C) 2009 Bernie Thompson <bernie@plugable.com>
*
* This file is subject to the terms and conditions of the GNU General Public
* License v2. See the file COPYING in the main directory of this archive for
* more details.
*/
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_crtc_helper.h>
#include "udl_drv.h"
/* dummy connector to just get EDID;
all UDL devices appear to have a DVI-D output */
static u8 *udl_get_edid(struct udl_device *udl)
{
u8 *block;
char *rbuf;
int ret, i;
block = kmalloc(EDID_LENGTH, GFP_KERNEL);
if (block == NULL)
return NULL;
rbuf = kmalloc(2, GFP_KERNEL);
if (rbuf == NULL)
goto error;
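/*
* The EDID is fetched one byte per vendor control read: wValue carries the
* byte index (i << 8) and the second byte of the two-byte reply holds the
* EDID data.
*/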
for (i = 0; i < EDID_LENGTH; i++) {
ret = usb_control_msg(udl->udev,
usb_rcvctrlpipe(udl->udev, 0), (0x02),
(0x80 | (0x02 << 5)), i << 8, 0xA1, rbuf, 2,
HZ);
if (ret < 1) {
DRM_ERROR("Read EDID byte %d failed err %x\n", i, ret);
goto error;
}
block[i] = rbuf[1];
}
kfree(rbuf);
return block;
error:
kfree(block);
kfree(rbuf);
return NULL;
}
static int udl_get_modes(struct drm_connector *connector)
{
struct udl_device *udl = connector->dev->dev_private;
struct edid *edid;
int ret;
edid = (struct edid *)udl_get_edid(udl);
if (!edid) {
drm_mode_connector_update_edid_property(connector, NULL);
return 0;
}
/*
* We only read the main block, but if the monitor reports extension
* blocks then the drm edid code expects them to be present, so patch
* the extension count to 0.
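* (The EDID checksum byte makes the 128-byte block sum to 0 mod 256, so
* zeroing the extensions byte means its old value has to be added back to
* the checksum.)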
*/
edid->checksum += edid->extensions;
edid->extensions = 0;
drm_mode_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
kfree(edid);
return ret;
}
static int udl_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct udl_device *udl = connector->dev->dev_private;
if (!udl->sku_pixel_limit)
return 0;
if (mode->vdisplay * mode->hdisplay > udl->sku_pixel_limit)
return MODE_VIRTUAL_Y;
return 0;
}
static enum drm_connector_status
udl_detect(struct drm_connector *connector, bool force)
{
if (drm_device_is_unplugged(connector->dev))
return connector_status_disconnected;
return connector_status_connected;
}
static struct drm_encoder*
udl_best_single_encoder(struct drm_connector *connector)
{
int enc_id = connector->encoder_ids[0];
return drm_encoder_find(connector->dev, enc_id);
}
static int udl_connector_set_property(struct drm_connector *connector,
struct drm_property *property,
uint64_t val)
{
return 0;
}
static void udl_connector_destroy(struct drm_connector *connector)
{
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
kfree(connector);
}
static struct drm_connector_helper_funcs udl_connector_helper_funcs = {
.get_modes = udl_get_modes,
.mode_valid = udl_mode_valid,
.best_encoder = udl_best_single_encoder,
};
static struct drm_connector_funcs udl_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.detect = udl_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = udl_connector_destroy,
.set_property = udl_connector_set_property,
};
int udl_connector_init(struct drm_device *dev, struct drm_encoder *encoder)
{
struct drm_connector *connector;
connector = kzalloc(sizeof(struct drm_connector), GFP_KERNEL);
if (!connector)
return -ENOMEM;
drm_connector_init(dev, connector, &udl_connector_funcs, DRM_MODE_CONNECTOR_DVII);
drm_connector_helper_add(connector, &udl_connector_helper_funcs);
drm_connector_register(connector);
drm_mode_connector_attach_encoder(connector, encoder);
drm_object_attach_property(&connector->base,
dev->mode_config.dirty_info_property,
1);
return 0;
}

View file

@@ -0,0 +1,139 @@
/*
* Copyright (C) 2012 Red Hat
*
* This file is subject to the terms and conditions of the GNU General Public
* License v2. See the file COPYING in the main directory of this archive for
* more details.
*/
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include "udl_drv.h"
static int udl_driver_set_busid(struct drm_device *d, struct drm_master *m)
{
return 0;
}
static const struct vm_operations_struct udl_gem_vm_ops = {
.fault = udl_gem_fault,
.open = drm_gem_vm_open,
.close = drm_gem_vm_close,
};
static const struct file_operations udl_driver_fops = {
.owner = THIS_MODULE,
.open = drm_open,
.mmap = udl_drm_gem_mmap,
.poll = drm_poll,
.read = drm_read,
.unlocked_ioctl = drm_ioctl,
.release = drm_release,
#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
#endif
.llseek = noop_llseek,
};
static struct drm_driver driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
.load = udl_driver_load,
.unload = udl_driver_unload,
.set_busid = udl_driver_set_busid,
/* gem hooks */
.gem_free_object = udl_gem_free_object,
.gem_vm_ops = &udl_gem_vm_ops,
.dumb_create = udl_dumb_create,
.dumb_map_offset = udl_gem_mmap,
.dumb_destroy = drm_gem_dumb_destroy,
.fops = &udl_driver_fops,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_import = udl_gem_prime_import,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
};
static int udl_usb_probe(struct usb_interface *interface,
const struct usb_device_id *id)
{
struct usb_device *udev = interface_to_usbdev(interface);
struct drm_device *dev;
int r;
dev = drm_dev_alloc(&driver, &interface->dev);
if (!dev)
return -ENOMEM;
r = drm_dev_register(dev, (unsigned long)udev);
if (r)
goto err_free;
usb_set_intfdata(interface, dev);
DRM_INFO("Initialized udl on minor %d\n", dev->primary->index);
return 0;
err_free:
drm_dev_unref(dev);
return r;
}
static void udl_usb_disconnect(struct usb_interface *interface)
{
struct drm_device *dev = usb_get_intfdata(interface);
drm_kms_helper_poll_disable(dev);
drm_connector_unplug_all(dev);
udl_fbdev_unplug(dev);
udl_drop_usb(dev);
drm_unplug_dev(dev);
}
/*
* There are many DisplayLink-based graphics products, all with unique PIDs.
* So we match on DisplayLink's VID + Vendor-Defined Interface Class (0xff)
* We also require a match on SubClass (0x00) and Protocol (0x00),
* which is compatible with all known USB 2.0 era graphics chips and firmware,
* but allows DisplayLink to increment those for any future incompatible chips
*/
static struct usb_device_id id_table[] = {
{.idVendor = 0x17e9, .bInterfaceClass = 0xff,
.bInterfaceSubClass = 0x00,
.bInterfaceProtocol = 0x00,
.match_flags = USB_DEVICE_ID_MATCH_VENDOR |
USB_DEVICE_ID_MATCH_INT_CLASS |
USB_DEVICE_ID_MATCH_INT_SUBCLASS |
USB_DEVICE_ID_MATCH_INT_PROTOCOL,},
{},
};
MODULE_DEVICE_TABLE(usb, id_table);
static struct usb_driver udl_driver = {
.name = "udl",
.probe = udl_usb_probe,
.disconnect = udl_usb_disconnect,
.id_table = id_table,
};
static int __init udl_init(void)
{
return usb_register(&udl_driver);
}
static void __exit udl_exit(void)
{
usb_deregister(&udl_driver);
}
module_init(udl_init);
module_exit(udl_exit);
MODULE_LICENSE("GPL");

View file

@@ -0,0 +1,146 @@
/*
* Copyright (C) 2012 Red Hat
*
* based in parts on udlfb.c:
* Copyright (C) 2009 Roberto De Ioris <roberto@unbit.it>
* Copyright (C) 2009 Jaya Kumar <jayakumar.lkml@gmail.com>
* Copyright (C) 2009 Bernie Thompson <bernie@plugable.com>
*
* This file is subject to the terms and conditions of the GNU General Public
* License v2. See the file COPYING in the main directory of this archive for
* more details.
*/
#ifndef UDL_DRV_H
#define UDL_DRV_H
#include <linux/usb.h>
#include <drm/drm_gem.h>
#define DRIVER_NAME "udl"
#define DRIVER_DESC "DisplayLink"
#define DRIVER_DATE "20120220"
#define DRIVER_MAJOR 0
#define DRIVER_MINOR 0
#define DRIVER_PATCHLEVEL 1
struct udl_device;
struct urb_node {
struct list_head entry;
struct udl_device *dev;
struct delayed_work release_urb_work;
struct urb *urb;
};
struct urb_list {
struct list_head list;
spinlock_t lock;
struct semaphore limit_sem;
int available;
int count;
size_t size;
};
struct udl_fbdev;
struct udl_device {
struct device *dev;
struct drm_device *ddev;
struct usb_device *udev;
int sku_pixel_limit;
struct urb_list urbs;
atomic_t lost_pixels; /* 1 = a render op failed. Need screen refresh */
struct udl_fbdev *fbdev;
char mode_buf[1024];
uint32_t mode_buf_len;
atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
atomic_t bytes_identical; /* saved effort with backbuffer comparison */
atomic_t bytes_sent; /* to usb, after compression including overhead */
atomic_t cpu_kcycles_used; /* transpired during pixel processing */
};
struct udl_gem_object {
struct drm_gem_object base;
struct page **pages;
void *vmapping;
struct sg_table *sg;
};
#define to_udl_bo(x) container_of(x, struct udl_gem_object, base)
struct udl_framebuffer {
struct drm_framebuffer base;
struct udl_gem_object *obj;
bool active_16; /* active on the 16-bit channel */
int x1, y1, x2, y2; /* dirty rect */
spinlock_t dirty_lock;
};
#define to_udl_fb(x) container_of(x, struct udl_framebuffer, base)
/* modeset */
int udl_modeset_init(struct drm_device *dev);
void udl_modeset_cleanup(struct drm_device *dev);
int udl_connector_init(struct drm_device *dev, struct drm_encoder *encoder);
struct drm_encoder *udl_encoder_init(struct drm_device *dev);
struct urb *udl_get_urb(struct drm_device *dev);
int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len);
void udl_urb_completion(struct urb *urb);
int udl_driver_load(struct drm_device *dev, unsigned long flags);
int udl_driver_unload(struct drm_device *dev);
int udl_fbdev_init(struct drm_device *dev);
void udl_fbdev_cleanup(struct drm_device *dev);
void udl_fbdev_unplug(struct drm_device *dev);
struct drm_framebuffer *
udl_fb_user_fb_create(struct drm_device *dev,
struct drm_file *file,
struct drm_mode_fb_cmd2 *mode_cmd);
int udl_render_hline(struct drm_device *dev, int bpp, struct urb **urb_ptr,
const char *front, char **urb_buf_ptr,
u32 byte_offset, u32 device_byte_offset, u32 byte_width,
int *ident_ptr, int *sent_ptr);
int udl_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
int udl_gem_mmap(struct drm_file *file_priv, struct drm_device *dev,
uint32_t handle, uint64_t *offset);
void udl_gem_free_object(struct drm_gem_object *gem_obj);
struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev,
size_t size);
struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev,
struct dma_buf *dma_buf);
int udl_gem_vmap(struct udl_gem_object *obj);
void udl_gem_vunmap(struct udl_gem_object *obj);
int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
int width, int height);
int udl_drop_usb(struct drm_device *dev);
#define CMD_WRITE_RAW8 "\xAF\x60" /**< 8 bit raw write command. */
#define CMD_WRITE_RL8 "\xAF\x61" /**< 8 bit run length command. */
#define CMD_WRITE_COPY8 "\xAF\x62" /**< 8 bit copy command. */
#define CMD_WRITE_RLX8 "\xAF\x63" /**< 8 bit extended run length command. */
#define CMD_WRITE_RAW16 "\xAF\x68" /**< 16 bit raw write command. */
#define CMD_WRITE_RL16 "\xAF\x69" /**< 16 bit run length command. */
#define CMD_WRITE_COPY16 "\xAF\x6A" /**< 16 bit copy command. */
#define CMD_WRITE_RLX16 "\xAF\x6B" /**< 16 bit extended run length command. */
#endif

View file

@@ -0,0 +1,80 @@
/*
* Copyright (C) 2012 Red Hat
* based in parts on udlfb.c:
* Copyright (C) 2009 Roberto De Ioris <roberto@unbit.it>
* Copyright (C) 2009 Jaya Kumar <jayakumar.lkml@gmail.com>
* Copyright (C) 2009 Bernie Thompson <bernie@plugable.com>
*
* This file is subject to the terms and conditions of the GNU General Public
* License v2. See the file COPYING in the main directory of this archive for
* more details.
*/
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include "udl_drv.h"
/* dummy encoder */
static void udl_enc_destroy(struct drm_encoder *encoder)
{
drm_encoder_cleanup(encoder);
kfree(encoder);
}
static void udl_encoder_disable(struct drm_encoder *encoder)
{
}
static bool udl_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
return true;
}
static void udl_encoder_prepare(struct drm_encoder *encoder)
{
}
static void udl_encoder_commit(struct drm_encoder *encoder)
{
}
static void udl_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
}
static void
udl_encoder_dpms(struct drm_encoder *encoder, int mode)
{
}
static const struct drm_encoder_helper_funcs udl_helper_funcs = {
.dpms = udl_encoder_dpms,
.mode_fixup = udl_mode_fixup,
.prepare = udl_encoder_prepare,
.mode_set = udl_encoder_mode_set,
.commit = udl_encoder_commit,
.disable = udl_encoder_disable,
};
static const struct drm_encoder_funcs udl_enc_funcs = {
.destroy = udl_enc_destroy,
};
struct drm_encoder *udl_encoder_init(struct drm_device *dev)
{
struct drm_encoder *encoder;
encoder = kzalloc(sizeof(struct drm_encoder), GFP_KERNEL);
if (!encoder)
return NULL;
drm_encoder_init(dev, encoder, &udl_enc_funcs, DRM_MODE_ENCODER_TMDS);
drm_encoder_helper_add(encoder, &udl_helper_funcs);
encoder->possible_crtcs = 1;
return encoder;
}

View file

@@ -0,0 +1,665 @@
/*
* Copyright (C) 2012 Red Hat
*
* based in parts on udlfb.c:
* Copyright (C) 2009 Roberto De Ioris <roberto@unbit.it>
* Copyright (C) 2009 Jaya Kumar <jayakumar.lkml@gmail.com>
* Copyright (C) 2009 Bernie Thompson <bernie@plugable.com>
*
* This file is subject to the terms and conditions of the GNU General Public
* License v2. See the file COPYING in the main directory of this archive for
* more details.
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/fb.h>
#include <linux/dma-buf.h>
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include "udl_drv.h"
#include <drm/drm_fb_helper.h>
#define DL_DEFIO_WRITE_DELAY (HZ/20) /* fb_deferred_io.delay in jiffies */
static int fb_defio = 0; /* Optionally enable experimental fb_defio mmap support */
static int fb_bpp = 16;
module_param(fb_bpp, int, S_IWUSR | S_IRUSR | S_IWGRP | S_IRGRP);
module_param(fb_defio, int, S_IWUSR | S_IRUSR | S_IWGRP | S_IRGRP);
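/*
* fb_bpp selects the depth handed to the fbdev emulation (16 by default);
* fb_defio, when set, enables deferred-I/O tracking of mmap'ed fbdev pages
* so that written pages are flushed to the device on a timer.
*/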
struct udl_fbdev {
struct drm_fb_helper helper;
struct udl_framebuffer ufb;
struct list_head fbdev_list;
int fb_count;
};
#define DL_ALIGN_UP(x, a) ALIGN(x, a)
#define DL_ALIGN_DOWN(x, a) ALIGN(x-(a-1), a)
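/* e.g. on a 64-bit kernel (a == 8): DL_ALIGN_DOWN(13, 8) == 8 and
* DL_ALIGN_UP(13, 8) == 16, i.e. damage x/width get widened to whole
* unsigned longs before rendering. */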
/** Read the red component (0..255) of a 32 bpp colour. */
#define DLO_RGB_GETRED(col) (uint8_t)((col) & 0xFF)
/** Read the green component (0..255) of a 32 bpp colour. */
#define DLO_RGB_GETGRN(col) (uint8_t)(((col) >> 8) & 0xFF)
/** Read the blue component (0..255) of a 32 bpp colour. */
#define DLO_RGB_GETBLU(col) (uint8_t)(((col) >> 16) & 0xFF)
/** Return red/green component of a 16 bpp colour number. */
#define DLO_RG16(red, grn) (uint8_t)((((red) & 0xF8) | ((grn) >> 5)) & 0xFF)
/** Return green/blue component of a 16 bpp colour number. */
#define DLO_GB16(grn, blu) (uint8_t)(((((grn) & 0x1C) << 3) | ((blu) >> 3)) & 0xFF)
/** Return 8 bpp colour number from red, green and blue components. */
#define DLO_RGB8(red, grn, blu) ((((red) << 5) | (((grn) & 3) << 3) | ((blu) & 7)) & 0xFF)
#if 0
static uint8_t rgb8(uint32_t col)
{
uint8_t red = DLO_RGB_GETRED(col);
uint8_t grn = DLO_RGB_GETGRN(col);
uint8_t blu = DLO_RGB_GETBLU(col);
return DLO_RGB8(red, grn, blu);
}
static uint16_t rgb16(uint32_t col)
{
uint8_t red = DLO_RGB_GETRED(col);
uint8_t grn = DLO_RGB_GETGRN(col);
uint8_t blu = DLO_RGB_GETBLU(col);
return (DLO_RG16(red, grn) << 8) + DLO_GB16(grn, blu);
}
#endif
/*
* NOTE: fb_defio.c is holding info->fbdefio.mutex
* Touching ANY framebuffer memory that triggers a page fault
* in fb_defio will cause a deadlock, when it also tries to
* grab the same mutex.
*/
static void udlfb_dpy_deferred_io(struct fb_info *info,
struct list_head *pagelist)
{
struct page *cur;
struct fb_deferred_io *fbdefio = info->fbdefio;
struct udl_fbdev *ufbdev = info->par;
struct drm_device *dev = ufbdev->ufb.base.dev;
struct udl_device *udl = dev->dev_private;
struct urb *urb;
char *cmd;
cycles_t start_cycles, end_cycles;
int bytes_sent = 0;
int bytes_identical = 0;
int bytes_rendered = 0;
if (!fb_defio)
return;
start_cycles = get_cycles();
urb = udl_get_urb(dev);
if (!urb)
return;
cmd = urb->transfer_buffer;
/* walk the written page list and render each to device */
list_for_each_entry(cur, &fbdefio->pagelist, lru) {
if (udl_render_hline(dev, (ufbdev->ufb.base.bits_per_pixel / 8),
&urb, (char *) info->fix.smem_start,
&cmd, cur->index << PAGE_SHIFT,
cur->index << PAGE_SHIFT,
PAGE_SIZE, &bytes_identical, &bytes_sent))
goto error;
bytes_rendered += PAGE_SIZE;
}
if (cmd > (char *) urb->transfer_buffer) {
/* Send partial buffer remaining before exiting */
int len = cmd - (char *) urb->transfer_buffer;
udl_submit_urb(dev, urb, len);
bytes_sent += len;
} else
udl_urb_completion(urb);
error:
atomic_add(bytes_sent, &udl->bytes_sent);
atomic_add(bytes_identical, &udl->bytes_identical);
atomic_add(bytes_rendered, &udl->bytes_rendered);
end_cycles = get_cycles();
atomic_add(((unsigned int) ((end_cycles - start_cycles)
>> 10)), /* Kcycles */
&udl->cpu_kcycles_used);
}
int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
int width, int height)
{
struct drm_device *dev = fb->base.dev;
struct udl_device *udl = dev->dev_private;
int i, ret;
char *cmd;
cycles_t start_cycles, end_cycles;
int bytes_sent = 0;
int bytes_identical = 0;
struct urb *urb;
int aligned_x;
int bpp = (fb->base.bits_per_pixel / 8);
int x2, y2;
bool store_for_later = false;
unsigned long flags;
if (!fb->active_16)
return 0;
if (!fb->obj->vmapping) {
ret = udl_gem_vmap(fb->obj);
if (ret == -ENOMEM) {
DRM_ERROR("failed to vmap fb\n");
return 0;
}
if (!fb->obj->vmapping) {
DRM_ERROR("failed to vmapping\n");
return 0;
}
}
aligned_x = DL_ALIGN_DOWN(x, sizeof(unsigned long));
width = DL_ALIGN_UP(width + (x-aligned_x), sizeof(unsigned long));
x = aligned_x;
if ((width <= 0) ||
(x + width > fb->base.width) ||
(y + height > fb->base.height))
return -EINVAL;
/* if we are in atomic just store the info
can't test inside spin lock */
if (in_atomic())
store_for_later = true;
x2 = x + width - 1;
y2 = y + height - 1;
spin_lock_irqsave(&fb->dirty_lock, flags);
if (fb->y1 < y)
y = fb->y1;
if (fb->y2 > y2)
y2 = fb->y2;
if (fb->x1 < x)
x = fb->x1;
if (fb->x2 > x2)
x2 = fb->x2;
if (store_for_later) {
fb->x1 = x;
fb->x2 = x2;
fb->y1 = y;
fb->y2 = y2;
spin_unlock_irqrestore(&fb->dirty_lock, flags);
return 0;
}
fb->x1 = fb->y1 = INT_MAX;
fb->x2 = fb->y2 = 0;
spin_unlock_irqrestore(&fb->dirty_lock, flags);
start_cycles = get_cycles();
urb = udl_get_urb(dev);
if (!urb)
return 0;
cmd = urb->transfer_buffer;
for (i = y; i <= y2 ; i++) {
const int line_offset = fb->base.pitches[0] * i;
const int byte_offset = line_offset + (x * bpp);
const int dev_byte_offset = (fb->base.width * bpp * i) + (x * bpp);
if (udl_render_hline(dev, bpp, &urb,
(char *) fb->obj->vmapping,
&cmd, byte_offset, dev_byte_offset,
(x2 - x + 1) * bpp,
&bytes_identical, &bytes_sent))
goto error;
}
if (cmd > (char *) urb->transfer_buffer) {
/* Send partial buffer remaining before exiting */
int len = cmd - (char *) urb->transfer_buffer;
ret = udl_submit_urb(dev, urb, len);
bytes_sent += len;
} else
udl_urb_completion(urb);
error:
atomic_add(bytes_sent, &udl->bytes_sent);
atomic_add(bytes_identical, &udl->bytes_identical);
atomic_add(width*height*bpp, &udl->bytes_rendered);
end_cycles = get_cycles();
atomic_add(((unsigned int) ((end_cycles - start_cycles)
>> 10)), /* Kcycles */
&udl->cpu_kcycles_used);
return 0;
}
static int udl_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
unsigned long start = vma->vm_start;
unsigned long size = vma->vm_end - vma->vm_start;
unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
unsigned long page, pos;
if (offset + size > info->fix.smem_len)
return -EINVAL;
pos = (unsigned long)info->fix.smem_start + offset;
pr_notice("mmap() framebuffer addr:%lu size:%lu\n",
pos, size);
while (size > 0) {
page = vmalloc_to_pfn((void *)pos);
if (remap_pfn_range(vma, start, page, PAGE_SIZE, PAGE_SHARED))
return -EAGAIN;
start += PAGE_SIZE;
pos += PAGE_SIZE;
if (size > PAGE_SIZE)
size -= PAGE_SIZE;
else
size = 0;
}
/* VM_IO | VM_DONTEXPAND | VM_DONTDUMP are set by remap_pfn_range() */
return 0;
}
static void udl_fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
struct udl_fbdev *ufbdev = info->par;
sys_fillrect(info, rect);
udl_handle_damage(&ufbdev->ufb, rect->dx, rect->dy, rect->width,
rect->height);
}
static void udl_fb_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
struct udl_fbdev *ufbdev = info->par;
sys_copyarea(info, region);
udl_handle_damage(&ufbdev->ufb, region->dx, region->dy, region->width,
region->height);
}
static void udl_fb_imageblit(struct fb_info *info, const struct fb_image *image)
{
struct udl_fbdev *ufbdev = info->par;
sys_imageblit(info, image);
udl_handle_damage(&ufbdev->ufb, image->dx, image->dy, image->width,
image->height);
}
/*
* It's common for several clients to have framebuffer open simultaneously.
* e.g. both fbcon and X. Makes things interesting.
* Assumes caller is holding info->lock (for open and release at least)
*/
static int udl_fb_open(struct fb_info *info, int user)
{
struct udl_fbdev *ufbdev = info->par;
struct drm_device *dev = ufbdev->ufb.base.dev;
struct udl_device *udl = dev->dev_private;
/* If the USB device is gone, we don't accept new opens */
if (drm_device_is_unplugged(udl->ddev))
return -ENODEV;
ufbdev->fb_count++;
if (fb_defio && (info->fbdefio == NULL)) {
/* enable defio at last moment if not disabled by client */
struct fb_deferred_io *fbdefio;
fbdefio = kmalloc(sizeof(struct fb_deferred_io), GFP_KERNEL);
if (fbdefio) {
fbdefio->delay = DL_DEFIO_WRITE_DELAY;
fbdefio->deferred_io = udlfb_dpy_deferred_io;
}
info->fbdefio = fbdefio;
fb_deferred_io_init(info);
}
pr_notice("open /dev/fb%d user=%d fb_info=%p count=%d\n",
info->node, user, info, ufbdev->fb_count);
return 0;
}
/*
* Assumes caller is holding info->lock mutex (for open and release at least)
*/
static int udl_fb_release(struct fb_info *info, int user)
{
struct udl_fbdev *ufbdev = info->par;
ufbdev->fb_count--;
if ((ufbdev->fb_count == 0) && (info->fbdefio)) {
fb_deferred_io_cleanup(info);
kfree(info->fbdefio);
info->fbdefio = NULL;
info->fbops->fb_mmap = udl_fb_mmap;
}
pr_warn("released /dev/fb%d user=%d count=%d\n",
info->node, user, ufbdev->fb_count);
return 0;
}
static struct fb_ops udlfb_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
.fb_fillrect = udl_fb_fillrect,
.fb_copyarea = udl_fb_copyarea,
.fb_imageblit = udl_fb_imageblit,
.fb_pan_display = drm_fb_helper_pan_display,
.fb_blank = drm_fb_helper_blank,
.fb_setcmap = drm_fb_helper_setcmap,
.fb_debug_enter = drm_fb_helper_debug_enter,
.fb_debug_leave = drm_fb_helper_debug_leave,
.fb_mmap = udl_fb_mmap,
.fb_open = udl_fb_open,
.fb_release = udl_fb_release,
};
static int udl_user_framebuffer_dirty(struct drm_framebuffer *fb,
struct drm_file *file,
unsigned flags, unsigned color,
struct drm_clip_rect *clips,
unsigned num_clips)
{
struct udl_framebuffer *ufb = to_udl_fb(fb);
int i;
int ret = 0;
drm_modeset_lock_all(fb->dev);
if (!ufb->active_16)
goto unlock;
if (ufb->obj->base.import_attach) {
ret = dma_buf_begin_cpu_access(ufb->obj->base.import_attach->dmabuf,
0, ufb->obj->base.size,
DMA_FROM_DEVICE);
if (ret)
goto unlock;
}
for (i = 0; i < num_clips; i++) {
ret = udl_handle_damage(ufb, clips[i].x1, clips[i].y1,
clips[i].x2 - clips[i].x1,
clips[i].y2 - clips[i].y1);
if (ret)
break;
}
if (ufb->obj->base.import_attach) {
dma_buf_end_cpu_access(ufb->obj->base.import_attach->dmabuf,
0, ufb->obj->base.size,
DMA_FROM_DEVICE);
}
unlock:
drm_modeset_unlock_all(fb->dev);
return ret;
}
static void udl_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct udl_framebuffer *ufb = to_udl_fb(fb);
if (ufb->obj)
drm_gem_object_unreference_unlocked(&ufb->obj->base);
drm_framebuffer_cleanup(fb);
kfree(ufb);
}
static const struct drm_framebuffer_funcs udlfb_funcs = {
.destroy = udl_user_framebuffer_destroy,
.dirty = udl_user_framebuffer_dirty,
};
static int
udl_framebuffer_init(struct drm_device *dev,
struct udl_framebuffer *ufb,
struct drm_mode_fb_cmd2 *mode_cmd,
struct udl_gem_object *obj)
{
int ret;
spin_lock_init(&ufb->dirty_lock);
ufb->obj = obj;
drm_helper_mode_fill_fb_struct(&ufb->base, mode_cmd);
ret = drm_framebuffer_init(dev, &ufb->base, &udlfb_funcs);
return ret;
}
static int udlfb_create(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
struct udl_fbdev *ufbdev =
container_of(helper, struct udl_fbdev, helper);
struct drm_device *dev = ufbdev->helper.dev;
struct fb_info *info;
struct device *device = dev->dev;
struct drm_framebuffer *fb;
struct drm_mode_fb_cmd2 mode_cmd;
struct udl_gem_object *obj;
uint32_t size;
int ret = 0;
if (sizes->surface_bpp == 24)
sizes->surface_bpp = 32;
mode_cmd.width = sizes->surface_width;
mode_cmd.height = sizes->surface_height;
mode_cmd.pitches[0] = mode_cmd.width * ((sizes->surface_bpp + 7) / 8);
mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
sizes->surface_depth);
size = mode_cmd.pitches[0] * mode_cmd.height;
size = ALIGN(size, PAGE_SIZE);
obj = udl_gem_alloc_object(dev, size);
if (!obj)
goto out;
ret = udl_gem_vmap(obj);
if (ret) {
DRM_ERROR("failed to vmap fb\n");
goto out_gfree;
}
info = framebuffer_alloc(0, device);
if (!info) {
ret = -ENOMEM;
goto out_gfree;
}
info->par = ufbdev;
ret = udl_framebuffer_init(dev, &ufbdev->ufb, &mode_cmd, obj);
if (ret)
goto out_gfree;
fb = &ufbdev->ufb.base;
ufbdev->helper.fb = fb;
ufbdev->helper.fbdev = info;
strcpy(info->fix.id, "udldrmfb");
info->screen_base = ufbdev->ufb.obj->vmapping;
info->fix.smem_len = size;
info->fix.smem_start = (unsigned long)ufbdev->ufb.obj->vmapping;
info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
info->fbops = &udlfb_ops;
drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
drm_fb_helper_fill_var(info, &ufbdev->helper, sizes->fb_width, sizes->fb_height);
ret = fb_alloc_cmap(&info->cmap, 256, 0);
if (ret) {
ret = -ENOMEM;
goto out_gfree;
}
DRM_DEBUG_KMS("allocated %dx%d vmal %p\n",
fb->width, fb->height,
ufbdev->ufb.obj->vmapping);
return ret;
out_gfree:
drm_gem_object_unreference(&ufbdev->ufb.obj->base);
out:
return ret;
}
static const struct drm_fb_helper_funcs udl_fb_helper_funcs = {
.fb_probe = udlfb_create,
};
static void udl_fbdev_destroy(struct drm_device *dev,
struct udl_fbdev *ufbdev)
{
struct fb_info *info;
if (ufbdev->helper.fbdev) {
info = ufbdev->helper.fbdev;
unregister_framebuffer(info);
if (info->cmap.len)
fb_dealloc_cmap(&info->cmap);
framebuffer_release(info);
}
drm_fb_helper_fini(&ufbdev->helper);
drm_framebuffer_unregister_private(&ufbdev->ufb.base);
drm_framebuffer_cleanup(&ufbdev->ufb.base);
drm_gem_object_unreference_unlocked(&ufbdev->ufb.obj->base);
}
int udl_fbdev_init(struct drm_device *dev)
{
struct udl_device *udl = dev->dev_private;
int bpp_sel = fb_bpp;
struct udl_fbdev *ufbdev;
int ret;
ufbdev = kzalloc(sizeof(struct udl_fbdev), GFP_KERNEL);
if (!ufbdev)
return -ENOMEM;
udl->fbdev = ufbdev;
drm_fb_helper_prepare(dev, &ufbdev->helper, &udl_fb_helper_funcs);
ret = drm_fb_helper_init(dev, &ufbdev->helper,
1, 1);
if (ret) {
kfree(ufbdev);
return ret;
}
drm_fb_helper_single_add_all_connectors(&ufbdev->helper);
/* disable all the possible outputs/crtcs before entering KMS mode */
drm_helper_disable_unused_functions(dev);
drm_fb_helper_initial_config(&ufbdev->helper, bpp_sel);
return 0;
}
void udl_fbdev_cleanup(struct drm_device *dev)
{
struct udl_device *udl = dev->dev_private;
if (!udl->fbdev)
return;
udl_fbdev_destroy(dev, udl->fbdev);
kfree(udl->fbdev);
udl->fbdev = NULL;
}
void udl_fbdev_unplug(struct drm_device *dev)
{
struct udl_device *udl = dev->dev_private;
struct udl_fbdev *ufbdev;
if (!udl->fbdev)
return;
ufbdev = udl->fbdev;
if (ufbdev->helper.fbdev) {
struct fb_info *info;
info = ufbdev->helper.fbdev;
unlink_framebuffer(info);
}
}
struct drm_framebuffer *
udl_fb_user_fb_create(struct drm_device *dev,
struct drm_file *file,
struct drm_mode_fb_cmd2 *mode_cmd)
{
struct drm_gem_object *obj;
struct udl_framebuffer *ufb;
int ret;
uint32_t size;
obj = drm_gem_object_lookup(dev, file, mode_cmd->handles[0]);
if (obj == NULL)
return ERR_PTR(-ENOENT);
size = mode_cmd->pitches[0] * mode_cmd->height;
size = ALIGN(size, PAGE_SIZE);
if (size > obj->size) {
DRM_ERROR("object size not sufficient for fb %d %zu %d %d\n", size, obj->size, mode_cmd->pitches[0], mode_cmd->height);
return ERR_PTR(-ENOMEM);
}
ufb = kzalloc(sizeof(*ufb), GFP_KERNEL);
if (ufb == NULL)
return ERR_PTR(-ENOMEM);
ret = udl_framebuffer_init(dev, ufb, mode_cmd, to_udl_bo(obj));
if (ret) {
kfree(ufb);
return ERR_PTR(-EINVAL);
}
return &ufb->base;
}

View file

@@ -0,0 +1,292 @@
/*
* Copyright (C) 2012 Red Hat
*
* This file is subject to the terms and conditions of the GNU General Public
* License v2. See the file COPYING in the main directory of this archive for
* more details.
*/
#include <drm/drmP.h>
#include "udl_drv.h"
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev,
size_t size)
{
struct udl_gem_object *obj;
obj = kzalloc(sizeof(*obj), GFP_KERNEL);
if (obj == NULL)
return NULL;
if (drm_gem_object_init(dev, &obj->base, size) != 0) {
kfree(obj);
return NULL;
}
return obj;
}
static int
udl_gem_create(struct drm_file *file,
struct drm_device *dev,
uint64_t size,
uint32_t *handle_p)
{
struct udl_gem_object *obj;
int ret;
u32 handle;
size = roundup(size, PAGE_SIZE);
obj = udl_gem_alloc_object(dev, size);
if (obj == NULL)
return -ENOMEM;
ret = drm_gem_handle_create(file, &obj->base, &handle);
if (ret) {
drm_gem_object_release(&obj->base);
kfree(obj);
return ret;
}
drm_gem_object_unreference(&obj->base);
*handle_p = handle;
return 0;
}
int udl_dumb_create(struct drm_file *file,
struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
args->pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
args->size = args->pitch * args->height;
return udl_gem_create(file, dev,
args->size, &args->handle);
}
int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
int ret;
ret = drm_gem_mmap(filp, vma);
if (ret)
return ret;
vma->vm_flags &= ~VM_PFNMAP;
vma->vm_flags |= VM_MIXEDMAP;
return ret;
}
int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct udl_gem_object *obj = to_udl_bo(vma->vm_private_data);
struct page *page;
unsigned int page_offset;
int ret = 0;
page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
PAGE_SHIFT;
if (!obj->pages)
return VM_FAULT_SIGBUS;
page = obj->pages[page_offset];
ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page);
switch (ret) {
case -EAGAIN:
case 0:
case -ERESTARTSYS:
return VM_FAULT_NOPAGE;
case -ENOMEM:
return VM_FAULT_OOM;
default:
return VM_FAULT_SIGBUS;
}
}
static int udl_gem_get_pages(struct udl_gem_object *obj)
{
struct page **pages;
if (obj->pages)
return 0;
pages = drm_gem_get_pages(&obj->base);
if (IS_ERR(pages))
return PTR_ERR(pages);
obj->pages = pages;
return 0;
}
static void udl_gem_put_pages(struct udl_gem_object *obj)
{
if (obj->base.import_attach) {
drm_free_large(obj->pages);
obj->pages = NULL;
return;
}
drm_gem_put_pages(&obj->base, obj->pages, false, false);
obj->pages = NULL;
}
int udl_gem_vmap(struct udl_gem_object *obj)
{
int page_count = obj->base.size / PAGE_SIZE;
int ret;
if (obj->base.import_attach) {
obj->vmapping = dma_buf_vmap(obj->base.import_attach->dmabuf);
if (!obj->vmapping)
return -ENOMEM;
return 0;
}
ret = udl_gem_get_pages(obj);
if (ret)
return ret;
obj->vmapping = vmap(obj->pages, page_count, 0, PAGE_KERNEL);
if (!obj->vmapping)
return -ENOMEM;
return 0;
}
void udl_gem_vunmap(struct udl_gem_object *obj)
{
if (obj->base.import_attach) {
dma_buf_vunmap(obj->base.import_attach->dmabuf, obj->vmapping);
return;
}
if (obj->vmapping)
vunmap(obj->vmapping);
udl_gem_put_pages(obj);
}
void udl_gem_free_object(struct drm_gem_object *gem_obj)
{
struct udl_gem_object *obj = to_udl_bo(gem_obj);
if (obj->vmapping)
udl_gem_vunmap(obj);
if (gem_obj->import_attach) {
drm_prime_gem_destroy(gem_obj, obj->sg);
put_device(gem_obj->dev->dev);
}
if (obj->pages)
udl_gem_put_pages(obj);
drm_gem_free_mmap_offset(gem_obj);
}
/* the dumb interface doesn't work with the GEM straight MMAP
interface, it expects to do MMAP on the drm fd, like normal */
int udl_gem_mmap(struct drm_file *file, struct drm_device *dev,
uint32_t handle, uint64_t *offset)
{
struct udl_gem_object *gobj;
struct drm_gem_object *obj;
int ret = 0;
mutex_lock(&dev->struct_mutex);
obj = drm_gem_object_lookup(dev, file, handle);
if (obj == NULL) {
ret = -ENOENT;
goto unlock;
}
gobj = to_udl_bo(obj);
ret = udl_gem_get_pages(gobj);
if (ret)
goto out;
ret = drm_gem_create_mmap_offset(obj);
if (ret)
goto out;
*offset = drm_vma_node_offset_addr(&gobj->base.vma_node);
out:
drm_gem_object_unreference(&gobj->base);
unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
}
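/*
* Illustrative userspace sketch (not part of this driver): the "mmap on the
* drm fd" flow described above, using the generic dumb-buffer ioctls via
* libdrm's drmIoctl(). Error handling omitted; names below are hypothetical.
*/
#include <stdint.h>
#include <sys/mman.h>
#include <xf86drm.h>

static void *map_dumb_buffer(int drm_fd, uint32_t width, uint32_t height)
{
	struct drm_mode_create_dumb create = {
		.width = width, .height = height, .bpp = 32,
	};
	struct drm_mode_map_dumb map = { 0 };

	/* lands in udl_dumb_create() -> udl_gem_create() */
	drmIoctl(drm_fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);
	/* lands in udl_gem_mmap(), which returns the fake mmap offset */
	map.handle = create.handle;
	drmIoctl(drm_fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
	/* the mmap itself goes through the drm fd, i.e. udl_drm_gem_mmap() */
	return mmap(NULL, create.size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    drm_fd, map.offset);
}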
static int udl_prime_create(struct drm_device *dev,
size_t size,
struct sg_table *sg,
struct udl_gem_object **obj_p)
{
struct udl_gem_object *obj;
int npages;
npages = size / PAGE_SIZE;
*obj_p = NULL;
obj = udl_gem_alloc_object(dev, npages * PAGE_SIZE);
if (!obj)
return -ENOMEM;
obj->sg = sg;
obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
if (obj->pages == NULL) {
DRM_ERROR("obj pages is NULL %d\n", npages);
return -ENOMEM;
}
drm_prime_sg_to_page_addr_arrays(sg, obj->pages, NULL, npages);
*obj_p = obj;
return 0;
}
struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev,
struct dma_buf *dma_buf)
{
struct dma_buf_attachment *attach;
struct sg_table *sg;
struct udl_gem_object *uobj;
int ret;
/* need to attach */
get_device(dev->dev);
attach = dma_buf_attach(dma_buf, dev->dev);
if (IS_ERR(attach)) {
put_device(dev->dev);
return ERR_CAST(attach);
}
get_dma_buf(dma_buf);
sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
if (IS_ERR(sg)) {
ret = PTR_ERR(sg);
goto fail_detach;
}
ret = udl_prime_create(dev, dma_buf->size, sg, &uobj);
if (ret) {
goto fail_unmap;
}
uobj->base.import_attach = attach;
return &uobj->base;
fail_unmap:
dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
fail_detach:
dma_buf_detach(dma_buf, attach);
dma_buf_put(dma_buf);
put_device(dev->dev);
return ERR_PTR(ret);
}

View file

@@ -0,0 +1,352 @@
/*
* Copyright (C) 2012 Red Hat
*
* based in parts on udlfb.c:
* Copyright (C) 2009 Roberto De Ioris <roberto@unbit.it>
* Copyright (C) 2009 Jaya Kumar <jayakumar.lkml@gmail.com>
* Copyright (C) 2009 Bernie Thompson <bernie@plugable.com>
*
* This file is subject to the terms and conditions of the GNU General Public
* License v2. See the file COPYING in the main directory of this archive for
* more details.
*/
#include <drm/drmP.h>
#include "udl_drv.h"
/* -BULK_SIZE as per usb-skeleton. Can we get full page and avoid overhead? */
#define BULK_SIZE 512
#define MAX_TRANSFER (PAGE_SIZE*16 - BULK_SIZE)
#define WRITES_IN_FLIGHT (4)
#define MAX_VENDOR_DESCRIPTOR_SIZE 256
#define GET_URB_TIMEOUT HZ
#define FREE_URB_TIMEOUT (HZ*2)
static int udl_parse_vendor_descriptor(struct drm_device *dev,
struct usb_device *usbdev)
{
struct udl_device *udl = dev->dev_private;
char *desc;
char *buf;
char *desc_end;
u8 total_len = 0;
buf = kzalloc(MAX_VENDOR_DESCRIPTOR_SIZE, GFP_KERNEL);
if (!buf)
return false;
desc = buf;
total_len = usb_get_descriptor(usbdev, 0x5f, /* vendor specific */
0, desc, MAX_VENDOR_DESCRIPTOR_SIZE);
if (total_len > 5) {
DRM_INFO("vendor descriptor length:%x data:%11ph\n",
total_len, desc);
if ((desc[0] != total_len) || /* descriptor length */
(desc[1] != 0x5f) || /* vendor descriptor type */
(desc[2] != 0x01) || /* version (2 bytes) */
(desc[3] != 0x00) ||
(desc[4] != total_len - 2)) /* length after type */
goto unrecognized;
desc_end = desc + total_len;
desc += 5; /* the fixed header we've already parsed */
while (desc < desc_end) {
u8 length;
u16 key;
key = le16_to_cpu(*((u16 *) desc));
desc += sizeof(u16);
length = *desc;
desc++;
switch (key) {
case 0x0200: { /* max_area */
u32 max_area;
max_area = le32_to_cpu(*((u32 *)desc));
DRM_DEBUG("DL chip limited to %d pixel modes\n",
max_area);
udl->sku_pixel_limit = max_area;
break;
}
default:
break;
}
desc += length;
}
}
goto success;
unrecognized:
/* allow the driver to load for now even if the firmware is unrecognized */
DRM_ERROR("Unrecognized vendor firmware descriptor\n");
success:
kfree(buf);
return true;
}
static void udl_release_urb_work(struct work_struct *work)
{
struct urb_node *unode = container_of(work, struct urb_node,
release_urb_work.work);
up(&unode->dev->urbs.limit_sem);
}
void udl_urb_completion(struct urb *urb)
{
struct urb_node *unode = urb->context;
struct udl_device *udl = unode->dev;
unsigned long flags;
/* sync/async unlink faults aren't errors */
if (urb->status) {
if (!(urb->status == -ENOENT ||
urb->status == -ECONNRESET ||
urb->status == -ESHUTDOWN)) {
DRM_ERROR("%s - nonzero write bulk status received: %d\n",
__func__, urb->status);
atomic_set(&udl->lost_pixels, 1);
}
}
urb->transfer_buffer_length = udl->urbs.size; /* reset to actual */
spin_lock_irqsave(&udl->urbs.lock, flags);
list_add_tail(&unode->entry, &udl->urbs.list);
udl->urbs.available++;
spin_unlock_irqrestore(&udl->urbs.lock, flags);
#if 0
/*
* When using fb_defio, we deadlock if up() is called
* while another is waiting. So queue to another process.
*/
if (fb_defio)
schedule_delayed_work(&unode->release_urb_work, 0);
else
#endif
up(&udl->urbs.limit_sem);
}
static void udl_free_urb_list(struct drm_device *dev)
{
struct udl_device *udl = dev->dev_private;
int count = udl->urbs.count;
struct list_head *node;
struct urb_node *unode;
struct urb *urb;
int ret;
unsigned long flags;
DRM_DEBUG("Waiting for completes and freeing all render urbs\n");
/* keep waiting and freeing, until we've got 'em all */
while (count--) {
/* Getting interrupted means a leak, but ok at shutdown*/
ret = down_interruptible(&udl->urbs.limit_sem);
if (ret)
break;
spin_lock_irqsave(&udl->urbs.lock, flags);
node = udl->urbs.list.next; /* have reserved one with sem */
list_del_init(node);
spin_unlock_irqrestore(&udl->urbs.lock, flags);
unode = list_entry(node, struct urb_node, entry);
urb = unode->urb;
/* Free each separately allocated piece */
usb_free_coherent(urb->dev, udl->urbs.size,
urb->transfer_buffer, urb->transfer_dma);
usb_free_urb(urb);
kfree(node);
}
udl->urbs.count = 0;
}
static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size)
{
struct udl_device *udl = dev->dev_private;
int i = 0;
struct urb *urb;
struct urb_node *unode;
char *buf;
spin_lock_init(&udl->urbs.lock);
udl->urbs.size = size;
INIT_LIST_HEAD(&udl->urbs.list);
while (i < count) {
unode = kzalloc(sizeof(struct urb_node), GFP_KERNEL);
if (!unode)
break;
unode->dev = udl;
INIT_DELAYED_WORK(&unode->release_urb_work,
udl_release_urb_work);
urb = usb_alloc_urb(0, GFP_KERNEL);
if (!urb) {
kfree(unode);
break;
}
unode->urb = urb;
buf = usb_alloc_coherent(udl->udev, MAX_TRANSFER, GFP_KERNEL,
&urb->transfer_dma);
if (!buf) {
kfree(unode);
usb_free_urb(urb);
break;
}
/* urb->transfer_buffer_length set to actual before submit */
usb_fill_bulk_urb(urb, udl->udev, usb_sndbulkpipe(udl->udev, 1),
buf, size, udl_urb_completion, unode);
urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
list_add_tail(&unode->entry, &udl->urbs.list);
i++;
}
sema_init(&udl->urbs.limit_sem, i);
udl->urbs.count = i;
udl->urbs.available = i;
DRM_DEBUG("allocated %d %d byte urbs\n", i, (int) size);
return i;
}
struct urb *udl_get_urb(struct drm_device *dev)
{
struct udl_device *udl = dev->dev_private;
int ret = 0;
struct list_head *entry;
struct urb_node *unode;
struct urb *urb = NULL;
unsigned long flags;
/* Wait for an in-flight buffer to complete and get re-queued */
ret = down_timeout(&udl->urbs.limit_sem, GET_URB_TIMEOUT);
if (ret) {
atomic_set(&udl->lost_pixels, 1);
DRM_INFO("wait for urb interrupted: %x available: %d\n",
ret, udl->urbs.available);
goto error;
}
spin_lock_irqsave(&udl->urbs.lock, flags);
BUG_ON(list_empty(&udl->urbs.list)); /* reserved one with limit_sem */
entry = udl->urbs.list.next;
list_del_init(entry);
udl->urbs.available--;
spin_unlock_irqrestore(&udl->urbs.lock, flags);
unode = list_entry(entry, struct urb_node, entry);
urb = unode->urb;
error:
return urb;
}
int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len)
{
struct udl_device *udl = dev->dev_private;
int ret;
BUG_ON(len > udl->urbs.size);
urb->transfer_buffer_length = len; /* set to actual payload len */
ret = usb_submit_urb(urb, GFP_ATOMIC);
if (ret) {
udl_urb_completion(urb); /* because no one else will */
atomic_set(&udl->lost_pixels, 1);
DRM_ERROR("usb_submit_urb error %x\n", ret);
}
return ret;
}
int udl_driver_load(struct drm_device *dev, unsigned long flags)
{
struct usb_device *udev = (void*)flags;
struct udl_device *udl;
int ret = -ENOMEM;
DRM_DEBUG("\n");
udl = kzalloc(sizeof(struct udl_device), GFP_KERNEL);
if (!udl)
return -ENOMEM;
udl->udev = udev;
udl->ddev = dev;
dev->dev_private = udl;
if (!udl_parse_vendor_descriptor(dev, udl->udev)) {
ret = -ENODEV;
DRM_ERROR("firmware not recognized. Assume incompatible device\n");
goto err;
}
if (!udl_alloc_urb_list(dev, WRITES_IN_FLIGHT, MAX_TRANSFER)) {
DRM_ERROR("udl_alloc_urb_list failed\n");
goto err;
}
DRM_DEBUG("\n");
ret = udl_modeset_init(dev);
if (ret)
goto err;
ret = udl_fbdev_init(dev);
if (ret)
goto err;
ret = drm_vblank_init(dev, 1);
if (ret)
goto err_fb;
return 0;
err_fb:
udl_fbdev_cleanup(dev);
err:
if (udl->urbs.count)
udl_free_urb_list(dev);
kfree(udl);
DRM_ERROR("%d\n", ret);
return ret;
}
int udl_drop_usb(struct drm_device *dev)
{
udl_free_urb_list(dev);
return 0;
}
int udl_driver_unload(struct drm_device *dev)
{
struct udl_device *udl = dev->dev_private;
drm_vblank_cleanup(dev);
if (udl->urbs.count)
udl_free_urb_list(dev);
udl_fbdev_cleanup(dev);
udl_modeset_cleanup(dev);
kfree(udl);
return 0;
}

View file

@@ -0,0 +1,459 @@
/*
* Copyright (C) 2012 Red Hat
*
* based in parts on udlfb.c:
* Copyright (C) 2009 Roberto De Ioris <roberto@unbit.it>
* Copyright (C) 2009 Jaya Kumar <jayakumar.lkml@gmail.com>
* Copyright (C) 2009 Bernie Thompson <bernie@plugable.com>
* This file is subject to the terms and conditions of the GNU General Public
* License v2. See the file COPYING in the main directory of this archive for
* more details.
*/
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include "udl_drv.h"
/*
* All DisplayLink bulk operations start with 0xAF, followed by a specific
* command byte. All operations are written to command buffers, which are
* later sent to the device.
*/
static char *udl_set_register(char *buf, u8 reg, u8 val)
{
*buf++ = 0xAF;
*buf++ = 0x20;
*buf++ = reg;
*buf++ = val;
return buf;
}
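/* e.g. udl_set_register(buf, 0x1f, 0x00) appends the four bytes
* AF 20 1F 00, i.e. "write 0x00 to the blank/sync register 0x1f". */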
static char *udl_vidreg_lock(char *buf)
{
return udl_set_register(buf, 0xFF, 0x00);
}
static char *udl_vidreg_unlock(char *buf)
{
return udl_set_register(buf, 0xFF, 0xFF);
}
/*
* On/Off for driving the DisplayLink framebuffer to the display
* 0x00 H and V sync on
* 0x01 H and V sync off (screen blank but powered)
* 0x07 DPMS powerdown (requires modeset to come back)
*/
static char *udl_set_blank(char *buf, int dpms_mode)
{
u8 reg;
switch (dpms_mode) {
case DRM_MODE_DPMS_OFF:
reg = 0x07;
break;
case DRM_MODE_DPMS_STANDBY:
reg = 0x05;
break;
case DRM_MODE_DPMS_SUSPEND:
reg = 0x01;
break;
case DRM_MODE_DPMS_ON:
reg = 0x00;
break;
}
return udl_set_register(buf, 0x1f, reg);
}
static char *udl_set_color_depth(char *buf, u8 selection)
{
return udl_set_register(buf, 0x00, selection);
}
static char *udl_set_base16bpp(char *wrptr, u32 base)
{
/* the base pointer is 24 bits wide; 0x20 is the high byte. */
wrptr = udl_set_register(wrptr, 0x20, base >> 16);
wrptr = udl_set_register(wrptr, 0x21, base >> 8);
return udl_set_register(wrptr, 0x22, base);
}
/*
* DisplayLink HW has separate 16bpp and 8bpp framebuffers.
* In 24bpp modes, the low 3:2:3 RGB bits go in the 8bpp framebuffer
*/
static char *udl_set_base8bpp(char *wrptr, u32 base)
{
wrptr = udl_set_register(wrptr, 0x26, base >> 16);
wrptr = udl_set_register(wrptr, 0x27, base >> 8);
return udl_set_register(wrptr, 0x28, base);
}
static char *udl_set_register_16(char *wrptr, u8 reg, u16 value)
{
wrptr = udl_set_register(wrptr, reg, value >> 8);
return udl_set_register(wrptr, reg+1, value);
}
/*
* This is kind of weird because the controller takes some
* register values in a different byte order than other registers.
*/
static char *udl_set_register_16be(char *wrptr, u8 reg, u16 value)
{
wrptr = udl_set_register(wrptr, reg, value);
return udl_set_register(wrptr, reg+1, value >> 8);
}
/*
* LFSR is linear feedback shift register. The reason we have this is
* because the display controller needs to minimize the clock depth of
* various counters used in the display path. So this code reverses the
* provided value into the lfsr16 value by counting backwards to get
* the value that needs to be set in the hardware comparator to get the
* same actual count. This makes sense once you read above a couple of
* times and think about it from a hardware perspective.
*/
static u16 udl_lfsr16(u16 actual_count)
{
u32 lv = 0xFFFF; /* This is the lfsr value that the hw starts with */
while (actual_count--) {
lv = ((lv << 1) |
(((lv >> 15) ^ (lv >> 4) ^ (lv >> 2) ^ (lv >> 1)) & 1))
& 0xFFFF;
}
return (u16) lv;
}
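/* For reference, the first few values this produces are
* udl_lfsr16(0) == 0xffff, udl_lfsr16(1) == 0xfffe,
* udl_lfsr16(2) == 0xfffc and udl_lfsr16(3) == 0xfff9. */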
/*
* This does LFSR conversion on the value that is to be written.
* See LFSR explanation above for more detail.
*/
static char *udl_set_register_lfsr16(char *wrptr, u8 reg, u16 value)
{
return udl_set_register_16(wrptr, reg, udl_lfsr16(value));
}
/*
* This takes a standard fbdev screeninfo struct and all of its monitor mode
* details and converts them into the DisplayLink equivalent register commands.
ERR(vreg(dev, 0x00, (color_depth == 16) ? 0 : 1));
ERR(vreg_lfsr16(dev, 0x01, xDisplayStart));
ERR(vreg_lfsr16(dev, 0x03, xDisplayEnd));
ERR(vreg_lfsr16(dev, 0x05, yDisplayStart));
ERR(vreg_lfsr16(dev, 0x07, yDisplayEnd));
ERR(vreg_lfsr16(dev, 0x09, xEndCount));
ERR(vreg_lfsr16(dev, 0x0B, hSyncStart));
ERR(vreg_lfsr16(dev, 0x0D, hSyncEnd));
ERR(vreg_big_endian(dev, 0x0F, hPixels));
ERR(vreg_lfsr16(dev, 0x11, yEndCount));
ERR(vreg_lfsr16(dev, 0x13, vSyncStart));
ERR(vreg_lfsr16(dev, 0x15, vSyncEnd));
ERR(vreg_big_endian(dev, 0x17, vPixels));
ERR(vreg_little_endian(dev, 0x1B, pixelClock5KHz));
ERR(vreg(dev, 0x1F, 0));
ERR(vbuf(dev, WRITE_VIDREG_UNLOCK, DSIZEOF(WRITE_VIDREG_UNLOCK)));
*/
static char *udl_set_vid_cmds(char *wrptr, struct drm_display_mode *mode)
{
u16 xds, yds;
u16 xde, yde;
u16 yec;
/* x display start */
xds = mode->crtc_htotal - mode->crtc_hsync_start;
wrptr = udl_set_register_lfsr16(wrptr, 0x01, xds);
/* x display end */
xde = xds + mode->crtc_hdisplay;
wrptr = udl_set_register_lfsr16(wrptr, 0x03, xde);
/* y display start */
yds = mode->crtc_vtotal - mode->crtc_vsync_start;
wrptr = udl_set_register_lfsr16(wrptr, 0x05, yds);
/* y display end */
yde = yds + mode->crtc_vdisplay;
wrptr = udl_set_register_lfsr16(wrptr, 0x07, yde);
/* x end count is active + blanking - 1 */
wrptr = udl_set_register_lfsr16(wrptr, 0x09,
mode->crtc_htotal - 1);
/* libdlo hardcodes hsync start to 1 */
wrptr = udl_set_register_lfsr16(wrptr, 0x0B, 1);
/* hsync end is width of sync pulse + 1 */
wrptr = udl_set_register_lfsr16(wrptr, 0x0D,
mode->crtc_hsync_end - mode->crtc_hsync_start + 1);
/* hpixels is active pixels */
wrptr = udl_set_register_16(wrptr, 0x0F, mode->hdisplay);
/* yendcount is vertical active + vertical blanking */
yec = mode->crtc_vtotal;
wrptr = udl_set_register_lfsr16(wrptr, 0x11, yec);
/* libdlo hardcodes vsync start to 0 */
wrptr = udl_set_register_lfsr16(wrptr, 0x13, 0);
/* vsync end is width of vsync pulse */
wrptr = udl_set_register_lfsr16(wrptr, 0x15, mode->crtc_vsync_end - mode->crtc_vsync_start);
/* vpixels is active pixels */
wrptr = udl_set_register_16(wrptr, 0x17, mode->crtc_vdisplay);
wrptr = udl_set_register_16be(wrptr, 0x1B,
mode->clock / 5);
return wrptr;
}
static char *udl_dummy_render(char *wrptr)
{
*wrptr++ = 0xAF;
*wrptr++ = 0x6A; /* copy */
*wrptr++ = 0x00; /* from addr */
*wrptr++ = 0x00;
*wrptr++ = 0x00;
*wrptr++ = 0x01; /* one pixel */
*wrptr++ = 0x00; /* to address */
*wrptr++ = 0x00;
*wrptr++ = 0x00;
return wrptr;
}
static int udl_crtc_write_mode_to_hw(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct udl_device *udl = dev->dev_private;
struct urb *urb;
char *buf;
int retval;
urb = udl_get_urb(dev);
if (!urb)
return -ENOMEM;
buf = (char *)urb->transfer_buffer;
memcpy(buf, udl->mode_buf, udl->mode_buf_len);
retval = udl_submit_urb(dev, urb, udl->mode_buf_len);
DRM_INFO("write mode info %d\n", udl->mode_buf_len);
return retval;
}
static void udl_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct drm_device *dev = crtc->dev;
struct udl_device *udl = dev->dev_private;
int retval;
if (mode == DRM_MODE_DPMS_OFF) {
char *buf;
struct urb *urb;
urb = udl_get_urb(dev);
if (!urb)
return;
buf = (char *)urb->transfer_buffer;
buf = udl_vidreg_lock(buf);
buf = udl_set_blank(buf, mode);
buf = udl_vidreg_unlock(buf);
buf = udl_dummy_render(buf);
retval = udl_submit_urb(dev, urb, buf - (char *)
urb->transfer_buffer);
} else {
if (udl->mode_buf_len == 0) {
DRM_ERROR("Trying to enable DPMS with no mode\n");
return;
}
udl_crtc_write_mode_to_hw(crtc);
}
}
static bool udl_crtc_mode_fixup(struct drm_crtc *crtc,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
return true;
}
#if 0
static int
udl_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
int x, int y, enum mode_set_atomic state)
{
return 0;
}
static int
udl_pipe_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb)
{
return 0;
}
#endif
static int udl_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
int x, int y,
struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
struct udl_framebuffer *ufb = to_udl_fb(crtc->primary->fb);
struct udl_device *udl = dev->dev_private;
char *buf;
char *wrptr;
int color_depth = 0;
buf = (char *)udl->mode_buf;
/* for now we just clip 24 -> 16 - if we fix that fix this */
/*if (crtc->fb->bits_per_pixel != 16)
color_depth = 1; */
/* This first section has to do with setting the base address on the
* controller associated with the display. There are 2 base
* pointers, currently, we only use the 16 bpp segment.
*/
wrptr = udl_vidreg_lock(buf);
wrptr = udl_set_color_depth(wrptr, color_depth);
/* set base for 16bpp segment to 0 */
wrptr = udl_set_base16bpp(wrptr, 0);
/* set base for 8bpp segment to end of fb */
wrptr = udl_set_base8bpp(wrptr, 2 * mode->vdisplay * mode->hdisplay);
wrptr = udl_set_vid_cmds(wrptr, adjusted_mode);
wrptr = udl_set_blank(wrptr, DRM_MODE_DPMS_ON);
wrptr = udl_vidreg_unlock(wrptr);
wrptr = udl_dummy_render(wrptr);
ufb->active_16 = true;
if (old_fb) {
struct udl_framebuffer *uold_fb = to_udl_fb(old_fb);
uold_fb->active_16 = false;
}
udl->mode_buf_len = wrptr - buf;
/* damage all of it */
udl_handle_damage(ufb, 0, 0, ufb->base.width, ufb->base.height);
return 0;
}
static void udl_crtc_disable(struct drm_crtc *crtc)
{
udl_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
}
static void udl_crtc_destroy(struct drm_crtc *crtc)
{
drm_crtc_cleanup(crtc);
kfree(crtc);
}
static int udl_crtc_page_flip(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event,
uint32_t page_flip_flags)
{
struct udl_framebuffer *ufb = to_udl_fb(fb);
struct drm_device *dev = crtc->dev;
unsigned long flags;
udl_handle_damage(ufb, 0, 0, fb->width, fb->height);
spin_lock_irqsave(&dev->event_lock, flags);
if (event)
drm_send_vblank_event(dev, 0, event);
spin_unlock_irqrestore(&dev->event_lock, flags);
crtc->primary->fb = fb;
return 0;
}
static void udl_crtc_prepare(struct drm_crtc *crtc)
{
}
static void udl_crtc_commit(struct drm_crtc *crtc)
{
udl_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
}
static struct drm_crtc_helper_funcs udl_helper_funcs = {
.dpms = udl_crtc_dpms,
.mode_fixup = udl_crtc_mode_fixup,
.mode_set = udl_crtc_mode_set,
.prepare = udl_crtc_prepare,
.commit = udl_crtc_commit,
.disable = udl_crtc_disable,
};
static const struct drm_crtc_funcs udl_crtc_funcs = {
.set_config = drm_crtc_helper_set_config,
.destroy = udl_crtc_destroy,
.page_flip = udl_crtc_page_flip,
};
static int udl_crtc_init(struct drm_device *dev)
{
struct drm_crtc *crtc;
crtc = kzalloc(sizeof(struct drm_crtc) + sizeof(struct drm_connector *), GFP_KERNEL);
if (crtc == NULL)
return -ENOMEM;
drm_crtc_init(dev, crtc, &udl_crtc_funcs);
drm_crtc_helper_add(crtc, &udl_helper_funcs);
return 0;
}
static const struct drm_mode_config_funcs udl_mode_funcs = {
.fb_create = udl_fb_user_fb_create,
.output_poll_changed = NULL,
};
int udl_modeset_init(struct drm_device *dev)
{
struct drm_encoder *encoder;
drm_mode_config_init(dev);
dev->mode_config.min_width = 640;
dev->mode_config.min_height = 480;
dev->mode_config.max_width = 2048;
dev->mode_config.max_height = 2048;
dev->mode_config.prefer_shadow = 0;
dev->mode_config.preferred_depth = 24;
dev->mode_config.funcs = &udl_mode_funcs;
drm_mode_create_dirty_info_property(dev);
udl_crtc_init(dev);
encoder = udl_encoder_init(dev);
udl_connector_init(dev, encoder);
return 0;
}
void udl_modeset_cleanup(struct drm_device *dev)
{
drm_mode_config_cleanup(dev);
}

View file

@@ -0,0 +1,264 @@
/*
* Copyright (C) 2012 Red Hat
* based in parts on udlfb.c:
* Copyright (C) 2009 Roberto De Ioris <roberto@unbit.it>
* Copyright (C) 2009 Jaya Kumar <jayakumar.lkml@gmail.com>
* Copyright (C) 2009 Bernie Thompson <bernie@plugable.com>
*
* This file is subject to the terms and conditions of the GNU General Public
* License v2. See the file COPYING in the main directory of this archive for
* more details.
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/fb.h>
#include <linux/prefetch.h>
#include <drm/drmP.h>
#include "udl_drv.h"
#define MAX_CMD_PIXELS 255
#define RLX_HEADER_BYTES 7
#define MIN_RLX_PIX_BYTES 4
#define MIN_RLX_CMD_BYTES (RLX_HEADER_BYTES + MIN_RLX_PIX_BYTES)
#define RLE_HEADER_BYTES 6
#define MIN_RLE_PIX_BYTES 3
#define MIN_RLE_CMD_BYTES (RLE_HEADER_BYTES + MIN_RLE_PIX_BYTES)
#define RAW_HEADER_BYTES 6
#define MIN_RAW_PIX_BYTES 2
#define MIN_RAW_CMD_BYTES (RAW_HEADER_BYTES + MIN_RAW_PIX_BYTES)
/*
* Trims identical data from front and back of line
* Sets new front buffer address and width
* And returns byte count of identical pixels
* Assumes CPU natural alignment (unsigned long)
* for back and front buffer ptrs and width
*/
#if 0
static int udl_trim_hline(const u8 *bback, const u8 **bfront, int *width_bytes)
{
int j, k;
const unsigned long *back = (const unsigned long *) bback;
const unsigned long *front = (const unsigned long *) *bfront;
const int width = *width_bytes / sizeof(unsigned long);
int identical = width;
int start = width;
int end = width;
prefetch((void *) front);
prefetch((void *) back);
for (j = 0; j < width; j++) {
if (back[j] != front[j]) {
start = j;
break;
}
}
for (k = width - 1; k > j; k--) {
if (back[k] != front[k]) {
end = k+1;
break;
}
}
identical = start + (width - end);
*bfront = (u8 *) &front[start];
*width_bytes = (end - start) * sizeof(unsigned long);
return identical * sizeof(unsigned long);
}
#endif
static inline u16 pixel32_to_be16(const uint32_t pixel)
{
return (((pixel >> 3) & 0x001f) |
((pixel >> 5) & 0x07e0) |
((pixel >> 8) & 0xf800));
}
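/* i.e. truncate an XRGB8888 pixel to RGB565 in host order; the caller
* byte-swaps the result before putting it on the wire. */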
static bool pixel_repeats(const void *pixel, const uint32_t repeat, int bpp)
{
if (bpp == 2)
return *(const uint16_t *)pixel == repeat;
else
return *(const uint32_t *)pixel == repeat;
}
/*
* Render a command stream for an encoded horizontal line segment of pixels.
*
* A command buffer holds several commands.
* It always begins with a fresh command header
* (the protocol doesn't require this, but we enforce it to allow
* multiple buffers to be potentially encoded and sent in parallel).
* A single command encodes one contiguous horizontal line of pixels
*
* The function relies on the client to do all allocation, so that
* rendering can be done directly to output buffers (e.g. USB URBs).
* The function fills the supplied command buffer, providing information
* on where it left off, so the client may call in again with additional
* buffers if the line will take several buffers to complete.
*
* A single command can transmit a maximum of 256 pixels,
* regardless of the compression ratio (protocol design limit).
* To the hardware, 0 for a size byte means 256
*
* Rather than 256 pixel commands which are either rl or raw encoded,
* the rlx command simply assumes alternating raw and rl spans within one cmd.
* This has a slightly larger header overhead, but produces more even results.
* It also processes all data (read and write) in a single pass.
* Performance benchmarks of common cases show it having just slightly better
* compression than 256 pixel raw or rle commands, with similar CPU consumption.
* But for very rl friendly data, will compress not quite as well.
*/
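/*
* Wire layout of one rlx command as built below (16 bpp case):
*
*   AF 6B <addr 23:16> <addr 15:8> <addr 7:0> <total pixels> <raw count>
*   <raw pixels, big-endian 16 bpp> <repeat count> <raw count> <raw pixels> ...
*
* where a count byte of 0 means 256 and the repeat count is the run length
* of the pixel that precedes it, minus one.
*/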
static void udl_compress_hline16(
const u8 **pixel_start_ptr,
const u8 *const pixel_end,
uint32_t *device_address_ptr,
uint8_t **command_buffer_ptr,
const uint8_t *const cmd_buffer_end, int bpp)
{
const u8 *pixel = *pixel_start_ptr;
uint32_t dev_addr = *device_address_ptr;
uint8_t *cmd = *command_buffer_ptr;
while ((pixel_end > pixel) &&
(cmd_buffer_end - MIN_RLX_CMD_BYTES > cmd)) {
uint8_t *raw_pixels_count_byte = NULL;
uint8_t *cmd_pixels_count_byte = NULL;
const u8 *raw_pixel_start = NULL;
const u8 *cmd_pixel_start, *cmd_pixel_end = NULL;
prefetchw((void *) cmd); /* pull in one cache line at least */
*cmd++ = 0xaf;
*cmd++ = 0x6b;
*cmd++ = (uint8_t) ((dev_addr >> 16) & 0xFF);
*cmd++ = (uint8_t) ((dev_addr >> 8) & 0xFF);
*cmd++ = (uint8_t) ((dev_addr) & 0xFF);
cmd_pixels_count_byte = cmd++; /* we'll know this later */
cmd_pixel_start = pixel;
raw_pixels_count_byte = cmd++; /* we'll know this later */
raw_pixel_start = pixel;
cmd_pixel_end = pixel + (min(MAX_CMD_PIXELS + 1,
min((int)(pixel_end - pixel) / bpp,
(int)(cmd_buffer_end - cmd) / 2))) * bpp;
prefetch_range((void *) pixel, (cmd_pixel_end - pixel) * bpp);
while (pixel < cmd_pixel_end) {
const u8 *const start = pixel;
u32 repeating_pixel;
if (bpp == 2) {
repeating_pixel = *(uint16_t *)pixel;
*(uint16_t *)cmd = cpu_to_be16(repeating_pixel);
} else {
repeating_pixel = *(uint32_t *)pixel;
*(uint16_t *)cmd = cpu_to_be16(pixel32_to_be16(repeating_pixel));
}
cmd += 2;
pixel += bpp;
if (unlikely((pixel < cmd_pixel_end) &&
(pixel_repeats(pixel, repeating_pixel, bpp)))) {
/* go back and fill in raw pixel count */
*raw_pixels_count_byte = (((start -
raw_pixel_start) / bpp) + 1) & 0xFF;
while ((pixel < cmd_pixel_end) &&
(pixel_repeats(pixel, repeating_pixel, bpp))) {
pixel += bpp;
}
/* immediately after raw data is repeat byte */
*cmd++ = (((pixel - start) / bpp) - 1) & 0xFF;
/* Then start another raw pixel span */
raw_pixel_start = pixel;
raw_pixels_count_byte = cmd++;
}
}
if (pixel > raw_pixel_start) {
/* finalize last RAW span */
*raw_pixels_count_byte = ((pixel-raw_pixel_start) / bpp) & 0xFF;
}
*cmd_pixels_count_byte = ((pixel - cmd_pixel_start) / bpp) & 0xFF;
dev_addr += ((pixel - cmd_pixel_start) / bpp) * 2;
}
if (cmd_buffer_end <= MIN_RLX_CMD_BYTES + cmd) {
/* Fill leftover bytes with no-ops */
if (cmd_buffer_end > cmd)
memset(cmd, 0xAF, cmd_buffer_end - cmd);
cmd = (uint8_t *) cmd_buffer_end;
}
*command_buffer_ptr = cmd;
*pixel_start_ptr = pixel;
*device_address_ptr = dev_addr;
return;
}
/*
* There are 3 copies of every pixel: The front buffer that the fbdev
* client renders to, the actual framebuffer across the USB bus in hardware
* (that we can only write to, slowly, and can never read), and (optionally)
* our shadow copy that tracks what's been sent to that hardware buffer.
*/
int udl_render_hline(struct drm_device *dev, int bpp, struct urb **urb_ptr,
const char *front, char **urb_buf_ptr,
u32 byte_offset, u32 device_byte_offset,
u32 byte_width,
int *ident_ptr, int *sent_ptr)
{
const u8 *line_start, *line_end, *next_pixel;
u32 base16 = 0 + (device_byte_offset / bpp) * 2;
struct urb *urb = *urb_ptr;
u8 *cmd = *urb_buf_ptr;
u8 *cmd_end = (u8 *) urb->transfer_buffer + urb->transfer_buffer_length;
BUG_ON(!(bpp == 2 || bpp == 4));
line_start = (u8 *) (front + byte_offset);
next_pixel = line_start;
line_end = next_pixel + byte_width;
while (next_pixel < line_end) {
udl_compress_hline16(&next_pixel,
line_end, &base16,
(u8 **) &cmd, (u8 *) cmd_end, bpp);
if (cmd >= cmd_end) {
int len = cmd - (u8 *) urb->transfer_buffer;
if (udl_submit_urb(dev, urb, len))
return 1; /* lost pixels is set */
*sent_ptr += len;
urb = udl_get_urb(dev);
if (!urb)
return 1; /* lost_pixels is set */
*urb_ptr = urb;
cmd = urb->transfer_buffer;
cmd_end = &cmd[urb->transfer_buffer_length];
}
}
*urb_buf_ptr = cmd;
return 0;
}