mirror of
https://github.com/AetherDroid/android_kernel_samsung_on5xelte.git
synced 2025-09-08 01:08:03 -04:00
Fixed MTP to work with TWRP
This commit is contained in:
commit
f6dfaef42e
50820 changed files with 20846062 additions and 0 deletions
15
drivers/gpu/drm/qxl/Kconfig
Normal file
15
drivers/gpu/drm/qxl/Kconfig
Normal file
|
@ -0,0 +1,15 @@
|
|||
# DRM driver for the QXL paravirtual GPU exposed by QEMU/KVM for Spice.
config DRM_QXL
	tristate "QXL virtual GPU"
	depends on DRM && PCI
	select FB_SYS_FILLRECT
	select FB_SYS_COPYAREA
	select FB_SYS_IMAGEBLIT
	select FB_DEFERRED_IO
	select DRM_KMS_HELPER
	select DRM_KMS_FB_HELPER
	select DRM_TTM
	select CRC32
	help
	  QXL virtual GPU for Spice virtualization desktop integration.
	  Do not enable this driver unless your distro ships a corresponding
	  X.org QXL driver that can handle kernel modesetting.
|
9
drivers/gpu/drm/qxl/Makefile
Normal file
9
drivers/gpu/drm/qxl/Makefile
Normal file
|
@ -0,0 +1,9 @@
|
|||
#
# Makefile for the drm device driver.  This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.

ccflags-y := -Iinclude/drm

qxl-y := qxl_drv.o qxl_kms.o qxl_display.o qxl_ttm.o qxl_fb.o qxl_object.o qxl_gem.o qxl_cmd.o qxl_image.o qxl_draw.o qxl_debugfs.o qxl_irq.o qxl_dumb.o qxl_ioctl.o qxl_release.o qxl_prime.o

obj-$(CONFIG_DRM_QXL) += qxl.o
|
686
drivers/gpu/drm/qxl/qxl_cmd.c
Normal file
686
drivers/gpu/drm/qxl/qxl_cmd.c
Normal file
|
@ -0,0 +1,686 @@
|
|||
/*
|
||||
* Copyright 2013 Red Hat Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors: Dave Airlie
|
||||
* Alon Levy
|
||||
*/
|
||||
|
||||
/* QXL cmd/ring handling */
|
||||
|
||||
#include "qxl_drv.h"
|
||||
#include "qxl_object.h"
|
||||
|
||||
static int qxl_reap_surface_id(struct qxl_device *qdev, int max_to_reap);
|
||||
|
||||
struct ring {
|
||||
struct qxl_ring_header header;
|
||||
uint8_t elements[0];
|
||||
};
|
||||
|
||||
struct qxl_ring {
|
||||
struct ring *ring;
|
||||
int element_size;
|
||||
int n_elements;
|
||||
int prod_notify;
|
||||
wait_queue_head_t *push_event;
|
||||
spinlock_t lock;
|
||||
};
|
||||
|
||||
/* Free the driver-side wrapper only; the ring memory itself belongs to
 * the device BAR and is not owned by this object. */
void qxl_ring_free(struct qxl_ring *ring)
{
	kfree(ring);
}
|
||||
|
||||
/* Ask the device to notify us only once the ring is completely full. */
void qxl_ring_init_hdr(struct qxl_ring *ring)
{
	ring->ring->header.notify_on_prod = ring->n_elements;
}
|
||||
|
||||
/* Create a driver-side wrapper around a ring whose header lives at
 * @header in device memory.  @prod_notify is the I/O port kicked after
 * producing; @push_event is signalled by the IRQ handler when the device
 * consumes.  Returns NULL on allocation failure. */
struct qxl_ring *
qxl_ring_create(struct qxl_ring_header *header,
		int element_size,
		int n_elements,
		int prod_notify,
		bool set_prod_notify,
		wait_queue_head_t *push_event)
{
	struct qxl_ring *ring;

	ring = kmalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		return NULL;

	/* the element array follows the header in device memory */
	ring->ring = (struct ring *)header;
	ring->element_size = element_size;
	ring->n_elements = n_elements;
	ring->prod_notify = prod_notify;
	ring->push_event = push_event;
	if (set_prod_notify)
		qxl_ring_init_hdr(ring);
	spin_lock_init(&ring->lock);
	return ring;
}
|
||||
|
||||
/* Return nonzero when the ring has at least one free slot.  When full,
 * arm notify_on_cons so the device raises an interrupt after it consumes
 * the next entry (which wakes waiters on push_event). */
static int qxl_check_header(struct qxl_ring *ring)
{
	int ret;
	struct qxl_ring_header *header = &(ring->ring->header);
	unsigned long flags;
	spin_lock_irqsave(&ring->lock, flags);
	ret = header->prod - header->cons < header->num_items;
	if (ret == 0)
		header->notify_on_cons = header->cons + 1;
	spin_unlock_irqrestore(&ring->lock, flags);
	return ret;
}
|
||||
|
||||
/* Return nonzero when the ring is empty (device has consumed everything). */
int qxl_check_idle(struct qxl_ring *ring)
{
	int ret;
	struct qxl_ring_header *header = &(ring->ring->header);
	unsigned long flags;
	spin_lock_irqsave(&ring->lock, flags);
	ret = header->prod == header->cons;
	spin_unlock_irqrestore(&ring->lock, flags);
	return ret;
}
|
||||
|
||||
/* Push one element onto the ring, blocking (or busy-waiting in atomic
 * context) until a slot is free, then kick the device via the notify
 * port.  Returns 0 on success, or -ERESTARTSYS when @interruptible and a
 * signal arrives while waiting.  The ring lock is dropped across the
 * wait and retaken before producing. */
int qxl_ring_push(struct qxl_ring *ring,
		  const void *new_elt, bool interruptible)
{
	struct qxl_ring_header *header = &(ring->ring->header);
	uint8_t *elt;
	int idx, ret;
	unsigned long flags;
	spin_lock_irqsave(&ring->lock, flags);
	if (header->prod - header->cons == header->num_items) {
		/* ring full: ask the device to interrupt us after the next
		 * consume, then wait outside the lock */
		header->notify_on_cons = header->cons + 1;
		mb();
		spin_unlock_irqrestore(&ring->lock, flags);
		if (!drm_can_sleep()) {
			/* atomic context: poll instead of sleeping */
			while (!qxl_check_header(ring))
				udelay(1);
		} else {
			if (interruptible) {
				ret = wait_event_interruptible(*ring->push_event,
							       qxl_check_header(ring));
				if (ret)
					return ret;
			} else {
				wait_event(*ring->push_event,
					   qxl_check_header(ring));
			}

		}
		spin_lock_irqsave(&ring->lock, flags);
	}

	/* n_elements is a power of two, so the mask wraps the index */
	idx = header->prod & (ring->n_elements - 1);
	elt = ring->ring->elements + idx * ring->element_size;

	memcpy((void *)elt, new_elt, ring->element_size);

	header->prod++;

	/* ensure the element is visible before the device sees prod */
	mb();

	if (header->prod == header->notify_on_prod)
		outb(0, ring->prod_notify);

	spin_unlock_irqrestore(&ring->lock, flags);
	return 0;
}
|
||||
|
||||
/* Pop one element from the ring into @element.  Returns false when the
 * ring is empty, in which case notify_on_prod is armed so the device
 * interrupts us when it next produces. */
static bool qxl_ring_pop(struct qxl_ring *ring,
			 void *element)
{
	volatile struct qxl_ring_header *header = &(ring->ring->header);
	volatile uint8_t *ring_elt;
	int idx;
	unsigned long flags;
	spin_lock_irqsave(&ring->lock, flags);
	if (header->cons == header->prod) {
		header->notify_on_prod = header->cons + 1;
		spin_unlock_irqrestore(&ring->lock, flags);
		return false;
	}

	idx = header->cons & (ring->n_elements - 1);
	ring_elt = ring->ring->elements + idx * ring->element_size;

	memcpy(element, (void *)ring_elt, ring->element_size);

	header->cons++;

	spin_unlock_irqrestore(&ring->lock, flags);
	return true;
}
|
||||
|
||||
/* Build a qxl_command referring to @release's first BO (at the release's
 * offset) and push it onto the command ring. */
int
qxl_push_command_ring_release(struct qxl_device *qdev, struct qxl_release *release,
			      uint32_t type, bool interruptible)
{
	struct qxl_command cmd;
	struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);

	cmd.type = type;
	cmd.data = qxl_bo_physical_address(qdev, to_qxl_bo(entry->tv.bo), release->release_offset);

	return qxl_ring_push(qdev->command_ring, &cmd, interruptible);
}
|
||||
|
||||
/* Same as qxl_push_command_ring_release, but targets the cursor ring. */
int
qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *release,
			     uint32_t type, bool interruptible)
{
	struct qxl_command cmd;
	struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);

	cmd.type = type;
	cmd.data = qxl_bo_physical_address(qdev, to_qxl_bo(entry->tv.bo), release->release_offset);

	return qxl_ring_push(qdev->cursor_ring, &cmd, interruptible);
}
|
||||
|
||||
/* Schedule the garbage-collect worker if the release ring has entries;
 * optionally wait for it to finish.  Returns true if work was queued. */
bool qxl_queue_garbage_collect(struct qxl_device *qdev, bool flush)
{
	if (!qxl_check_idle(qdev->release_ring)) {
		queue_work(qdev->gc_queue, &qdev->gc_work);
		if (flush)
			flush_work(&qdev->gc_work);
		return true;
	}
	return false;
}
|
||||
|
||||
int qxl_garbage_collect(struct qxl_device *qdev)
|
||||
{
|
||||
struct qxl_release *release;
|
||||
uint64_t id, next_id;
|
||||
int i = 0;
|
||||
union qxl_release_info *info;
|
||||
|
||||
while (qxl_ring_pop(qdev->release_ring, &id)) {
|
||||
QXL_INFO(qdev, "popped %lld\n", id);
|
||||
while (id) {
|
||||
release = qxl_release_from_id_locked(qdev, id);
|
||||
if (release == NULL)
|
||||
break;
|
||||
|
||||
info = qxl_release_map(qdev, release);
|
||||
next_id = info->next;
|
||||
qxl_release_unmap(qdev, release, info);
|
||||
|
||||
QXL_INFO(qdev, "popped %lld, next %lld\n", id,
|
||||
next_id);
|
||||
|
||||
switch (release->type) {
|
||||
case QXL_RELEASE_DRAWABLE:
|
||||
case QXL_RELEASE_SURFACE_CMD:
|
||||
case QXL_RELEASE_CURSOR_CMD:
|
||||
break;
|
||||
default:
|
||||
DRM_ERROR("unexpected release type\n");
|
||||
break;
|
||||
}
|
||||
id = next_id;
|
||||
|
||||
qxl_release_free(qdev, release);
|
||||
++i;
|
||||
}
|
||||
}
|
||||
|
||||
QXL_INFO(qdev, "%s: %lld\n", __func__, i);
|
||||
|
||||
return i;
|
||||
}
|
||||
|
||||
/* Allocate a VRAM BO of @size and attach it to @release's BO list.
 * On success *_bo owns a reference; on failure the BO is unreferenced
 * and an errno is returned. */
int qxl_alloc_bo_reserved(struct qxl_device *qdev,
			  struct qxl_release *release,
			  unsigned long size,
			  struct qxl_bo **_bo)
{
	struct qxl_bo *bo;
	int ret;

	ret = qxl_bo_create(qdev, size, false /* not kernel - device */,
			    false, QXL_GEM_DOMAIN_VRAM, NULL, &bo);
	if (ret) {
		DRM_ERROR("failed to allocate VRAM BO\n");
		return ret;
	}
	ret = qxl_release_list_add(release, bo);
	if (ret)
		goto out_unref;

	*_bo = bo;
	return 0;
out_unref:
	qxl_bo_unref(&bo);
	return ret;
}
|
||||
|
||||
/* Issue an asynchronous I/O-port command (@val to io_base + @port) and
 * wait for the completion interrupt (irq_received_io_cmd).  Serialized
 * by async_io_mutex since the device handles one async command at a
 * time.  Returns 0 on completion, -ERESTARTSYS if interrupted (@intr),
 * or 0 with the command possibly still pending on timeout (5s) — the
 * "hw has gone away" bail-out. */
static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port, bool intr)
{
	int irq_num;
	long addr = qdev->io_base + port;
	int ret;

	mutex_lock(&qdev->async_io_mutex);
	irq_num = atomic_read(&qdev->irq_received_io_cmd);
	/* a previous async command is still in flight: wait for it first */
	if (qdev->last_sent_io_cmd > irq_num) {
		if (intr)
			ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
							       atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
		else
			ret = wait_event_timeout(qdev->io_cmd_event,
						 atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
		/* 0 is timeout, just bail the "hw" has gone away */
		if (ret <= 0)
			goto out;
		irq_num = atomic_read(&qdev->irq_received_io_cmd);
	}
	outb(val, addr);
	qdev->last_sent_io_cmd = irq_num + 1;
	if (intr)
		ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
						       atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
	else
		ret = wait_event_timeout(qdev->io_cmd_event,
					 atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
out:
	/* positive return from the wait helpers means "completed in time" */
	if (ret > 0)
		ret = 0;
	mutex_unlock(&qdev->async_io_mutex);
	return ret;
}
|
||||
|
||||
/* Uninterruptible variant: retry the async I/O command until it is not
 * restarted by a signal.  Other errors (e.g. timeout) are ignored. */
static void wait_for_io_cmd(struct qxl_device *qdev, uint8_t val, long port)
{
	int ret;

restart:
	ret = wait_for_io_cmd_user(qdev, val, port, false);
	if (ret == -ERESTARTSYS)
		goto restart;
}
|
||||
|
||||
/* Ask the device to flush rendering of @area on @surf back to its
 * backing memory (QXL_IO_UPDATE_AREA_ASYNC).  The area must lie within
 * the surface; returns -EINVAL otherwise.  update_area_mutex protects
 * the shared ram_header request fields. */
int qxl_io_update_area(struct qxl_device *qdev, struct qxl_bo *surf,
		       const struct qxl_rect *area)
{
	int surface_id;
	uint32_t surface_width, surface_height;
	int ret;

	if (!surf->hw_surf_alloc)
		DRM_ERROR("got io update area with no hw surface\n");

	/* the primary surface is always id 0 on the device */
	if (surf->is_primary)
		surface_id = 0;
	else
		surface_id = surf->surface_id;
	surface_width = surf->surf.width;
	surface_height = surf->surf.height;

	if (area->left < 0 || area->top < 0 ||
	    area->right > surface_width || area->bottom > surface_height) {
		qxl_io_log(qdev, "%s: not doing area update for "
			   "%d, (%d,%d,%d,%d) (%d,%d)\n", __func__, surface_id, area->left,
			   area->top, area->right, area->bottom, surface_width, surface_height);
		return -EINVAL;
	}
	mutex_lock(&qdev->update_area_mutex);
	qdev->ram_header->update_area = *area;
	qdev->ram_header->update_surface = surface_id;
	ret = wait_for_io_cmd_user(qdev, 0, QXL_IO_UPDATE_AREA_ASYNC, true);
	mutex_unlock(&qdev->update_area_mutex);
	return ret;
}
|
||||
|
||||
/* Tell the device we are out of memory so it releases resources. */
void qxl_io_notify_oom(struct qxl_device *qdev)
{
	outb(0, qdev->io_base + QXL_IO_NOTIFY_OOM);
}
|
||||
|
||||
/* Ask the device to push pending releases onto the release ring. */
void qxl_io_flush_release(struct qxl_device *qdev)
{
	outb(0, qdev->io_base + QXL_IO_FLUSH_RELEASE);
}
|
||||
|
||||
/* Synchronously flush all surfaces on the device (async command,
 * waited on via wait_for_io_cmd). */
void qxl_io_flush_surfaces(struct qxl_device *qdev)
{
	wait_for_io_cmd(qdev, 0, QXL_IO_FLUSH_SURFACES_ASYNC);
}
|
||||
|
||||
|
||||
/* Synchronously destroy the primary surface on the device. */
void qxl_io_destroy_primary(struct qxl_device *qdev)
{
	wait_for_io_cmd(qdev, 0, QXL_IO_DESTROY_PRIMARY_ASYNC);
}
|
||||
|
||||
/* Create the primary surface from @bo's geometry, with its pixel data at
 * @offset inside the BO.  Fills ram_header->create_surface and issues the
 * synchronous CREATE_PRIMARY async command. */
void qxl_io_create_primary(struct qxl_device *qdev,
			   unsigned offset, struct qxl_bo *bo)
{
	struct qxl_surface_create *create;

	QXL_INFO(qdev, "%s: qdev %p, ram_header %p\n", __func__, qdev,
		 qdev->ram_header);
	create = &qdev->ram_header->create_surface;
	create->format = bo->surf.format;
	create->width = bo->surf.width;
	create->height = bo->surf.height;
	create->stride = bo->surf.stride;
	create->mem = qxl_bo_physical_address(qdev, bo, offset);

	QXL_INFO(qdev, "%s: mem = %llx, from %p\n", __func__, create->mem,
		 bo->kptr);

	/* KEEP_DATA: device must preserve the existing BO contents */
	create->flags = QXL_SURF_FLAG_KEEP_DATA;
	create->type = QXL_SURF_TYPE_PRIMARY;

	wait_for_io_cmd(qdev, 0, QXL_IO_CREATE_PRIMARY_ASYNC);
}
|
||||
|
||||
/* Register memory slot @id with the device (slot parameters were already
 * written to the ram header by the caller). */
void qxl_io_memslot_add(struct qxl_device *qdev, uint8_t id)
{
	QXL_INFO(qdev, "qxl_memslot_add %d\n", id);
	wait_for_io_cmd(qdev, id, QXL_IO_MEMSLOT_ADD_ASYNC);
}
|
||||
|
||||
/* Log a printf-style message through the device's log buffer: format
 * into ram_header->log_buf (bounded by QXL_LOG_BUF_SIZE) and kick the
 * QXL_IO_LOG port so the host emits it. */
void qxl_io_log(struct qxl_device *qdev, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	vsnprintf(qdev->ram_header->log_buf, QXL_LOG_BUF_SIZE, fmt, args);
	va_end(args);
	/*
	 * DO not do a DRM output here - this will call printk, which will
	 * call back into qxl for rendering (qxl_fb)
	 */
	outb(0, qdev->io_base + QXL_IO_LOG);
}
|
||||
|
||||
/* Reset the device to its initial state. */
void qxl_io_reset(struct qxl_device *qdev)
{
	outb(0, qdev->io_base + QXL_IO_RESET);
}
|
||||
|
||||
/* Push the current monitors configuration to the device (the config was
 * already written to device memory by the caller); logs head 0's
 * geometry for debugging first, guarding every field against a missing
 * or empty monitors_config. */
void qxl_io_monitors_config(struct qxl_device *qdev)
{
	qxl_io_log(qdev, "%s: %d [%dx%d+%d+%d]\n", __func__,
		   qdev->monitors_config ?
		   qdev->monitors_config->count : -1,
		   qdev->monitors_config && qdev->monitors_config->count ?
		   qdev->monitors_config->heads[0].width : -1,
		   qdev->monitors_config && qdev->monitors_config->count ?
		   qdev->monitors_config->heads[0].height : -1,
		   qdev->monitors_config && qdev->monitors_config->count ?
		   qdev->monitors_config->heads[0].x : -1,
		   qdev->monitors_config && qdev->monitors_config->count ?
		   qdev->monitors_config->heads[0].y : -1
		   );

	wait_for_io_cmd(qdev, 0, QXL_IO_MONITORS_CONFIG_ASYNC);
}
|
||||
|
||||
/* Allocate a device surface id (>= 1) for @surf from the idr.  The idr
 * itself is unbounded, so ids past the device's n_surfaces limit are
 * released again and surfaces are reaped to make room before retrying.
 * The idr slot is reserved with a NULL pointer; qxl_hw_surface_alloc
 * later idr_replace()s it with the bo. */
int qxl_surface_id_alloc(struct qxl_device *qdev,
			 struct qxl_bo *surf)
{
	uint32_t handle;
	int idr_ret;
	int count = 0;
again:
	idr_preload(GFP_ATOMIC);
	spin_lock(&qdev->surf_id_idr_lock);
	idr_ret = idr_alloc(&qdev->surf_id_idr, NULL, 1, 0, GFP_NOWAIT);
	spin_unlock(&qdev->surf_id_idr_lock);
	idr_preload_end();
	if (idr_ret < 0)
		return idr_ret;
	handle = idr_ret;

	if (handle >= qdev->rom->n_surfaces) {
		/* out of device surface slots: give the id back, evict a
		 * couple of surfaces, and try again */
		count++;
		spin_lock(&qdev->surf_id_idr_lock);
		idr_remove(&qdev->surf_id_idr, handle);
		spin_unlock(&qdev->surf_id_idr_lock);
		qxl_reap_surface_id(qdev, 2);
		goto again;
	}
	surf->surface_id = handle;

	/* remember where to start the next reap scan */
	spin_lock(&qdev->surf_id_idr_lock);
	qdev->last_alloced_surf_id = handle;
	spin_unlock(&qdev->surf_id_idr_lock);
	return 0;
}
|
||||
|
||||
/* Return @surface_id to the idr so it can be reallocated. */
void qxl_surface_id_dealloc(struct qxl_device *qdev,
			    uint32_t surface_id)
{
	spin_lock(&qdev->surf_id_idr_lock);
	idr_remove(&qdev->surf_id_idr, surface_id);
	spin_unlock(&qdev->surf_id_idr_lock);
}
|
||||
|
||||
int qxl_hw_surface_alloc(struct qxl_device *qdev,
|
||||
struct qxl_bo *surf,
|
||||
struct ttm_mem_reg *new_mem)
|
||||
{
|
||||
struct qxl_surface_cmd *cmd;
|
||||
struct qxl_release *release;
|
||||
int ret;
|
||||
|
||||
if (surf->hw_surf_alloc)
|
||||
return 0;
|
||||
|
||||
ret = qxl_alloc_surface_release_reserved(qdev, QXL_SURFACE_CMD_CREATE,
|
||||
NULL,
|
||||
&release);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = qxl_release_reserve_list(release, true);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
|
||||
cmd->type = QXL_SURFACE_CMD_CREATE;
|
||||
cmd->u.surface_create.format = surf->surf.format;
|
||||
cmd->u.surface_create.width = surf->surf.width;
|
||||
cmd->u.surface_create.height = surf->surf.height;
|
||||
cmd->u.surface_create.stride = surf->surf.stride;
|
||||
if (new_mem) {
|
||||
int slot_id = surf->type == QXL_GEM_DOMAIN_VRAM ? qdev->main_mem_slot : qdev->surfaces_mem_slot;
|
||||
struct qxl_memslot *slot = &(qdev->mem_slots[slot_id]);
|
||||
|
||||
/* TODO - need to hold one of the locks to read tbo.offset */
|
||||
cmd->u.surface_create.data = slot->high_bits;
|
||||
|
||||
cmd->u.surface_create.data |= (new_mem->start << PAGE_SHIFT) + surf->tbo.bdev->man[new_mem->mem_type].gpu_offset;
|
||||
} else
|
||||
cmd->u.surface_create.data = qxl_bo_physical_address(qdev, surf, 0);
|
||||
cmd->surface_id = surf->surface_id;
|
||||
qxl_release_unmap(qdev, release, &cmd->release_info);
|
||||
|
||||
surf->surf_create = release;
|
||||
|
||||
/* no need to add a release to the fence for this surface bo,
|
||||
since it is only released when we ask to destroy the surface
|
||||
and it would never signal otherwise */
|
||||
qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
|
||||
qxl_release_fence_buffer_objects(release);
|
||||
|
||||
surf->hw_surf_alloc = true;
|
||||
spin_lock(&qdev->surf_id_idr_lock);
|
||||
idr_replace(&qdev->surf_id_idr, surf, surf->surface_id);
|
||||
spin_unlock(&qdev->surf_id_idr_lock);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Destroy the device-side surface for @surf via QXL_SURFACE_CMD_DESTROY.
 * The surface is dropped from the idr immediately, but the surface *id*
 * is only recycled when the destroy release signals (via
 * surface_release_id).  No-op if no hw surface exists. */
int qxl_hw_surface_dealloc(struct qxl_device *qdev,
			   struct qxl_bo *surf)
{
	struct qxl_surface_cmd *cmd;
	struct qxl_release *release;
	int ret;
	int id;

	if (!surf->hw_surf_alloc)
		return 0;

	/* chain the destroy onto the create release so ordering holds */
	ret = qxl_alloc_surface_release_reserved(qdev, QXL_SURFACE_CMD_DESTROY,
						 surf->surf_create,
						 &release);
	if (ret)
		return ret;

	surf->surf_create = NULL;
	/* remove the surface from the idr, but not the surface id yet */
	spin_lock(&qdev->surf_id_idr_lock);
	idr_replace(&qdev->surf_id_idr, NULL, surf->surface_id);
	spin_unlock(&qdev->surf_id_idr_lock);
	surf->hw_surf_alloc = false;

	id = surf->surface_id;
	surf->surface_id = 0;

	release->surface_release_id = id;
	cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
	cmd->type = QXL_SURFACE_CMD_DESTROY;
	cmd->surface_id = id;
	qxl_release_unmap(qdev, release, &cmd->release_info);

	qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);

	qxl_release_fence_buffer_objects(release);

	return 0;
}
|
||||
|
||||
/* Flush the whole surface back to memory (full-extent update area),
 * retrying if the wait is interrupted by a signal. */
int qxl_update_surface(struct qxl_device *qdev, struct qxl_bo *surf)
{
	struct qxl_rect rect;
	int ret;

	/* if we are evicting, we need to make sure the surface is up
	   to date */
	rect.left = 0;
	rect.right = surf->surf.width;
	rect.top = 0;
	rect.bottom = surf->surf.height;
retry:
	ret = qxl_io_update_area(qdev, surf, &rect);
	if (ret == -ERESTARTSYS)
		goto retry;
	return ret;
}
|
||||
|
||||
/* Evict @surf's hw surface; caller holds surf_evict_mutex.  When
 * @do_update_area, the surface contents are flushed to memory first so
 * no rendering is lost. */
static void qxl_surface_evict_locked(struct qxl_device *qdev, struct qxl_bo *surf, bool do_update_area)
{
	/* no need to update area if we are just freeing the surface normally */
	if (do_update_area)
		qxl_update_surface(qdev, surf);

	/* nuke the surface id at the hw */
	qxl_hw_surface_dealloc(qdev, surf);
}
|
||||
|
||||
/* Locked wrapper around qxl_surface_evict_locked. */
void qxl_surface_evict(struct qxl_device *qdev, struct qxl_bo *surf, bool do_update_area)
{
	mutex_lock(&qdev->surf_evict_mutex);
	qxl_surface_evict_locked(qdev, surf, do_update_area);
	mutex_unlock(&qdev->surf_evict_mutex);
}
|
||||
|
||||
/* Try to reap one surface: reserve its BO, wait for pending GPU work
 * (non-blocking unless @stall), then evict it.  Returns -EBUSY if the BO
 * could not be reserved or is still busy.  The evict mutex is dropped
 * across a stalling wait to avoid blocking other evictions. */
static int qxl_reap_surf(struct qxl_device *qdev, struct qxl_bo *surf, bool stall)
{
	int ret;

	ret = qxl_bo_reserve(surf, false);
	if (ret == -EBUSY)
		return -EBUSY;

	if (stall)
		mutex_unlock(&qdev->surf_evict_mutex);

	/* no_wait = !stall: only block on the fence when stalling */
	ret = ttm_bo_wait(&surf->tbo, true, true, !stall);

	if (stall)
		mutex_lock(&qdev->surf_evict_mutex);
	if (ret == -EBUSY) {
		qxl_bo_unreserve(surf);
		return -EBUSY;
	}

	qxl_surface_evict_locked(qdev, surf, true);
	qxl_bo_unreserve(surf);
	return 0;
}
|
||||
|
||||
/* Reap up to @max_to_reap surfaces to free device surface ids.  Scans
 * the idr circularly starting just after the most recently allocated id
 * (approximate LRU).  First pass is non-stalling; if nothing was reaped,
 * a second stalling pass waits for busy BOs.  After reaping, sleep
 * briefly and run a flushing garbage collect so the ids actually come
 * back.  Always returns 0. */
static int qxl_reap_surface_id(struct qxl_device *qdev, int max_to_reap)
{
	int num_reaped = 0;
	int i, ret;
	bool stall = false;
	int start = 0;

	mutex_lock(&qdev->surf_evict_mutex);
again:

	spin_lock(&qdev->surf_id_idr_lock);
	start = qdev->last_alloced_surf_id + 1;
	spin_unlock(&qdev->surf_id_idr_lock);

	for (i = start; i < start + qdev->rom->n_surfaces; i++) {
		void *objptr;
		int surfid = i % qdev->rom->n_surfaces;

		/* this avoids the case where the objects is in the
		   idr but has been evicted half way - its makes
		   the idr lookup atomic with the eviction */
		spin_lock(&qdev->surf_id_idr_lock);
		objptr = idr_find(&qdev->surf_id_idr, surfid);
		spin_unlock(&qdev->surf_id_idr_lock);

		if (!objptr)
			continue;

		ret = qxl_reap_surf(qdev, objptr, stall);
		if (ret == 0)
			num_reaped++;
		if (num_reaped >= max_to_reap)
			break;
	}
	if (num_reaped == 0 && stall == false) {
		/* nothing reapable without waiting: retry, stalling on
		 * busy BOs this time */
		stall = true;
		goto again;
	}

	mutex_unlock(&qdev->surf_evict_mutex);
	if (num_reaped) {
		usleep_range(500, 1000);
		qxl_queue_garbage_collect(qdev, true);
	}

	return 0;
}
|
149
drivers/gpu/drm/qxl/qxl_debugfs.c
Normal file
149
drivers/gpu/drm/qxl/qxl_debugfs.c
Normal file
|
@ -0,0 +1,149 @@
|
|||
/*
|
||||
* Copyright (C) 2009 Red Hat <bskeggs@redhat.com>
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining
|
||||
* a copy of this software and associated documentation files (the
|
||||
* "Software"), to deal in the Software without restriction, including
|
||||
* without limitation the rights to use, copy, modify, merge, publish,
|
||||
* distribute, sublicense, and/or sell copies of the Software, and to
|
||||
* permit persons to whom the Software is furnished to do so, subject to
|
||||
* the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the
|
||||
* next paragraph) shall be included in all copies or substantial
|
||||
* portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
|
||||
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
|
||||
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
||||
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
*/
|
||||
|
||||
/*
|
||||
* Authors:
|
||||
* Alon Levy <alevy@redhat.com>
|
||||
*/
|
||||
|
||||
#include <linux/debugfs.h>
|
||||
|
||||
#include "drmP.h"
|
||||
#include "qxl_drv.h"
|
||||
#include "qxl_object.h"
|
||||
|
||||
|
||||
#if defined(CONFIG_DEBUG_FS)
|
||||
/* debugfs "irq_received": dump the per-category interrupt counters,
 * one per line (total, display, cursor, io-cmd, error). */
static int
qxl_debugfs_irq_received(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct qxl_device *qdev = node->minor->dev->dev_private;

	seq_printf(m, "%d\n", atomic_read(&qdev->irq_received));
	seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_display));
	seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_cursor));
	seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_io_cmd));
	seq_printf(m, "%d\n", qdev->irq_received_error);
	return 0;
}
|
||||
|
||||
/* debugfs "qxl_buffers": list every GEM object with its size, pin count,
 * and number of shared fences on its reservation object. */
static int
qxl_debugfs_buffers_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct qxl_device *qdev = node->minor->dev->dev_private;
	struct qxl_bo *bo;

	list_for_each_entry(bo, &qdev->gem.objects, list) {
		struct reservation_object_list *fobj;
		int rel;

		/* fence list is RCU-protected; only the count is read */
		rcu_read_lock();
		fobj = rcu_dereference(bo->tbo.resv->fence);
		rel = fobj ? fobj->shared_count : 0;
		rcu_read_unlock();

		seq_printf(m, "size %ld, pc %d, num releases %d\n",
			   (unsigned long)bo->gem_base.size,
			   bo->pin_count, rel);
	}
	return 0;
}
|
||||
|
||||
/* Table of debugfs entries registered for every qxl minor. */
static struct drm_info_list qxl_debugfs_list[] = {
	{ "irq_received", qxl_debugfs_irq_received, 0, NULL },
	{ "qxl_buffers", qxl_debugfs_buffers_info, 0, NULL },
};
#define QXL_DEBUGFS_ENTRIES ARRAY_SIZE(qxl_debugfs_list)
#endif
|
||||
|
||||
/* Register the static qxl debugfs files on @minor (no-op without
 * CONFIG_DEBUG_FS). */
int
qxl_debugfs_init(struct drm_minor *minor)
{
#if defined(CONFIG_DEBUG_FS)
	drm_debugfs_create_files(qxl_debugfs_list, QXL_DEBUGFS_ENTRIES,
				 minor->debugfs_root, minor);
#endif
	return 0;
}
|
||||
|
||||
/* Unregister the static qxl debugfs files from @minor. */
void
qxl_debugfs_takedown(struct drm_minor *minor)
{
#if defined(CONFIG_DEBUG_FS)
	drm_debugfs_remove_files(qxl_debugfs_list, QXL_DEBUGFS_ENTRIES,
				 minor);
#endif
}
|
||||
|
||||
/* Register an additional set of debugfs @files for @qdev on both the
 * control and primary minors, tracking them in qdev->debugfs so they can
 * be removed later.  Idempotent for an already-registered table; fails
 * with -EINVAL once QXL_DEBUGFS_MAX_COMPONENTS slots are used. */
int qxl_debugfs_add_files(struct qxl_device *qdev,
			  struct drm_info_list *files,
			  unsigned nfiles)
{
	unsigned i;

	for (i = 0; i < qdev->debugfs_count; i++) {
		if (qdev->debugfs[i].files == files) {
			/* Already registered */
			return 0;
		}
	}

	i = qdev->debugfs_count + 1;
	if (i > QXL_DEBUGFS_MAX_COMPONENTS) {
		DRM_ERROR("Reached maximum number of debugfs components.\n");
		DRM_ERROR("Report so we increase QXL_DEBUGFS_MAX_COMPONENTS.\n");
		return -EINVAL;
	}
	qdev->debugfs[qdev->debugfs_count].files = files;
	qdev->debugfs[qdev->debugfs_count].num_files = nfiles;
	qdev->debugfs_count = i;
#if defined(CONFIG_DEBUG_FS)
	drm_debugfs_create_files(files, nfiles,
				 qdev->ddev->control->debugfs_root,
				 qdev->ddev->control);
	drm_debugfs_create_files(files, nfiles,
				 qdev->ddev->primary->debugfs_root,
				 qdev->ddev->primary);
#endif
	return 0;
}
|
||||
|
||||
/* Remove every debugfs table registered via qxl_debugfs_add_files from
 * both minors.  Note: qdev->debugfs_count is intentionally left as-is;
 * this is only called at teardown. */
void qxl_debugfs_remove_files(struct qxl_device *qdev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned i;

	for (i = 0; i < qdev->debugfs_count; i++) {
		drm_debugfs_remove_files(qdev->debugfs[i].files,
					 qdev->debugfs[i].num_files,
					 qdev->ddev->control);
		drm_debugfs_remove_files(qdev->debugfs[i].files,
					 qdev->debugfs[i].num_files,
					 qdev->ddev->primary);
	}
#endif
}
|
879
drivers/gpu/drm/qxl/qxl_dev.h
Normal file
879
drivers/gpu/drm/qxl/qxl_dev.h
Normal file
|
@ -0,0 +1,879 @@
|
|||
/*
|
||||
Copyright (C) 2009 Red Hat, Inc.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in
|
||||
the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of the copyright holder nor the names of its
|
||||
contributors may be used to endorse or promote products derived
|
||||
from this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS "AS
|
||||
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
|
||||
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
|
||||
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
|
||||
#ifndef H_QXL_DEV
|
||||
#define H_QXL_DEV
|
||||
|
||||
#include <linux/types.h>
|
||||
|
||||
/*
|
||||
* from spice-protocol
|
||||
* Release 0.10.0
|
||||
*/
|
||||
|
||||
/* enums.h */
|
||||
|
||||
/* Wire formats an image payload may use (bitmap, compressed, cache
 * reference, or another surface). */
enum SpiceImageType {
	SPICE_IMAGE_TYPE_BITMAP,
	SPICE_IMAGE_TYPE_QUIC,
	SPICE_IMAGE_TYPE_RESERVED,
	SPICE_IMAGE_TYPE_LZ_PLT = 100,
	SPICE_IMAGE_TYPE_LZ_RGB,
	SPICE_IMAGE_TYPE_GLZ_RGB,
	SPICE_IMAGE_TYPE_FROM_CACHE,
	SPICE_IMAGE_TYPE_SURFACE,
	SPICE_IMAGE_TYPE_JPEG,
	SPICE_IMAGE_TYPE_FROM_CACHE_LOSSLESS,
	SPICE_IMAGE_TYPE_ZLIB_GLZ_RGB,
	SPICE_IMAGE_TYPE_JPEG_ALPHA,

	SPICE_IMAGE_TYPE_ENUM_END
};

/* Pixel formats for SPICE_IMAGE_TYPE_BITMAP images. */
enum SpiceBitmapFmt {
	SPICE_BITMAP_FMT_INVALID,
	SPICE_BITMAP_FMT_1BIT_LE,
	SPICE_BITMAP_FMT_1BIT_BE,
	SPICE_BITMAP_FMT_4BIT_LE,
	SPICE_BITMAP_FMT_4BIT_BE,
	SPICE_BITMAP_FMT_8BIT,
	SPICE_BITMAP_FMT_16BIT,
	SPICE_BITMAP_FMT_24BIT,
	SPICE_BITMAP_FMT_32BIT,
	SPICE_BITMAP_FMT_RGBA,

	SPICE_BITMAP_FMT_ENUM_END
};

/* Surface pixel formats; numeric values encode bit depth plus variant. */
enum SpiceSurfaceFmt {
	SPICE_SURFACE_FMT_INVALID,
	SPICE_SURFACE_FMT_1_A,
	SPICE_SURFACE_FMT_8_A = 8,
	SPICE_SURFACE_FMT_16_555 = 16,
	SPICE_SURFACE_FMT_32_xRGB = 32,
	SPICE_SURFACE_FMT_16_565 = 80,
	SPICE_SURFACE_FMT_32_ARGB = 96,

	SPICE_SURFACE_FMT_ENUM_END
};

/* Clip specification for draw commands: none, or a rectangle list. */
enum SpiceClipType {
	SPICE_CLIP_TYPE_NONE,
	SPICE_CLIP_TYPE_RECTS,

	SPICE_CLIP_TYPE_ENUM_END
};

/* Raster-operation descriptor bit flags. */
enum SpiceRopd {
	SPICE_ROPD_INVERS_SRC = (1 << 0),
	SPICE_ROPD_INVERS_BRUSH = (1 << 1),
	SPICE_ROPD_INVERS_DEST = (1 << 2),
	SPICE_ROPD_OP_PUT = (1 << 3),
	SPICE_ROPD_OP_OR = (1 << 4),
	SPICE_ROPD_OP_AND = (1 << 5),
	SPICE_ROPD_OP_XOR = (1 << 6),
	SPICE_ROPD_OP_BLACKNESS = (1 << 7),
	SPICE_ROPD_OP_WHITENESS = (1 << 8),
	SPICE_ROPD_OP_INVERS = (1 << 9),
	SPICE_ROPD_INVERS_RES = (1 << 10),

	SPICE_ROPD_MASK = 0x7ff
};

/* Brush fill kinds for draw commands. */
enum SpiceBrushType {
	SPICE_BRUSH_TYPE_NONE,
	SPICE_BRUSH_TYPE_SOLID,
	SPICE_BRUSH_TYPE_PATTERN,

	SPICE_BRUSH_TYPE_ENUM_END
};

/* Hardware-cursor pixel formats. */
enum SpiceCursorType {
	SPICE_CURSOR_TYPE_ALPHA,
	SPICE_CURSOR_TYPE_MONO,
	SPICE_CURSOR_TYPE_COLOR4,
	SPICE_CURSOR_TYPE_COLOR8,
	SPICE_CURSOR_TYPE_COLOR16,
	SPICE_CURSOR_TYPE_COLOR24,
	SPICE_CURSOR_TYPE_COLOR32,

	SPICE_CURSOR_TYPE_ENUM_END
};
|
||||
|
||||
/* qxl_dev.h */

/* Everything below is guest<->host ABI shared with the QXL device;
 * pack to byte alignment so the layout matches on both sides. */
#pragma pack(push, 1)

#define REDHAT_PCI_VENDOR_ID 0x1b36

/* 0x100-0x11f reserved for spice, 0x1ff used for unstable work */
#define QXL_DEVICE_ID_STABLE 0x0100

/* PCI revision ids map to protocol versions of the device. */
enum {
	QXL_REVISION_STABLE_V04 = 0x01,
	QXL_REVISION_STABLE_V06 = 0x02,
	QXL_REVISION_STABLE_V10 = 0x03,
	QXL_REVISION_STABLE_V12 = 0x04,
};

#define QXL_DEVICE_ID_DEVEL 0x01ff
#define QXL_REVISION_DEVEL 0x01

/* Magic values the device writes into ROM/RAM headers; the guest
 * validates them at probe time. */
#define QXL_ROM_MAGIC (*(uint32_t *)"QXRO")
#define QXL_RAM_MAGIC (*(uint32_t *)"QXRA")

/* Indices of the device's PCI BARs. */
enum {
	QXL_RAM_RANGE_INDEX,
	QXL_VRAM_RANGE_INDEX,
	QXL_ROM_RANGE_INDEX,
	QXL_IO_RANGE_INDEX,

	QXL_PCI_RANGES
};

/* I/O port offsets within the IO BAR.
 * qxl-1 compat: append only */
enum {
	QXL_IO_NOTIFY_CMD,
	QXL_IO_NOTIFY_CURSOR,
	QXL_IO_UPDATE_AREA,
	QXL_IO_UPDATE_IRQ,
	QXL_IO_NOTIFY_OOM,
	QXL_IO_RESET,
	QXL_IO_SET_MODE, /* qxl-1 */
	QXL_IO_LOG,
	/* appended for qxl-2 */
	QXL_IO_MEMSLOT_ADD,
	QXL_IO_MEMSLOT_DEL,
	QXL_IO_DETACH_PRIMARY,
	QXL_IO_ATTACH_PRIMARY,
	QXL_IO_CREATE_PRIMARY,
	QXL_IO_DESTROY_PRIMARY,
	QXL_IO_DESTROY_SURFACE_WAIT,
	QXL_IO_DESTROY_ALL_SURFACES,
	/* appended for qxl-3 */
	QXL_IO_UPDATE_AREA_ASYNC,
	QXL_IO_MEMSLOT_ADD_ASYNC,
	QXL_IO_CREATE_PRIMARY_ASYNC,
	QXL_IO_DESTROY_PRIMARY_ASYNC,
	QXL_IO_DESTROY_SURFACE_ASYNC,
	QXL_IO_DESTROY_ALL_SURFACES_ASYNC,
	QXL_IO_FLUSH_SURFACES_ASYNC,
	QXL_IO_FLUSH_RELEASE,
	/* appended for qxl-4 */
	QXL_IO_MONITORS_CONFIG_ASYNC,

	QXL_IO_RANGE_SIZE
};
|
||||
|
||||
/* Device-physical address (memslot-encoded); opaque 64-bit value. */
typedef uint64_t QXLPHYSICAL;
typedef int32_t QXLFIXED; /* fixed 28.4 */

struct qxl_point_fix {
	QXLFIXED x;
	QXLFIXED y;
};

struct qxl_point {
	int32_t x;
	int32_t y;
};

/* 16-bit point kept for spice-1.6 wire compatibility. */
struct qxl_point_1_6 {
	int16_t x;
	int16_t y;
};

struct qxl_rect {
	int32_t top;
	int32_t left;
	int32_t bottom;
	int32_t right;
};

struct qxl_urect {
	uint32_t top;
	uint32_t left;
	uint32_t bottom;
	uint32_t right;
};

/* Read-only device ROM describing the device's capabilities and the
 * offsets of the other shared structures.
 * qxl-1 compat: append only */
struct qxl_rom {
	uint32_t magic;		/* must equal QXL_ROM_MAGIC */
	uint32_t id;
	uint32_t update_id;
	uint32_t compression_level;
	uint32_t log_level;
	uint32_t mode; /* qxl-1 */
	uint32_t modes_offset;
	uint32_t num_io_pages;
	uint32_t pages_offset; /* qxl-1 */
	uint32_t draw_area_offset; /* qxl-1 */
	uint32_t surface0_area_size; /* qxl-1 name: draw_area_size */
	uint32_t ram_header_offset;
	uint32_t mm_clock;	/* device timestamp source for mm_time */
	/* appended for qxl-2 */
	uint32_t n_surfaces;
	uint64_t flags;
	uint8_t slots_start;
	uint8_t slots_end;
	uint8_t slot_gen_bits;
	uint8_t slot_id_bits;
	uint8_t slot_generation;
	/* appended for qxl-4 */
	uint8_t client_present;
	uint8_t client_capabilities[58];
	uint32_t client_monitors_config_crc;
	struct {
		uint16_t count;
		uint16_t padding;
		struct qxl_urect heads[64];
	} client_monitors_config;
};

/* One fixed display mode advertised by the ROM.
 * qxl-1 compat: fixed */
struct qxl_mode {
	uint32_t id;
	uint32_t x_res;
	uint32_t y_res;
	uint32_t bits;
	uint32_t stride;
	uint32_t x_mili;	/* physical width in millimetres */
	uint32_t y_mili;	/* physical height in millimetres */
	uint32_t orientation;
};

/* qxl-1 compat: fixed */
struct qxl_modes {
	uint32_t n_modes;
	struct qxl_mode modes[0];	/* n_modes entries follow */
};
|
||||
|
||||
/* Top-level command types pushed on the command ring.
 * qxl-1 compat: append only */
enum qxl_cmd_type {
	QXL_CMD_NOP,
	QXL_CMD_DRAW,
	QXL_CMD_UPDATE,
	QXL_CMD_CURSOR,
	QXL_CMD_MESSAGE,
	QXL_CMD_SURFACE,
};

/* Ring entry: device-physical pointer to the payload plus its type.
 * qxl-1 compat: fixed */
struct qxl_command {
	QXLPHYSICAL data;
	uint32_t type;		/* enum qxl_cmd_type */
	uint32_t padding;
};

#define QXL_COMMAND_FLAG_COMPAT (1<<0)
#define QXL_COMMAND_FLAG_COMPAT_16BPP (2<<0)	/* NB: (2<<0) == bit 1 */

struct qxl_command_ext {
	struct qxl_command cmd;
	uint32_t group_id;	/* memslot group owning cmd.data */
	uint32_t flags;		/* QXL_COMMAND_FLAG_* */
};

/* Guest physical range registered with QXL_IO_MEMSLOT_ADD. */
struct qxl_mem_slot {
	uint64_t mem_start;
	uint64_t mem_end;
};

#define QXL_SURF_TYPE_PRIMARY 0

#define QXL_SURF_FLAG_KEEP_DATA (1 << 0)

/* Parameters for QXL_IO_CREATE_PRIMARY, written to the RAM header. */
struct qxl_surface_create {
	uint32_t width;
	uint32_t height;
	int32_t stride;		/* negative stride = bottom-up layout */
	uint32_t format;	/* enum SpiceSurfaceFmt */
	uint32_t position;
	uint32_t mouse_mode;
	uint32_t flags;
	uint32_t type;
	QXLPHYSICAL mem;
};

#define QXL_COMMAND_RING_SIZE 32
#define QXL_CURSOR_RING_SIZE 32
#define QXL_RELEASE_RING_SIZE 8

#define QXL_LOG_BUF_SIZE 4096

/* Bits in the RAM header's int_pending / int_mask fields. */
#define QXL_INTERRUPT_DISPLAY (1 << 0)
#define QXL_INTERRUPT_CURSOR (1 << 1)
#define QXL_INTERRUPT_IO_CMD (1 << 2)
#define QXL_INTERRUPT_ERROR (1 << 3)
#define QXL_INTERRUPT_CLIENT (1 << 4)
#define QXL_INTERRUPT_CLIENT_MONITORS_CONFIG (1 << 5)

/* Producer/consumer bookkeeping shared by all three rings. */
struct qxl_ring_header {
	uint32_t num_items;
	uint32_t prod;
	uint32_t notify_on_prod;
	uint32_t cons;
	uint32_t notify_on_cons;
};
|
||||
|
||||
/* Shared RAM area at rom->ram_header_offset: the rings plus the
 * in-parameters of the synchronous IO commands.
 * qxl-1 compat: append only */
struct qxl_ram_header {
	uint32_t magic;		/* must equal QXL_RAM_MAGIC */
	uint32_t int_pending;	/* QXL_INTERRUPT_* bits raised by device */
	uint32_t int_mask;	/* QXL_INTERRUPT_* bits guest wants */
	uint8_t log_buf[QXL_LOG_BUF_SIZE];	/* text for QXL_IO_LOG */
	struct qxl_ring_header cmd_ring_hdr;
	struct qxl_command cmd_ring[QXL_COMMAND_RING_SIZE];
	struct qxl_ring_header cursor_ring_hdr;
	struct qxl_command cursor_ring[QXL_CURSOR_RING_SIZE];
	struct qxl_ring_header release_ring_hdr;
	uint64_t release_ring[QXL_RELEASE_RING_SIZE];
	struct qxl_rect update_area;	/* in for QXL_IO_UPDATE_AREA */
	/* appended for qxl-2 */
	uint32_t update_surface;
	struct qxl_mem_slot mem_slot;	/* in for QXL_IO_MEMSLOT_ADD */
	struct qxl_surface_create create_surface;
	uint64_t flags;

	/* appended for qxl-4 */

	/* used by QXL_IO_MONITORS_CONFIG_ASYNC */
	QXLPHYSICAL monitors_config;
	uint8_t guest_capabilities[64];
};

/* Release cookie embedded in every command payload; the guest passes
 * an id in and the device chains freed resources through next. */
union qxl_release_info {
	uint64_t id; /* in */
	uint64_t next; /* out */
};

struct qxl_release_info_ext {
	union qxl_release_info *info;
	uint32_t group_id;
};

/* Header of a chunk in a linked list of variable-length data. */
struct qxl_data_chunk {
	uint32_t data_size;
	QXLPHYSICAL prev_chunk;
	QXLPHYSICAL next_chunk;
	uint8_t data[0];	/* data_size bytes follow */
};
|
||||
|
||||
/* Payload of QXL_CMD_MESSAGE: NUL-terminated text for the host log. */
struct qxl_message {
	union qxl_release_info release_info;
	uint8_t data[0];
};

struct qxl_compat_update_cmd {
	union qxl_release_info release_info;
	struct qxl_rect area;
	uint32_t update_id;
};

/* Payload of QXL_CMD_UPDATE: ask the device to refresh a region. */
struct qxl_update_cmd {
	union qxl_release_info release_info;
	struct qxl_rect area;
	uint32_t update_id;
	uint32_t surface_id;
};

struct qxl_cursor_header {
	uint64_t unique;
	uint16_t type;		/* enum SpiceCursorType */
	uint16_t width;
	uint16_t height;
	uint16_t hot_spot_x;
	uint16_t hot_spot_y;
};

struct qxl_cursor {
	struct qxl_cursor_header header;
	uint32_t data_size;
	struct qxl_data_chunk chunk;	/* shape pixel data */
};

/* Sub-commands for struct qxl_cursor_cmd.type. */
enum {
	QXL_CURSOR_SET,
	QXL_CURSOR_MOVE,
	QXL_CURSOR_HIDE,
	QXL_CURSOR_TRAIL,
};

#define QXL_CURSOR_DEVICE_DATA_SIZE 128

/* Payload of QXL_CMD_CURSOR; the union member used depends on type. */
struct qxl_cursor_cmd {
	union qxl_release_info release_info;
	uint8_t type;		/* QXL_CURSOR_* */
	union {
		struct {
			struct qxl_point_1_6 position;
			uint8_t visible;
			QXLPHYSICAL shape;	/* -> struct qxl_cursor */
		} set;
		struct {
			uint16_t length;
			uint16_t frequency;
		} trail;
		struct qxl_point_1_6 position;	/* for QXL_CURSOR_MOVE */
	} u;
	/* todo: dynamic size from rom */
	uint8_t device_data[QXL_CURSOR_DEVICE_DATA_SIZE];
};

/* Values for struct qxl_drawable.type, selecting the union member. */
enum {
	QXL_DRAW_NOP,
	QXL_DRAW_FILL,
	QXL_DRAW_OPAQUE,
	QXL_DRAW_COPY,
	QXL_COPY_BITS,
	QXL_DRAW_BLEND,
	QXL_DRAW_BLACKNESS,
	QXL_DRAW_WHITENESS,
	QXL_DRAW_INVERS,
	QXL_DRAW_ROP3,
	QXL_DRAW_STROKE,
	QXL_DRAW_TEXT,
	QXL_DRAW_TRANSPARENT,
	QXL_DRAW_ALPHA_BLEND,
	QXL_DRAW_COMPOSITE
};
|
||||
|
||||
/* One glyph of a QXL_DRAW_TEXT string. */
struct qxl_raster_glyph {
	struct qxl_point render_pos;
	struct qxl_point glyph_origin;
	uint16_t width;
	uint16_t height;
	uint8_t data[0];
};

struct qxl_string {
	uint32_t data_size;
	uint16_t length;	/* number of glyphs */
	uint16_t flags;
	struct qxl_data_chunk chunk;	/* qxl_raster_glyph stream */
};

struct qxl_copy_bits {
	struct qxl_point src_pos;
};

/* How a drawable interacts with overlapping video regions. */
enum qxl_effect_type {
	QXL_EFFECT_BLEND = 0,
	QXL_EFFECT_OPAQUE = 1,
	QXL_EFFECT_REVERT_ON_DUP = 2,
	QXL_EFFECT_BLACKNESS_ON_DUP = 3,
	QXL_EFFECT_WHITENESS_ON_DUP = 4,
	QXL_EFFECT_NOP_ON_DUP = 5,
	QXL_EFFECT_NOP = 6,
	QXL_EFFECT_OPAQUE_BRUSH = 7
};

struct qxl_pattern {
	QXLPHYSICAL pat;	/* -> struct qxl_image */
	struct qxl_point pos;
};

struct qxl_brush {
	uint32_t type;		/* enum SpiceBrushType */
	union {
		uint32_t color;
		struct qxl_pattern pattern;
	} u;
};

struct qxl_q_mask {
	uint8_t flags;
	struct qxl_point pos;
	QXLPHYSICAL bitmap;	/* 0 = no mask */
};

/* Payloads for the individual QXL_DRAW_* operations: */

struct qxl_fill {
	struct qxl_brush brush;
	uint16_t rop_descriptor;	/* SPICE_ROPD_* bits */
	struct qxl_q_mask mask;
};

struct qxl_opaque {
	QXLPHYSICAL src_bitmap;
	struct qxl_rect src_area;
	struct qxl_brush brush;
	uint16_t rop_descriptor;
	uint8_t scale_mode;
	struct qxl_q_mask mask;
};

struct qxl_copy {
	QXLPHYSICAL src_bitmap;
	struct qxl_rect src_area;
	uint16_t rop_descriptor;
	uint8_t scale_mode;
	struct qxl_q_mask mask;
};

struct qxl_transparent {
	QXLPHYSICAL src_bitmap;
	struct qxl_rect src_area;
	uint32_t src_color;
	uint32_t true_color;
};

struct qxl_alpha_blend {
	uint16_t alpha_flags;
	uint8_t alpha;
	QXLPHYSICAL src_bitmap;
	struct qxl_rect src_area;
};

struct qxl_compat_alpha_blend {
	uint8_t alpha;
	QXLPHYSICAL src_bitmap;
	struct qxl_rect src_area;
};

struct qxl_rop_3 {
	QXLPHYSICAL src_bitmap;
	struct qxl_rect src_area;
	struct qxl_brush brush;
	uint8_t rop3;
	uint8_t scale_mode;
	struct qxl_q_mask mask;
};

struct qxl_line_attr {
	uint8_t flags;
	uint8_t join_style;
	uint8_t end_style;
	uint8_t style_nseg;
	QXLFIXED width;
	QXLFIXED miter_limit;
	QXLPHYSICAL style;
};

struct qxl_stroke {
	QXLPHYSICAL path;	/* -> struct qxl_path */
	struct qxl_line_attr attr;
	struct qxl_brush brush;
	uint16_t fore_mode;
	uint16_t back_mode;
};

struct qxl_text {
	QXLPHYSICAL str;	/* -> struct qxl_string */
	struct qxl_rect back_area;
	struct qxl_brush fore_brush;
	struct qxl_brush back_brush;
	uint16_t fore_mode;
	uint16_t back_mode;
};

struct qxl_mask {
	struct qxl_q_mask mask;
};

struct qxl_clip {
	uint32_t type;		/* enum SpiceClipType */
	QXLPHYSICAL data;	/* -> struct qxl_clip_rects when RECTS */
};
|
||||
|
||||
/* Porter-Duff / PDF blend operators for QXL_DRAW_COMPOSITE. */
enum qxl_operator {
	QXL_OP_CLEAR = 0x00,
	QXL_OP_SOURCE = 0x01,
	QXL_OP_DST = 0x02,
	QXL_OP_OVER = 0x03,
	QXL_OP_OVER_REVERSE = 0x04,
	QXL_OP_IN = 0x05,
	QXL_OP_IN_REVERSE = 0x06,
	QXL_OP_OUT = 0x07,
	QXL_OP_OUT_REVERSE = 0x08,
	QXL_OP_ATOP = 0x09,
	QXL_OP_ATOP_REVERSE = 0x0a,
	QXL_OP_XOR = 0x0b,
	QXL_OP_ADD = 0x0c,
	QXL_OP_SATURATE = 0x0d,
	/* Note the jump here from 0x0d to 0x30 */
	QXL_OP_MULTIPLY = 0x30,
	QXL_OP_SCREEN = 0x31,
	QXL_OP_OVERLAY = 0x32,
	QXL_OP_DARKEN = 0x33,
	QXL_OP_LIGHTEN = 0x34,
	QXL_OP_COLOR_DODGE = 0x35,
	QXL_OP_COLOR_BURN = 0x36,
	QXL_OP_HARD_LIGHT = 0x37,
	QXL_OP_SOFT_LIGHT = 0x38,
	QXL_OP_DIFFERENCE = 0x39,
	QXL_OP_EXCLUSION = 0x3a,
	QXL_OP_HSL_HUE = 0x3b,
	QXL_OP_HSL_SATURATION = 0x3c,
	QXL_OP_HSL_COLOR = 0x3d,
	QXL_OP_HSL_LUMINOSITY = 0x3e
};

/* 2x3 affine transform; entries presumably fixed-point — the format
 * is defined by the spice protocol, not visible here. */
struct qxl_transform {
	uint32_t t00;
	uint32_t t01;
	uint32_t t02;
	uint32_t t10;
	uint32_t t11;
	uint32_t t12;
};

/* The flags field has the following bit fields:
 *
 * operator: [ 0 - 7 ]
 * src_filter: [ 8 - 10 ]
 * mask_filter: [ 11 - 13 ]
 * src_repeat: [ 14 - 15 ]
 * mask_repeat: [ 16 - 17 ]
 * component_alpha: [ 18 - 18 ]
 * reserved: [ 19 - 31 ]
 *
 * The repeat and filter values are those of pixman:
 * REPEAT_NONE = 0
 * REPEAT_NORMAL = 1
 * REPEAT_PAD = 2
 * REPEAT_REFLECT = 3
 *
 * The filter values are:
 * FILTER_NEAREST = 0
 * FILTER_BILINEAR = 1
 */
struct qxl_composite {
	uint32_t flags;

	QXLPHYSICAL src;
	QXLPHYSICAL src_transform; /* May be NULL */
	QXLPHYSICAL mask; /* May be NULL */
	QXLPHYSICAL mask_transform; /* May be NULL */
	struct qxl_point_1_6 src_origin;
	struct qxl_point_1_6 mask_origin;
};
|
||||
|
||||
/* qxl-1 drawable layout, kept for compatibility mode. */
struct qxl_compat_drawable {
	union qxl_release_info release_info;
	uint8_t effect;
	uint8_t type;
	uint16_t bitmap_offset;
	struct qxl_rect bitmap_area;
	struct qxl_rect bbox;
	struct qxl_clip clip;
	uint32_t mm_time;
	union {
		struct qxl_fill fill;
		struct qxl_opaque opaque;
		struct qxl_copy copy;
		struct qxl_transparent transparent;
		struct qxl_compat_alpha_blend alpha_blend;
		struct qxl_copy_bits copy_bits;
		struct qxl_copy blend;
		struct qxl_rop_3 rop3;
		struct qxl_stroke stroke;
		struct qxl_text text;
		struct qxl_mask blackness;
		struct qxl_mask invers;
		struct qxl_mask whiteness;
	} u;
};

/* Payload of QXL_CMD_DRAW: one rendering operation on a surface.
 * type (QXL_DRAW_*) selects the union member. */
struct qxl_drawable {
	union qxl_release_info release_info;
	uint32_t surface_id;
	uint8_t effect;		/* enum qxl_effect_type */
	uint8_t type;		/* QXL_DRAW_* */
	uint8_t self_bitmap;
	struct qxl_rect self_bitmap_area;
	struct qxl_rect bbox;	/* destination bounding box */
	struct qxl_clip clip;
	uint32_t mm_time;	/* from rom->mm_clock, for stream sync */
	int32_t surfaces_dest[3];	/* surfaces depended upon, -1 = none */
	struct qxl_rect surfaces_rects[3];
	union {
		struct qxl_fill fill;
		struct qxl_opaque opaque;
		struct qxl_copy copy;
		struct qxl_transparent transparent;
		struct qxl_alpha_blend alpha_blend;
		struct qxl_copy_bits copy_bits;
		struct qxl_copy blend;
		struct qxl_rop_3 rop3;
		struct qxl_stroke stroke;
		struct qxl_text text;
		struct qxl_mask blackness;
		struct qxl_mask invers;
		struct qxl_mask whiteness;
		struct qxl_composite composite;
	} u;
};

enum qxl_surface_cmd_type {
	QXL_SURFACE_CMD_CREATE,
	QXL_SURFACE_CMD_DESTROY,
};

struct qxl_surface {
	uint32_t format;	/* enum SpiceSurfaceFmt */
	uint32_t width;
	uint32_t height;
	int32_t stride;
	QXLPHYSICAL data;
};

/* Payload of QXL_CMD_SURFACE. */
struct qxl_surface_cmd {
	union qxl_release_info release_info;
	uint32_t surface_id;
	uint8_t type;		/* enum qxl_surface_cmd_type */
	uint32_t flags;
	union {
		struct qxl_surface surface_create;
	} u;
};
|
||||
|
||||
/* Rect-list clip data pointed to by qxl_clip.data. */
struct qxl_clip_rects {
	uint32_t num_rects;
	struct qxl_data_chunk chunk;	/* num_rects * struct qxl_rect */
};

/* Flags for struct qxl_path_seg.flags. */
enum {
	QXL_PATH_BEGIN = (1 << 0),
	QXL_PATH_END = (1 << 1),
	QXL_PATH_CLOSE = (1 << 3),
	QXL_PATH_BEZIER = (1 << 4),
};

struct qxl_path_seg {
	uint32_t flags;
	uint32_t count;
	struct qxl_point_fix points[0];
};

struct qxl_path {
	uint32_t data_size;
	struct qxl_data_chunk chunk;	/* stream of qxl_path_seg */
};

/* Cache-ownership groups for image ids. */
enum {
	QXL_IMAGE_GROUP_DRIVER,
	QXL_IMAGE_GROUP_DEVICE,
	QXL_IMAGE_GROUP_RED,
	QXL_IMAGE_GROUP_DRIVER_DONT_CACHE,
};

struct qxl_image_id {
	uint32_t group;
	uint32_t unique;
};

union qxl_image_id_union {
	struct qxl_image_id id;
	uint64_t value;
};

enum qxl_image_flags {
	QXL_IMAGE_CACHE = (1 << 0),
	QXL_IMAGE_HIGH_BITS_SET = (1 << 1),
};

enum qxl_bitmap_flags {
	QXL_BITMAP_DIRECT = (1 << 0),
	QXL_BITMAP_UNSTABLE = (1 << 1),
	QXL_BITMAP_TOP_DOWN = (1 << 2), /* == SPICE_BITMAP_FLAGS_TOP_DOWN */
};

/* Pack group (low 32 bits) and unique (high 32 bits) into the
 * image descriptor id. */
#define QXL_SET_IMAGE_ID(image, _group, _unique) { \
	(image)->descriptor.id = (((uint64_t)_unique) << 32) | _group; \
}

struct qxl_image_descriptor {
	uint64_t id;
	uint8_t type;		/* enum SpiceImageType */
	uint8_t flags;		/* enum qxl_image_flags */
	uint32_t width;
	uint32_t height;
};

struct qxl_palette {
	uint64_t unique;
	uint16_t num_ents;
	uint32_t ents[0];	/* num_ents palette entries */
};

struct qxl_bitmap {
	uint8_t format;		/* enum SpiceBitmapFmt */
	uint8_t flags;		/* enum qxl_bitmap_flags */
	uint32_t x;		/* width in pixels */
	uint32_t y;		/* height in pixels */
	uint32_t stride;
	QXLPHYSICAL palette;	/* -> struct qxl_palette, or 0 */
	QXLPHYSICAL data; /* data[0] ? */
};

struct qxl_surface_id {
	uint32_t surface_id;
};

struct qxl_encoder_data {
	uint32_t data_size;
	uint8_t data[0];
};

struct qxl_image {
	struct qxl_image_descriptor descriptor;
	union { /* variable length */
		struct qxl_bitmap bitmap;
		struct qxl_encoder_data quic;
		struct qxl_surface_id surface_image;
	} u;
};

/* A QXLHead is a single monitor output backed by a QXLSurface.
 * x and y offsets are unsigned since they are used in relation to
 * the given surface, not the same as the x, y coordinates in the guest
 * screen reference frame. */
struct qxl_head {
	uint32_t id;
	uint32_t surface_id;
	uint32_t width;
	uint32_t height;
	uint32_t x;
	uint32_t y;
	uint32_t flags;
};

struct qxl_monitors_config {
	uint16_t count;
	uint16_t max_allowed; /* If it is 0 no fixed limit is given by the
				 driver */
	struct qxl_head heads[0];
};

#pragma pack(pop)

#endif /* H_QXL_DEV */
|
1092
drivers/gpu/drm/qxl/qxl_display.c
Normal file
1092
drivers/gpu/drm/qxl/qxl_display.c
Normal file
File diff suppressed because it is too large
Load diff
487
drivers/gpu/drm/qxl/qxl_draw.c
Normal file
487
drivers/gpu/drm/qxl/qxl_draw.c
Normal file
|
@ -0,0 +1,487 @@
|
|||
/*
|
||||
* Copyright 2011 Red Hat, Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* on the rights to use, copy, modify, merge, publish, distribute, sub
|
||||
* license, and/or sell copies of the Software, and to permit persons to whom
|
||||
* the Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
|
||||
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#include "qxl_drv.h"
|
||||
#include "qxl_object.h"
|
||||
|
||||
static int alloc_clips(struct qxl_device *qdev,
|
||||
struct qxl_release *release,
|
||||
unsigned num_clips,
|
||||
struct qxl_bo **clips_bo)
|
||||
{
|
||||
int size = sizeof(struct qxl_clip_rects) + sizeof(struct qxl_rect) * num_clips;
|
||||
|
||||
return qxl_alloc_bo_reserved(qdev, release, size, clips_bo);
|
||||
}
|
||||
|
||||
/* returns a pointer to the already allocated qxl_rect array inside
|
||||
* the qxl_clip_rects. This is *not* the same as the memory allocated
|
||||
* on the device, it is offset to qxl_clip_rects.chunk.data */
|
||||
static struct qxl_rect *drawable_set_clipping(struct qxl_device *qdev,
|
||||
struct qxl_drawable *drawable,
|
||||
unsigned num_clips,
|
||||
struct qxl_bo *clips_bo)
|
||||
{
|
||||
struct qxl_clip_rects *dev_clips;
|
||||
int ret;
|
||||
|
||||
ret = qxl_bo_kmap(clips_bo, (void **)&dev_clips);
|
||||
if (ret) {
|
||||
return NULL;
|
||||
}
|
||||
dev_clips->num_rects = num_clips;
|
||||
dev_clips->chunk.next_chunk = 0;
|
||||
dev_clips->chunk.prev_chunk = 0;
|
||||
dev_clips->chunk.data_size = sizeof(struct qxl_rect) * num_clips;
|
||||
return (struct qxl_rect *)dev_clips->chunk.data;
|
||||
}
|
||||
|
||||
static int
|
||||
alloc_drawable(struct qxl_device *qdev, struct qxl_release **release)
|
||||
{
|
||||
int ret;
|
||||
ret = qxl_alloc_release_reserved(qdev, sizeof(struct qxl_drawable),
|
||||
QXL_RELEASE_DRAWABLE, release,
|
||||
NULL);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Counterpart of alloc_drawable(): drop the release and its BOs. */
static void
free_drawable(struct qxl_device *qdev, struct qxl_release *release)
{
	qxl_release_free(qdev, release);
}
|
||||
|
||||
/* release needs to be reserved at this point */
|
||||
static int
|
||||
make_drawable(struct qxl_device *qdev, int surface, uint8_t type,
|
||||
const struct qxl_rect *rect,
|
||||
struct qxl_release *release)
|
||||
{
|
||||
struct qxl_drawable *drawable;
|
||||
int i;
|
||||
|
||||
drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
|
||||
if (!drawable)
|
||||
return -ENOMEM;
|
||||
|
||||
drawable->type = type;
|
||||
|
||||
drawable->surface_id = surface; /* Only primary for now */
|
||||
drawable->effect = QXL_EFFECT_OPAQUE;
|
||||
drawable->self_bitmap = 0;
|
||||
drawable->self_bitmap_area.top = 0;
|
||||
drawable->self_bitmap_area.left = 0;
|
||||
drawable->self_bitmap_area.bottom = 0;
|
||||
drawable->self_bitmap_area.right = 0;
|
||||
/* FIXME: add clipping */
|
||||
drawable->clip.type = SPICE_CLIP_TYPE_NONE;
|
||||
|
||||
/*
|
||||
* surfaces_dest[i] should apparently be filled out with the
|
||||
* surfaces that we depend on, and surface_rects should be
|
||||
* filled with the rectangles of those surfaces that we
|
||||
* are going to use.
|
||||
*/
|
||||
for (i = 0; i < 3; ++i)
|
||||
drawable->surfaces_dest[i] = -1;
|
||||
|
||||
if (rect)
|
||||
drawable->bbox = *rect;
|
||||
|
||||
drawable->mm_time = qdev->rom->mm_clock;
|
||||
qxl_release_unmap(qdev, release, &drawable->release_info);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int alloc_palette_object(struct qxl_device *qdev,
|
||||
struct qxl_release *release,
|
||||
struct qxl_bo **palette_bo)
|
||||
{
|
||||
return qxl_alloc_bo_reserved(qdev, release,
|
||||
sizeof(struct qxl_palette) + sizeof(uint32_t) * 2,
|
||||
palette_bo);
|
||||
}
|
||||
|
||||
static int qxl_palette_create_1bit(struct qxl_bo *palette_bo,
|
||||
struct qxl_release *release,
|
||||
const struct qxl_fb_image *qxl_fb_image)
|
||||
{
|
||||
const struct fb_image *fb_image = &qxl_fb_image->fb_image;
|
||||
uint32_t visual = qxl_fb_image->visual;
|
||||
const uint32_t *pseudo_palette = qxl_fb_image->pseudo_palette;
|
||||
struct qxl_palette *pal;
|
||||
int ret;
|
||||
uint32_t fgcolor, bgcolor;
|
||||
static uint64_t unique; /* we make no attempt to actually set this
|
||||
* correctly globaly, since that would require
|
||||
* tracking all of our palettes. */
|
||||
ret = qxl_bo_kmap(palette_bo, (void **)&pal);
|
||||
pal->num_ents = 2;
|
||||
pal->unique = unique++;
|
||||
if (visual == FB_VISUAL_TRUECOLOR || visual == FB_VISUAL_DIRECTCOLOR) {
|
||||
/* NB: this is the only used branch currently. */
|
||||
fgcolor = pseudo_palette[fb_image->fg_color];
|
||||
bgcolor = pseudo_palette[fb_image->bg_color];
|
||||
} else {
|
||||
fgcolor = fb_image->fg_color;
|
||||
bgcolor = fb_image->bg_color;
|
||||
}
|
||||
pal->ents[0] = bgcolor;
|
||||
pal->ents[1] = fgcolor;
|
||||
qxl_bo_kunmap(palette_bo);
|
||||
return 0;
|
||||
}
|
||||
|
||||
void qxl_draw_opaque_fb(const struct qxl_fb_image *qxl_fb_image,
|
||||
int stride /* filled in if 0 */)
|
||||
{
|
||||
struct qxl_device *qdev = qxl_fb_image->qdev;
|
||||
struct qxl_drawable *drawable;
|
||||
struct qxl_rect rect;
|
||||
const struct fb_image *fb_image = &qxl_fb_image->fb_image;
|
||||
int x = fb_image->dx;
|
||||
int y = fb_image->dy;
|
||||
int width = fb_image->width;
|
||||
int height = fb_image->height;
|
||||
const char *src = fb_image->data;
|
||||
int depth = fb_image->depth;
|
||||
struct qxl_release *release;
|
||||
struct qxl_image *image;
|
||||
int ret;
|
||||
struct qxl_drm_image *dimage;
|
||||
struct qxl_bo *palette_bo = NULL;
|
||||
if (stride == 0)
|
||||
stride = depth * width / 8;
|
||||
|
||||
ret = alloc_drawable(qdev, &release);
|
||||
if (ret)
|
||||
return;
|
||||
|
||||
ret = qxl_image_alloc_objects(qdev, release,
|
||||
&dimage,
|
||||
height, stride);
|
||||
if (ret)
|
||||
goto out_free_drawable;
|
||||
|
||||
if (depth == 1) {
|
||||
ret = alloc_palette_object(qdev, release, &palette_bo);
|
||||
if (ret)
|
||||
goto out_free_image;
|
||||
}
|
||||
|
||||
/* do a reservation run over all the objects we just allocated */
|
||||
ret = qxl_release_reserve_list(release, true);
|
||||
if (ret)
|
||||
goto out_free_palette;
|
||||
|
||||
rect.left = x;
|
||||
rect.right = x + width;
|
||||
rect.top = y;
|
||||
rect.bottom = y + height;
|
||||
|
||||
ret = make_drawable(qdev, 0, QXL_DRAW_COPY, &rect, release);
|
||||
if (ret) {
|
||||
qxl_release_backoff_reserve_list(release);
|
||||
goto out_free_palette;
|
||||
}
|
||||
|
||||
ret = qxl_image_init(qdev, release, dimage,
|
||||
(const uint8_t *)src, 0, 0,
|
||||
width, height, depth, stride);
|
||||
if (ret) {
|
||||
qxl_release_backoff_reserve_list(release);
|
||||
qxl_release_free(qdev, release);
|
||||
return;
|
||||
}
|
||||
|
||||
if (depth == 1) {
|
||||
void *ptr;
|
||||
ret = qxl_palette_create_1bit(palette_bo, release, qxl_fb_image);
|
||||
|
||||
ptr = qxl_bo_kmap_atomic_page(qdev, dimage->bo, 0);
|
||||
image = ptr;
|
||||
image->u.bitmap.palette =
|
||||
qxl_bo_physical_address(qdev, palette_bo, 0);
|
||||
qxl_bo_kunmap_atomic_page(qdev, dimage->bo, ptr);
|
||||
}
|
||||
|
||||
drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
|
||||
|
||||
drawable->u.copy.src_area.top = 0;
|
||||
drawable->u.copy.src_area.bottom = height;
|
||||
drawable->u.copy.src_area.left = 0;
|
||||
drawable->u.copy.src_area.right = width;
|
||||
|
||||
drawable->u.copy.rop_descriptor = SPICE_ROPD_OP_PUT;
|
||||
drawable->u.copy.scale_mode = 0;
|
||||
drawable->u.copy.mask.flags = 0;
|
||||
drawable->u.copy.mask.pos.x = 0;
|
||||
drawable->u.copy.mask.pos.y = 0;
|
||||
drawable->u.copy.mask.bitmap = 0;
|
||||
|
||||
drawable->u.copy.src_bitmap =
|
||||
qxl_bo_physical_address(qdev, dimage->bo, 0);
|
||||
qxl_release_unmap(qdev, release, &drawable->release_info);
|
||||
|
||||
qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
|
||||
qxl_release_fence_buffer_objects(release);
|
||||
|
||||
out_free_palette:
|
||||
if (palette_bo)
|
||||
qxl_bo_unref(&palette_bo);
|
||||
out_free_image:
|
||||
qxl_image_free_objects(qdev, dimage);
|
||||
out_free_drawable:
|
||||
if (ret)
|
||||
free_drawable(qdev, release);
|
||||
}
|
||||
|
||||
/* push a draw command using the given clipping rectangles as
|
||||
* the sources from the shadow framebuffer.
|
||||
*
|
||||
* Right now implementing with a single draw and a clip list. Clip
|
||||
* lists are known to be a problem performance wise, this can be solved
|
||||
* by treating them differently in the server.
|
||||
*/
|
||||
void qxl_draw_dirty_fb(struct qxl_device *qdev,
|
||||
struct qxl_framebuffer *qxl_fb,
|
||||
struct qxl_bo *bo,
|
||||
unsigned flags, unsigned color,
|
||||
struct drm_clip_rect *clips,
|
||||
unsigned num_clips, int inc)
|
||||
{
|
||||
/*
|
||||
* TODO: if flags & DRM_MODE_FB_DIRTY_ANNOTATE_FILL then we should
|
||||
* send a fill command instead, much cheaper.
|
||||
*
|
||||
* See include/drm/drm_mode.h
|
||||
*/
|
||||
struct drm_clip_rect *clips_ptr;
|
||||
int i;
|
||||
int left, right, top, bottom;
|
||||
int width, height;
|
||||
struct qxl_drawable *drawable;
|
||||
struct qxl_rect drawable_rect;
|
||||
struct qxl_rect *rects;
|
||||
int stride = qxl_fb->base.pitches[0];
|
||||
/* depth is not actually interesting, we don't mask with it */
|
||||
int depth = qxl_fb->base.bits_per_pixel;
|
||||
uint8_t *surface_base;
|
||||
struct qxl_release *release;
|
||||
struct qxl_bo *clips_bo;
|
||||
struct qxl_drm_image *dimage;
|
||||
int ret;
|
||||
|
||||
ret = alloc_drawable(qdev, &release);
|
||||
if (ret)
|
||||
return;
|
||||
|
||||
left = clips->x1;
|
||||
right = clips->x2;
|
||||
top = clips->y1;
|
||||
bottom = clips->y2;
|
||||
|
||||
/* skip the first clip rect */
|
||||
for (i = 1, clips_ptr = clips + inc;
|
||||
i < num_clips; i++, clips_ptr += inc) {
|
||||
left = min_t(int, left, (int)clips_ptr->x1);
|
||||
right = max_t(int, right, (int)clips_ptr->x2);
|
||||
top = min_t(int, top, (int)clips_ptr->y1);
|
||||
bottom = max_t(int, bottom, (int)clips_ptr->y2);
|
||||
}
|
||||
|
||||
width = right - left;
|
||||
height = bottom - top;
|
||||
|
||||
ret = alloc_clips(qdev, release, num_clips, &clips_bo);
|
||||
if (ret)
|
||||
goto out_free_drawable;
|
||||
|
||||
ret = qxl_image_alloc_objects(qdev, release,
|
||||
&dimage,
|
||||
height, stride);
|
||||
if (ret)
|
||||
goto out_free_clips;
|
||||
|
||||
/* do a reservation run over all the objects we just allocated */
|
||||
ret = qxl_release_reserve_list(release, true);
|
||||
if (ret)
|
||||
goto out_free_image;
|
||||
|
||||
drawable_rect.left = left;
|
||||
drawable_rect.right = right;
|
||||
drawable_rect.top = top;
|
||||
drawable_rect.bottom = bottom;
|
||||
|
||||
ret = make_drawable(qdev, 0, QXL_DRAW_COPY, &drawable_rect,
|
||||
release);
|
||||
if (ret)
|
||||
goto out_release_backoff;
|
||||
|
||||
ret = qxl_bo_kmap(bo, (void **)&surface_base);
|
||||
if (ret)
|
||||
goto out_release_backoff;
|
||||
|
||||
|
||||
ret = qxl_image_init(qdev, release, dimage, surface_base,
|
||||
left, top, width, height, depth, stride);
|
||||
qxl_bo_kunmap(bo);
|
||||
if (ret)
|
||||
goto out_release_backoff;
|
||||
|
||||
rects = drawable_set_clipping(qdev, drawable, num_clips, clips_bo);
|
||||
if (!rects)
|
||||
goto out_release_backoff;
|
||||
|
||||
drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
|
||||
|
||||
drawable->clip.type = SPICE_CLIP_TYPE_RECTS;
|
||||
drawable->clip.data = qxl_bo_physical_address(qdev,
|
||||
clips_bo, 0);
|
||||
|
||||
drawable->u.copy.src_area.top = 0;
|
||||
drawable->u.copy.src_area.bottom = height;
|
||||
drawable->u.copy.src_area.left = 0;
|
||||
drawable->u.copy.src_area.right = width;
|
||||
|
||||
drawable->u.copy.rop_descriptor = SPICE_ROPD_OP_PUT;
|
||||
drawable->u.copy.scale_mode = 0;
|
||||
drawable->u.copy.mask.flags = 0;
|
||||
drawable->u.copy.mask.pos.x = 0;
|
||||
drawable->u.copy.mask.pos.y = 0;
|
||||
drawable->u.copy.mask.bitmap = 0;
|
||||
|
||||
drawable->u.copy.src_bitmap = qxl_bo_physical_address(qdev, dimage->bo, 0);
|
||||
qxl_release_unmap(qdev, release, &drawable->release_info);
|
||||
|
||||
clips_ptr = clips;
|
||||
for (i = 0; i < num_clips; i++, clips_ptr += inc) {
|
||||
rects[i].left = clips_ptr->x1;
|
||||
rects[i].right = clips_ptr->x2;
|
||||
rects[i].top = clips_ptr->y1;
|
||||
rects[i].bottom = clips_ptr->y2;
|
||||
}
|
||||
qxl_bo_kunmap(clips_bo);
|
||||
|
||||
qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
|
||||
qxl_release_fence_buffer_objects(release);
|
||||
|
||||
out_release_backoff:
|
||||
if (ret)
|
||||
qxl_release_backoff_reserve_list(release);
|
||||
out_free_image:
|
||||
qxl_image_free_objects(qdev, dimage);
|
||||
out_free_clips:
|
||||
qxl_bo_unref(&clips_bo);
|
||||
out_free_drawable:
|
||||
/* only free drawable on error */
|
||||
if (ret)
|
||||
free_drawable(qdev, release);
|
||||
|
||||
}
|
||||
|
||||
/*
 * qxl_draw_copyarea - emit a QXL_COPY_BITS drawable that copies a
 * width x height rectangle from (sx, sy) to (dx, dy) on the surface.
 * Errors are swallowed: the draw is simply dropped on failure.
 */
void qxl_draw_copyarea(struct qxl_device *qdev,
		       u32 width, u32 height,
		       u32 sx, u32 sy,
		       u32 dx, u32 dy)
{
	struct qxl_release *release;
	struct qxl_drawable *drawable;
	struct qxl_rect dst;
	int ret;

	ret = alloc_drawable(qdev, &release);
	if (ret)
		return;

	/* reserve every bo backing this release before touching it */
	ret = qxl_release_reserve_list(release, true);
	if (ret)
		goto out_free_release;

	/* destination rectangle on the surface */
	dst.left = dx;
	dst.top = dy;
	dst.right = dx + width;
	dst.bottom = dy + height;

	ret = make_drawable(qdev, 0, QXL_COPY_BITS, &dst, release);
	if (ret) {
		qxl_release_backoff_reserve_list(release);
		goto out_free_release;
	}

	/* fill in the source position while the release is mapped */
	drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
	drawable->u.copy_bits.src_pos.x = sx;
	drawable->u.copy_bits.src_pos.y = sy;
	qxl_release_unmap(qdev, release, &drawable->release_info);

	qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
	qxl_release_fence_buffer_objects(release);

out_free_release:
	if (ret)
		free_drawable(qdev, release);
}
|
||||
|
||||
void qxl_draw_fill(struct qxl_draw_fill *qxl_draw_fill_rec)
|
||||
{
|
||||
struct qxl_device *qdev = qxl_draw_fill_rec->qdev;
|
||||
struct qxl_rect rect = qxl_draw_fill_rec->rect;
|
||||
uint32_t color = qxl_draw_fill_rec->color;
|
||||
uint16_t rop = qxl_draw_fill_rec->rop;
|
||||
struct qxl_drawable *drawable;
|
||||
struct qxl_release *release;
|
||||
int ret;
|
||||
|
||||
ret = alloc_drawable(qdev, &release);
|
||||
if (ret)
|
||||
return;
|
||||
|
||||
/* do a reservation run over all the objects we just allocated */
|
||||
ret = qxl_release_reserve_list(release, true);
|
||||
if (ret)
|
||||
goto out_free_release;
|
||||
|
||||
ret = make_drawable(qdev, 0, QXL_DRAW_FILL, &rect, release);
|
||||
if (ret) {
|
||||
qxl_release_backoff_reserve_list(release);
|
||||
goto out_free_release;
|
||||
}
|
||||
|
||||
drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
|
||||
drawable->u.fill.brush.type = SPICE_BRUSH_TYPE_SOLID;
|
||||
drawable->u.fill.brush.u.color = color;
|
||||
drawable->u.fill.rop_descriptor = rop;
|
||||
drawable->u.fill.mask.flags = 0;
|
||||
drawable->u.fill.mask.pos.x = 0;
|
||||
drawable->u.fill.mask.pos.y = 0;
|
||||
drawable->u.fill.mask.bitmap = 0;
|
||||
|
||||
qxl_release_unmap(qdev, release, &drawable->release_info);
|
||||
|
||||
qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
|
||||
qxl_release_fence_buffer_objects(release);
|
||||
|
||||
out_free_release:
|
||||
if (ret)
|
||||
free_drawable(qdev, release);
|
||||
}
|
295
drivers/gpu/drm/qxl/qxl_drv.c
Normal file
295
drivers/gpu/drm/qxl/qxl_drv.c
Normal file
|
@ -0,0 +1,295 @@
|
|||
/* vim: set ts=8 sw=8 tw=78 ai noexpandtab */
|
||||
/* qxl_drv.c -- QXL driver -*- linux-c -*-
|
||||
*
|
||||
* Copyright 2011 Red Hat, Inc.
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors:
|
||||
* Dave Airlie <airlie@redhat.com>
|
||||
* Alon Levy <alevy@redhat.com>
|
||||
*/
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/console.h>
|
||||
|
||||
#include "drmP.h"
|
||||
#include "drm/drm.h"
|
||||
#include "drm_crtc_helper.h"
|
||||
#include "qxl_drv.h"
|
||||
#include "qxl_object.h"
|
||||
|
||||
extern int qxl_max_ioctls;
|
||||
static const struct pci_device_id pciidlist[] = {
|
||||
{ 0x1b36, 0x100, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8,
|
||||
0xffff00, 0 },
|
||||
{ 0x1b36, 0x100, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_OTHER << 8,
|
||||
0xffff00, 0 },
|
||||
{ 0, 0, 0 },
|
||||
};
|
||||
MODULE_DEVICE_TABLE(pci, pciidlist);
|
||||
|
||||
static int qxl_modeset = -1;
|
||||
int qxl_num_crtc = 4;
|
||||
|
||||
MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
|
||||
module_param_named(modeset, qxl_modeset, int, 0400);
|
||||
|
||||
MODULE_PARM_DESC(num_heads, "Number of virtual crtcs to expose (default 4)");
|
||||
module_param_named(num_heads, qxl_num_crtc, int, 0400);
|
||||
|
||||
static struct drm_driver qxl_driver;
|
||||
static struct pci_driver qxl_pci_driver;
|
||||
|
||||
static int
|
||||
qxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
||||
{
|
||||
if (pdev->revision < 4) {
|
||||
DRM_ERROR("qxl too old, doesn't support client_monitors_config,"
|
||||
" use xf86-video-qxl in user mode");
|
||||
return -EINVAL; /* TODO: ENODEV ? */
|
||||
}
|
||||
return drm_get_pci_dev(pdev, ent, &qxl_driver);
|
||||
}
|
||||
|
||||
/* PCI remove: tear down the drm_device registered by qxl_pci_probe() */
static void
qxl_pci_remove(struct pci_dev *pdev)
{
	drm_put_dev(pci_get_drvdata(pdev));
}
|
||||
|
||||
/* File operations for /dev/dri nodes: everything is delegated to the
 * DRM core except mmap, which must go through qxl_mmap (TTM) to map
 * our buffer objects. */
static const struct file_operations qxl_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.poll = drm_poll,
	.read = drm_read,
	.mmap = qxl_mmap,
};
|
||||
|
||||
/* Common suspend/freeze path: quiesce KMS output, evict surfaces and
 * VRAM, and drain the device rings before PCI state is saved.  The
 * ordering here is deliberate — do not reorder. */
static int qxl_drm_freeze(struct drm_device *dev)
{
	struct pci_dev *pdev = dev->pdev;
	struct qxl_device *qdev = dev->dev_private;
	struct drm_crtc *crtc;

	drm_kms_helper_poll_disable(dev);

	/* fbdev touches the console, so suspend it under console_lock */
	console_lock();
	qxl_fbdev_set_suspend(qdev, 1);
	console_unlock();

	/* unpin the front buffers */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
		if (crtc->enabled)
			(*crtc_funcs->disable)(crtc);
	}

	qxl_destroy_monitors_object(qdev);
	qxl_surf_evict(qdev);
	qxl_vram_evict(qdev);

	/* busy-wait until the command ring is idle ... */
	while (!qxl_check_idle(qdev->command_ring));
	/* ... then garbage-collect until the release ring drains too */
	while (!qxl_check_idle(qdev->release_ring))
		qxl_queue_garbage_collect(qdev, 1);

	pci_save_state(pdev);

	return 0;
}
|
||||
|
||||
/* Common resume path.  @thaw is true when returning from hibernation's
 * freeze phase (device state was kept), in which case the memslots and
 * release ring are still valid and must not be reinitialized. */
static int qxl_drm_resume(struct drm_device *dev, bool thaw)
{
	struct qxl_device *qdev = dev->dev_private;

	/* re-arm interrupts before anything touches the device */
	qdev->ram_header->int_mask = QXL_INTERRUPT_MASK;
	if (!thaw) {
		qxl_reinit_memslots(qdev);
		qxl_ring_init_hdr(qdev->release_ring);
	}

	qxl_create_monitors_object(qdev);
	drm_helper_resume_force_mode(dev);

	/* fbdev touches the console, so resume it under console_lock */
	console_lock();
	qxl_fbdev_set_suspend(qdev, 0);
	console_unlock();

	drm_kms_helper_poll_enable(dev);
	return 0;
}
|
||||
|
||||
static int qxl_pm_suspend(struct device *dev)
|
||||
{
|
||||
struct pci_dev *pdev = to_pci_dev(dev);
|
||||
struct drm_device *drm_dev = pci_get_drvdata(pdev);
|
||||
int error;
|
||||
|
||||
error = qxl_drm_freeze(drm_dev);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
pci_disable_device(pdev);
|
||||
pci_set_power_state(pdev, PCI_D3hot);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int qxl_pm_resume(struct device *dev)
|
||||
{
|
||||
struct pci_dev *pdev = to_pci_dev(dev);
|
||||
struct drm_device *drm_dev = pci_get_drvdata(pdev);
|
||||
|
||||
pci_set_power_state(pdev, PCI_D0);
|
||||
pci_restore_state(pdev);
|
||||
if (pci_enable_device(pdev)) {
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
return qxl_drm_resume(drm_dev, false);
|
||||
}
|
||||
|
||||
static int qxl_pm_thaw(struct device *dev)
|
||||
{
|
||||
struct pci_dev *pdev = to_pci_dev(dev);
|
||||
struct drm_device *drm_dev = pci_get_drvdata(pdev);
|
||||
|
||||
return qxl_drm_resume(drm_dev, true);
|
||||
}
|
||||
|
||||
/* dev_pm_ops.freeze/poweroff: quiesce without changing PCI power state */
static int qxl_pm_freeze(struct device *dev)
{
	struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));

	return qxl_drm_freeze(ddev);
}
|
||||
|
||||
/* dev_pm_ops.restore: coming back from a hibernation image, device
 * state is unknown — reset the device first, then do a full resume. */
static int qxl_pm_restore(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);
	struct qxl_device *qdev = drm_dev->dev_private;

	qxl_io_reset(qdev);
	return qxl_drm_resume(drm_dev, false);
}
|
||||
|
||||
/* The virtual GPU raises no real vblank interrupts; report the DRM
 * core's software counter for the given crtc. */
static u32 qxl_noop_get_vblank_counter(struct drm_device *dev, int crtc)
{
	return dev->vblank[crtc].count.counter;
}
|
||||
|
||||
/* Pretend vblank interrupts were enabled; there is nothing to do. */
static int qxl_noop_enable_vblank(struct drm_device *dev, int crtc)
{
	return 0;
}
|
||||
|
||||
/* Nothing to disable — see qxl_noop_enable_vblank(). */
static void qxl_noop_disable_vblank(struct drm_device *dev, int crtc)
{
}
|
||||
|
||||
/* Power-management callbacks wired into the PCI driver below; both
 * suspend and hibernation funnel through qxl_drm_freeze()/resume(). */
static const struct dev_pm_ops qxl_pm_ops = {
	.suspend = qxl_pm_suspend,
	.resume = qxl_pm_resume,
	.freeze = qxl_pm_freeze,
	.thaw = qxl_pm_thaw,
	.poweroff = qxl_pm_freeze,	/* poweroff quiesces like freeze */
	.restore = qxl_pm_restore,
};
|
||||
/* PCI driver glue: matches the Red Hat qxl IDs in pciidlist above. */
static struct pci_driver qxl_pci_driver = {
	 .name = DRIVER_NAME,
	 .id_table = pciidlist,
	 .probe = qxl_pci_probe,
	 .remove = qxl_pci_remove,
	 .driver.pm = &qxl_pm_ops,
};
|
||||
|
||||
/* DRM driver description: GEM + KMS with PRIME buffer sharing and
 * no-op vblank hooks (the virtual GPU has no real vblank interrupt).
 * num_ioctls is filled in at init time from qxl_max_ioctls. */
static struct drm_driver qxl_driver = {
	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
			   DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
	.load = qxl_driver_load,
	.unload = qxl_driver_unload,
	.get_vblank_counter = qxl_noop_get_vblank_counter,
	.enable_vblank = qxl_noop_enable_vblank,
	.disable_vblank = qxl_noop_disable_vblank,

	.set_busid = drm_pci_set_busid,

	/* dumb buffers for unaccelerated KMS clients */
	.dumb_create = qxl_mode_dumb_create,
	.dumb_map_offset = qxl_mode_dumb_mmap,
	.dumb_destroy = drm_gem_dumb_destroy,
#if defined(CONFIG_DEBUG_FS)
	.debugfs_init = qxl_debugfs_init,
	.debugfs_cleanup = qxl_debugfs_takedown,
#endif
	/* PRIME import/export plumbing; qxl-specific parts in qxl_prime.c */
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export = drm_gem_prime_export,
	.gem_prime_import = drm_gem_prime_import,
	.gem_prime_pin = qxl_gem_prime_pin,
	.gem_prime_unpin = qxl_gem_prime_unpin,
	.gem_prime_get_sg_table = qxl_gem_prime_get_sg_table,
	.gem_prime_import_sg_table = qxl_gem_prime_import_sg_table,
	.gem_prime_vmap = qxl_gem_prime_vmap,
	.gem_prime_vunmap = qxl_gem_prime_vunmap,
	.gem_prime_mmap = qxl_gem_prime_mmap,
	.gem_free_object = qxl_gem_object_free,
	.gem_open_object = qxl_gem_object_open,
	.gem_close_object = qxl_gem_object_close,
	.fops = &qxl_fops,
	.ioctls = qxl_ioctls,
	.irq_handler = qxl_irq_handler,
	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = 0,
	.minor = 1,
	.patchlevel = 0,
};
|
||||
|
||||
/* Module init: honor modeset=0 / forced VGA console, then register the
 * PCI driver.  num_ioctls must be set before registration. */
static int __init qxl_init(void)
{
#ifdef CONFIG_VGA_CONSOLE
	/* user forced a text console and did not explicitly ask for
	 * modesetting (modeset is still at its -1 default): bail out */
	if (vgacon_text_force() && qxl_modeset == -1)
		return -EINVAL;
#endif

	if (qxl_modeset == 0)
		return -EINVAL;
	qxl_driver.num_ioctls = qxl_max_ioctls;
	return drm_pci_init(&qxl_driver, &qxl_pci_driver);
}
|
||||
|
||||
/* Module exit: unregister the PCI driver registered in qxl_init(). */
static void __exit qxl_exit(void)
{
	drm_pci_exit(&qxl_driver, &qxl_pci_driver);
}
|
||||
|
||||
module_init(qxl_init);
|
||||
module_exit(qxl_exit);
|
||||
|
||||
MODULE_AUTHOR(DRIVER_AUTHOR);
|
||||
MODULE_DESCRIPTION(DRIVER_DESC);
|
||||
MODULE_LICENSE("GPL and additional rights");
|
576
drivers/gpu/drm/qxl/qxl_drv.h
Normal file
576
drivers/gpu/drm/qxl/qxl_drv.h
Normal file
|
@ -0,0 +1,576 @@
|
|||
/*
|
||||
* Copyright 2013 Red Hat Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors: Dave Airlie
|
||||
* Alon Levy
|
||||
*/
|
||||
|
||||
|
||||
#ifndef QXL_DRV_H
|
||||
#define QXL_DRV_H
|
||||
|
||||
/*
|
||||
* Definitions taken from spice-protocol, plus kernel driver specific bits.
|
||||
*/
|
||||
|
||||
#include <linux/fence.h>
|
||||
#include <linux/workqueue.h>
|
||||
#include <linux/firmware.h>
|
||||
#include <linux/platform_device.h>
|
||||
|
||||
#include "drmP.h"
|
||||
#include "drm_crtc.h"
|
||||
#include <ttm/ttm_bo_api.h>
|
||||
#include <ttm/ttm_bo_driver.h>
|
||||
#include <ttm/ttm_placement.h>
|
||||
#include <ttm/ttm_module.h>
|
||||
|
||||
#include <drm/drm_gem.h>
|
||||
|
||||
/* just for ttm_validate_buffer */
|
||||
#include <ttm/ttm_execbuf_util.h>
|
||||
|
||||
#include <drm/qxl_drm.h>
|
||||
#include "qxl_dev.h"
|
||||
|
||||
#define DRIVER_AUTHOR "Dave Airlie"
|
||||
|
||||
#define DRIVER_NAME "qxl"
|
||||
#define DRIVER_DESC "RH QXL"
|
||||
#define DRIVER_DATE "20120117"
|
||||
|
||||
#define DRIVER_MAJOR 0
|
||||
#define DRIVER_MINOR 1
|
||||
#define DRIVER_PATCHLEVEL 0
|
||||
|
||||
#define QXL_DEBUGFS_MAX_COMPONENTS 32
|
||||
|
||||
extern int qxl_log_level;
|
||||
extern int qxl_num_crtc;
|
||||
|
||||
enum {
|
||||
QXL_INFO_LEVEL = 1,
|
||||
QXL_DEBUG_LEVEL = 2,
|
||||
};
|
||||
|
||||
/* Logging helpers: forward printf-style messages to the host through
 * qxl_io_log() when the module-level qxl_log_level is high enough. */
#define QXL_INFO(qdev, fmt, ...) do { \
		if (qxl_log_level >= QXL_INFO_LEVEL) { \
			qxl_io_log(qdev, fmt, __VA_ARGS__); \
		} \
	} while (0)
#define QXL_DEBUG(qdev, fmt, ...) do { \
		if (qxl_log_level >= QXL_DEBUG_LEVEL) { \
			qxl_io_log(qdev, fmt, __VA_ARGS__); \
		} \
	} while (0)
/* Like QXL_INFO but fires at most once per expansion site (the static
 * "done" flag is per call site, not global). */
#define QXL_INFO_ONCE(qdev, fmt, ...) do { \
		static int done; \
		if (!done) { \
			done = 1; \
			QXL_INFO(qdev, fmt, __VA_ARGS__); \
		} \
	} while (0)
|
||||
|
||||
#define DRM_FILE_OFFSET 0x100000000ULL
|
||||
#define DRM_FILE_PAGE_OFFSET (DRM_FILE_OFFSET >> PAGE_SHIFT)
|
||||
|
||||
#define QXL_INTERRUPT_MASK (\
|
||||
QXL_INTERRUPT_DISPLAY |\
|
||||
QXL_INTERRUPT_CURSOR |\
|
||||
QXL_INTERRUPT_IO_CMD |\
|
||||
QXL_INTERRUPT_CLIENT_MONITORS_CONFIG)
|
||||
|
||||
/* A TTM-backed buffer object: wraps the GEM object handed to userspace
 * and tracks the optional qxl hardware surface bound to it. */
struct qxl_bo {
	/* Protected by gem.mutex */
	struct list_head list;
	/* Protected by tbo.reserved */
	struct ttm_place placements[3];
	struct ttm_placement placement;
	struct ttm_buffer_object tbo;
	struct ttm_bo_kmap_obj kmap;
	unsigned pin_count;
	/* kernel virtual address while kmapped, NULL otherwise
	 * (NOTE(review): presumed from kmap usage — confirm) */
	void *kptr;
	/* QXL_GEM_DOMAIN_* placement domain; selects the memslot in
	 * qxl_bo_physical_address() */
	int type;

	/* Constant after initialization */
	struct drm_gem_object gem_base;
	bool is_primary; /* is this now a primary surface */
	bool hw_surf_alloc;
	struct qxl_surface surf;
	uint32_t surface_id;
	struct qxl_release *surf_create;
};
|
||||
#define gem_to_qxl_bo(gobj) container_of((gobj), struct qxl_bo, gem_base)
|
||||
#define to_qxl_bo(tobj) container_of((tobj), struct qxl_bo, tbo)
|
||||
|
||||
struct qxl_gem {
|
||||
struct mutex mutex;
|
||||
struct list_head objects;
|
||||
};
|
||||
|
||||
struct qxl_bo_list {
|
||||
struct ttm_validate_buffer tv;
|
||||
};
|
||||
|
||||
struct qxl_crtc {
|
||||
struct drm_crtc base;
|
||||
int index;
|
||||
int cur_x;
|
||||
int cur_y;
|
||||
};
|
||||
|
||||
struct qxl_output {
|
||||
int index;
|
||||
struct drm_connector base;
|
||||
struct drm_encoder enc;
|
||||
};
|
||||
|
||||
struct qxl_framebuffer {
|
||||
struct drm_framebuffer base;
|
||||
struct drm_gem_object *obj;
|
||||
};
|
||||
|
||||
#define to_qxl_crtc(x) container_of(x, struct qxl_crtc, base)
|
||||
#define drm_connector_to_qxl_output(x) container_of(x, struct qxl_output, base)
|
||||
#define drm_encoder_to_qxl_output(x) container_of(x, struct qxl_output, enc)
|
||||
#define to_qxl_framebuffer(x) container_of(x, struct qxl_framebuffer, base)
|
||||
|
||||
struct qxl_mman {
|
||||
struct ttm_bo_global_ref bo_global_ref;
|
||||
struct drm_global_reference mem_global_ref;
|
||||
bool mem_global_referenced;
|
||||
struct ttm_bo_device bdev;
|
||||
};
|
||||
|
||||
struct qxl_mode_info {
|
||||
int num_modes;
|
||||
struct qxl_mode *modes;
|
||||
bool mode_config_initialized;
|
||||
|
||||
/* pointer to fbdev info structure */
|
||||
struct qxl_fbdev *qfbdev;
|
||||
};
|
||||
|
||||
|
||||
struct qxl_memslot {
|
||||
uint8_t generation;
|
||||
uint64_t start_phys_addr;
|
||||
uint64_t end_phys_addr;
|
||||
uint64_t high_bits;
|
||||
};
|
||||
|
||||
enum {
|
||||
QXL_RELEASE_DRAWABLE,
|
||||
QXL_RELEASE_SURFACE_CMD,
|
||||
QXL_RELEASE_CURSOR_CMD,
|
||||
};
|
||||
|
||||
/* drm_ prefix to differentiate from qxl_release_info in
|
||||
* spice-protocol/qxl_dev.h */
|
||||
#define QXL_MAX_RES 96
|
||||
/* Tracks one command handed to the device plus the buffer objects it
 * keeps alive until the device releases it; fences back into TTM via
 * the embedded fence. */
struct qxl_release {
	struct fence base;

	int id;
	int type;		/* QXL_RELEASE_* */
	uint32_t release_offset;
	uint32_t surface_release_id;
	/* ww ticket for reserving the bos on the list below */
	struct ww_acquire_ctx ticket;
	struct list_head bos;
};
|
||||
|
||||
struct qxl_drm_chunk {
|
||||
struct list_head head;
|
||||
struct qxl_bo *bo;
|
||||
};
|
||||
|
||||
struct qxl_drm_image {
|
||||
struct qxl_bo *bo;
|
||||
struct list_head chunk_list;
|
||||
};
|
||||
|
||||
struct qxl_fb_image {
|
||||
struct qxl_device *qdev;
|
||||
uint32_t pseudo_palette[16];
|
||||
struct fb_image fb_image;
|
||||
uint32_t visual;
|
||||
};
|
||||
|
||||
struct qxl_draw_fill {
|
||||
struct qxl_device *qdev;
|
||||
struct qxl_rect rect;
|
||||
uint32_t color;
|
||||
uint16_t rop;
|
||||
};
|
||||
|
||||
/*
|
||||
* Debugfs
|
||||
*/
|
||||
struct qxl_debugfs {
|
||||
struct drm_info_list *files;
|
||||
unsigned num_files;
|
||||
};
|
||||
|
||||
int qxl_debugfs_add_files(struct qxl_device *rdev,
|
||||
struct drm_info_list *files,
|
||||
unsigned nfiles);
|
||||
int qxl_debugfs_fence_init(struct qxl_device *rdev);
|
||||
void qxl_debugfs_remove_files(struct qxl_device *qdev);
|
||||
|
||||
struct qxl_device;
|
||||
|
||||
/* Per-device state for one qxl PCI function. */
struct qxl_device {
	struct device *dev;
	struct drm_device *ddev;
	struct pci_dev *pdev;
	unsigned long flags;

	/* PCI BAR geometry and the mapped ROM */
	resource_size_t vram_base, vram_size;
	resource_size_t surfaceram_base, surfaceram_size;
	resource_size_t rom_base, rom_size;
	struct qxl_rom *rom;

	struct qxl_mode *modes;
	struct qxl_bo *monitors_config_bo;
	struct qxl_monitors_config *monitors_config;

	/* last received client_monitors_config */
	struct qxl_monitors_config *client_monitors_config;

	int io_base;
	void *ram;
	struct qxl_mman mman;		/* TTM state */
	struct qxl_gem gem;		/* GEM object list + lock */
	struct qxl_mode_info mode_info;

	/* fbdev emulation */
	struct fb_info *fbdev_info;
	struct qxl_framebuffer *fbdev_qfb;
	void *ram_physical;

	/* rings shared with the device */
	struct qxl_ring *release_ring;
	struct qxl_ring *command_ring;
	struct qxl_ring *cursor_ring;

	struct qxl_ram_header *ram_header;

	bool primary_created;

	/* memslot bookkeeping used by qxl_bo_physical_address() */
	struct qxl_memslot *mem_slots;
	uint8_t n_mem_slots;

	uint8_t main_mem_slot;
	uint8_t surfaces_mem_slot;
	uint8_t slot_id_bits;
	uint8_t slot_gen_bits;
	uint64_t va_slot_mask;

	spinlock_t release_lock;
	struct idr release_idr;
	uint32_t release_seqno;
	spinlock_t release_idr_lock;
	struct mutex async_io_mutex;
	unsigned int last_sent_io_cmd;

	/* interrupt handling */
	atomic_t irq_received;
	atomic_t irq_received_display;
	atomic_t irq_received_cursor;
	atomic_t irq_received_io_cmd;
	unsigned irq_received_error;
	wait_queue_head_t display_event;
	wait_queue_head_t cursor_event;
	wait_queue_head_t io_cmd_event;
	struct work_struct client_monitors_config_work;

	/* debugfs */
	struct qxl_debugfs debugfs[QXL_DEBUGFS_MAX_COMPONENTS];
	unsigned debugfs_count;

	struct mutex update_area_mutex;

	/* surface id allocation */
	struct idr surf_id_idr;
	spinlock_t surf_id_idr_lock;
	int last_alloced_surf_id;

	struct mutex surf_evict_mutex;
	struct io_mapping *vram_mapping;
	struct io_mapping *surface_mapping;

	/* */
	struct mutex release_mutex;
	struct qxl_bo *current_release_bo[3];
	int current_release_bo_offset[3];

	/* deferred garbage collection of device releases */
	struct workqueue_struct *gc_queue;
	struct work_struct gc_work;

	struct work_struct fb_work;

	struct drm_property *hotplug_mode_update_property;
};
|
||||
|
||||
/* forward declaration for QXL_INFO_IO */
|
||||
void qxl_io_log(struct qxl_device *qdev, const char *fmt, ...);
|
||||
|
||||
extern const struct drm_ioctl_desc qxl_ioctls[];
|
||||
extern int qxl_max_ioctl;
|
||||
|
||||
int qxl_driver_load(struct drm_device *dev, unsigned long flags);
|
||||
int qxl_driver_unload(struct drm_device *dev);
|
||||
|
||||
int qxl_modeset_init(struct qxl_device *qdev);
|
||||
void qxl_modeset_fini(struct qxl_device *qdev);
|
||||
|
||||
int qxl_bo_init(struct qxl_device *qdev);
|
||||
void qxl_bo_fini(struct qxl_device *qdev);
|
||||
|
||||
void qxl_reinit_memslots(struct qxl_device *qdev);
|
||||
int qxl_surf_evict(struct qxl_device *qdev);
|
||||
int qxl_vram_evict(struct qxl_device *qdev);
|
||||
|
||||
struct qxl_ring *qxl_ring_create(struct qxl_ring_header *header,
|
||||
int element_size,
|
||||
int n_elements,
|
||||
int prod_notify,
|
||||
bool set_prod_notify,
|
||||
wait_queue_head_t *push_event);
|
||||
void qxl_ring_free(struct qxl_ring *ring);
|
||||
void qxl_ring_init_hdr(struct qxl_ring *ring);
|
||||
int qxl_check_idle(struct qxl_ring *ring);
|
||||
|
||||
static inline void *
|
||||
qxl_fb_virtual_address(struct qxl_device *qdev, unsigned long physical)
|
||||
{
|
||||
QXL_INFO(qdev, "not implemented (%lu)\n", physical);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Translate a bo + byte offset into the 64-bit address the device
 * understands: the owning memslot's high bits or'ed with the bo's
 * offset inside that slot's aperture. */
static inline uint64_t
qxl_bo_physical_address(struct qxl_device *qdev, struct qxl_bo *bo,
			unsigned long offset)
{
	/* VRAM-domain bos live in the main slot, everything else in the
	 * surfaces slot */
	int slot_id = bo->type == QXL_GEM_DOMAIN_VRAM ? qdev->main_mem_slot : qdev->surfaces_mem_slot;
	struct qxl_memslot *slot = &(qdev->mem_slots[slot_id]);

	/* TODO - need to hold one of the locks to read tbo.offset */
	return slot->high_bits | (bo->tbo.offset + offset);
}
|
||||
|
||||
/* qxl_fb.c */
|
||||
#define QXLFB_CONN_LIMIT 1
|
||||
|
||||
int qxl_fbdev_init(struct qxl_device *qdev);
|
||||
void qxl_fbdev_fini(struct qxl_device *qdev);
|
||||
int qxl_get_handle_for_primary_fb(struct qxl_device *qdev,
|
||||
struct drm_file *file_priv,
|
||||
uint32_t *handle);
|
||||
void qxl_fbdev_set_suspend(struct qxl_device *qdev, int state);
|
||||
|
||||
/* qxl_display.c */
|
||||
int
|
||||
qxl_framebuffer_init(struct drm_device *dev,
|
||||
struct qxl_framebuffer *rfb,
|
||||
struct drm_mode_fb_cmd2 *mode_cmd,
|
||||
struct drm_gem_object *obj);
|
||||
void qxl_display_read_client_monitors_config(struct qxl_device *qdev);
|
||||
void qxl_send_monitors_config(struct qxl_device *qdev);
|
||||
int qxl_create_monitors_object(struct qxl_device *qdev);
|
||||
int qxl_destroy_monitors_object(struct qxl_device *qdev);
|
||||
|
||||
/* used by qxl_debugfs only */
|
||||
void qxl_crtc_set_from_monitors_config(struct qxl_device *qdev);
|
||||
void qxl_alloc_client_monitors_config(struct qxl_device *qdev, unsigned count);
|
||||
|
||||
/* qxl_gem.c */
|
||||
int qxl_gem_init(struct qxl_device *qdev);
|
||||
void qxl_gem_fini(struct qxl_device *qdev);
|
||||
int qxl_gem_object_create(struct qxl_device *qdev, int size,
|
||||
int alignment, int initial_domain,
|
||||
bool discardable, bool kernel,
|
||||
struct qxl_surface *surf,
|
||||
struct drm_gem_object **obj);
|
||||
int qxl_gem_object_create_with_handle(struct qxl_device *qdev,
|
||||
struct drm_file *file_priv,
|
||||
u32 domain,
|
||||
size_t size,
|
||||
struct qxl_surface *surf,
|
||||
struct qxl_bo **qobj,
|
||||
uint32_t *handle);
|
||||
void qxl_gem_object_free(struct drm_gem_object *gobj);
|
||||
int qxl_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv);
|
||||
void qxl_gem_object_close(struct drm_gem_object *obj,
|
||||
struct drm_file *file_priv);
|
||||
void qxl_bo_force_delete(struct qxl_device *qdev);
|
||||
int qxl_bo_kmap(struct qxl_bo *bo, void **ptr);
|
||||
|
||||
/* qxl_dumb.c */
|
||||
int qxl_mode_dumb_create(struct drm_file *file_priv,
|
||||
struct drm_device *dev,
|
||||
struct drm_mode_create_dumb *args);
|
||||
int qxl_mode_dumb_mmap(struct drm_file *filp,
|
||||
struct drm_device *dev,
|
||||
uint32_t handle, uint64_t *offset_p);
|
||||
|
||||
|
||||
/* qxl ttm */
|
||||
int qxl_ttm_init(struct qxl_device *qdev);
|
||||
void qxl_ttm_fini(struct qxl_device *qdev);
|
||||
int qxl_mmap(struct file *filp, struct vm_area_struct *vma);
|
||||
|
||||
/* qxl image */
|
||||
|
||||
int qxl_image_init(struct qxl_device *qdev,
|
||||
struct qxl_release *release,
|
||||
struct qxl_drm_image *dimage,
|
||||
const uint8_t *data,
|
||||
int x, int y, int width, int height,
|
||||
int depth, int stride);
|
||||
int
|
||||
qxl_image_alloc_objects(struct qxl_device *qdev,
|
||||
struct qxl_release *release,
|
||||
struct qxl_drm_image **image_ptr,
|
||||
int height, int stride);
|
||||
void qxl_image_free_objects(struct qxl_device *qdev, struct qxl_drm_image *dimage);
|
||||
|
||||
void qxl_update_screen(struct qxl_device *qxl);
|
||||
|
||||
/* qxl io operations (qxl_cmd.c) */
|
||||
|
||||
void qxl_io_create_primary(struct qxl_device *qdev,
|
||||
unsigned offset,
|
||||
struct qxl_bo *bo);
|
||||
void qxl_io_destroy_primary(struct qxl_device *qdev);
|
||||
void qxl_io_memslot_add(struct qxl_device *qdev, uint8_t id);
|
||||
void qxl_io_notify_oom(struct qxl_device *qdev);
|
||||
|
||||
int qxl_io_update_area(struct qxl_device *qdev, struct qxl_bo *surf,
|
||||
const struct qxl_rect *area);
|
||||
|
||||
void qxl_io_reset(struct qxl_device *qdev);
|
||||
void qxl_io_monitors_config(struct qxl_device *qdev);
|
||||
int qxl_ring_push(struct qxl_ring *ring, const void *new_elt, bool interruptible);
|
||||
void qxl_io_flush_release(struct qxl_device *qdev);
|
||||
void qxl_io_flush_surfaces(struct qxl_device *qdev);
|
||||
|
||||
union qxl_release_info *qxl_release_map(struct qxl_device *qdev,
|
||||
struct qxl_release *release);
|
||||
void qxl_release_unmap(struct qxl_device *qdev,
|
||||
struct qxl_release *release,
|
||||
union qxl_release_info *info);
|
||||
int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo);
|
||||
int qxl_release_reserve_list(struct qxl_release *release, bool no_intr);
|
||||
void qxl_release_backoff_reserve_list(struct qxl_release *release);
|
||||
void qxl_release_fence_buffer_objects(struct qxl_release *release);
|
||||
|
||||
int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
|
||||
enum qxl_surface_cmd_type surface_cmd_type,
|
||||
struct qxl_release *create_rel,
|
||||
struct qxl_release **release);
|
||||
int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
|
||||
int type, struct qxl_release **release,
|
||||
struct qxl_bo **rbo);
|
||||
|
||||
int
|
||||
qxl_push_command_ring_release(struct qxl_device *qdev, struct qxl_release *release,
|
||||
uint32_t type, bool interruptible);
|
||||
int
|
||||
qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *release,
|
||||
uint32_t type, bool interruptible);
|
||||
int qxl_alloc_bo_reserved(struct qxl_device *qdev,
|
||||
struct qxl_release *release,
|
||||
unsigned long size,
|
||||
struct qxl_bo **_bo);
|
||||
/* qxl drawing commands */
|
||||
|
||||
void qxl_draw_opaque_fb(const struct qxl_fb_image *qxl_fb_image,
|
||||
int stride /* filled in if 0 */);
|
||||
|
||||
void qxl_draw_dirty_fb(struct qxl_device *qdev,
|
||||
struct qxl_framebuffer *qxl_fb,
|
||||
struct qxl_bo *bo,
|
||||
unsigned flags, unsigned color,
|
||||
struct drm_clip_rect *clips,
|
||||
unsigned num_clips, int inc);
|
||||
|
||||
void qxl_draw_fill(struct qxl_draw_fill *qxl_draw_fill_rec);
|
||||
|
||||
void qxl_draw_copyarea(struct qxl_device *qdev,
|
||||
u32 width, u32 height,
|
||||
u32 sx, u32 sy,
|
||||
u32 dx, u32 dy);
|
||||
|
||||
void qxl_release_free(struct qxl_device *qdev,
|
||||
struct qxl_release *release);
|
||||
|
||||
/* used by qxl_debugfs_release */
|
||||
struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev,
|
||||
uint64_t id);
|
||||
|
||||
bool qxl_queue_garbage_collect(struct qxl_device *qdev, bool flush);
|
||||
int qxl_garbage_collect(struct qxl_device *qdev);
|
||||
|
||||
/* debugfs */
|
||||
|
||||
int qxl_debugfs_init(struct drm_minor *minor);
|
||||
void qxl_debugfs_takedown(struct drm_minor *minor);
|
||||
|
||||
/* qxl_prime.c */
|
||||
int qxl_gem_prime_pin(struct drm_gem_object *obj);
|
||||
void qxl_gem_prime_unpin(struct drm_gem_object *obj);
|
||||
struct sg_table *qxl_gem_prime_get_sg_table(struct drm_gem_object *obj);
|
||||
struct drm_gem_object *qxl_gem_prime_import_sg_table(
|
||||
struct drm_device *dev, struct dma_buf_attachment *attach,
|
||||
struct sg_table *sgt);
|
||||
void *qxl_gem_prime_vmap(struct drm_gem_object *obj);
|
||||
void qxl_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
|
||||
int qxl_gem_prime_mmap(struct drm_gem_object *obj,
|
||||
struct vm_area_struct *vma);
|
||||
|
||||
/* qxl_irq.c */
|
||||
int qxl_irq_init(struct qxl_device *qdev);
|
||||
irqreturn_t qxl_irq_handler(int irq, void *arg);
|
||||
|
||||
/* qxl_fb.c */
|
||||
int qxl_fb_init(struct qxl_device *qdev);
|
||||
bool qxl_fbdev_qobj_is_fb(struct qxl_device *qdev, struct qxl_bo *qobj);
|
||||
|
||||
int qxl_debugfs_add_files(struct qxl_device *qdev,
|
||||
struct drm_info_list *files,
|
||||
unsigned nfiles);
|
||||
|
||||
int qxl_surface_id_alloc(struct qxl_device *qdev,
|
||||
struct qxl_bo *surf);
|
||||
void qxl_surface_id_dealloc(struct qxl_device *qdev,
|
||||
uint32_t surface_id);
|
||||
int qxl_hw_surface_alloc(struct qxl_device *qdev,
|
||||
struct qxl_bo *surf,
|
||||
struct ttm_mem_reg *mem);
|
||||
int qxl_hw_surface_dealloc(struct qxl_device *qdev,
|
||||
struct qxl_bo *surf);
|
||||
|
||||
int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo);
|
||||
|
||||
struct qxl_drv_surface *
|
||||
qxl_surface_lookup(struct drm_device *dev, int surface_id);
|
||||
void qxl_surface_evict(struct qxl_device *qdev, struct qxl_bo *surf, bool freeing);
|
||||
int qxl_update_surface(struct qxl_device *qdev, struct qxl_bo *surf);
|
||||
|
||||
#endif
|
86
drivers/gpu/drm/qxl/qxl_dumb.c
Normal file
86
drivers/gpu/drm/qxl/qxl_dumb.c
Normal file
|
@ -0,0 +1,86 @@
|
|||
/*
|
||||
* Copyright 2013 Red Hat Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors: Dave Airlie
|
||||
* Alon Levy
|
||||
*/
|
||||
|
||||
#include "qxl_drv.h"
|
||||
#include "qxl_object.h"
|
||||
|
||||
/* dumb ioctls implementation */
|
||||
|
||||
int qxl_mode_dumb_create(struct drm_file *file_priv,
|
||||
struct drm_device *dev,
|
||||
struct drm_mode_create_dumb *args)
|
||||
{
|
||||
struct qxl_device *qdev = dev->dev_private;
|
||||
struct qxl_bo *qobj;
|
||||
uint32_t handle;
|
||||
int r;
|
||||
struct qxl_surface surf;
|
||||
uint32_t pitch, format;
|
||||
pitch = args->width * ((args->bpp + 1) / 8);
|
||||
args->size = pitch * args->height;
|
||||
args->size = ALIGN(args->size, PAGE_SIZE);
|
||||
|
||||
switch (args->bpp) {
|
||||
case 16:
|
||||
format = SPICE_SURFACE_FMT_16_565;
|
||||
break;
|
||||
case 32:
|
||||
format = SPICE_SURFACE_FMT_32_xRGB;
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
surf.width = args->width;
|
||||
surf.height = args->height;
|
||||
surf.stride = pitch;
|
||||
surf.format = format;
|
||||
r = qxl_gem_object_create_with_handle(qdev, file_priv,
|
||||
QXL_GEM_DOMAIN_VRAM,
|
||||
args->size, &surf, &qobj,
|
||||
&handle);
|
||||
if (r)
|
||||
return r;
|
||||
args->pitch = pitch;
|
||||
args->handle = handle;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int qxl_mode_dumb_mmap(struct drm_file *file_priv,
|
||||
struct drm_device *dev,
|
||||
uint32_t handle, uint64_t *offset_p)
|
||||
{
|
||||
struct drm_gem_object *gobj;
|
||||
struct qxl_bo *qobj;
|
||||
|
||||
BUG_ON(!offset_p);
|
||||
gobj = drm_gem_object_lookup(dev, file_priv, handle);
|
||||
if (gobj == NULL)
|
||||
return -ENOENT;
|
||||
qobj = gem_to_qxl_bo(gobj);
|
||||
*offset_p = qxl_bo_mmap_offset(qobj);
|
||||
drm_gem_object_unreference_unlocked(gobj);
|
||||
return 0;
|
||||
}
|
719
drivers/gpu/drm/qxl/qxl_fb.c
Normal file
719
drivers/gpu/drm/qxl/qxl_fb.c
Normal file
|
@ -0,0 +1,719 @@
|
|||
/*
|
||||
* Copyright © 2013 Red Hat
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors:
|
||||
* David Airlie
|
||||
*/
|
||||
#include <linux/module.h>
|
||||
#include <linux/fb.h>
|
||||
|
||||
#include "drmP.h"
|
||||
#include "drm/drm.h"
|
||||
#include "drm/drm_crtc.h"
|
||||
#include "drm/drm_crtc_helper.h"
|
||||
#include "qxl_drv.h"
|
||||
|
||||
#include "qxl_object.h"
|
||||
#include "drm_fb_helper.h"
|
||||
|
||||
#define QXL_DIRTY_DELAY (HZ / 30)
|
||||
|
||||
#define QXL_FB_OP_FILLRECT 1
|
||||
#define QXL_FB_OP_COPYAREA 2
|
||||
#define QXL_FB_OP_IMAGEBLIT 3
|
||||
|
||||
struct qxl_fb_op {
|
||||
struct list_head head;
|
||||
int op_type;
|
||||
union {
|
||||
struct fb_fillrect fr;
|
||||
struct fb_copyarea ca;
|
||||
struct fb_image ib;
|
||||
} op;
|
||||
void *img_data;
|
||||
};
|
||||
|
||||
struct qxl_fbdev {
|
||||
struct drm_fb_helper helper;
|
||||
struct qxl_framebuffer qfb;
|
||||
struct list_head fbdev_list;
|
||||
struct qxl_device *qdev;
|
||||
|
||||
spinlock_t delayed_ops_lock;
|
||||
struct list_head delayed_ops;
|
||||
void *shadow;
|
||||
int size;
|
||||
|
||||
/* dirty memory logging */
|
||||
struct {
|
||||
spinlock_t lock;
|
||||
bool active;
|
||||
unsigned x1;
|
||||
unsigned y1;
|
||||
unsigned x2;
|
||||
unsigned y2;
|
||||
} dirty;
|
||||
};
|
||||
|
||||
static void qxl_fb_image_init(struct qxl_fb_image *qxl_fb_image,
|
||||
struct qxl_device *qdev, struct fb_info *info,
|
||||
const struct fb_image *image)
|
||||
{
|
||||
qxl_fb_image->qdev = qdev;
|
||||
if (info) {
|
||||
qxl_fb_image->visual = info->fix.visual;
|
||||
if (qxl_fb_image->visual == FB_VISUAL_TRUECOLOR ||
|
||||
qxl_fb_image->visual == FB_VISUAL_DIRECTCOLOR)
|
||||
memcpy(&qxl_fb_image->pseudo_palette,
|
||||
info->pseudo_palette,
|
||||
sizeof(qxl_fb_image->pseudo_palette));
|
||||
} else {
|
||||
/* fallback */
|
||||
if (image->depth == 1)
|
||||
qxl_fb_image->visual = FB_VISUAL_MONO10;
|
||||
else
|
||||
qxl_fb_image->visual = FB_VISUAL_DIRECTCOLOR;
|
||||
}
|
||||
if (image) {
|
||||
memcpy(&qxl_fb_image->fb_image, image,
|
||||
sizeof(qxl_fb_image->fb_image));
|
||||
}
|
||||
}
|
||||
|
||||
static void qxl_fb_dirty_flush(struct fb_info *info)
|
||||
{
|
||||
struct qxl_fbdev *qfbdev = info->par;
|
||||
struct qxl_device *qdev = qfbdev->qdev;
|
||||
struct qxl_fb_image qxl_fb_image;
|
||||
struct fb_image *image = &qxl_fb_image.fb_image;
|
||||
u32 x1, x2, y1, y2;
|
||||
|
||||
/* TODO: hard coding 32 bpp */
|
||||
int stride = qfbdev->qfb.base.pitches[0];
|
||||
|
||||
x1 = qfbdev->dirty.x1;
|
||||
x2 = qfbdev->dirty.x2;
|
||||
y1 = qfbdev->dirty.y1;
|
||||
y2 = qfbdev->dirty.y2;
|
||||
/*
|
||||
* we are using a shadow draw buffer, at qdev->surface0_shadow
|
||||
*/
|
||||
qxl_io_log(qdev, "dirty x[%d, %d], y[%d, %d]", x1, x2, y1, y2);
|
||||
image->dx = x1;
|
||||
image->dy = y1;
|
||||
image->width = x2 - x1;
|
||||
image->height = y2 - y1;
|
||||
image->fg_color = 0xffffffff; /* unused, just to avoid uninitialized
|
||||
warnings */
|
||||
image->bg_color = 0;
|
||||
image->depth = 32; /* TODO: take from somewhere? */
|
||||
image->cmap.start = 0;
|
||||
image->cmap.len = 0;
|
||||
image->cmap.red = NULL;
|
||||
image->cmap.green = NULL;
|
||||
image->cmap.blue = NULL;
|
||||
image->cmap.transp = NULL;
|
||||
image->data = qfbdev->shadow + (x1 * 4) + (stride * y1);
|
||||
|
||||
qxl_fb_image_init(&qxl_fb_image, qdev, info, NULL);
|
||||
qxl_draw_opaque_fb(&qxl_fb_image, stride);
|
||||
qfbdev->dirty.x1 = 0;
|
||||
qfbdev->dirty.x2 = 0;
|
||||
qfbdev->dirty.y1 = 0;
|
||||
qfbdev->dirty.y2 = 0;
|
||||
}
|
||||
|
||||
static void qxl_deferred_io(struct fb_info *info,
|
||||
struct list_head *pagelist)
|
||||
{
|
||||
struct qxl_fbdev *qfbdev = info->par;
|
||||
unsigned long start, end, min, max;
|
||||
struct page *page;
|
||||
int y1, y2;
|
||||
|
||||
min = ULONG_MAX;
|
||||
max = 0;
|
||||
list_for_each_entry(page, pagelist, lru) {
|
||||
start = page->index << PAGE_SHIFT;
|
||||
end = start + PAGE_SIZE - 1;
|
||||
min = min(min, start);
|
||||
max = max(max, end);
|
||||
}
|
||||
|
||||
if (min < max) {
|
||||
y1 = min / info->fix.line_length;
|
||||
y2 = (max / info->fix.line_length) + 1;
|
||||
|
||||
/* TODO: add spin lock? */
|
||||
/* spin_lock_irqsave(&qfbdev->dirty.lock, flags); */
|
||||
qfbdev->dirty.x1 = 0;
|
||||
qfbdev->dirty.y1 = y1;
|
||||
qfbdev->dirty.x2 = info->var.xres;
|
||||
qfbdev->dirty.y2 = y2;
|
||||
/* spin_unlock_irqrestore(&qfbdev->dirty.lock, flags); */
|
||||
}
|
||||
|
||||
qxl_fb_dirty_flush(info);
|
||||
};
|
||||
|
||||
|
||||
static struct fb_deferred_io qxl_defio = {
|
||||
.delay = QXL_DIRTY_DELAY,
|
||||
.deferred_io = qxl_deferred_io,
|
||||
};
|
||||
|
||||
static void qxl_fb_delayed_fillrect(struct qxl_fbdev *qfbdev,
|
||||
const struct fb_fillrect *fb_rect)
|
||||
{
|
||||
struct qxl_fb_op *op;
|
||||
unsigned long flags;
|
||||
|
||||
op = kmalloc(sizeof(struct qxl_fb_op), GFP_ATOMIC | __GFP_NOWARN);
|
||||
if (!op)
|
||||
return;
|
||||
|
||||
op->op.fr = *fb_rect;
|
||||
op->img_data = NULL;
|
||||
op->op_type = QXL_FB_OP_FILLRECT;
|
||||
|
||||
spin_lock_irqsave(&qfbdev->delayed_ops_lock, flags);
|
||||
list_add_tail(&op->head, &qfbdev->delayed_ops);
|
||||
spin_unlock_irqrestore(&qfbdev->delayed_ops_lock, flags);
|
||||
}
|
||||
|
||||
static void qxl_fb_delayed_copyarea(struct qxl_fbdev *qfbdev,
|
||||
const struct fb_copyarea *fb_copy)
|
||||
{
|
||||
struct qxl_fb_op *op;
|
||||
unsigned long flags;
|
||||
|
||||
op = kmalloc(sizeof(struct qxl_fb_op), GFP_ATOMIC | __GFP_NOWARN);
|
||||
if (!op)
|
||||
return;
|
||||
|
||||
op->op.ca = *fb_copy;
|
||||
op->img_data = NULL;
|
||||
op->op_type = QXL_FB_OP_COPYAREA;
|
||||
|
||||
spin_lock_irqsave(&qfbdev->delayed_ops_lock, flags);
|
||||
list_add_tail(&op->head, &qfbdev->delayed_ops);
|
||||
spin_unlock_irqrestore(&qfbdev->delayed_ops_lock, flags);
|
||||
}
|
||||
|
||||
static void qxl_fb_delayed_imageblit(struct qxl_fbdev *qfbdev,
|
||||
const struct fb_image *fb_image)
|
||||
{
|
||||
struct qxl_fb_op *op;
|
||||
unsigned long flags;
|
||||
uint32_t size = fb_image->width * fb_image->height * (fb_image->depth >= 8 ? fb_image->depth / 8 : 1);
|
||||
|
||||
op = kmalloc(sizeof(struct qxl_fb_op) + size, GFP_ATOMIC | __GFP_NOWARN);
|
||||
if (!op)
|
||||
return;
|
||||
|
||||
op->op.ib = *fb_image;
|
||||
op->img_data = (void *)(op + 1);
|
||||
op->op_type = QXL_FB_OP_IMAGEBLIT;
|
||||
|
||||
memcpy(op->img_data, fb_image->data, size);
|
||||
|
||||
op->op.ib.data = op->img_data;
|
||||
spin_lock_irqsave(&qfbdev->delayed_ops_lock, flags);
|
||||
list_add_tail(&op->head, &qfbdev->delayed_ops);
|
||||
spin_unlock_irqrestore(&qfbdev->delayed_ops_lock, flags);
|
||||
}
|
||||
|
||||
static void qxl_fb_fillrect_internal(struct fb_info *info,
|
||||
const struct fb_fillrect *fb_rect)
|
||||
{
|
||||
struct qxl_fbdev *qfbdev = info->par;
|
||||
struct qxl_device *qdev = qfbdev->qdev;
|
||||
struct qxl_rect rect;
|
||||
uint32_t color;
|
||||
int x = fb_rect->dx;
|
||||
int y = fb_rect->dy;
|
||||
int width = fb_rect->width;
|
||||
int height = fb_rect->height;
|
||||
uint16_t rop;
|
||||
struct qxl_draw_fill qxl_draw_fill_rec;
|
||||
|
||||
if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
|
||||
info->fix.visual == FB_VISUAL_DIRECTCOLOR)
|
||||
color = ((u32 *) (info->pseudo_palette))[fb_rect->color];
|
||||
else
|
||||
color = fb_rect->color;
|
||||
rect.left = x;
|
||||
rect.right = x + width;
|
||||
rect.top = y;
|
||||
rect.bottom = y + height;
|
||||
switch (fb_rect->rop) {
|
||||
case ROP_XOR:
|
||||
rop = SPICE_ROPD_OP_XOR;
|
||||
break;
|
||||
case ROP_COPY:
|
||||
rop = SPICE_ROPD_OP_PUT;
|
||||
break;
|
||||
default:
|
||||
pr_err("qxl_fb_fillrect(): unknown rop, "
|
||||
"defaulting to SPICE_ROPD_OP_PUT\n");
|
||||
rop = SPICE_ROPD_OP_PUT;
|
||||
}
|
||||
qxl_draw_fill_rec.qdev = qdev;
|
||||
qxl_draw_fill_rec.rect = rect;
|
||||
qxl_draw_fill_rec.color = color;
|
||||
qxl_draw_fill_rec.rop = rop;
|
||||
|
||||
qxl_draw_fill(&qxl_draw_fill_rec);
|
||||
}
|
||||
|
||||
static void qxl_fb_fillrect(struct fb_info *info,
|
||||
const struct fb_fillrect *fb_rect)
|
||||
{
|
||||
struct qxl_fbdev *qfbdev = info->par;
|
||||
struct qxl_device *qdev = qfbdev->qdev;
|
||||
|
||||
if (!drm_can_sleep()) {
|
||||
qxl_fb_delayed_fillrect(qfbdev, fb_rect);
|
||||
schedule_work(&qdev->fb_work);
|
||||
return;
|
||||
}
|
||||
/* make sure any previous work is done */
|
||||
flush_work(&qdev->fb_work);
|
||||
qxl_fb_fillrect_internal(info, fb_rect);
|
||||
}
|
||||
|
||||
static void qxl_fb_copyarea_internal(struct fb_info *info,
|
||||
const struct fb_copyarea *region)
|
||||
{
|
||||
struct qxl_fbdev *qfbdev = info->par;
|
||||
|
||||
qxl_draw_copyarea(qfbdev->qdev,
|
||||
region->width, region->height,
|
||||
region->sx, region->sy,
|
||||
region->dx, region->dy);
|
||||
}
|
||||
|
||||
static void qxl_fb_copyarea(struct fb_info *info,
|
||||
const struct fb_copyarea *region)
|
||||
{
|
||||
struct qxl_fbdev *qfbdev = info->par;
|
||||
struct qxl_device *qdev = qfbdev->qdev;
|
||||
|
||||
if (!drm_can_sleep()) {
|
||||
qxl_fb_delayed_copyarea(qfbdev, region);
|
||||
schedule_work(&qdev->fb_work);
|
||||
return;
|
||||
}
|
||||
/* make sure any previous work is done */
|
||||
flush_work(&qdev->fb_work);
|
||||
qxl_fb_copyarea_internal(info, region);
|
||||
}
|
||||
|
||||
static void qxl_fb_imageblit_safe(struct qxl_fb_image *qxl_fb_image)
|
||||
{
|
||||
qxl_draw_opaque_fb(qxl_fb_image, 0);
|
||||
}
|
||||
|
||||
static void qxl_fb_imageblit_internal(struct fb_info *info,
|
||||
const struct fb_image *image)
|
||||
{
|
||||
struct qxl_fbdev *qfbdev = info->par;
|
||||
struct qxl_fb_image qxl_fb_image;
|
||||
|
||||
/* ensure proper order rendering operations - TODO: must do this
|
||||
* for everything. */
|
||||
qxl_fb_image_init(&qxl_fb_image, qfbdev->qdev, info, image);
|
||||
qxl_fb_imageblit_safe(&qxl_fb_image);
|
||||
}
|
||||
|
||||
static void qxl_fb_imageblit(struct fb_info *info,
|
||||
const struct fb_image *image)
|
||||
{
|
||||
struct qxl_fbdev *qfbdev = info->par;
|
||||
struct qxl_device *qdev = qfbdev->qdev;
|
||||
|
||||
if (!drm_can_sleep()) {
|
||||
qxl_fb_delayed_imageblit(qfbdev, image);
|
||||
schedule_work(&qdev->fb_work);
|
||||
return;
|
||||
}
|
||||
/* make sure any previous work is done */
|
||||
flush_work(&qdev->fb_work);
|
||||
qxl_fb_imageblit_internal(info, image);
|
||||
}
|
||||
|
||||
static void qxl_fb_work(struct work_struct *work)
|
||||
{
|
||||
struct qxl_device *qdev = container_of(work, struct qxl_device, fb_work);
|
||||
unsigned long flags;
|
||||
struct qxl_fb_op *entry, *tmp;
|
||||
struct qxl_fbdev *qfbdev = qdev->mode_info.qfbdev;
|
||||
|
||||
/* since the irq context just adds entries to the end of the
|
||||
list dropping the lock should be fine, as entry isn't modified
|
||||
in the operation code */
|
||||
spin_lock_irqsave(&qfbdev->delayed_ops_lock, flags);
|
||||
list_for_each_entry_safe(entry, tmp, &qfbdev->delayed_ops, head) {
|
||||
spin_unlock_irqrestore(&qfbdev->delayed_ops_lock, flags);
|
||||
switch (entry->op_type) {
|
||||
case QXL_FB_OP_FILLRECT:
|
||||
qxl_fb_fillrect_internal(qfbdev->helper.fbdev, &entry->op.fr);
|
||||
break;
|
||||
case QXL_FB_OP_COPYAREA:
|
||||
qxl_fb_copyarea_internal(qfbdev->helper.fbdev, &entry->op.ca);
|
||||
break;
|
||||
case QXL_FB_OP_IMAGEBLIT:
|
||||
qxl_fb_imageblit_internal(qfbdev->helper.fbdev, &entry->op.ib);
|
||||
break;
|
||||
}
|
||||
spin_lock_irqsave(&qfbdev->delayed_ops_lock, flags);
|
||||
list_del(&entry->head);
|
||||
kfree(entry);
|
||||
}
|
||||
spin_unlock_irqrestore(&qfbdev->delayed_ops_lock, flags);
|
||||
}
|
||||
|
||||
int qxl_fb_init(struct qxl_device *qdev)
|
||||
{
|
||||
INIT_WORK(&qdev->fb_work, qxl_fb_work);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct fb_ops qxlfb_ops = {
|
||||
.owner = THIS_MODULE,
|
||||
.fb_check_var = drm_fb_helper_check_var,
|
||||
.fb_set_par = drm_fb_helper_set_par, /* TODO: copy vmwgfx */
|
||||
.fb_fillrect = qxl_fb_fillrect,
|
||||
.fb_copyarea = qxl_fb_copyarea,
|
||||
.fb_imageblit = qxl_fb_imageblit,
|
||||
.fb_pan_display = drm_fb_helper_pan_display,
|
||||
.fb_blank = drm_fb_helper_blank,
|
||||
.fb_setcmap = drm_fb_helper_setcmap,
|
||||
.fb_debug_enter = drm_fb_helper_debug_enter,
|
||||
.fb_debug_leave = drm_fb_helper_debug_leave,
|
||||
};
|
||||
|
||||
static void qxlfb_destroy_pinned_object(struct drm_gem_object *gobj)
|
||||
{
|
||||
struct qxl_bo *qbo = gem_to_qxl_bo(gobj);
|
||||
int ret;
|
||||
|
||||
ret = qxl_bo_reserve(qbo, false);
|
||||
if (likely(ret == 0)) {
|
||||
qxl_bo_kunmap(qbo);
|
||||
qxl_bo_unpin(qbo);
|
||||
qxl_bo_unreserve(qbo);
|
||||
}
|
||||
drm_gem_object_unreference_unlocked(gobj);
|
||||
}
|
||||
|
||||
int qxl_get_handle_for_primary_fb(struct qxl_device *qdev,
|
||||
struct drm_file *file_priv,
|
||||
uint32_t *handle)
|
||||
{
|
||||
int r;
|
||||
struct drm_gem_object *gobj = qdev->fbdev_qfb->obj;
|
||||
|
||||
BUG_ON(!gobj);
|
||||
/* drm_get_handle_create adds a reference - good */
|
||||
r = drm_gem_handle_create(file_priv, gobj, handle);
|
||||
if (r)
|
||||
return r;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int qxlfb_create_pinned_object(struct qxl_fbdev *qfbdev,
|
||||
struct drm_mode_fb_cmd2 *mode_cmd,
|
||||
struct drm_gem_object **gobj_p)
|
||||
{
|
||||
struct qxl_device *qdev = qfbdev->qdev;
|
||||
struct drm_gem_object *gobj = NULL;
|
||||
struct qxl_bo *qbo = NULL;
|
||||
int ret;
|
||||
int aligned_size, size;
|
||||
int height = mode_cmd->height;
|
||||
int bpp;
|
||||
int depth;
|
||||
|
||||
drm_fb_get_bpp_depth(mode_cmd->pixel_format, &bpp, &depth);
|
||||
|
||||
size = mode_cmd->pitches[0] * height;
|
||||
aligned_size = ALIGN(size, PAGE_SIZE);
|
||||
/* TODO: unallocate and reallocate surface0 for real. Hack to just
|
||||
* have a large enough surface0 for 1024x768 Xorg 32bpp mode */
|
||||
ret = qxl_gem_object_create(qdev, aligned_size, 0,
|
||||
QXL_GEM_DOMAIN_SURFACE,
|
||||
false, /* is discardable */
|
||||
false, /* is kernel (false means device) */
|
||||
NULL,
|
||||
&gobj);
|
||||
if (ret) {
|
||||
pr_err("failed to allocate framebuffer (%d)\n",
|
||||
aligned_size);
|
||||
return -ENOMEM;
|
||||
}
|
||||
qbo = gem_to_qxl_bo(gobj);
|
||||
|
||||
qbo->surf.width = mode_cmd->width;
|
||||
qbo->surf.height = mode_cmd->height;
|
||||
qbo->surf.stride = mode_cmd->pitches[0];
|
||||
qbo->surf.format = SPICE_SURFACE_FMT_32_xRGB;
|
||||
ret = qxl_bo_reserve(qbo, false);
|
||||
if (unlikely(ret != 0))
|
||||
goto out_unref;
|
||||
ret = qxl_bo_pin(qbo, QXL_GEM_DOMAIN_SURFACE, NULL);
|
||||
if (ret) {
|
||||
qxl_bo_unreserve(qbo);
|
||||
goto out_unref;
|
||||
}
|
||||
ret = qxl_bo_kmap(qbo, NULL);
|
||||
qxl_bo_unreserve(qbo); /* unreserve, will be mmaped */
|
||||
if (ret)
|
||||
goto out_unref;
|
||||
|
||||
*gobj_p = gobj;
|
||||
return 0;
|
||||
out_unref:
|
||||
qxlfb_destroy_pinned_object(gobj);
|
||||
*gobj_p = NULL;
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int qxlfb_create(struct qxl_fbdev *qfbdev,
|
||||
struct drm_fb_helper_surface_size *sizes)
|
||||
{
|
||||
struct qxl_device *qdev = qfbdev->qdev;
|
||||
struct fb_info *info;
|
||||
struct drm_framebuffer *fb = NULL;
|
||||
struct drm_mode_fb_cmd2 mode_cmd;
|
||||
struct drm_gem_object *gobj = NULL;
|
||||
struct qxl_bo *qbo = NULL;
|
||||
struct device *device = &qdev->pdev->dev;
|
||||
int ret;
|
||||
int size;
|
||||
int bpp = sizes->surface_bpp;
|
||||
int depth = sizes->surface_depth;
|
||||
void *shadow;
|
||||
|
||||
mode_cmd.width = sizes->surface_width;
|
||||
mode_cmd.height = sizes->surface_height;
|
||||
|
||||
mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((bpp + 1) / 8), 64);
|
||||
mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
|
||||
|
||||
ret = qxlfb_create_pinned_object(qfbdev, &mode_cmd, &gobj);
|
||||
qbo = gem_to_qxl_bo(gobj);
|
||||
QXL_INFO(qdev, "%s: %dx%d %d\n", __func__, mode_cmd.width,
|
||||
mode_cmd.height, mode_cmd.pitches[0]);
|
||||
|
||||
shadow = vmalloc(mode_cmd.pitches[0] * mode_cmd.height);
|
||||
/* TODO: what's the usual response to memory allocation errors? */
|
||||
BUG_ON(!shadow);
|
||||
QXL_INFO(qdev,
|
||||
"surface0 at gpu offset %lld, mmap_offset %lld (virt %p, shadow %p)\n",
|
||||
qxl_bo_gpu_offset(qbo),
|
||||
qxl_bo_mmap_offset(qbo),
|
||||
qbo->kptr,
|
||||
shadow);
|
||||
size = mode_cmd.pitches[0] * mode_cmd.height;
|
||||
|
||||
info = framebuffer_alloc(0, device);
|
||||
if (info == NULL) {
|
||||
ret = -ENOMEM;
|
||||
goto out_unref;
|
||||
}
|
||||
|
||||
info->par = qfbdev;
|
||||
|
||||
qxl_framebuffer_init(qdev->ddev, &qfbdev->qfb, &mode_cmd, gobj);
|
||||
|
||||
fb = &qfbdev->qfb.base;
|
||||
|
||||
/* setup helper with fb data */
|
||||
qfbdev->helper.fb = fb;
|
||||
qfbdev->helper.fbdev = info;
|
||||
qfbdev->shadow = shadow;
|
||||
strcpy(info->fix.id, "qxldrmfb");
|
||||
|
||||
drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
|
||||
|
||||
info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA | FBINFO_HWACCEL_FILLRECT;
|
||||
info->fbops = &qxlfb_ops;
|
||||
|
||||
/*
|
||||
* TODO: using gobj->size in various places in this function. Not sure
|
||||
* what the difference between the different sizes is.
|
||||
*/
|
||||
info->fix.smem_start = qdev->vram_base; /* TODO - correct? */
|
||||
info->fix.smem_len = gobj->size;
|
||||
info->screen_base = qfbdev->shadow;
|
||||
info->screen_size = gobj->size;
|
||||
|
||||
drm_fb_helper_fill_var(info, &qfbdev->helper, sizes->fb_width,
|
||||
sizes->fb_height);
|
||||
|
||||
/* setup aperture base/size for vesafb takeover */
|
||||
info->apertures = alloc_apertures(1);
|
||||
if (!info->apertures) {
|
||||
ret = -ENOMEM;
|
||||
goto out_unref;
|
||||
}
|
||||
info->apertures->ranges[0].base = qdev->ddev->mode_config.fb_base;
|
||||
info->apertures->ranges[0].size = qdev->vram_size;
|
||||
|
||||
info->fix.mmio_start = 0;
|
||||
info->fix.mmio_len = 0;
|
||||
|
||||
if (info->screen_base == NULL) {
|
||||
ret = -ENOSPC;
|
||||
goto out_unref;
|
||||
}
|
||||
|
||||
ret = fb_alloc_cmap(&info->cmap, 256, 0);
|
||||
if (ret) {
|
||||
ret = -ENOMEM;
|
||||
goto out_unref;
|
||||
}
|
||||
|
||||
info->fbdefio = &qxl_defio;
|
||||
fb_deferred_io_init(info);
|
||||
|
||||
qdev->fbdev_info = info;
|
||||
qdev->fbdev_qfb = &qfbdev->qfb;
|
||||
DRM_INFO("fb mappable at 0x%lX, size %lu\n", info->fix.smem_start, (unsigned long)info->screen_size);
|
||||
DRM_INFO("fb: depth %d, pitch %d, width %d, height %d\n", fb->depth, fb->pitches[0], fb->width, fb->height);
|
||||
return 0;
|
||||
|
||||
out_unref:
|
||||
if (qbo) {
|
||||
ret = qxl_bo_reserve(qbo, false);
|
||||
if (likely(ret == 0)) {
|
||||
qxl_bo_kunmap(qbo);
|
||||
qxl_bo_unpin(qbo);
|
||||
qxl_bo_unreserve(qbo);
|
||||
}
|
||||
}
|
||||
if (fb && ret) {
|
||||
drm_gem_object_unreference(gobj);
|
||||
drm_framebuffer_cleanup(fb);
|
||||
kfree(fb);
|
||||
}
|
||||
drm_gem_object_unreference(gobj);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int qxl_fb_find_or_create_single(
|
||||
struct drm_fb_helper *helper,
|
||||
struct drm_fb_helper_surface_size *sizes)
|
||||
{
|
||||
struct qxl_fbdev *qfbdev =
|
||||
container_of(helper, struct qxl_fbdev, helper);
|
||||
int new_fb = 0;
|
||||
int ret;
|
||||
|
||||
if (!helper->fb) {
|
||||
ret = qxlfb_create(qfbdev, sizes);
|
||||
if (ret)
|
||||
return ret;
|
||||
new_fb = 1;
|
||||
}
|
||||
return new_fb;
|
||||
}
|
||||
|
||||
static int qxl_fbdev_destroy(struct drm_device *dev, struct qxl_fbdev *qfbdev)
|
||||
{
|
||||
struct fb_info *info;
|
||||
struct qxl_framebuffer *qfb = &qfbdev->qfb;
|
||||
|
||||
if (qfbdev->helper.fbdev) {
|
||||
info = qfbdev->helper.fbdev;
|
||||
|
||||
unregister_framebuffer(info);
|
||||
framebuffer_release(info);
|
||||
}
|
||||
if (qfb->obj) {
|
||||
qxlfb_destroy_pinned_object(qfb->obj);
|
||||
qfb->obj = NULL;
|
||||
}
|
||||
drm_fb_helper_fini(&qfbdev->helper);
|
||||
vfree(qfbdev->shadow);
|
||||
drm_framebuffer_cleanup(&qfb->base);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct drm_fb_helper_funcs qxl_fb_helper_funcs = {
|
||||
.fb_probe = qxl_fb_find_or_create_single,
|
||||
};
|
||||
|
||||
int qxl_fbdev_init(struct qxl_device *qdev)
|
||||
{
|
||||
struct qxl_fbdev *qfbdev;
|
||||
int bpp_sel = 32; /* TODO: parameter from somewhere? */
|
||||
int ret;
|
||||
|
||||
qfbdev = kzalloc(sizeof(struct qxl_fbdev), GFP_KERNEL);
|
||||
if (!qfbdev)
|
||||
return -ENOMEM;
|
||||
|
||||
qfbdev->qdev = qdev;
|
||||
qdev->mode_info.qfbdev = qfbdev;
|
||||
spin_lock_init(&qfbdev->delayed_ops_lock);
|
||||
INIT_LIST_HEAD(&qfbdev->delayed_ops);
|
||||
|
||||
drm_fb_helper_prepare(qdev->ddev, &qfbdev->helper,
|
||||
&qxl_fb_helper_funcs);
|
||||
|
||||
ret = drm_fb_helper_init(qdev->ddev, &qfbdev->helper,
|
||||
qxl_num_crtc /* num_crtc - QXL supports just 1 */,
|
||||
QXLFB_CONN_LIMIT);
|
||||
if (ret) {
|
||||
kfree(qfbdev);
|
||||
return ret;
|
||||
}
|
||||
|
||||
drm_fb_helper_single_add_all_connectors(&qfbdev->helper);
|
||||
drm_fb_helper_initial_config(&qfbdev->helper, bpp_sel);
|
||||
return 0;
|
||||
}
|
||||
|
||||
void qxl_fbdev_fini(struct qxl_device *qdev)
|
||||
{
|
||||
if (!qdev->mode_info.qfbdev)
|
||||
return;
|
||||
|
||||
qxl_fbdev_destroy(qdev->ddev, qdev->mode_info.qfbdev);
|
||||
kfree(qdev->mode_info.qfbdev);
|
||||
qdev->mode_info.qfbdev = NULL;
|
||||
}
|
||||
|
||||
void qxl_fbdev_set_suspend(struct qxl_device *qdev, int state)
|
||||
{
|
||||
fb_set_suspend(qdev->mode_info.qfbdev->helper.fbdev, state);
|
||||
}
|
||||
|
||||
bool qxl_fbdev_qobj_is_fb(struct qxl_device *qdev, struct qxl_bo *qobj)
|
||||
{
|
||||
if (qobj == gem_to_qxl_bo(qdev->mode_info.qfbdev->qfb.obj))
|
||||
return true;
|
||||
return false;
|
||||
}
|
117
drivers/gpu/drm/qxl/qxl_gem.c
Normal file
117
drivers/gpu/drm/qxl/qxl_gem.c
Normal file
|
@ -0,0 +1,117 @@
|
|||
/*
|
||||
* Copyright 2013 Red Hat Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors: Dave Airlie
|
||||
* Alon Levy
|
||||
*/
|
||||
|
||||
#include "drmP.h"
|
||||
#include "drm/drm.h"
|
||||
#include "qxl_drv.h"
|
||||
#include "qxl_object.h"
|
||||
|
||||
void qxl_gem_object_free(struct drm_gem_object *gobj)
|
||||
{
|
||||
struct qxl_bo *qobj = gem_to_qxl_bo(gobj);
|
||||
|
||||
if (qobj)
|
||||
qxl_bo_unref(&qobj);
|
||||
}
|
||||
|
||||
/*
 * Allocate a qxl_bo of @size bytes in @initial_domain and return the
 * embedded GEM object through @obj.  The bo is added to the device's
 * gem.objects list so it can be found by qxl_gem_fini()/debugfs.
 *
 * NOTE(review): @alignment is forced up to PAGE_SIZE but is otherwise only
 * used in the error message, and @discardable is never read — confirm
 * whether callers rely on either before removing them.
 *
 * Returns 0 on success or the negative error from qxl_bo_create().
 */
int qxl_gem_object_create(struct qxl_device *qdev, int size,
			  int alignment, int initial_domain,
			  bool discardable, bool kernel,
			  struct qxl_surface *surf,
			  struct drm_gem_object **obj)
{
	struct qxl_bo *qbo;
	int r;

	*obj = NULL;
	/* At least align on page size */
	if (alignment < PAGE_SIZE)
		alignment = PAGE_SIZE;
	r = qxl_bo_create(qdev, size, kernel, false, initial_domain, surf, &qbo);
	if (r) {
		/* -ERESTARTSYS just means the call was interrupted; stay quiet */
		if (r != -ERESTARTSYS)
			DRM_ERROR(
			"Failed to allocate GEM object (%d, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		return r;
	}
	*obj = &qbo->gem_base;

	/* track the bo device-wide */
	mutex_lock(&qdev->gem.mutex);
	list_add_tail(&qbo->list, &qdev->gem.objects);
	mutex_unlock(&qdev->gem.mutex);

	return 0;
}
|
||||
|
||||
/*
 * Allocate a gem object and immediately create a userspace handle for it.
 * On success *qobj points at the bo and *handle holds the new handle; the
 * handle owns the only reference (the allocation reference is dropped).
 *
 * NOTE(review): a failure from qxl_gem_object_create() is reported as
 * -ENOMEM regardless of the real cause, and on drm_gem_handle_create()
 * failure the allocation reference is not dropped — confirm both are
 * intentional.
 */
int qxl_gem_object_create_with_handle(struct qxl_device *qdev,
				      struct drm_file *file_priv,
				      u32 domain,
				      size_t size,
				      struct qxl_surface *surf,
				      struct qxl_bo **qobj,
				      uint32_t *handle)
{
	struct drm_gem_object *gobj;
	int r;

	BUG_ON(!qobj);
	BUG_ON(!handle);

	r = qxl_gem_object_create(qdev, size, 0,
				  domain,
				  false, false, surf,
				  &gobj);
	if (r)
		return -ENOMEM;
	r = drm_gem_handle_create(file_priv, gobj, handle);
	if (r)
		return r;
	/* drop reference from allocate - handle holds it now */
	*qobj = gem_to_qxl_bo(gobj);
	drm_gem_object_unreference_unlocked(gobj);
	return 0;
}
|
||||
|
||||
/* Per-file GEM open hook: qxl keeps no per-handle state, nothing to do. */
int qxl_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	return 0;
}
|
||||
|
||||
/* Per-file GEM close hook: intentionally empty, no per-handle state. */
void qxl_gem_object_close(struct drm_gem_object *obj,
			  struct drm_file *file_priv)
{
}
|
||||
|
||||
int qxl_gem_init(struct qxl_device *qdev)
|
||||
{
|
||||
INIT_LIST_HEAD(&qdev->gem.objects);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Tear down GEM state: force-delete any bos still on the device list. */
void qxl_gem_fini(struct qxl_device *qdev)
{
	qxl_bo_force_delete(qdev);
}
|
237
drivers/gpu/drm/qxl/qxl_image.c
Normal file
237
drivers/gpu/drm/qxl/qxl_image.c
Normal file
|
@ -0,0 +1,237 @@
|
|||
/*
|
||||
* Copyright 2013 Red Hat Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors: Dave Airlie
|
||||
* Alon Levy
|
||||
*/
|
||||
|
||||
#include <linux/gfp.h>
|
||||
#include <linux/slab.h>
|
||||
|
||||
#include "qxl_drv.h"
|
||||
#include "qxl_object.h"
|
||||
|
||||
static int
|
||||
qxl_allocate_chunk(struct qxl_device *qdev,
|
||||
struct qxl_release *release,
|
||||
struct qxl_drm_image *image,
|
||||
unsigned int chunk_size)
|
||||
{
|
||||
struct qxl_drm_chunk *chunk;
|
||||
int ret;
|
||||
|
||||
chunk = kmalloc(sizeof(struct qxl_drm_chunk), GFP_KERNEL);
|
||||
if (!chunk)
|
||||
return -ENOMEM;
|
||||
|
||||
ret = qxl_alloc_bo_reserved(qdev, release, chunk_size, &chunk->bo);
|
||||
if (ret) {
|
||||
kfree(chunk);
|
||||
return ret;
|
||||
}
|
||||
|
||||
list_add_tail(&chunk->head, &image->chunk_list);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
 * Allocate the bos backing a drm image: one bo for the qxl_image descriptor
 * and (currently) a single data chunk big enough for height * stride bytes
 * of pixel data.  Both bos are reserved against @release.  On success
 * *image_ptr owns the allocations; free with qxl_image_free_objects().
 */
int
qxl_image_alloc_objects(struct qxl_device *qdev,
			struct qxl_release *release,
			struct qxl_drm_image **image_ptr,
			int height, int stride)
{
	struct qxl_drm_image *image;
	int ret;

	image = kmalloc(sizeof(struct qxl_drm_image), GFP_KERNEL);
	if (!image)
		return -ENOMEM;

	INIT_LIST_HEAD(&image->chunk_list);

	ret = qxl_alloc_bo_reserved(qdev, release, sizeof(struct qxl_image), &image->bo);
	if (ret) {
		kfree(image);
		return ret;
	}

	/* NOTE(review): stride * height is computed in int — confirm callers
	 * cannot overflow it */
	ret = qxl_allocate_chunk(qdev, release, image, sizeof(struct qxl_data_chunk) + stride * height);
	if (ret) {
		qxl_bo_unref(&image->bo);
		kfree(image);
		return ret;
	}
	*image_ptr = image;
	return 0;
}
|
||||
|
||||
void qxl_image_free_objects(struct qxl_device *qdev, struct qxl_drm_image *dimage)
|
||||
{
|
||||
struct qxl_drm_chunk *chunk, *tmp;
|
||||
|
||||
list_for_each_entry_safe(chunk, tmp, &dimage->chunk_list, head) {
|
||||
qxl_bo_unref(&chunk->bo);
|
||||
kfree(chunk);
|
||||
}
|
||||
|
||||
qxl_bo_unref(&dimage->bo);
|
||||
kfree(dimage);
|
||||
}
|
||||
|
||||
/*
 * Fill a pre-allocated drm image (see qxl_image_alloc_objects) with pixel
 * data and build its qxl_image descriptor.
 *
 * The pixel data is copied page by page into the (single) data chunk bo:
 * a fast path for contiguous rows, and a slow per-row path that splits a
 * row across page boundaries.  The descriptor is then filled in with the
 * SPICE bitmap format matching @depth (1/24/32 bpp) and the device-physical
 * address of the chunk.
 *
 * NOTE(review): @hash is never read, and qxl_bo_kunmap(chunk_bo) below has
 * no visible matching qxl_bo_kmap in this function — confirm the kmap is
 * done by the allocation path.
 */
static int
qxl_image_init_helper(struct qxl_device *qdev,
		      struct qxl_release *release,
		      struct qxl_drm_image *dimage,
		      const uint8_t *data,
		      int width, int height,
		      int depth, unsigned int hash,
		      int stride)
{
	struct qxl_drm_chunk *drv_chunk;
	struct qxl_image *image;
	struct qxl_data_chunk *chunk;
	int i;
	int chunk_stride;
	int linesize = width * depth / 8;	/* bytes of real pixel data per row */
	struct qxl_bo *chunk_bo, *image_bo;
	void *ptr;
	/* Chunk */
	/* FIXME: Check integer overflow */
	/* TODO: variable number of chunks */

	drv_chunk = list_first_entry(&dimage->chunk_list, struct qxl_drm_chunk, head);

	chunk_bo = drv_chunk->bo;
	chunk_stride = stride; /* TODO: should use linesize, but it renders
				  wrong (check the bitmaps are sent correctly
				  first) */

	/* write the chunk header (single chunk: no prev/next) */
	ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, 0);
	chunk = ptr;
	chunk->data_size = height * chunk_stride;
	chunk->prev_chunk = 0;
	chunk->next_chunk = 0;
	qxl_bo_kunmap_atomic_page(qdev, chunk_bo, ptr);

	{
		void *k_data, *i_data;
		int remain;
		int page;
		int size;
		if (stride == linesize && chunk_stride == stride) {
			/* fast path: rows contiguous, bulk-copy page by page */
			remain = linesize * height;
			page = 0;
			i_data = (void *)data;

			while (remain > 0) {
				ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, page << PAGE_SHIFT);

				if (page == 0) {
					/* first page starts after the chunk header */
					chunk = ptr;
					k_data = chunk->data;
					size = PAGE_SIZE - offsetof(struct qxl_data_chunk, data);
				} else {
					k_data = ptr;
					size = PAGE_SIZE;
				}
				size = min(size, remain);

				memcpy(k_data, i_data, size);

				qxl_bo_kunmap_atomic_page(qdev, chunk_bo, ptr);
				i_data += size;
				remain -= size;
				page++;
			}
		} else {
			/* slow path: copy row by row, a row may straddle pages */
			unsigned page_base, page_offset, out_offset;
			for (i = 0 ; i < height ; ++i) {
				i_data = (void *)data + i * stride;
				remain = linesize;
				out_offset = offsetof(struct qxl_data_chunk, data) + i * chunk_stride;

				while (remain > 0) {
					page_base = out_offset & PAGE_MASK;
					page_offset = offset_in_page(out_offset);
					size = min((int)(PAGE_SIZE - page_offset), remain);

					ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, page_base);
					k_data = ptr + page_offset;
					memcpy(k_data, i_data, size);
					qxl_bo_kunmap_atomic_page(qdev, chunk_bo, ptr);
					remain -= size;
					i_data += size;
					out_offset += size;
				}
			}
		}
	}
	qxl_bo_kunmap(chunk_bo);

	/* build the image descriptor pointing at the chunk */
	image_bo = dimage->bo;
	ptr = qxl_bo_kmap_atomic_page(qdev, image_bo, 0);
	image = ptr;

	image->descriptor.id = 0;
	image->descriptor.type = SPICE_IMAGE_TYPE_BITMAP;

	image->descriptor.flags = 0;
	image->descriptor.width = width;
	image->descriptor.height = height;

	switch (depth) {
	case 1:
		/* TODO: BE? check by arch? */
		image->u.bitmap.format = SPICE_BITMAP_FMT_1BIT_BE;
		break;
	case 24:
		image->u.bitmap.format = SPICE_BITMAP_FMT_24BIT;
		break;
	case 32:
		image->u.bitmap.format = SPICE_BITMAP_FMT_32BIT;
		break;
	default:
		DRM_ERROR("unsupported image bit depth\n");
		return -EINVAL; /* TODO: cleanup */
	}
	image->u.bitmap.flags = QXL_BITMAP_TOP_DOWN;
	image->u.bitmap.x = width;
	image->u.bitmap.y = height;
	image->u.bitmap.stride = chunk_stride;
	image->u.bitmap.palette = 0;
	image->u.bitmap.data = qxl_bo_physical_address(qdev, chunk_bo, 0);

	qxl_bo_kunmap_atomic_page(qdev, image_bo, ptr);

	return 0;
}
|
||||
|
||||
int qxl_image_init(struct qxl_device *qdev,
|
||||
struct qxl_release *release,
|
||||
struct qxl_drm_image *dimage,
|
||||
const uint8_t *data,
|
||||
int x, int y, int width, int height,
|
||||
int depth, int stride)
|
||||
{
|
||||
data += y * stride + x * (depth / 8);
|
||||
return qxl_image_init_helper(qdev, release, dimage, data,
|
||||
width, height, depth, 0, stride);
|
||||
}
|
454
drivers/gpu/drm/qxl/qxl_ioctl.c
Normal file
454
drivers/gpu/drm/qxl/qxl_ioctl.c
Normal file
|
@ -0,0 +1,454 @@
|
|||
/*
|
||||
* Copyright 2013 Red Hat Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors: Dave Airlie
|
||||
* Alon Levy
|
||||
*/
|
||||
|
||||
#include "qxl_drv.h"
|
||||
#include "qxl_object.h"
|
||||
|
||||
/*
|
||||
* TODO: allocating a new gem(in qxl_bo) for each request.
|
||||
* This is wasteful since bo's are page aligned.
|
||||
*/
|
||||
/*
 * TODO: allocating a new gem(in qxl_bo) for each request.
 * This is wasteful since bo's are page aligned.
 */
/*
 * DRM_IOCTL_QXL_ALLOC: allocate a VRAM gem object of the requested size and
 * hand its handle back to userspace.
 */
static int qxl_alloc_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct qxl_device *qdev = dev->dev_private;
	struct drm_qxl_alloc *qxl_alloc = data;
	int ret;
	struct qxl_bo *qobj;
	uint32_t handle;
	u32 domain = QXL_GEM_DOMAIN_VRAM;

	if (qxl_alloc->size == 0) {
		DRM_ERROR("invalid size %d\n", qxl_alloc->size);
		return -EINVAL;
	}
	ret = qxl_gem_object_create_with_handle(qdev, file_priv,
						domain,
						qxl_alloc->size,
						NULL,
						&qobj, &handle);
	if (ret) {
		/* NOTE(review): the real error code is discarded and
		 * replaced with -ENOMEM — confirm userspace depends on it */
		DRM_ERROR("%s: failed to create gem ret=%d\n",
			  __func__, ret);
		return -ENOMEM;
	}
	qxl_alloc->handle = handle;
	return 0;
}
|
||||
|
||||
static int qxl_map_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv)
|
||||
{
|
||||
struct qxl_device *qdev = dev->dev_private;
|
||||
struct drm_qxl_map *qxl_map = data;
|
||||
|
||||
return qxl_mode_dumb_mmap(file_priv, qdev->ddev, qxl_map->handle,
|
||||
&qxl_map->offset);
|
||||
}
|
||||
|
||||
/*
 * One parsed relocation from userspace.  dst_bo/dst_offset locate the slot
 * to patch (64-bit address for QXL_RELOC_TYPE_BO, 32-bit surface id for
 * QXL_RELOC_TYPE_SURF); src_bo/src_offset identify the object whose
 * physical address or surface id is written there (src_bo may be NULL for
 * surface relocs).
 */
struct qxl_reloc_info {
	int type;		/* QXL_RELOC_TYPE_BO or QXL_RELOC_TYPE_SURF */
	struct qxl_bo *dst_bo;
	uint32_t dst_offset;
	struct qxl_bo *src_bo;
	int src_offset;
};
|
||||
|
||||
/*
|
||||
* dst must be validated, i.e. whole bo on vram/surfacesram (right now all bo's
|
||||
* are on vram).
|
||||
* *(dst + dst_off) = qxl_bo_physical_address(src, src_off)
|
||||
*/
|
||||
/*
 * dst must be validated, i.e. whole bo on vram/surfacesram (right now all bo's
 * are on vram).
 * *(dst + dst_off) = qxl_bo_physical_address(src, src_off)
 */
static void
apply_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info)
{
	void *reloc_page;
	/* map only the page holding the slot, then patch in the 64-bit
	 * device-physical address of the source bo */
	reloc_page = qxl_bo_kmap_atomic_page(qdev, info->dst_bo, info->dst_offset & PAGE_MASK);
	*(uint64_t *)(reloc_page + (info->dst_offset & ~PAGE_MASK)) = qxl_bo_physical_address(qdev,
											      info->src_bo,
											      info->src_offset);
	qxl_bo_kunmap_atomic_page(qdev, info->dst_bo, reloc_page);
}
|
||||
|
||||
/*
 * Patch a 32-bit surface-id slot.  The primary surface always uses id 0;
 * otherwise the source bo's surface_id is written.
 */
static void
apply_surf_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info)
{
	uint32_t id = 0;
	void *reloc_page;

	if (info->src_bo && !info->src_bo->is_primary)
		id = info->src_bo->surface_id;

	reloc_page = qxl_bo_kmap_atomic_page(qdev, info->dst_bo, info->dst_offset & PAGE_MASK);
	*(uint32_t *)(reloc_page + (info->dst_offset & ~PAGE_MASK)) = id;
	qxl_bo_kunmap_atomic_page(qdev, info->dst_bo, reloc_page);
}
|
||||
|
||||
/* return holding the reference to this object */
static struct qxl_bo *qxlhw_handle_to_bo(struct qxl_device *qdev,
					 struct drm_file *file_priv, uint64_t handle,
					 struct qxl_release *release)
{
	struct drm_gem_object *gobj;
	struct qxl_bo *qobj;
	int ret;

	gobj = drm_gem_object_lookup(qdev->ddev, file_priv, handle);
	if (!gobj)
		return NULL;

	qobj = gem_to_qxl_bo(gobj);

	/* queue the bo on the release's validation list */
	ret = qxl_release_list_add(release, qobj);
	/* NOTE(review): on failure the lookup reference taken above is not
	 * dropped — confirm the caller cannot leak it */
	if (ret)
		return NULL;

	return qobj;
}
|
||||
|
||||
/*
|
||||
* Usage of execbuffer:
|
||||
* Relocations need to take into account the full QXLDrawable size.
|
||||
* However, the command as passed from user space must *not* contain the initial
|
||||
* QXLReleaseInfo struct (first XXX bytes)
|
||||
*/
|
||||
static int qxl_process_single_command(struct qxl_device *qdev,
|
||||
struct drm_qxl_command *cmd,
|
||||
struct drm_file *file_priv)
|
||||
{
|
||||
struct qxl_reloc_info *reloc_info;
|
||||
int release_type;
|
||||
struct qxl_release *release;
|
||||
struct qxl_bo *cmd_bo;
|
||||
void *fb_cmd;
|
||||
int i, j, ret, num_relocs;
|
||||
int unwritten;
|
||||
|
||||
switch (cmd->type) {
|
||||
case QXL_CMD_DRAW:
|
||||
release_type = QXL_RELEASE_DRAWABLE;
|
||||
break;
|
||||
case QXL_CMD_SURFACE:
|
||||
case QXL_CMD_CURSOR:
|
||||
default:
|
||||
DRM_DEBUG("Only draw commands in execbuffers\n");
|
||||
return -EINVAL;
|
||||
break;
|
||||
}
|
||||
|
||||
if (cmd->command_size > PAGE_SIZE - sizeof(union qxl_release_info))
|
||||
return -EINVAL;
|
||||
|
||||
if (!access_ok(VERIFY_READ,
|
||||
(void *)(unsigned long)cmd->command,
|
||||
cmd->command_size))
|
||||
return -EFAULT;
|
||||
|
||||
reloc_info = kmalloc(sizeof(struct qxl_reloc_info) * cmd->relocs_num, GFP_KERNEL);
|
||||
if (!reloc_info)
|
||||
return -ENOMEM;
|
||||
|
||||
ret = qxl_alloc_release_reserved(qdev,
|
||||
sizeof(union qxl_release_info) +
|
||||
cmd->command_size,
|
||||
release_type,
|
||||
&release,
|
||||
&cmd_bo);
|
||||
if (ret)
|
||||
goto out_free_reloc;
|
||||
|
||||
/* TODO copy slow path code from i915 */
|
||||
fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE));
|
||||
unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void *)(unsigned long)cmd->command, cmd->command_size);
|
||||
|
||||
{
|
||||
struct qxl_drawable *draw = fb_cmd;
|
||||
draw->mm_time = qdev->rom->mm_clock;
|
||||
}
|
||||
|
||||
qxl_bo_kunmap_atomic_page(qdev, cmd_bo, fb_cmd);
|
||||
if (unwritten) {
|
||||
DRM_ERROR("got unwritten %d\n", unwritten);
|
||||
ret = -EFAULT;
|
||||
goto out_free_release;
|
||||
}
|
||||
|
||||
/* fill out reloc info structs */
|
||||
num_relocs = 0;
|
||||
for (i = 0; i < cmd->relocs_num; ++i) {
|
||||
struct drm_qxl_reloc reloc;
|
||||
|
||||
if (copy_from_user(&reloc,
|
||||
&((struct drm_qxl_reloc *)(uintptr_t)cmd->relocs)[i],
|
||||
sizeof(reloc))) {
|
||||
ret = -EFAULT;
|
||||
goto out_free_bos;
|
||||
}
|
||||
|
||||
/* add the bos to the list of bos to validate -
|
||||
need to validate first then process relocs? */
|
||||
if (reloc.reloc_type != QXL_RELOC_TYPE_BO && reloc.reloc_type != QXL_RELOC_TYPE_SURF) {
|
||||
DRM_DEBUG("unknown reloc type %d\n", reloc_info[i].type);
|
||||
|
||||
ret = -EINVAL;
|
||||
goto out_free_bos;
|
||||
}
|
||||
reloc_info[i].type = reloc.reloc_type;
|
||||
|
||||
if (reloc.dst_handle) {
|
||||
reloc_info[i].dst_bo = qxlhw_handle_to_bo(qdev, file_priv,
|
||||
reloc.dst_handle, release);
|
||||
if (!reloc_info[i].dst_bo) {
|
||||
ret = -EINVAL;
|
||||
reloc_info[i].src_bo = NULL;
|
||||
goto out_free_bos;
|
||||
}
|
||||
reloc_info[i].dst_offset = reloc.dst_offset;
|
||||
} else {
|
||||
reloc_info[i].dst_bo = cmd_bo;
|
||||
reloc_info[i].dst_offset = reloc.dst_offset + release->release_offset;
|
||||
}
|
||||
num_relocs++;
|
||||
|
||||
/* reserve and validate the reloc dst bo */
|
||||
if (reloc.reloc_type == QXL_RELOC_TYPE_BO || reloc.src_handle > 0) {
|
||||
reloc_info[i].src_bo =
|
||||
qxlhw_handle_to_bo(qdev, file_priv,
|
||||
reloc.src_handle, release);
|
||||
if (!reloc_info[i].src_bo) {
|
||||
if (reloc_info[i].dst_bo != cmd_bo)
|
||||
drm_gem_object_unreference_unlocked(&reloc_info[i].dst_bo->gem_base);
|
||||
ret = -EINVAL;
|
||||
goto out_free_bos;
|
||||
}
|
||||
reloc_info[i].src_offset = reloc.src_offset;
|
||||
} else {
|
||||
reloc_info[i].src_bo = NULL;
|
||||
reloc_info[i].src_offset = 0;
|
||||
}
|
||||
}
|
||||
|
||||
/* validate all buffers */
|
||||
ret = qxl_release_reserve_list(release, false);
|
||||
if (ret)
|
||||
goto out_free_bos;
|
||||
|
||||
for (i = 0; i < cmd->relocs_num; ++i) {
|
||||
if (reloc_info[i].type == QXL_RELOC_TYPE_BO)
|
||||
apply_reloc(qdev, &reloc_info[i]);
|
||||
else if (reloc_info[i].type == QXL_RELOC_TYPE_SURF)
|
||||
apply_surf_reloc(qdev, &reloc_info[i]);
|
||||
}
|
||||
|
||||
ret = qxl_push_command_ring_release(qdev, release, cmd->type, true);
|
||||
if (ret)
|
||||
qxl_release_backoff_reserve_list(release);
|
||||
else
|
||||
qxl_release_fence_buffer_objects(release);
|
||||
|
||||
out_free_bos:
|
||||
for (j = 0; j < num_relocs; j++) {
|
||||
if (reloc_info[j].dst_bo != cmd_bo)
|
||||
drm_gem_object_unreference_unlocked(&reloc_info[j].dst_bo->gem_base);
|
||||
if (reloc_info[j].src_bo && reloc_info[j].src_bo != cmd_bo)
|
||||
drm_gem_object_unreference_unlocked(&reloc_info[j].src_bo->gem_base);
|
||||
}
|
||||
out_free_release:
|
||||
if (ret)
|
||||
qxl_release_free(qdev, release);
|
||||
out_free_reloc:
|
||||
kfree(reloc_info);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv)
|
||||
{
|
||||
struct qxl_device *qdev = dev->dev_private;
|
||||
struct drm_qxl_execbuffer *execbuffer = data;
|
||||
struct drm_qxl_command user_cmd;
|
||||
int cmd_num;
|
||||
int ret;
|
||||
|
||||
for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) {
|
||||
|
||||
struct drm_qxl_command *commands =
|
||||
(struct drm_qxl_command *)(uintptr_t)execbuffer->commands;
|
||||
|
||||
if (copy_from_user(&user_cmd, &commands[cmd_num],
|
||||
sizeof(user_cmd)))
|
||||
return -EFAULT;
|
||||
|
||||
ret = qxl_process_single_command(qdev, &user_cmd, file_priv);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int qxl_update_area_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *file)
|
||||
{
|
||||
struct qxl_device *qdev = dev->dev_private;
|
||||
struct drm_qxl_update_area *update_area = data;
|
||||
struct qxl_rect area = {.left = update_area->left,
|
||||
.top = update_area->top,
|
||||
.right = update_area->right,
|
||||
.bottom = update_area->bottom};
|
||||
int ret;
|
||||
struct drm_gem_object *gobj = NULL;
|
||||
struct qxl_bo *qobj = NULL;
|
||||
|
||||
if (update_area->left >= update_area->right ||
|
||||
update_area->top >= update_area->bottom)
|
||||
return -EINVAL;
|
||||
|
||||
gobj = drm_gem_object_lookup(dev, file, update_area->handle);
|
||||
if (gobj == NULL)
|
||||
return -ENOENT;
|
||||
|
||||
qobj = gem_to_qxl_bo(gobj);
|
||||
|
||||
ret = qxl_bo_reserve(qobj, false);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
if (!qobj->pin_count) {
|
||||
qxl_ttm_placement_from_domain(qobj, qobj->type, false);
|
||||
ret = ttm_bo_validate(&qobj->tbo, &qobj->placement,
|
||||
true, false);
|
||||
if (unlikely(ret))
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = qxl_bo_check_id(qdev, qobj);
|
||||
if (ret)
|
||||
goto out2;
|
||||
if (!qobj->surface_id)
|
||||
DRM_ERROR("got update area for surface with no id %d\n", update_area->handle);
|
||||
ret = qxl_io_update_area(qdev, qobj, &area);
|
||||
|
||||
out2:
|
||||
qxl_bo_unreserve(qobj);
|
||||
|
||||
out:
|
||||
drm_gem_object_unreference_unlocked(gobj);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int qxl_getparam_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv)
|
||||
{
|
||||
struct qxl_device *qdev = dev->dev_private;
|
||||
struct drm_qxl_getparam *param = data;
|
||||
|
||||
switch (param->param) {
|
||||
case QXL_PARAM_NUM_SURFACES:
|
||||
param->value = qdev->rom->n_surfaces;
|
||||
break;
|
||||
case QXL_PARAM_MAX_RELOCS:
|
||||
param->value = QXL_MAX_RES;
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
 * DRM_IOCTL_QXL_CLIENTCAP: report whether the remote client advertises
 * capability bit param->index.  Returns 0 when the bit is set, -ENOSYS
 * when it is not (or the device/index doesn't support capabilities).
 */
static int qxl_clientcap_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file_priv)
{
	struct qxl_device *qdev = dev->dev_private;
	struct drm_qxl_clientcap *param = data;
	int byte, idx;

	byte = param->index / 8;
	idx = param->index % 8;

	/* client capabilities require QXL PCI revision >= 4 */
	if (qdev->pdev->revision < 4)
		return -ENOSYS;

	/* 58 is presumably sizeof(rom->client_capabilities) — verify against
	 * the qxl device ABI header */
	if (byte >= 58)
		return -ENOSYS;

	if (qdev->rom->client_capabilities[byte] & (1 << idx))
		return 0;
	return -ENOSYS;
}
|
||||
|
||||
static int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *file)
|
||||
{
|
||||
struct qxl_device *qdev = dev->dev_private;
|
||||
struct drm_qxl_alloc_surf *param = data;
|
||||
struct qxl_bo *qobj;
|
||||
int handle;
|
||||
int ret;
|
||||
int size, actual_stride;
|
||||
struct qxl_surface surf;
|
||||
|
||||
/* work out size allocate bo with handle */
|
||||
actual_stride = param->stride < 0 ? -param->stride : param->stride;
|
||||
size = actual_stride * param->height + actual_stride;
|
||||
|
||||
surf.format = param->format;
|
||||
surf.width = param->width;
|
||||
surf.height = param->height;
|
||||
surf.stride = param->stride;
|
||||
surf.data = 0;
|
||||
|
||||
ret = qxl_gem_object_create_with_handle(qdev, file,
|
||||
QXL_GEM_DOMAIN_SURFACE,
|
||||
size,
|
||||
&surf,
|
||||
&qobj, &handle);
|
||||
if (ret) {
|
||||
DRM_ERROR("%s: failed to create gem ret=%d\n",
|
||||
__func__, ret);
|
||||
return -ENOMEM;
|
||||
} else
|
||||
param->handle = handle;
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Userspace-visible ioctl table; every entry requires DRM auth and runs
 * without the legacy BKL-style drm lock. */
const struct drm_ioctl_desc qxl_ioctls[] = {
	DRM_IOCTL_DEF_DRV(QXL_ALLOC, qxl_alloc_ioctl, DRM_AUTH|DRM_UNLOCKED),

	DRM_IOCTL_DEF_DRV(QXL_MAP, qxl_map_ioctl, DRM_AUTH|DRM_UNLOCKED),

	DRM_IOCTL_DEF_DRV(QXL_EXECBUFFER, qxl_execbuffer_ioctl,
							DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(QXL_UPDATE_AREA, qxl_update_area_ioctl,
							DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(QXL_GETPARAM, qxl_getparam_ioctl,
							DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(QXL_CLIENTCAP, qxl_clientcap_ioctl,
							DRM_AUTH|DRM_UNLOCKED),

	DRM_IOCTL_DEF_DRV(QXL_ALLOC_SURF, qxl_alloc_surf_ioctl,
							DRM_AUTH|DRM_UNLOCKED),
};

/* exported so the drm_driver structure can reference the table size */
int qxl_max_ioctls = ARRAY_SIZE(qxl_ioctls);
|
100
drivers/gpu/drm/qxl/qxl_irq.c
Normal file
100
drivers/gpu/drm/qxl/qxl_irq.c
Normal file
|
@ -0,0 +1,100 @@
|
|||
/*
|
||||
* Copyright 2013 Red Hat Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors: Dave Airlie
|
||||
* Alon Levy
|
||||
*/
|
||||
|
||||
#include "qxl_drv.h"
|
||||
|
||||
/*
 * Top-half interrupt handler.  Atomically consumes the device's pending
 * bits, wakes the waiters interested in each event, and re-arms the
 * interrupt via the UPDATE_IRQ io port.
 */
irqreturn_t qxl_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	struct qxl_device *qdev = (struct qxl_device *)dev->dev_private;
	uint32_t pending;

	/* read-and-clear so a concurrent device update is not lost */
	pending = xchg(&qdev->ram_header->int_pending, 0);

	if (!pending)
		return IRQ_NONE;

	atomic_inc(&qdev->irq_received);

	if (pending & QXL_INTERRUPT_DISPLAY) {
		atomic_inc(&qdev->irq_received_display);
		wake_up_all(&qdev->display_event);
		qxl_queue_garbage_collect(qdev, false);
	}
	if (pending & QXL_INTERRUPT_CURSOR) {
		atomic_inc(&qdev->irq_received_cursor);
		wake_up_all(&qdev->cursor_event);
	}
	if (pending & QXL_INTERRUPT_IO_CMD) {
		atomic_inc(&qdev->irq_received_io_cmd);
		wake_up_all(&qdev->io_cmd_event);
	}
	if (pending & QXL_INTERRUPT_ERROR) {
		/* TODO: log it, reset device (only way to exit this condition)
		 * (do it a certain number of times, afterwards admit defeat,
		 * to avoid endless loops).
		 */
		qdev->irq_received_error++;
		qxl_io_log(qdev, "%s: driver is in bug mode.\n", __func__);
	}
	if (pending & QXL_INTERRUPT_CLIENT_MONITORS_CONFIG) {
		/* reading the config talks to the device; defer to a work item */
		qxl_io_log(qdev, "QXL_INTERRUPT_CLIENT_MONITORS_CONFIG\n");
		schedule_work(&qdev->client_monitors_config_work);
	}
	/* re-enable all interrupt sources and poke the device */
	qdev->ram_header->int_mask = QXL_INTERRUPT_MASK;
	outb(0, qdev->io_base + QXL_IO_UPDATE_IRQ);
	return IRQ_HANDLED;
}
|
||||
|
||||
static void qxl_client_monitors_config_work_func(struct work_struct *work)
|
||||
{
|
||||
struct qxl_device *qdev = container_of(work, struct qxl_device,
|
||||
client_monitors_config_work);
|
||||
|
||||
qxl_display_read_client_monitors_config(qdev);
|
||||
}
|
||||
|
||||
/*
 * Set up wait queues, the monitors-config work item and the irq counters,
 * then install the interrupt handler and unmask all interrupt sources.
 *
 * NOTE(review): the int_mask write happens after drm_irq_install() but
 * before the error check, and a failure returns 1 rather than the negative
 * code from drm_irq_install() — confirm callers only test for non-zero.
 */
int qxl_irq_init(struct qxl_device *qdev)
{
	int ret;

	init_waitqueue_head(&qdev->display_event);
	init_waitqueue_head(&qdev->cursor_event);
	init_waitqueue_head(&qdev->io_cmd_event);
	INIT_WORK(&qdev->client_monitors_config_work,
		  qxl_client_monitors_config_work_func);
	atomic_set(&qdev->irq_received, 0);
	atomic_set(&qdev->irq_received_display, 0);
	atomic_set(&qdev->irq_received_cursor, 0);
	atomic_set(&qdev->irq_received_io_cmd, 0);
	qdev->irq_received_error = 0;
	ret = drm_irq_install(qdev->ddev, qdev->ddev->pdev->irq);
	qdev->ram_header->int_mask = QXL_INTERRUPT_MASK;
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed installing irq: %d\n", ret);
		return 1;
	}
	return 0;
}
|
350
drivers/gpu/drm/qxl/qxl_kms.c
Normal file
350
drivers/gpu/drm/qxl/qxl_kms.c
Normal file
|
@ -0,0 +1,350 @@
|
|||
/*
|
||||
* Copyright 2013 Red Hat Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors: Dave Airlie
|
||||
* Alon Levy
|
||||
*/
|
||||
|
||||
#include "qxl_drv.h"
|
||||
#include "qxl_object.h"
|
||||
|
||||
#include <drm/drm_crtc_helper.h>
|
||||
#include <linux/io-mapping.h>
|
||||
|
||||
int qxl_log_level;
|
||||
|
||||
static void qxl_dump_mode(struct qxl_device *qdev, void *p)
|
||||
{
|
||||
struct qxl_mode *m = p;
|
||||
DRM_DEBUG_KMS("%d: %dx%d %d bits, stride %d, %dmm x %dmm, orientation %d\n",
|
||||
m->id, m->x_res, m->y_res, m->bits, m->stride, m->x_mili,
|
||||
m->y_mili, m->orientation);
|
||||
}
|
||||
|
||||
/*
 * Validate the device ROM and capture the information the driver needs:
 * surface0 size (used as vram_size) and the mode list embedded in the ROM.
 * Returns false when the ROM signature does not match.
 */
static bool qxl_check_device(struct qxl_device *qdev)
{
	struct qxl_rom *rom = qdev->rom;
	int mode_offset;
	int i;

	/* 0x4f525851 == "QXRO" — ROM magic; bail out on anything else */
	if (rom->magic != 0x4f525851) {
		DRM_ERROR("bad rom signature %x\n", rom->magic);
		return false;
	}

	DRM_INFO("Device Version %d.%d\n", rom->id, rom->update_id);
	DRM_INFO("Compression level %d log level %d\n", rom->compression_level,
		 rom->log_level);
	DRM_INFO("Currently using mode #%d, list at 0x%x\n",
		 rom->mode, rom->modes_offset);
	DRM_INFO("%d io pages at offset 0x%x\n",
		 rom->num_io_pages, rom->pages_offset);
	DRM_INFO("%d byte draw area at offset 0x%x\n",
		 rom->surface0_area_size, rom->draw_area_offset);

	qdev->vram_size = rom->surface0_area_size;
	DRM_INFO("RAM header offset: 0x%x\n", rom->ram_header_offset);

	/* the mode list lives inside the ROM: a u32 count followed by the
	 * array of qxl_mode entries */
	mode_offset = rom->modes_offset / 4;
	qdev->mode_info.num_modes = ((u32 *)rom)[mode_offset];
	DRM_INFO("rom modes offset 0x%x for %d modes\n", rom->modes_offset,
		 qdev->mode_info.num_modes);
	qdev->mode_info.modes = (void *)((uint32_t *)rom + mode_offset + 1);
	for (i = 0; i < qdev->mode_info.num_modes; i++)
		qxl_dump_mode(qdev, qdev->mode_info.modes + i);
	return true;
}
|
||||
|
||||
/*
 * Program one memory slot into the device: write its physical range into
 * the ram header and issue the MEMSLOT_ADD io command.
 */
static void setup_hw_slot(struct qxl_device *qdev, int slot_index,
			  struct qxl_memslot *slot)
{
	qdev->ram_header->mem_slot.mem_start = slot->start_phys_addr;
	qdev->ram_header->mem_slot.mem_end = slot->end_phys_addr;
	qxl_io_memslot_add(qdev, slot_index);
}
|
||||
|
||||
/*
 * Register a memory slot covering [start_phys_addr, end_phys_addr) with
 * the device and compute the slot's precomputed address tag.
 *
 * The returned slot index is rom->slots_start + slot_index_offset.
 * high_bits packs (slot_index, generation) into the top
 * (slot_id_bits + slot_gen_bits) bits of a 64-bit device address, so it
 * can simply be OR'd onto an offset when building physical addresses.
 */
static uint8_t setup_slot(struct qxl_device *qdev, uint8_t slot_index_offset,
	unsigned long start_phys_addr, unsigned long end_phys_addr)
{
	uint64_t high_bits;
	struct qxl_memslot *slot;
	uint8_t slot_index;

	slot_index = qdev->rom->slots_start + slot_index_offset;
	slot = &qdev->mem_slots[slot_index];
	slot->start_phys_addr = start_phys_addr;
	slot->end_phys_addr = end_phys_addr;

	setup_hw_slot(qdev, slot_index, slot);

	/* generation must be sampled after the hw slot add */
	slot->generation = qdev->rom->slot_generation;
	high_bits = slot_index << qdev->slot_gen_bits;
	high_bits |= slot->generation;
	high_bits <<= (64 - (qdev->slot_gen_bits + qdev->slot_id_bits));
	slot->high_bits = high_bits;
	return slot_index;
}
|
||||
|
||||
/*
 * Re-program the main and surfaces memory slots into the hardware,
 * e.g. after a device reset wiped the hardware-side slot table.
 * Uses the ranges already recorded in qdev->mem_slots.
 */
void qxl_reinit_memslots(struct qxl_device *qdev)
{
	setup_hw_slot(qdev, qdev->main_mem_slot, &qdev->mem_slots[qdev->main_mem_slot]);
	setup_hw_slot(qdev, qdev->surfaces_mem_slot, &qdev->mem_slots[qdev->surfaces_mem_slot]);
}
|
||||
|
||||
/* Workqueue callback: run release garbage collection for the device. */
static void qxl_gc_work(struct work_struct *work)
{
	struct qxl_device *qdev = container_of(work, struct qxl_device, gc_work);
	qxl_garbage_collect(qdev);
}
|
||||
|
||||
static int qxl_device_init(struct qxl_device *qdev,
|
||||
struct drm_device *ddev,
|
||||
struct pci_dev *pdev,
|
||||
unsigned long flags)
|
||||
{
|
||||
int r, sb;
|
||||
|
||||
qdev->dev = &pdev->dev;
|
||||
qdev->ddev = ddev;
|
||||
qdev->pdev = pdev;
|
||||
qdev->flags = flags;
|
||||
|
||||
mutex_init(&qdev->gem.mutex);
|
||||
mutex_init(&qdev->update_area_mutex);
|
||||
mutex_init(&qdev->release_mutex);
|
||||
mutex_init(&qdev->surf_evict_mutex);
|
||||
INIT_LIST_HEAD(&qdev->gem.objects);
|
||||
|
||||
qdev->rom_base = pci_resource_start(pdev, 2);
|
||||
qdev->rom_size = pci_resource_len(pdev, 2);
|
||||
qdev->vram_base = pci_resource_start(pdev, 0);
|
||||
qdev->io_base = pci_resource_start(pdev, 3);
|
||||
|
||||
qdev->vram_mapping = io_mapping_create_wc(qdev->vram_base, pci_resource_len(pdev, 0));
|
||||
|
||||
if (pci_resource_len(pdev, 4) > 0) {
|
||||
/* 64bit surface bar present */
|
||||
sb = 4;
|
||||
qdev->surfaceram_base = pci_resource_start(pdev, sb);
|
||||
qdev->surfaceram_size = pci_resource_len(pdev, sb);
|
||||
qdev->surface_mapping =
|
||||
io_mapping_create_wc(qdev->surfaceram_base,
|
||||
qdev->surfaceram_size);
|
||||
}
|
||||
if (qdev->surface_mapping == NULL) {
|
||||
/* 64bit surface bar not present (or mapping failed) */
|
||||
sb = 1;
|
||||
qdev->surfaceram_base = pci_resource_start(pdev, sb);
|
||||
qdev->surfaceram_size = pci_resource_len(pdev, sb);
|
||||
qdev->surface_mapping =
|
||||
io_mapping_create_wc(qdev->surfaceram_base,
|
||||
qdev->surfaceram_size);
|
||||
}
|
||||
|
||||
DRM_DEBUG_KMS("qxl: vram %llx-%llx(%dM %dk), surface %llx-%llx(%dM %dk, %s)\n",
|
||||
(unsigned long long)qdev->vram_base,
|
||||
(unsigned long long)pci_resource_end(pdev, 0),
|
||||
(int)pci_resource_len(pdev, 0) / 1024 / 1024,
|
||||
(int)pci_resource_len(pdev, 0) / 1024,
|
||||
(unsigned long long)qdev->surfaceram_base,
|
||||
(unsigned long long)pci_resource_end(pdev, sb),
|
||||
(int)qdev->surfaceram_size / 1024 / 1024,
|
||||
(int)qdev->surfaceram_size / 1024,
|
||||
(sb == 4) ? "64bit" : "32bit");
|
||||
|
||||
qdev->rom = ioremap(qdev->rom_base, qdev->rom_size);
|
||||
if (!qdev->rom) {
|
||||
pr_err("Unable to ioremap ROM\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
qxl_check_device(qdev);
|
||||
|
||||
r = qxl_bo_init(qdev);
|
||||
if (r) {
|
||||
DRM_ERROR("bo init failed %d\n", r);
|
||||
return r;
|
||||
}
|
||||
|
||||
qdev->ram_header = ioremap(qdev->vram_base +
|
||||
qdev->rom->ram_header_offset,
|
||||
sizeof(*qdev->ram_header));
|
||||
|
||||
qdev->command_ring = qxl_ring_create(&(qdev->ram_header->cmd_ring_hdr),
|
||||
sizeof(struct qxl_command),
|
||||
QXL_COMMAND_RING_SIZE,
|
||||
qdev->io_base + QXL_IO_NOTIFY_CMD,
|
||||
false,
|
||||
&qdev->display_event);
|
||||
|
||||
qdev->cursor_ring = qxl_ring_create(
|
||||
&(qdev->ram_header->cursor_ring_hdr),
|
||||
sizeof(struct qxl_command),
|
||||
QXL_CURSOR_RING_SIZE,
|
||||
qdev->io_base + QXL_IO_NOTIFY_CMD,
|
||||
false,
|
||||
&qdev->cursor_event);
|
||||
|
||||
qdev->release_ring = qxl_ring_create(
|
||||
&(qdev->ram_header->release_ring_hdr),
|
||||
sizeof(uint64_t),
|
||||
QXL_RELEASE_RING_SIZE, 0, true,
|
||||
NULL);
|
||||
|
||||
/* TODO - slot initialization should happen on reset. where is our
|
||||
* reset handler? */
|
||||
qdev->n_mem_slots = qdev->rom->slots_end;
|
||||
qdev->slot_gen_bits = qdev->rom->slot_gen_bits;
|
||||
qdev->slot_id_bits = qdev->rom->slot_id_bits;
|
||||
qdev->va_slot_mask =
|
||||
(~(uint64_t)0) >> (qdev->slot_id_bits + qdev->slot_gen_bits);
|
||||
|
||||
qdev->mem_slots =
|
||||
kmalloc(qdev->n_mem_slots * sizeof(struct qxl_memslot),
|
||||
GFP_KERNEL);
|
||||
|
||||
idr_init(&qdev->release_idr);
|
||||
spin_lock_init(&qdev->release_idr_lock);
|
||||
spin_lock_init(&qdev->release_lock);
|
||||
|
||||
idr_init(&qdev->surf_id_idr);
|
||||
spin_lock_init(&qdev->surf_id_idr_lock);
|
||||
|
||||
mutex_init(&qdev->async_io_mutex);
|
||||
|
||||
/* reset the device into a known state - no memslots, no primary
|
||||
* created, no surfaces. */
|
||||
qxl_io_reset(qdev);
|
||||
|
||||
/* must initialize irq before first async io - slot creation */
|
||||
r = qxl_irq_init(qdev);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
/*
|
||||
* Note that virtual is surface0. We rely on the single ioremap done
|
||||
* before.
|
||||
*/
|
||||
qdev->main_mem_slot = setup_slot(qdev, 0,
|
||||
(unsigned long)qdev->vram_base,
|
||||
(unsigned long)qdev->vram_base + qdev->rom->ram_header_offset);
|
||||
qdev->surfaces_mem_slot = setup_slot(qdev, 1,
|
||||
(unsigned long)qdev->surfaceram_base,
|
||||
(unsigned long)qdev->surfaceram_base + qdev->surfaceram_size);
|
||||
DRM_INFO("main mem slot %d [%lx,%x]\n",
|
||||
qdev->main_mem_slot,
|
||||
(unsigned long)qdev->vram_base, qdev->rom->ram_header_offset);
|
||||
DRM_INFO("surface mem slot %d [%lx,%lx]\n",
|
||||
qdev->surfaces_mem_slot,
|
||||
(unsigned long)qdev->surfaceram_base,
|
||||
(unsigned long)qdev->surfaceram_size);
|
||||
|
||||
|
||||
qdev->gc_queue = create_singlethread_workqueue("qxl_gc");
|
||||
INIT_WORK(&qdev->gc_work, qxl_gc_work);
|
||||
|
||||
r = qxl_fb_init(qdev);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
 * Tear down everything qxl_device_init() set up, in reverse order:
 * pending release BOs, the GC workqueue, the rings, TTM, the BAR
 * mappings and the ROM/ram-header ioremaps.
 */
static void qxl_device_fini(struct qxl_device *qdev)
{
	if (qdev->current_release_bo[0])
		qxl_bo_unref(&qdev->current_release_bo[0]);
	if (qdev->current_release_bo[1])
		qxl_bo_unref(&qdev->current_release_bo[1]);
	/* drain outstanding GC work before destroying the queue */
	flush_workqueue(qdev->gc_queue);
	destroy_workqueue(qdev->gc_queue);
	qdev->gc_queue = NULL;

	qxl_ring_free(qdev->command_ring);
	qxl_ring_free(qdev->cursor_ring);
	qxl_ring_free(qdev->release_ring);
	qxl_bo_fini(qdev);
	io_mapping_free(qdev->surface_mapping);
	io_mapping_free(qdev->vram_mapping);
	iounmap(qdev->ram_header);
	iounmap(qdev->rom);
	qdev->rom = NULL;
	/* mode_info pointed into the (now unmapped) ROM */
	qdev->mode_info.modes = NULL;
	qdev->mode_info.num_modes = 0;
	qxl_debugfs_remove_files(qdev);
}
|
||||
|
||||
/*
 * DRM driver unload callback: tears down modeset state and the device,
 * then frees the per-device structure. Safe to call when load never
 * completed (dev_private == NULL). Always returns 0.
 */
int qxl_driver_unload(struct drm_device *dev)
{
	struct qxl_device *qdev = dev->dev_private;

	if (qdev == NULL)
		return 0;

	drm_vblank_cleanup(dev);

	qxl_modeset_fini(qdev);
	qxl_device_fini(qdev);

	/* qdev is freed here; clear dev_private so a later unload is a no-op */
	kfree(qdev);
	dev->dev_private = NULL;
	return 0;
}
|
||||
|
||||
int qxl_driver_load(struct drm_device *dev, unsigned long flags)
|
||||
{
|
||||
struct qxl_device *qdev;
|
||||
int r;
|
||||
|
||||
/* require kms */
|
||||
if (!drm_core_check_feature(dev, DRIVER_MODESET))
|
||||
return -ENODEV;
|
||||
|
||||
qdev = kzalloc(sizeof(struct qxl_device), GFP_KERNEL);
|
||||
if (qdev == NULL)
|
||||
return -ENOMEM;
|
||||
|
||||
dev->dev_private = qdev;
|
||||
|
||||
r = qxl_device_init(qdev, dev, dev->pdev, flags);
|
||||
if (r)
|
||||
goto out;
|
||||
|
||||
r = drm_vblank_init(dev, 1);
|
||||
if (r)
|
||||
goto unload;
|
||||
|
||||
r = qxl_modeset_init(qdev);
|
||||
if (r)
|
||||
goto unload;
|
||||
|
||||
drm_kms_helper_poll_init(qdev->ddev);
|
||||
|
||||
return 0;
|
||||
unload:
|
||||
qxl_driver_unload(dev);
|
||||
|
||||
out:
|
||||
kfree(qdev);
|
||||
return r;
|
||||
}
|
||||
|
||||
|
325
drivers/gpu/drm/qxl/qxl_object.c
Normal file
325
drivers/gpu/drm/qxl/qxl_object.c
Normal file
|
@ -0,0 +1,325 @@
|
|||
/*
|
||||
* Copyright 2013 Red Hat Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors: Dave Airlie
|
||||
* Alon Levy
|
||||
*/
|
||||
|
||||
#include "qxl_drv.h"
|
||||
#include "qxl_object.h"
|
||||
|
||||
#include <linux/io-mapping.h>
|
||||
/*
 * TTM destroy callback for qxl BOs: evict any hardware surface backing
 * the BO, drop it from the device's GEM object list, release the GEM
 * base and free the wrapper.
 */
static void qxl_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct qxl_bo *bo;
	struct qxl_device *qdev;

	bo = container_of(tbo, struct qxl_bo, tbo);
	qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;

	qxl_surface_evict(qdev, bo, false);
	mutex_lock(&qdev->gem.mutex);
	list_del_init(&bo->list);
	mutex_unlock(&qdev->gem.mutex);
	drm_gem_object_release(&bo->gem_base);
	kfree(bo);
}
|
||||
|
||||
bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo)
|
||||
{
|
||||
if (bo->destroy == &qxl_ttm_bo_destroy)
|
||||
return true;
|
||||
return false;
|
||||
}
|
||||
|
||||
/*
 * Fill qbo->placement from a qxl GEM domain.
 *
 * Maps QXL_GEM_DOMAIN_VRAM -> TTM_PL_VRAM, _SURFACE -> TTM_PL_PRIV0 and
 * _CPU -> TTM_PL_SYSTEM; anything else falls back to SYSTEM. When
 * @pinned, TTM_PL_FLAG_NO_EVICT is OR'd in. busy_placement aliases the
 * same array, and fpfn/lpfn are zeroed (no range restriction).
 */
void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned)
{
	u32 c = 0;
	u32 pflag = pinned ? TTM_PL_FLAG_NO_EVICT : 0;
	unsigned i;

	qbo->placement.placement = qbo->placements;
	qbo->placement.busy_placement = qbo->placements;
	if (domain == QXL_GEM_DOMAIN_VRAM)
		qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM | pflag;
	if (domain == QXL_GEM_DOMAIN_SURFACE)
		qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_PRIV0 | pflag;
	if (domain == QXL_GEM_DOMAIN_CPU)
		qbo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM | pflag;
	/* unknown domain: default to system memory */
	if (!c)
		qbo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
	qbo->placement.num_placement = c;
	qbo->placement.num_busy_placement = c;
	for (i = 0; i < c; ++i) {
		qbo->placements[i].fpfn = 0;
		qbo->placements[i].lpfn = 0;
	}
}
|
||||
|
||||
|
||||
/*
 * Allocate and initialize a qxl buffer object.
 *
 * @size is rounded up to a whole page. @kernel selects a kernel-only TTM
 * BO (not mmap-able by userspace). @pinned pre-sets pin_count to 1 and
 * requests NO_EVICT placement. @surf, if non-NULL, is copied into the BO
 * for later hardware-surface creation.
 *
 * Returns 0 and stores the new BO in *bo_ptr, or a negative errno
 * (*bo_ptr left NULL). On ttm_bo_init() failure the BO is already torn
 * down via the destroy callback, hence no kfree here.
 */
int qxl_bo_create(struct qxl_device *qdev,
		  unsigned long size, bool kernel, bool pinned, u32 domain,
		  struct qxl_surface *surf,
		  struct qxl_bo **bo_ptr)
{
	struct qxl_bo *bo;
	enum ttm_bo_type type;
	int r;

	if (kernel)
		type = ttm_bo_type_kernel;
	else
		type = ttm_bo_type_device;
	*bo_ptr = NULL;
	bo = kzalloc(sizeof(struct qxl_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	size = roundup(size, PAGE_SIZE);
	r = drm_gem_object_init(qdev->ddev, &bo->gem_base, size);
	if (unlikely(r)) {
		kfree(bo);
		return r;
	}
	bo->type = domain;
	bo->pin_count = pinned ? 1 : 0;
	bo->surface_id = 0;
	INIT_LIST_HEAD(&bo->list);

	if (surf)
		bo->surf = *surf;

	qxl_ttm_placement_from_domain(bo, domain, pinned);

	r = ttm_bo_init(&qdev->mman.bdev, &bo->tbo, size, type,
			&bo->placement, 0, !kernel, NULL, size,
			NULL, NULL, &qxl_ttm_bo_destroy);
	if (unlikely(r != 0)) {
		/* ttm_bo_init calls qxl_ttm_bo_destroy on failure */
		if (r != -ERESTARTSYS)
			dev_err(qdev->dev,
				"object_init failed for (%lu, 0x%08X)\n",
				size, domain);
		return r;
	}
	*bo_ptr = bo;
	return 0;
}
|
||||
|
||||
/*
 * Map the whole BO into kernel virtual space, caching the mapping in
 * bo->kptr. Subsequent calls return the cached pointer. @ptr may be
 * NULL if the caller only wants the mapping established.
 *
 * Returns 0 on success or a negative errno from ttm_bo_kmap().
 */
int qxl_bo_kmap(struct qxl_bo *bo, void **ptr)
{
	bool is_iomem;
	int r;

	if (bo->kptr) {
		/* already mapped - reuse the cached pointer */
		if (ptr)
			*ptr = bo->kptr;
		return 0;
	}
	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
	if (r)
		return r;
	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
	if (ptr)
		*ptr = bo->kptr;
	return 0;
}
|
||||
|
||||
/*
 * Map a single page of the BO for short, atomic access.
 *
 * For VRAM / surface-RAM placements this uses the BAR io_mapping and
 * returns an atomic (non-sleepable) mapping that must be released with
 * qxl_bo_kunmap_atomic_page(). Other placements fall back to a regular
 * whole-BO kmap. Returns NULL if the fallback kmap fails.
 *
 * NOTE(review): the fallback path scales page_offset by PAGE_SIZE while
 * the io_mapping path adds it to bus.offset unscaled — confirm the units
 * callers pass (page index vs byte offset) against io_mapping_map_atomic_wc.
 */
void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev,
			      struct qxl_bo *bo, int page_offset)
{
	struct ttm_mem_type_manager *man = &bo->tbo.bdev->man[bo->tbo.mem.mem_type];
	void *rptr;
	int ret;
	struct io_mapping *map;

	if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
		map = qdev->vram_mapping;
	else if (bo->tbo.mem.mem_type == TTM_PL_PRIV0)
		map = qdev->surface_mapping;
	else
		goto fallback;

	/* ensure mem.bus.offset is populated; reserve result is ignored
	 * (as in the original code) */
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bo->tbo.bdev, &bo->tbo.mem);
	ttm_mem_io_unlock(man);

	return io_mapping_map_atomic_wc(map, bo->tbo.mem.bus.offset + page_offset);
fallback:
	if (bo->kptr) {
		rptr = bo->kptr + (page_offset * PAGE_SIZE);
		return rptr;
	}

	ret = qxl_bo_kmap(bo, &rptr);
	if (ret)
		return NULL;

	rptr += page_offset * PAGE_SIZE;
	return rptr;
}
|
||||
|
||||
void qxl_bo_kunmap(struct qxl_bo *bo)
|
||||
{
|
||||
if (bo->kptr == NULL)
|
||||
return;
|
||||
bo->kptr = NULL;
|
||||
ttm_bo_kunmap(&bo->kmap);
|
||||
}
|
||||
|
||||
/*
 * Release a mapping obtained from qxl_bo_kmap_atomic_page().
 *
 * Mirrors its counterpart: VRAM / surface-RAM placements unmap the
 * atomic io_mapping and release the TTM io reservation; any other
 * placement falls back to dropping the whole-BO kmap.
 */
void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev,
			       struct qxl_bo *bo, void *pmap)
{
	struct ttm_mem_type_manager *man = &bo->tbo.bdev->man[bo->tbo.mem.mem_type];
	struct io_mapping *map;

	if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
		map = qdev->vram_mapping;
	else if (bo->tbo.mem.mem_type == TTM_PL_PRIV0)
		map = qdev->surface_mapping;
	else
		goto fallback;

	io_mapping_unmap_atomic(pmap);

	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(bo->tbo.bdev, &bo->tbo.mem);
	ttm_mem_io_unlock(man);
	return ;
fallback:
	qxl_bo_kunmap(bo);
}
|
||||
|
||||
/*
 * Drop a reference on *bo and NULL the caller's pointer when the
 * underlying TTM reference was released (ttm_bo_unref NULLs tbo after
 * dropping the ref). No-op when *bo is already NULL.
 */
void qxl_bo_unref(struct qxl_bo **bo)
{
	struct ttm_buffer_object *tbo;

	if ((*bo) == NULL)
		return;
	tbo = &((*bo)->tbo);
	ttm_bo_unref(&tbo);
	if (tbo == NULL)
		*bo = NULL;
}
|
||||
|
||||
/* Take an additional reference on @bo and return it for chaining. */
struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo)
{
	ttm_bo_reference(&bo->tbo);
	return bo;
}
|
||||
|
||||
/*
 * Pin @bo into @domain so it cannot be evicted; pins are counted, so an
 * already-pinned BO just gets its count bumped (the domain argument is
 * not re-validated in that case). @gpu_addr, if non-NULL, receives the
 * BO's GPU offset.
 *
 * Caller must hold the BO reservation. Returns 0 or a negative errno
 * from ttm_bo_validate().
 */
int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr)
{
	struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
	int r;

	if (bo->pin_count) {
		bo->pin_count++;
		if (gpu_addr)
			*gpu_addr = qxl_bo_gpu_offset(bo);
		return 0;
	}
	/* re-validate with NO_EVICT placement */
	qxl_ttm_placement_from_domain(bo, domain, true);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (likely(r == 0)) {
		bo->pin_count = 1;
		if (gpu_addr != NULL)
			*gpu_addr = qxl_bo_gpu_offset(bo);
	}
	if (unlikely(r != 0))
		dev_err(qdev->dev, "%p pin failed\n", bo);
	return r;
}
|
||||
|
||||
/*
 * Drop one pin on @bo; when the count reaches zero, clear NO_EVICT from
 * every placement and re-validate so TTM may evict the BO again.
 * Unpinning an unpinned BO only warns and returns 0.
 *
 * Caller must hold the BO reservation. Returns 0 or a negative errno
 * from ttm_bo_validate().
 */
int qxl_bo_unpin(struct qxl_bo *bo)
{
	struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
	int r, i;

	if (!bo->pin_count) {
		dev_warn(qdev->dev, "%p unpin not necessary\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;
	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (unlikely(r != 0))
		dev_err(qdev->dev, "%p validate failed for unpin\n", bo);
	return r;
}
|
||||
|
||||
/*
 * Driver-teardown safety net: forcibly drop every GEM object userspace
 * leaked. Logs each object, removes it from the device list and drops
 * the GEM reference (which in turn releases the TTM BO).
 */
void qxl_bo_force_delete(struct qxl_device *qdev)
{
	struct qxl_bo *bo, *n;

	if (list_empty(&qdev->gem.objects))
		return;
	dev_err(qdev->dev, "Userspace still has active objects !\n");
	list_for_each_entry_safe(bo, n, &qdev->gem.objects, list) {
		mutex_lock(&qdev->ddev->struct_mutex);
		dev_err(qdev->dev, "%p %p %lu %lu force free\n",
			&bo->gem_base, bo, (unsigned long)bo->gem_base.size,
			*((unsigned long *)&bo->gem_base.refcount));
		mutex_lock(&qdev->gem.mutex);
		list_del_init(&bo->list);
		mutex_unlock(&qdev->gem.mutex);
		/* this should unref the ttm bo */
		drm_gem_object_unreference(&bo->gem_base);
		mutex_unlock(&qdev->ddev->struct_mutex);
	}
}
|
||||
|
||||
/* Initialize the BO subsystem (thin wrapper around TTM setup). */
int qxl_bo_init(struct qxl_device *qdev)
{
	return qxl_ttm_init(qdev);
}
|
||||
|
||||
/* Tear down the BO subsystem (thin wrapper around TTM teardown). */
void qxl_bo_fini(struct qxl_device *qdev)
{
	qxl_ttm_fini(qdev);
}
|
||||
|
||||
/*
 * Lazily create the hardware surface for a surface-domain BO: allocate
 * a surface id and issue the hardware surface-alloc command the first
 * time the BO is used. Non-surface BOs (or ones that already have an
 * id) are left untouched.
 *
 * Returns 0 on success or a negative errno.
 */
int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo)
{
	int ret;
	if (bo->type == QXL_GEM_DOMAIN_SURFACE && bo->surface_id == 0) {
		/* allocate a surface id for this surface now */
		ret = qxl_surface_id_alloc(qdev, bo);
		if (ret)
			return ret;

		ret = qxl_hw_surface_alloc(qdev, bo, NULL);
		if (ret)
			return ret;
	}
	return 0;
}
|
||||
|
||||
/* Evict everything from the surface RAM (TTM_PL_PRIV0) memory domain. */
int qxl_surf_evict(struct qxl_device *qdev)
{
	return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_PRIV0);
}
|
||||
|
||||
/* Evict everything from the VRAM memory domain. */
int qxl_vram_evict(struct qxl_device *qdev)
{
	return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_VRAM);
}
|
103
drivers/gpu/drm/qxl/qxl_object.h
Normal file
103
drivers/gpu/drm/qxl/qxl_object.h
Normal file
|
@ -0,0 +1,103 @@
|
|||
/*
|
||||
* Copyright 2013 Red Hat Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors: Dave Airlie
|
||||
* Alon Levy
|
||||
*/
|
||||
#ifndef QXL_OBJECT_H
|
||||
#define QXL_OBJECT_H
|
||||
|
||||
#include "qxl_drv.h"
|
||||
|
||||
/*
 * Reserve (lock) the BO, interruptibly. With @no_wait, fail with -EBUSY
 * instead of blocking. Logs non-restart failures. Returns 0 or a
 * negative errno from ttm_bo_reserve().
 */
static inline int qxl_bo_reserve(struct qxl_bo *bo, bool no_wait)
{
	int r;

	r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, NULL);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS) {
			struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
			dev_err(qdev->dev, "%p reserve failed\n", bo);
		}
		return r;
	}
	return 0;
}
|
||||
|
||||
/* Release the reservation taken by qxl_bo_reserve(). */
static inline void qxl_bo_unreserve(struct qxl_bo *bo)
{
	ttm_bo_unreserve(&bo->tbo);
}
|
||||
|
||||
/* Current GPU-visible offset of the BO within its TTM memory domain. */
static inline u64 qxl_bo_gpu_offset(struct qxl_bo *bo)
{
	return bo->tbo.offset;
}
|
||||
|
||||
/* Size of the BO in bytes (page count from TTM, shifted to bytes). */
static inline unsigned long qxl_bo_size(struct qxl_bo *bo)
{
	return bo->tbo.num_pages << PAGE_SHIFT;
}
|
||||
|
||||
/* Fake offset userspace passes to mmap() to map this BO. */
static inline u64 qxl_bo_mmap_offset(struct qxl_bo *bo)
{
	return drm_vma_node_offset_addr(&bo->tbo.vma_node);
}
|
||||
|
||||
/*
 * Wait for all pending GPU work on the BO to finish.
 *
 * Reserves the BO, optionally reports its current memory type via
 * @mem_type, waits (interruptibly; -EBUSY instead of blocking when
 * @no_wait), then unreserves. Returns 0 or a negative errno.
 */
static inline int qxl_bo_wait(struct qxl_bo *bo, u32 *mem_type,
			      bool no_wait)
{
	int r;

	r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, NULL);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS) {
			struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
			dev_err(qdev->dev, "%p reserve failed for wait\n",
				bo);
		}
		return r;
	}
	/* sample mem_type while the BO is reserved */
	if (mem_type)
		*mem_type = bo->tbo.mem.mem_type;

	r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
	ttm_bo_unreserve(&bo->tbo);
	return r;
}
|
||||
|
||||
extern int qxl_bo_create(struct qxl_device *qdev,
|
||||
unsigned long size,
|
||||
bool kernel, bool pinned, u32 domain,
|
||||
struct qxl_surface *surf,
|
||||
struct qxl_bo **bo_ptr);
|
||||
extern int qxl_bo_kmap(struct qxl_bo *bo, void **ptr);
|
||||
extern void qxl_bo_kunmap(struct qxl_bo *bo);
|
||||
void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev, struct qxl_bo *bo, int page_offset);
|
||||
void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev, struct qxl_bo *bo, void *map);
|
||||
extern struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo);
|
||||
extern void qxl_bo_unref(struct qxl_bo **bo);
|
||||
extern int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr);
|
||||
extern int qxl_bo_unpin(struct qxl_bo *bo);
|
||||
extern void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned);
|
||||
extern bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo);
|
||||
|
||||
#endif
|
72
drivers/gpu/drm/qxl/qxl_prime.c
Normal file
72
drivers/gpu/drm/qxl/qxl_prime.c
Normal file
|
@ -0,0 +1,72 @@
|
|||
/*
|
||||
* Copyright 2014 Canonical
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors: Andreas Pokorny
|
||||
*/
|
||||
|
||||
#include "qxl_drv.h"
|
||||
|
||||
/* Empty Implementations as there should not be any other driver for a virtual
|
||||
* device that might share buffers with qxl */
|
||||
|
||||
/* PRIME pin stub: buffer sharing is not supported for this virtual GPU. */
int qxl_gem_prime_pin(struct drm_gem_object *obj)
{
	WARN_ONCE(1, "not implemented");
	return -ENOSYS;
}
|
||||
|
||||
/* PRIME unpin stub: buffer sharing is not supported. */
void qxl_gem_prime_unpin(struct drm_gem_object *obj)
{
	WARN_ONCE(1, "not implemented");
}
|
||||
|
||||
|
||||
/* PRIME export stub: always fails with -ENOSYS. */
struct sg_table *qxl_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	WARN_ONCE(1, "not implemented");
	return ERR_PTR(-ENOSYS);
}
|
||||
|
||||
/* PRIME import stub: always fails with -ENOSYS. */
struct drm_gem_object *qxl_gem_prime_import_sg_table(
	struct drm_device *dev, struct dma_buf_attachment *attach,
	struct sg_table *table)
{
	WARN_ONCE(1, "not implemented");
	return ERR_PTR(-ENOSYS);
}
|
||||
|
||||
/* PRIME vmap stub: always fails with -ENOSYS. */
void *qxl_gem_prime_vmap(struct drm_gem_object *obj)
{
	WARN_ONCE(1, "not implemented");
	return ERR_PTR(-ENOSYS);
}
|
||||
|
||||
/* PRIME vunmap stub: nothing was ever mapped. */
void qxl_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	WARN_ONCE(1, "not implemented");
}
|
||||
|
||||
int qxl_gem_prime_mmap(struct drm_gem_object *obj,
|
||||
struct vm_area_struct *area)
|
||||
{
|
||||
WARN_ONCE(1, "not implemented");
|
||||
return ENOSYS;
|
||||
}
|
475
drivers/gpu/drm/qxl/qxl_release.c
Normal file
475
drivers/gpu/drm/qxl/qxl_release.c
Normal file
|
@ -0,0 +1,475 @@
|
|||
/*
|
||||
* Copyright 2011 Red Hat, Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* on the rights to use, copy, modify, merge, publish, distribute, sub
|
||||
* license, and/or sell copies of the Software, and to permit persons to whom
|
||||
* the Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
|
||||
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
#include "qxl_drv.h"
|
||||
#include "qxl_object.h"
|
||||
#include <trace/events/fence.h>
|
||||
|
||||
/*
|
||||
* drawable cmd cache - allocate a bunch of VRAM pages, suballocate
|
||||
* into 256 byte chunks for now - gives 16 cmds per page.
|
||||
*
|
||||
* use an ida to index into the chunks?
|
||||
*/
|
||||
/* manage releaseables */
|
||||
/* stack them 16 high for now -drawable object is 191 */
|
||||
#define RELEASE_SIZE 256
|
||||
#define RELEASES_PER_BO (4096 / RELEASE_SIZE)
|
||||
/* put an alloc/dealloc surface cmd into one bo and round up to 128 */
|
||||
#define SURFACE_RELEASE_SIZE 128
|
||||
#define SURFACE_RELEASES_PER_BO (4096 / SURFACE_RELEASE_SIZE)
|
||||
|
||||
static const int release_size_per_bo[] = { RELEASE_SIZE, SURFACE_RELEASE_SIZE, RELEASE_SIZE };
|
||||
static const int releases_per_bo[] = { RELEASES_PER_BO, SURFACE_RELEASES_PER_BO, RELEASES_PER_BO };
|
||||
|
||||
/* fence_ops callback: driver name for all qxl fences. */
static const char *qxl_get_driver_name(struct fence *fence)
{
	return "qxl";
}
|
||||
|
||||
/* fence_ops callback: all qxl fences live on the "release" timeline. */
static const char *qxl_get_timeline_name(struct fence *fence)
{
	return "release";
}
|
||||
|
||||
/* fence_ops enable_signaling callback. */
static bool qxl_nop_signaling(struct fence *fence)
{
	/* fences are always automatically signaled, so just pretend we did this.. */
	return true;
}
|
||||
|
||||
/*
 * fence_ops wait callback for release fences.
 *
 * Releases are signaled by the host processing the release ring, so this
 * actively nudges the device (OOM notify + queued garbage collection)
 * and spins with backoff until the fence signals or @timeout (jiffies)
 * elapses. Drawable releases are retried indefinitely (with a warning
 * after 300 spins); other types give up after a few spins, matching the
 * old sync_obj_wait behavior.
 *
 * Returns remaining jiffies on success, 0 on timeout.
 * NOTE(review): @intr is ignored — the wait is never interruptible here.
 */
static long qxl_fence_wait(struct fence *fence, bool intr, signed long timeout)
{
	struct qxl_device *qdev;
	struct qxl_release *release;
	int count = 0, sc = 0;
	bool have_drawable_releases;
	unsigned long cur, end = jiffies + timeout;

	/* fence->lock is qdev->release_lock, so recover qdev from it */
	qdev = container_of(fence->lock, struct qxl_device, release_lock);
	release = container_of(fence, struct qxl_release, base);
	have_drawable_releases = release->type == QXL_RELEASE_DRAWABLE;

retry:
	sc++;

	if (fence_is_signaled(fence))
		goto signaled;

	/* prod the host into flushing releases */
	qxl_io_notify_oom(qdev);

	for (count = 0; count < 11; count++) {
		if (!qxl_queue_garbage_collect(qdev, true))
			break;

		if (fence_is_signaled(fence))
			goto signaled;
	}

	if (fence_is_signaled(fence))
		goto signaled;

	if (have_drawable_releases || sc < 4) {
		if (sc > 2)
			/* back off */
			usleep_range(500, 1000);

		if (time_after(jiffies, end))
			return 0;

		if (have_drawable_releases && sc > 300) {
			FENCE_WARN(fence, "failed to wait on release %d "
					  "after spincount %d\n",
				   fence->context & ~0xf0000000, sc);
			goto signaled;
		}
		goto retry;
	}
	/*
	 * yeah, original sync_obj_wait gave up after 3 spins when
	 * have_drawable_releases is not set.
	 */

signaled:
	cur = jiffies;
	if (time_after(cur, end))
		return 0;
	return end - cur;
}
|
||||
|
||||
/* Fence ops for qxl releases: signaling happens as the host consumes
 * the release ring, so enable_signaling is a no-op and wait polls. */
static const struct fence_ops qxl_fence_ops = {
	.get_driver_name = qxl_get_driver_name,
	.get_timeline_name = qxl_get_timeline_name,
	.enable_signaling = qxl_nop_signaling,
	.wait = qxl_fence_wait,
};
|
||||
|
||||
static uint64_t
|
||||
qxl_release_alloc(struct qxl_device *qdev, int type,
|
||||
struct qxl_release **ret)
|
||||
{
|
||||
struct qxl_release *release;
|
||||
int handle;
|
||||
size_t size = sizeof(*release);
|
||||
|
||||
release = kmalloc(size, GFP_KERNEL);
|
||||
if (!release) {
|
||||
DRM_ERROR("Out of memory\n");
|
||||
return 0;
|
||||
}
|
||||
release->base.ops = NULL;
|
||||
release->type = type;
|
||||
release->release_offset = 0;
|
||||
release->surface_release_id = 0;
|
||||
INIT_LIST_HEAD(&release->bos);
|
||||
|
||||
idr_preload(GFP_KERNEL);
|
||||
spin_lock(&qdev->release_idr_lock);
|
||||
handle = idr_alloc(&qdev->release_idr, release, 1, 0, GFP_NOWAIT);
|
||||
release->base.seqno = ++qdev->release_seqno;
|
||||
spin_unlock(&qdev->release_idr_lock);
|
||||
idr_preload_end();
|
||||
if (handle < 0) {
|
||||
kfree(release);
|
||||
*ret = NULL;
|
||||
return handle;
|
||||
}
|
||||
*ret = release;
|
||||
QXL_INFO(qdev, "allocated release %lld\n", handle);
|
||||
release->id = handle;
|
||||
return handle;
|
||||
}
|
||||
|
||||
/*
 * Drop every BO tracked by the release: unref each BO, unlink its
 * validation-list entry and free the entry.
 */
static void
qxl_release_free_list(struct qxl_release *release)
{
	while (!list_empty(&release->bos)) {
		struct qxl_bo_list *entry;
		struct qxl_bo *bo;

		entry = container_of(release->bos.next,
				     struct qxl_bo_list, tv.head);
		bo = to_qxl_bo(entry->tv.bo);
		/* drop the ref taken in qxl_release_list_add() */
		qxl_bo_unref(&bo);
		list_del(&entry->tv.head);
		kfree(entry);
	}
}
|
||||
|
||||
/*
 * Free a release: return its surface id (if any), remove it from the
 * idr and drop its BO list. If the embedded fence was initialized
 * (base.ops set), signal it and let the final fence_put free the
 * structure; otherwise kfree it directly.
 */
void
qxl_release_free(struct qxl_device *qdev,
		 struct qxl_release *release)
{
	QXL_INFO(qdev, "release %d, type %d\n", release->id,
		 release->type);

	if (release->surface_release_id)
		qxl_surface_id_dealloc(qdev, release->surface_release_id);

	spin_lock(&qdev->release_idr_lock);
	idr_remove(&qdev->release_idr, release->id);
	spin_unlock(&qdev->release_idr_lock);

	if (release->base.ops) {
		WARN_ON(list_empty(&release->bos));
		qxl_release_free_list(release);

		/* release memory is owned by the fence refcount now */
		fence_signal(&release->base);
		fence_put(&release->base);
	} else {
		qxl_release_free_list(release);
		kfree(release);
	}
}
|
||||
|
||||
static int qxl_release_bo_alloc(struct qxl_device *qdev,
|
||||
struct qxl_bo **bo)
|
||||
{
|
||||
int ret;
|
||||
/* pin releases bo's they are too messy to evict */
|
||||
ret = qxl_bo_create(qdev, PAGE_SIZE, false, true,
|
||||
QXL_GEM_DOMAIN_VRAM, NULL,
|
||||
bo);
|
||||
return ret;
|
||||
}
|
||||
|
||||
int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo)
|
||||
{
|
||||
struct qxl_bo_list *entry;
|
||||
|
||||
list_for_each_entry(entry, &release->bos, tv.head) {
|
||||
if (entry->tv.bo == &bo->tbo)
|
||||
return 0;
|
||||
}
|
||||
|
||||
entry = kmalloc(sizeof(struct qxl_bo_list), GFP_KERNEL);
|
||||
if (!entry)
|
||||
return -ENOMEM;
|
||||
|
||||
qxl_bo_ref(bo);
|
||||
entry->tv.bo = &bo->tbo;
|
||||
entry->tv.shared = false;
|
||||
list_add_tail(&entry->tv.head, &release->bos);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int qxl_release_validate_bo(struct qxl_bo *bo)
|
||||
{
|
||||
int ret;
|
||||
|
||||
if (!bo->pin_count) {
|
||||
qxl_ttm_placement_from_domain(bo, bo->type, false);
|
||||
ret = ttm_bo_validate(&bo->tbo, &bo->placement,
|
||||
true, false);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = reservation_object_reserve_shared(bo->tbo.resv);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* allocate a surface for reserved + validated buffers */
|
||||
ret = qxl_bo_check_id(bo->gem_base.dev->dev_private, bo);
|
||||
if (ret)
|
||||
return ret;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
 * Reserve and validate every BO tracked on @release.
 *
 * When the list holds only the release BO itself nothing needs reserving
 * (release BOs are pinned).  On any validation failure the whole
 * reservation is backed off before returning the error.
 */
int qxl_release_reserve_list(struct qxl_release *release, bool no_intr)
{
	struct qxl_bo_list *entry;
	int ret;

	if (list_is_singular(&release->bos))
		return 0;

	ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos, !no_intr);
	if (ret)
		return ret;

	list_for_each_entry(entry, &release->bos, tv.head) {
		ret = qxl_release_validate_bo(to_qxl_bo(entry->tv.bo));
		if (ret) {
			ttm_eu_backoff_reservation(&release->ticket,
						   &release->bos);
			return ret;
		}
	}
	return 0;
}
|
||||
|
||||
void qxl_release_backoff_reserve_list(struct qxl_release *release)
|
||||
{
|
||||
/* if only one object on the release its the release itself
|
||||
since these objects are pinned no need to reserve */
|
||||
if (list_is_singular(&release->bos))
|
||||
return;
|
||||
|
||||
ttm_eu_backoff_reservation(&release->ticket, &release->bos);
|
||||
}
|
||||
|
||||
|
||||
int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
|
||||
enum qxl_surface_cmd_type surface_cmd_type,
|
||||
struct qxl_release *create_rel,
|
||||
struct qxl_release **release)
|
||||
{
|
||||
if (surface_cmd_type == QXL_SURFACE_CMD_DESTROY && create_rel) {
|
||||
int idr_ret;
|
||||
struct qxl_bo_list *entry = list_first_entry(&create_rel->bos, struct qxl_bo_list, tv.head);
|
||||
struct qxl_bo *bo;
|
||||
union qxl_release_info *info;
|
||||
|
||||
/* stash the release after the create command */
|
||||
idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release);
|
||||
if (idr_ret < 0)
|
||||
return idr_ret;
|
||||
bo = qxl_bo_ref(to_qxl_bo(entry->tv.bo));
|
||||
|
||||
(*release)->release_offset = create_rel->release_offset + 64;
|
||||
|
||||
qxl_release_list_add(*release, bo);
|
||||
|
||||
info = qxl_release_map(qdev, *release);
|
||||
info->id = idr_ret;
|
||||
qxl_release_unmap(qdev, *release, info);
|
||||
|
||||
qxl_bo_unref(&bo);
|
||||
return 0;
|
||||
}
|
||||
|
||||
return qxl_alloc_release_reserved(qdev, sizeof(struct qxl_surface_cmd),
|
||||
QXL_RELEASE_SURFACE_CMD, release, NULL);
|
||||
}
|
||||
|
||||
int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
|
||||
int type, struct qxl_release **release,
|
||||
struct qxl_bo **rbo)
|
||||
{
|
||||
struct qxl_bo *bo;
|
||||
int idr_ret;
|
||||
int ret = 0;
|
||||
union qxl_release_info *info;
|
||||
int cur_idx;
|
||||
|
||||
if (type == QXL_RELEASE_DRAWABLE)
|
||||
cur_idx = 0;
|
||||
else if (type == QXL_RELEASE_SURFACE_CMD)
|
||||
cur_idx = 1;
|
||||
else if (type == QXL_RELEASE_CURSOR_CMD)
|
||||
cur_idx = 2;
|
||||
else {
|
||||
DRM_ERROR("got illegal type: %d\n", type);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
idr_ret = qxl_release_alloc(qdev, type, release);
|
||||
if (idr_ret < 0) {
|
||||
if (rbo)
|
||||
*rbo = NULL;
|
||||
return idr_ret;
|
||||
}
|
||||
|
||||
mutex_lock(&qdev->release_mutex);
|
||||
if (qdev->current_release_bo_offset[cur_idx] + 1 >= releases_per_bo[cur_idx]) {
|
||||
qxl_bo_unref(&qdev->current_release_bo[cur_idx]);
|
||||
qdev->current_release_bo_offset[cur_idx] = 0;
|
||||
qdev->current_release_bo[cur_idx] = NULL;
|
||||
}
|
||||
if (!qdev->current_release_bo[cur_idx]) {
|
||||
ret = qxl_release_bo_alloc(qdev, &qdev->current_release_bo[cur_idx]);
|
||||
if (ret) {
|
||||
mutex_unlock(&qdev->release_mutex);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
bo = qxl_bo_ref(qdev->current_release_bo[cur_idx]);
|
||||
|
||||
(*release)->release_offset = qdev->current_release_bo_offset[cur_idx] * release_size_per_bo[cur_idx];
|
||||
qdev->current_release_bo_offset[cur_idx]++;
|
||||
|
||||
if (rbo)
|
||||
*rbo = bo;
|
||||
|
||||
mutex_unlock(&qdev->release_mutex);
|
||||
|
||||
qxl_release_list_add(*release, bo);
|
||||
|
||||
info = qxl_release_map(qdev, *release);
|
||||
info->id = idr_ret;
|
||||
qxl_release_unmap(qdev, *release, info);
|
||||
|
||||
qxl_bo_unref(&bo);
|
||||
return ret;
|
||||
}
|
||||
|
||||
struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev,
|
||||
uint64_t id)
|
||||
{
|
||||
struct qxl_release *release;
|
||||
|
||||
spin_lock(&qdev->release_idr_lock);
|
||||
release = idr_find(&qdev->release_idr, id);
|
||||
spin_unlock(&qdev->release_idr_lock);
|
||||
if (!release) {
|
||||
DRM_ERROR("failed to find id in release_idr\n");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return release;
|
||||
}
|
||||
|
||||
union qxl_release_info *qxl_release_map(struct qxl_device *qdev,
|
||||
struct qxl_release *release)
|
||||
{
|
||||
void *ptr;
|
||||
union qxl_release_info *info;
|
||||
struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);
|
||||
struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);
|
||||
|
||||
ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_SIZE);
|
||||
if (!ptr)
|
||||
return NULL;
|
||||
info = ptr + (release->release_offset & ~PAGE_SIZE);
|
||||
return info;
|
||||
}
|
||||
|
||||
void qxl_release_unmap(struct qxl_device *qdev,
|
||||
struct qxl_release *release,
|
||||
union qxl_release_info *info)
|
||||
{
|
||||
struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);
|
||||
struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);
|
||||
void *ptr;
|
||||
|
||||
ptr = ((void *)info) - (release->release_offset & ~PAGE_SIZE);
|
||||
qxl_bo_kunmap_atomic_page(qdev, bo, ptr);
|
||||
}
|
||||
|
||||
void qxl_release_fence_buffer_objects(struct qxl_release *release)
|
||||
{
|
||||
struct ttm_buffer_object *bo;
|
||||
struct ttm_bo_global *glob;
|
||||
struct ttm_bo_device *bdev;
|
||||
struct ttm_bo_driver *driver;
|
||||
struct qxl_bo *qbo;
|
||||
struct ttm_validate_buffer *entry;
|
||||
struct qxl_device *qdev;
|
||||
|
||||
/* if only one object on the release its the release itself
|
||||
since these objects are pinned no need to reserve */
|
||||
if (list_is_singular(&release->bos) || list_empty(&release->bos))
|
||||
return;
|
||||
|
||||
bo = list_first_entry(&release->bos, struct ttm_validate_buffer, head)->bo;
|
||||
bdev = bo->bdev;
|
||||
qdev = container_of(bdev, struct qxl_device, mman.bdev);
|
||||
|
||||
/*
|
||||
* Since we never really allocated a context and we don't want to conflict,
|
||||
* set the highest bits. This will break if we really allow exporting of dma-bufs.
|
||||
*/
|
||||
fence_init(&release->base, &qxl_fence_ops, &qdev->release_lock,
|
||||
release->id | 0xf0000000, release->base.seqno);
|
||||
trace_fence_emit(&release->base);
|
||||
|
||||
driver = bdev->driver;
|
||||
glob = bo->glob;
|
||||
|
||||
spin_lock(&glob->lru_lock);
|
||||
|
||||
list_for_each_entry(entry, &release->bos, head) {
|
||||
bo = entry->bo;
|
||||
qbo = to_qxl_bo(bo);
|
||||
|
||||
reservation_object_add_shared_fence(bo->resv, &release->base);
|
||||
ttm_bo_add_to_lru(bo);
|
||||
__ttm_bo_unreserve(bo);
|
||||
}
|
||||
spin_unlock(&glob->lru_lock);
|
||||
ww_acquire_fini(&release->ticket);
|
||||
}
|
||||
|
489
drivers/gpu/drm/qxl/qxl_ttm.c
Normal file
489
drivers/gpu/drm/qxl/qxl_ttm.c
Normal file
|
@ -0,0 +1,489 @@
|
|||
/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alon Levy
 */
|
||||
|
||||
#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
#include <ttm/ttm_page_alloc.h>
#include <ttm/ttm_module.h>
#include <drm/drmP.h>
#include <drm/drm.h>
#include <drm/qxl_drm.h>
#include "qxl_drv.h"
#include "qxl_object.h"

#include <linux/delay.h>

/* defined at the bottom of this file; needed by qxl_ttm_init() */
static int qxl_ttm_debugfs_init(struct qxl_device *qdev);
|
||||
|
||||
static struct qxl_device *qxl_get_qdev(struct ttm_bo_device *bdev)
|
||||
{
|
||||
struct qxl_mman *mman;
|
||||
struct qxl_device *qdev;
|
||||
|
||||
mman = container_of(bdev, struct qxl_mman, bdev);
|
||||
qdev = container_of(mman, struct qxl_device, mman);
|
||||
return qdev;
|
||||
}
|
||||
|
||||
static int qxl_ttm_mem_global_init(struct drm_global_reference *ref)
|
||||
{
|
||||
return ttm_mem_global_init(ref->object);
|
||||
}
|
||||
|
||||
static void qxl_ttm_mem_global_release(struct drm_global_reference *ref)
|
||||
{
|
||||
ttm_mem_global_release(ref->object);
|
||||
}
|
||||
|
||||
static int qxl_ttm_global_init(struct qxl_device *qdev)
|
||||
{
|
||||
struct drm_global_reference *global_ref;
|
||||
int r;
|
||||
|
||||
qdev->mman.mem_global_referenced = false;
|
||||
global_ref = &qdev->mman.mem_global_ref;
|
||||
global_ref->global_type = DRM_GLOBAL_TTM_MEM;
|
||||
global_ref->size = sizeof(struct ttm_mem_global);
|
||||
global_ref->init = &qxl_ttm_mem_global_init;
|
||||
global_ref->release = &qxl_ttm_mem_global_release;
|
||||
|
||||
r = drm_global_item_ref(global_ref);
|
||||
if (r != 0) {
|
||||
DRM_ERROR("Failed setting up TTM memory accounting "
|
||||
"subsystem.\n");
|
||||
return r;
|
||||
}
|
||||
|
||||
qdev->mman.bo_global_ref.mem_glob =
|
||||
qdev->mman.mem_global_ref.object;
|
||||
global_ref = &qdev->mman.bo_global_ref.ref;
|
||||
global_ref->global_type = DRM_GLOBAL_TTM_BO;
|
||||
global_ref->size = sizeof(struct ttm_bo_global);
|
||||
global_ref->init = &ttm_bo_global_init;
|
||||
global_ref->release = &ttm_bo_global_release;
|
||||
r = drm_global_item_ref(global_ref);
|
||||
if (r != 0) {
|
||||
DRM_ERROR("Failed setting up TTM BO subsystem.\n");
|
||||
drm_global_item_unref(&qdev->mman.mem_global_ref);
|
||||
return r;
|
||||
}
|
||||
|
||||
qdev->mman.mem_global_referenced = true;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void qxl_ttm_global_fini(struct qxl_device *qdev)
|
||||
{
|
||||
if (qdev->mman.mem_global_referenced) {
|
||||
drm_global_item_unref(&qdev->mman.bo_global_ref.ref);
|
||||
drm_global_item_unref(&qdev->mman.mem_global_ref);
|
||||
qdev->mman.mem_global_referenced = false;
|
||||
}
|
||||
}
|
||||
|
||||
static struct vm_operations_struct qxl_ttm_vm_ops;
|
||||
static const struct vm_operations_struct *ttm_vm_ops;
|
||||
|
||||
static int qxl_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
|
||||
{
|
||||
struct ttm_buffer_object *bo;
|
||||
int r;
|
||||
|
||||
bo = (struct ttm_buffer_object *)vma->vm_private_data;
|
||||
if (bo == NULL)
|
||||
return VM_FAULT_NOPAGE;
|
||||
r = ttm_vm_ops->fault(vma, vmf);
|
||||
return r;
|
||||
}
|
||||
|
||||
int qxl_mmap(struct file *filp, struct vm_area_struct *vma)
|
||||
{
|
||||
struct drm_file *file_priv;
|
||||
struct qxl_device *qdev;
|
||||
int r;
|
||||
|
||||
if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
|
||||
pr_info("%s: vma->vm_pgoff (%ld) < DRM_FILE_PAGE_OFFSET\n",
|
||||
__func__, vma->vm_pgoff);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
file_priv = filp->private_data;
|
||||
qdev = file_priv->minor->dev->dev_private;
|
||||
if (qdev == NULL) {
|
||||
DRM_ERROR(
|
||||
"filp->private_data->minor->dev->dev_private == NULL\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
QXL_INFO(qdev, "%s: filp->private_data = 0x%p, vma->vm_pgoff = %lx\n",
|
||||
__func__, filp->private_data, vma->vm_pgoff);
|
||||
|
||||
r = ttm_bo_mmap(filp, vma, &qdev->mman.bdev);
|
||||
if (unlikely(r != 0))
|
||||
return r;
|
||||
if (unlikely(ttm_vm_ops == NULL)) {
|
||||
ttm_vm_ops = vma->vm_ops;
|
||||
qxl_ttm_vm_ops = *ttm_vm_ops;
|
||||
qxl_ttm_vm_ops.fault = &qxl_ttm_fault;
|
||||
}
|
||||
vma->vm_ops = &qxl_ttm_vm_ops;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* TTM hook; qxl has no GPU caches to invalidate. */
static int qxl_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	return 0;
}
|
||||
|
||||
/* Describe qxl's memory domains to TTM: plain system memory plus two
 * fixed, mappable "VRAM" apertures (main VRAM and the surface RAM,
 * exposed as TTM_PL_PRIV0). */
static int qxl_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
			     struct ttm_mem_type_manager *man)
{
	switch (type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
	case TTM_PL_PRIV0:
		/* "On-card" video ram */
		man->func = &ttm_bo_manager_func;
		man->gpu_offset = 0;
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}
|
||||
|
||||
static void qxl_evict_flags(struct ttm_buffer_object *bo,
|
||||
struct ttm_placement *placement)
|
||||
{
|
||||
struct qxl_bo *qbo;
|
||||
static struct ttm_place placements = {
|
||||
.fpfn = 0,
|
||||
.lpfn = 0,
|
||||
.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM
|
||||
};
|
||||
|
||||
if (!qxl_ttm_bo_is_qxl_bo(bo)) {
|
||||
placement->placement = &placements;
|
||||
placement->busy_placement = &placements;
|
||||
placement->num_placement = 1;
|
||||
placement->num_busy_placement = 1;
|
||||
return;
|
||||
}
|
||||
qbo = container_of(bo, struct qxl_bo, tbo);
|
||||
qxl_ttm_placement_from_domain(qbo, QXL_GEM_DOMAIN_CPU, false);
|
||||
*placement = qbo->placement;
|
||||
}
|
||||
|
||||
static int qxl_verify_access(struct ttm_buffer_object *bo, struct file *filp)
|
||||
{
|
||||
struct qxl_bo *qbo = to_qxl_bo(bo);
|
||||
|
||||
return drm_vma_node_verify_access(&qbo->gem_base.vma_node, filp);
|
||||
}
|
||||
|
||||
static int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
|
||||
struct ttm_mem_reg *mem)
|
||||
{
|
||||
struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
|
||||
struct qxl_device *qdev = qxl_get_qdev(bdev);
|
||||
|
||||
mem->bus.addr = NULL;
|
||||
mem->bus.offset = 0;
|
||||
mem->bus.size = mem->num_pages << PAGE_SHIFT;
|
||||
mem->bus.base = 0;
|
||||
mem->bus.is_iomem = false;
|
||||
if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
|
||||
return -EINVAL;
|
||||
switch (mem->mem_type) {
|
||||
case TTM_PL_SYSTEM:
|
||||
/* system memory */
|
||||
return 0;
|
||||
case TTM_PL_VRAM:
|
||||
mem->bus.is_iomem = true;
|
||||
mem->bus.base = qdev->vram_base;
|
||||
mem->bus.offset = mem->start << PAGE_SHIFT;
|
||||
break;
|
||||
case TTM_PL_PRIV0:
|
||||
mem->bus.is_iomem = true;
|
||||
mem->bus.base = qdev->surfaceram_base;
|
||||
mem->bus.offset = mem->start << PAGE_SHIFT;
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Nothing to undo for qxl's io_mem_reserve. */
static void qxl_ttm_io_mem_free(struct ttm_bo_device *bdev,
				struct ttm_mem_reg *mem)
{
}
|
||||
|
||||
/*
|
||||
* TTM backend functions.
|
||||
*/
|
||||
struct qxl_ttm_tt {
|
||||
struct ttm_dma_tt ttm;
|
||||
struct qxl_device *qdev;
|
||||
u64 offset;
|
||||
};
|
||||
|
||||
static int qxl_ttm_backend_bind(struct ttm_tt *ttm,
|
||||
struct ttm_mem_reg *bo_mem)
|
||||
{
|
||||
struct qxl_ttm_tt *gtt = (void *)ttm;
|
||||
|
||||
gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
|
||||
if (!ttm->num_pages) {
|
||||
WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
|
||||
ttm->num_pages, bo_mem, ttm);
|
||||
}
|
||||
/* Not implemented */
|
||||
return -1;
|
||||
}
|
||||
|
||||
/* GTT unbinding is likewise unsupported. */
static int qxl_ttm_backend_unbind(struct ttm_tt *ttm)
{
	/* Not implemented */
	return -1;
}
|
||||
|
||||
static void qxl_ttm_backend_destroy(struct ttm_tt *ttm)
|
||||
{
|
||||
struct qxl_ttm_tt *gtt = (void *)ttm;
|
||||
|
||||
ttm_dma_tt_fini(>t->ttm);
|
||||
kfree(gtt);
|
||||
}
|
||||
|
||||
static struct ttm_backend_func qxl_backend_func = {
|
||||
.bind = &qxl_ttm_backend_bind,
|
||||
.unbind = &qxl_ttm_backend_unbind,
|
||||
.destroy = &qxl_ttm_backend_destroy,
|
||||
};
|
||||
|
||||
static int qxl_ttm_tt_populate(struct ttm_tt *ttm)
|
||||
{
|
||||
int r;
|
||||
|
||||
if (ttm->state != tt_unpopulated)
|
||||
return 0;
|
||||
|
||||
r = ttm_pool_populate(ttm);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Return backing pages to the TTM page pool. */
static void qxl_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	ttm_pool_unpopulate(ttm);
}
|
||||
|
||||
/*
 * Allocate and initialise a qxl TTM backend object; returns the embedded
 * ttm_tt or NULL on failure.
 *
 * Fix: the source was corrupted by HTML-entity mangling — "&gtt" had
 * been reduced to ">t" in the ttm_dma_tt_init() call and the return
 * statement.  Restored to &gtt->ttm and &gtt->ttm.ttm.
 */
static struct ttm_tt *qxl_ttm_tt_create(struct ttm_bo_device *bdev,
					unsigned long size, uint32_t page_flags,
					struct page *dummy_read_page)
{
	struct qxl_device *qdev;
	struct qxl_ttm_tt *gtt;

	qdev = qxl_get_qdev(bdev);
	gtt = kzalloc(sizeof(struct qxl_ttm_tt), GFP_KERNEL);
	if (gtt == NULL)
		return NULL;
	gtt->ttm.ttm.func = &qxl_backend_func;
	gtt->qdev = qdev;
	if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags,
			    dummy_read_page)) {
		kfree(gtt);
		return NULL;
	}
	return &gtt->ttm.ttm;
}
|
||||
|
||||
static void qxl_move_null(struct ttm_buffer_object *bo,
|
||||
struct ttm_mem_reg *new_mem)
|
||||
{
|
||||
struct ttm_mem_reg *old_mem = &bo->mem;
|
||||
|
||||
BUG_ON(old_mem->mm_node != NULL);
|
||||
*old_mem = *new_mem;
|
||||
new_mem->mm_node = NULL;
|
||||
}
|
||||
|
||||
/* BO move: a system-memory BO with no ttm pages can be "moved" for free;
 * everything else is a plain memcpy move. */
static int qxl_bo_move(struct ttm_buffer_object *bo,
		       bool evict, bool interruptible,
		       bool no_wait_gpu,
		       struct ttm_mem_reg *new_mem)
{
	struct ttm_mem_reg *old_mem = &bo->mem;

	if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		qxl_move_null(bo, new_mem);
		return 0;
	}
	return ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
}
|
||||
|
||||
static void qxl_bo_move_notify(struct ttm_buffer_object *bo,
|
||||
struct ttm_mem_reg *new_mem)
|
||||
{
|
||||
struct qxl_bo *qbo;
|
||||
struct qxl_device *qdev;
|
||||
|
||||
if (!qxl_ttm_bo_is_qxl_bo(bo))
|
||||
return;
|
||||
qbo = container_of(bo, struct qxl_bo, tbo);
|
||||
qdev = qbo->gem_base.dev->dev_private;
|
||||
|
||||
if (bo->mem.mem_type == TTM_PL_PRIV0 && qbo->surface_id)
|
||||
qxl_surface_evict(qdev, qbo, new_mem ? true : false);
|
||||
}
|
||||
|
||||
static struct ttm_bo_driver qxl_bo_driver = {
|
||||
.ttm_tt_create = &qxl_ttm_tt_create,
|
||||
.ttm_tt_populate = &qxl_ttm_tt_populate,
|
||||
.ttm_tt_unpopulate = &qxl_ttm_tt_unpopulate,
|
||||
.invalidate_caches = &qxl_invalidate_caches,
|
||||
.init_mem_type = &qxl_init_mem_type,
|
||||
.evict_flags = &qxl_evict_flags,
|
||||
.move = &qxl_bo_move,
|
||||
.verify_access = &qxl_verify_access,
|
||||
.io_mem_reserve = &qxl_ttm_io_mem_reserve,
|
||||
.io_mem_free = &qxl_ttm_io_mem_free,
|
||||
.move_notify = &qxl_bo_move_notify,
|
||||
};
|
||||
|
||||
int qxl_ttm_init(struct qxl_device *qdev)
|
||||
{
|
||||
int r;
|
||||
int num_io_pages; /* != rom->num_io_pages, we include surface0 */
|
||||
|
||||
r = qxl_ttm_global_init(qdev);
|
||||
if (r)
|
||||
return r;
|
||||
/* No others user of address space so set it to 0 */
|
||||
r = ttm_bo_device_init(&qdev->mman.bdev,
|
||||
qdev->mman.bo_global_ref.ref.object,
|
||||
&qxl_bo_driver,
|
||||
qdev->ddev->anon_inode->i_mapping,
|
||||
DRM_FILE_PAGE_OFFSET, 0);
|
||||
if (r) {
|
||||
DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
|
||||
return r;
|
||||
}
|
||||
/* NOTE: this includes the framebuffer (aka surface 0) */
|
||||
num_io_pages = qdev->rom->ram_header_offset / PAGE_SIZE;
|
||||
r = ttm_bo_init_mm(&qdev->mman.bdev, TTM_PL_VRAM,
|
||||
num_io_pages);
|
||||
if (r) {
|
||||
DRM_ERROR("Failed initializing VRAM heap.\n");
|
||||
return r;
|
||||
}
|
||||
r = ttm_bo_init_mm(&qdev->mman.bdev, TTM_PL_PRIV0,
|
||||
qdev->surfaceram_size / PAGE_SIZE);
|
||||
if (r) {
|
||||
DRM_ERROR("Failed initializing Surfaces heap.\n");
|
||||
return r;
|
||||
}
|
||||
DRM_INFO("qxl: %uM of VRAM memory size\n",
|
||||
(unsigned)qdev->vram_size / (1024 * 1024));
|
||||
DRM_INFO("qxl: %luM of IO pages memory ready (VRAM domain)\n",
|
||||
((unsigned)num_io_pages * PAGE_SIZE) / (1024 * 1024));
|
||||
DRM_INFO("qxl: %uM of Surface memory size\n",
|
||||
(unsigned)qdev->surfaceram_size / (1024 * 1024));
|
||||
r = qxl_ttm_debugfs_init(qdev);
|
||||
if (r) {
|
||||
DRM_ERROR("Failed to init debugfs\n");
|
||||
return r;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
void qxl_ttm_fini(struct qxl_device *qdev)
|
||||
{
|
||||
ttm_bo_clean_mm(&qdev->mman.bdev, TTM_PL_VRAM);
|
||||
ttm_bo_clean_mm(&qdev->mman.bdev, TTM_PL_PRIV0);
|
||||
ttm_bo_device_release(&qdev->mman.bdev);
|
||||
qxl_ttm_global_fini(qdev);
|
||||
DRM_INFO("qxl: ttm finalized\n");
|
||||
}
|
||||
|
||||
|
||||
#define QXL_DEBUGFS_MEM_TYPES 2		/* VRAM mm + surface mm */

#if defined(CONFIG_DEBUG_FS)
/* debugfs show() callback: dump one drm_mm (passed via info_ent->data)
 * under the BO global's LRU lock. */
static int qxl_mm_dump_table(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_mm *mm = (struct drm_mm *)node->info_ent->data;
	struct drm_device *dev = node->minor->dev;
	struct qxl_device *rdev = dev->dev_private;
	struct ttm_bo_global *glob = rdev->mman.bdev.glob;
	int ret;

	spin_lock(&glob->lru_lock);
	ret = drm_mm_dump_table(m, mm);
	spin_unlock(&glob->lru_lock);
	return ret;
}
#endif
|
||||
|
||||
/* Register one debugfs dump-table entry per memory manager (VRAM and
 * surface RAM).  No-op when CONFIG_DEBUG_FS is off. */
static int qxl_ttm_debugfs_init(struct qxl_device *qdev)
{
#if defined(CONFIG_DEBUG_FS)
	static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES];
	static char qxl_mem_types_names[QXL_DEBUGFS_MEM_TYPES][32];
	unsigned i;

	for (i = 0; i < QXL_DEBUGFS_MEM_TYPES; i++) {
		/* index 0 is the VRAM manager, index 1 the surface manager */
		sprintf(qxl_mem_types_names[i],
			i == 0 ? "qxl_mem_mm" : "qxl_surf_mm");
		qxl_mem_types_list[i].name = qxl_mem_types_names[i];
		qxl_mem_types_list[i].show = &qxl_mm_dump_table;
		qxl_mem_types_list[i].driver_features = 0;
		qxl_mem_types_list[i].data = (i == 0) ?
			qdev->mman.bdev.man[TTM_PL_VRAM].priv :
			qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
	}
	return qxl_debugfs_add_files(qdev, qxl_mem_types_list, i);
#else
	return 0;
#endif
}
|
Loading…
Add table
Add a link
Reference in a new issue