mirror of
https://github.com/AetherDroid/android_kernel_samsung_on5xelte.git
synced 2025-10-30 07:38:52 +01:00
Fixed MTP to work with TWRP
This commit is contained in:
commit
f6dfaef42e
50820 changed files with 20846062 additions and 0 deletions
10
drivers/gpu/drm/ttm/Makefile
Normal file
10
drivers/gpu/drm/ttm/Makefile
Normal file
|
|
@ -0,0 +1,10 @@
|
|||
#
|
||||
# Makefile for the drm device driver. This driver provides support for the
|
||||
|
||||
ccflags-y := -Iinclude/drm
|
||||
ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \
|
||||
ttm_bo_util.o ttm_bo_vm.o ttm_module.o \
|
||||
ttm_object.o ttm_lock.o ttm_execbuf_util.o ttm_page_alloc.o \
|
||||
ttm_bo_manager.o ttm_page_alloc_dma.o
|
||||
|
||||
obj-$(CONFIG_DRM_TTM) += ttm.o
|
||||
152
drivers/gpu/drm/ttm/ttm_agp_backend.c
Normal file
152
drivers/gpu/drm/ttm/ttm_agp_backend.c
Normal file
|
|
@ -0,0 +1,152 @@
|
|||
/**************************************************************************
|
||||
*
|
||||
* Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the
|
||||
* "Software"), to deal in the Software without restriction, including
|
||||
* without limitation the rights to use, copy, modify, merge, publish,
|
||||
* distribute, sub license, and/or sell copies of the Software, and to
|
||||
* permit persons to whom the Software is furnished to do so, subject to
|
||||
* the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the
|
||||
* next paragraph) shall be included in all copies or substantial portions
|
||||
* of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
|
||||
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
|
||||
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
|
||||
* USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
**************************************************************************/
|
||||
/*
|
||||
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
|
||||
* Keith Packard.
|
||||
*/
|
||||
|
||||
#define pr_fmt(fmt) "[TTM] " fmt
|
||||
|
||||
#include <drm/ttm/ttm_module.h>
|
||||
#include <drm/ttm/ttm_bo_driver.h>
|
||||
#include <drm/ttm/ttm_page_alloc.h>
|
||||
#ifdef TTM_HAS_AGP
|
||||
#include <drm/ttm/ttm_placement.h>
|
||||
#include <linux/agp_backend.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/io.h>
|
||||
#include <asm/agp.h>
|
||||
|
||||
struct ttm_agp_backend {
|
||||
struct ttm_tt ttm;
|
||||
struct agp_memory *mem;
|
||||
struct agp_bridge_data *bridge;
|
||||
};
|
||||
|
||||
static int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
|
||||
{
|
||||
struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
|
||||
struct drm_mm_node *node = bo_mem->mm_node;
|
||||
struct agp_memory *mem;
|
||||
int ret, cached = (bo_mem->placement & TTM_PL_FLAG_CACHED);
|
||||
unsigned i;
|
||||
|
||||
mem = agp_allocate_memory(agp_be->bridge, ttm->num_pages, AGP_USER_MEMORY);
|
||||
if (unlikely(mem == NULL))
|
||||
return -ENOMEM;
|
||||
|
||||
mem->page_count = 0;
|
||||
for (i = 0; i < ttm->num_pages; i++) {
|
||||
struct page *page = ttm->pages[i];
|
||||
|
||||
if (!page)
|
||||
page = ttm->dummy_read_page;
|
||||
|
||||
mem->pages[mem->page_count++] = page;
|
||||
}
|
||||
agp_be->mem = mem;
|
||||
|
||||
mem->is_flushed = 1;
|
||||
mem->type = (cached) ? AGP_USER_CACHED_MEMORY : AGP_USER_MEMORY;
|
||||
|
||||
ret = agp_bind_memory(mem, node->start);
|
||||
if (ret)
|
||||
pr_err("AGP Bind memory failed\n");
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int ttm_agp_unbind(struct ttm_tt *ttm)
|
||||
{
|
||||
struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
|
||||
|
||||
if (agp_be->mem) {
|
||||
if (agp_be->mem->is_bound)
|
||||
return agp_unbind_memory(agp_be->mem);
|
||||
agp_free_memory(agp_be->mem);
|
||||
agp_be->mem = NULL;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void ttm_agp_destroy(struct ttm_tt *ttm)
|
||||
{
|
||||
struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
|
||||
|
||||
if (agp_be->mem)
|
||||
ttm_agp_unbind(ttm);
|
||||
ttm_tt_fini(ttm);
|
||||
kfree(agp_be);
|
||||
}
|
||||
|
||||
static struct ttm_backend_func ttm_agp_func = {
|
||||
.bind = ttm_agp_bind,
|
||||
.unbind = ttm_agp_unbind,
|
||||
.destroy = ttm_agp_destroy,
|
||||
};
|
||||
|
||||
struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev,
|
||||
struct agp_bridge_data *bridge,
|
||||
unsigned long size, uint32_t page_flags,
|
||||
struct page *dummy_read_page)
|
||||
{
|
||||
struct ttm_agp_backend *agp_be;
|
||||
|
||||
agp_be = kmalloc(sizeof(*agp_be), GFP_KERNEL);
|
||||
if (!agp_be)
|
||||
return NULL;
|
||||
|
||||
agp_be->mem = NULL;
|
||||
agp_be->bridge = bridge;
|
||||
agp_be->ttm.func = &ttm_agp_func;
|
||||
|
||||
if (ttm_tt_init(&agp_be->ttm, bdev, size, page_flags, dummy_read_page)) {
|
||||
kfree(agp_be);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return &agp_be->ttm;
|
||||
}
|
||||
EXPORT_SYMBOL(ttm_agp_tt_create);
|
||||
|
||||
int ttm_agp_tt_populate(struct ttm_tt *ttm)
|
||||
{
|
||||
if (ttm->state != tt_unpopulated)
|
||||
return 0;
|
||||
|
||||
return ttm_pool_populate(ttm);
|
||||
}
|
||||
EXPORT_SYMBOL(ttm_agp_tt_populate);
|
||||
|
||||
void ttm_agp_tt_unpopulate(struct ttm_tt *ttm)
|
||||
{
|
||||
ttm_pool_unpopulate(ttm);
|
||||
}
|
||||
EXPORT_SYMBOL(ttm_agp_tt_unpopulate);
|
||||
|
||||
#endif
|
||||
1730
drivers/gpu/drm/ttm/ttm_bo.c
Normal file
1730
drivers/gpu/drm/ttm/ttm_bo.c
Normal file
File diff suppressed because it is too large
Load diff
155
drivers/gpu/drm/ttm/ttm_bo_manager.c
Normal file
155
drivers/gpu/drm/ttm/ttm_bo_manager.c
Normal file
|
|
@ -0,0 +1,155 @@
|
|||
/**************************************************************************
|
||||
*
|
||||
* Copyright (c) 2007-2010 VMware, Inc., Palo Alto, CA., USA
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the
|
||||
* "Software"), to deal in the Software without restriction, including
|
||||
* without limitation the rights to use, copy, modify, merge, publish,
|
||||
* distribute, sub license, and/or sell copies of the Software, and to
|
||||
* permit persons to whom the Software is furnished to do so, subject to
|
||||
* the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the
|
||||
* next paragraph) shall be included in all copies or substantial portions
|
||||
* of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
|
||||
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
|
||||
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
|
||||
* USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
**************************************************************************/
|
||||
/*
|
||||
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
|
||||
*/
|
||||
|
||||
#include <drm/ttm/ttm_module.h>
|
||||
#include <drm/ttm/ttm_bo_driver.h>
|
||||
#include <drm/ttm/ttm_placement.h>
|
||||
#include <drm/drm_mm.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/module.h>
|
||||
|
||||
/**
|
||||
* Currently we use a spinlock for the lock, but a mutex *may* be
|
||||
* more appropriate to reduce scheduling latency if the range manager
|
||||
* ends up with very fragmented allocation patterns.
|
||||
*/
|
||||
|
||||
struct ttm_range_manager {
|
||||
struct drm_mm mm;
|
||||
spinlock_t lock;
|
||||
};
|
||||
|
||||
static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
|
||||
struct ttm_buffer_object *bo,
|
||||
const struct ttm_place *place,
|
||||
struct ttm_mem_reg *mem)
|
||||
{
|
||||
struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
|
||||
struct drm_mm *mm = &rman->mm;
|
||||
struct drm_mm_node *node = NULL;
|
||||
enum drm_mm_allocator_flags aflags = DRM_MM_CREATE_DEFAULT;
|
||||
unsigned long lpfn;
|
||||
int ret;
|
||||
|
||||
lpfn = place->lpfn;
|
||||
if (!lpfn)
|
||||
lpfn = man->size;
|
||||
|
||||
node = kzalloc(sizeof(*node), GFP_KERNEL);
|
||||
if (!node)
|
||||
return -ENOMEM;
|
||||
|
||||
if (place->flags & TTM_PL_FLAG_TOPDOWN)
|
||||
aflags = DRM_MM_CREATE_TOP;
|
||||
|
||||
spin_lock(&rman->lock);
|
||||
ret = drm_mm_insert_node_in_range_generic(mm, node, mem->num_pages,
|
||||
mem->page_alignment, 0,
|
||||
place->fpfn, lpfn,
|
||||
DRM_MM_SEARCH_BEST,
|
||||
aflags);
|
||||
spin_unlock(&rman->lock);
|
||||
|
||||
if (unlikely(ret)) {
|
||||
kfree(node);
|
||||
} else {
|
||||
mem->mm_node = node;
|
||||
mem->start = node->start;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
|
||||
struct ttm_mem_reg *mem)
|
||||
{
|
||||
struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
|
||||
|
||||
if (mem->mm_node) {
|
||||
spin_lock(&rman->lock);
|
||||
drm_mm_remove_node(mem->mm_node);
|
||||
spin_unlock(&rman->lock);
|
||||
|
||||
kfree(mem->mm_node);
|
||||
mem->mm_node = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
static int ttm_bo_man_init(struct ttm_mem_type_manager *man,
|
||||
unsigned long p_size)
|
||||
{
|
||||
struct ttm_range_manager *rman;
|
||||
|
||||
rman = kzalloc(sizeof(*rman), GFP_KERNEL);
|
||||
if (!rman)
|
||||
return -ENOMEM;
|
||||
|
||||
drm_mm_init(&rman->mm, 0, p_size);
|
||||
spin_lock_init(&rman->lock);
|
||||
man->priv = rman;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man)
|
||||
{
|
||||
struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
|
||||
struct drm_mm *mm = &rman->mm;
|
||||
|
||||
spin_lock(&rman->lock);
|
||||
if (drm_mm_clean(mm)) {
|
||||
drm_mm_takedown(mm);
|
||||
spin_unlock(&rman->lock);
|
||||
kfree(rman);
|
||||
man->priv = NULL;
|
||||
return 0;
|
||||
}
|
||||
spin_unlock(&rman->lock);
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
|
||||
const char *prefix)
|
||||
{
|
||||
struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
|
||||
|
||||
spin_lock(&rman->lock);
|
||||
drm_mm_debug_table(&rman->mm, prefix);
|
||||
spin_unlock(&rman->lock);
|
||||
}
|
||||
|
||||
const struct ttm_mem_type_manager_func ttm_bo_manager_func = {
|
||||
ttm_bo_man_init,
|
||||
ttm_bo_man_takedown,
|
||||
ttm_bo_man_get_node,
|
||||
ttm_bo_man_put_node,
|
||||
ttm_bo_man_debug
|
||||
};
|
||||
EXPORT_SYMBOL(ttm_bo_manager_func);
|
||||
695
drivers/gpu/drm/ttm/ttm_bo_util.c
Normal file
695
drivers/gpu/drm/ttm/ttm_bo_util.c
Normal file
|
|
@ -0,0 +1,695 @@
|
|||
/**************************************************************************
|
||||
*
|
||||
* Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the
|
||||
* "Software"), to deal in the Software without restriction, including
|
||||
* without limitation the rights to use, copy, modify, merge, publish,
|
||||
* distribute, sub license, and/or sell copies of the Software, and to
|
||||
* permit persons to whom the Software is furnished to do so, subject to
|
||||
* the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the
|
||||
* next paragraph) shall be included in all copies or substantial portions
|
||||
* of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
|
||||
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
|
||||
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
|
||||
* USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
**************************************************************************/
|
||||
/*
|
||||
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
|
||||
*/
|
||||
|
||||
#include <drm/ttm/ttm_bo_driver.h>
|
||||
#include <drm/ttm/ttm_placement.h>
|
||||
#include <drm/drm_vma_manager.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/highmem.h>
|
||||
#include <linux/wait.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/vmalloc.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/reservation.h>
|
||||
|
||||
void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
|
||||
{
|
||||
ttm_bo_mem_put(bo, &bo->mem);
|
||||
}
|
||||
|
||||
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
|
||||
bool evict,
|
||||
bool no_wait_gpu, struct ttm_mem_reg *new_mem)
|
||||
{
|
||||
struct ttm_tt *ttm = bo->ttm;
|
||||
struct ttm_mem_reg *old_mem = &bo->mem;
|
||||
int ret;
|
||||
|
||||
if (old_mem->mem_type != TTM_PL_SYSTEM) {
|
||||
ttm_tt_unbind(ttm);
|
||||
ttm_bo_free_old_node(bo);
|
||||
ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
|
||||
TTM_PL_MASK_MEM);
|
||||
old_mem->mem_type = TTM_PL_SYSTEM;
|
||||
}
|
||||
|
||||
ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
|
||||
if (unlikely(ret != 0))
|
||||
return ret;
|
||||
|
||||
if (new_mem->mem_type != TTM_PL_SYSTEM) {
|
||||
ret = ttm_tt_bind(ttm, new_mem);
|
||||
if (unlikely(ret != 0))
|
||||
return ret;
|
||||
}
|
||||
|
||||
*old_mem = *new_mem;
|
||||
new_mem->mm_node = NULL;
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(ttm_bo_move_ttm);
|
||||
|
||||
int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
|
||||
{
|
||||
if (likely(man->io_reserve_fastpath))
|
||||
return 0;
|
||||
|
||||
if (interruptible)
|
||||
return mutex_lock_interruptible(&man->io_reserve_mutex);
|
||||
|
||||
mutex_lock(&man->io_reserve_mutex);
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(ttm_mem_io_lock);
|
||||
|
||||
void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
|
||||
{
|
||||
if (likely(man->io_reserve_fastpath))
|
||||
return;
|
||||
|
||||
mutex_unlock(&man->io_reserve_mutex);
|
||||
}
|
||||
EXPORT_SYMBOL(ttm_mem_io_unlock);
|
||||
|
||||
static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
|
||||
{
|
||||
struct ttm_buffer_object *bo;
|
||||
|
||||
if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
|
||||
return -EAGAIN;
|
||||
|
||||
bo = list_first_entry(&man->io_reserve_lru,
|
||||
struct ttm_buffer_object,
|
||||
io_reserve_lru);
|
||||
list_del_init(&bo->io_reserve_lru);
|
||||
ttm_bo_unmap_virtual_locked(bo);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
|
||||
struct ttm_mem_reg *mem)
|
||||
{
|
||||
struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
|
||||
int ret = 0;
|
||||
|
||||
if (!bdev->driver->io_mem_reserve)
|
||||
return 0;
|
||||
if (likely(man->io_reserve_fastpath))
|
||||
return bdev->driver->io_mem_reserve(bdev, mem);
|
||||
|
||||
if (bdev->driver->io_mem_reserve &&
|
||||
mem->bus.io_reserved_count++ == 0) {
|
||||
retry:
|
||||
ret = bdev->driver->io_mem_reserve(bdev, mem);
|
||||
if (ret == -EAGAIN) {
|
||||
ret = ttm_mem_io_evict(man);
|
||||
if (ret == 0)
|
||||
goto retry;
|
||||
}
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL(ttm_mem_io_reserve);
|
||||
|
||||
void ttm_mem_io_free(struct ttm_bo_device *bdev,
|
||||
struct ttm_mem_reg *mem)
|
||||
{
|
||||
struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
|
||||
|
||||
if (likely(man->io_reserve_fastpath))
|
||||
return;
|
||||
|
||||
if (bdev->driver->io_mem_reserve &&
|
||||
--mem->bus.io_reserved_count == 0 &&
|
||||
bdev->driver->io_mem_free)
|
||||
bdev->driver->io_mem_free(bdev, mem);
|
||||
|
||||
}
|
||||
EXPORT_SYMBOL(ttm_mem_io_free);
|
||||
|
||||
int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
|
||||
{
|
||||
struct ttm_mem_reg *mem = &bo->mem;
|
||||
int ret;
|
||||
|
||||
if (!mem->bus.io_reserved_vm) {
|
||||
struct ttm_mem_type_manager *man =
|
||||
&bo->bdev->man[mem->mem_type];
|
||||
|
||||
ret = ttm_mem_io_reserve(bo->bdev, mem);
|
||||
if (unlikely(ret != 0))
|
||||
return ret;
|
||||
mem->bus.io_reserved_vm = true;
|
||||
if (man->use_io_reserve_lru)
|
||||
list_add_tail(&bo->io_reserve_lru,
|
||||
&man->io_reserve_lru);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
|
||||
{
|
||||
struct ttm_mem_reg *mem = &bo->mem;
|
||||
|
||||
if (mem->bus.io_reserved_vm) {
|
||||
mem->bus.io_reserved_vm = false;
|
||||
list_del_init(&bo->io_reserve_lru);
|
||||
ttm_mem_io_free(bo->bdev, mem);
|
||||
}
|
||||
}
|
||||
|
||||
static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
|
||||
void **virtual)
|
||||
{
|
||||
struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
|
||||
int ret;
|
||||
void *addr;
|
||||
|
||||
*virtual = NULL;
|
||||
(void) ttm_mem_io_lock(man, false);
|
||||
ret = ttm_mem_io_reserve(bdev, mem);
|
||||
ttm_mem_io_unlock(man);
|
||||
if (ret || !mem->bus.is_iomem)
|
||||
return ret;
|
||||
|
||||
if (mem->bus.addr) {
|
||||
addr = mem->bus.addr;
|
||||
} else {
|
||||
if (mem->placement & TTM_PL_FLAG_WC)
|
||||
addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
|
||||
else
|
||||
addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
|
||||
if (!addr) {
|
||||
(void) ttm_mem_io_lock(man, false);
|
||||
ttm_mem_io_free(bdev, mem);
|
||||
ttm_mem_io_unlock(man);
|
||||
return -ENOMEM;
|
||||
}
|
||||
}
|
||||
*virtual = addr;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
|
||||
void *virtual)
|
||||
{
|
||||
struct ttm_mem_type_manager *man;
|
||||
|
||||
man = &bdev->man[mem->mem_type];
|
||||
|
||||
if (virtual && mem->bus.addr == NULL)
|
||||
iounmap(virtual);
|
||||
(void) ttm_mem_io_lock(man, false);
|
||||
ttm_mem_io_free(bdev, mem);
|
||||
ttm_mem_io_unlock(man);
|
||||
}
|
||||
|
||||
static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
|
||||
{
|
||||
uint32_t *dstP =
|
||||
(uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
|
||||
uint32_t *srcP =
|
||||
(uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
|
||||
|
||||
int i;
|
||||
for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
|
||||
iowrite32(ioread32(srcP++), dstP++);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
|
||||
unsigned long page,
|
||||
pgprot_t prot)
|
||||
{
|
||||
struct page *d = ttm->pages[page];
|
||||
void *dst;
|
||||
|
||||
if (!d)
|
||||
return -ENOMEM;
|
||||
|
||||
src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
|
||||
|
||||
#ifdef CONFIG_X86
|
||||
dst = kmap_atomic_prot(d, prot);
|
||||
#else
|
||||
if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
|
||||
dst = vmap(&d, 1, 0, prot);
|
||||
else
|
||||
dst = kmap(d);
|
||||
#endif
|
||||
if (!dst)
|
||||
return -ENOMEM;
|
||||
|
||||
memcpy_fromio(dst, src, PAGE_SIZE);
|
||||
|
||||
#ifdef CONFIG_X86
|
||||
kunmap_atomic(dst);
|
||||
#else
|
||||
if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
|
||||
vunmap(dst);
|
||||
else
|
||||
kunmap(d);
|
||||
#endif
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
|
||||
unsigned long page,
|
||||
pgprot_t prot)
|
||||
{
|
||||
struct page *s = ttm->pages[page];
|
||||
void *src;
|
||||
|
||||
if (!s)
|
||||
return -ENOMEM;
|
||||
|
||||
dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
|
||||
#ifdef CONFIG_X86
|
||||
src = kmap_atomic_prot(s, prot);
|
||||
#else
|
||||
if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
|
||||
src = vmap(&s, 1, 0, prot);
|
||||
else
|
||||
src = kmap(s);
|
||||
#endif
|
||||
if (!src)
|
||||
return -ENOMEM;
|
||||
|
||||
memcpy_toio(dst, src, PAGE_SIZE);
|
||||
|
||||
#ifdef CONFIG_X86
|
||||
kunmap_atomic(src);
|
||||
#else
|
||||
if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
|
||||
vunmap(src);
|
||||
else
|
||||
kunmap(s);
|
||||
#endif
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
|
||||
bool evict, bool no_wait_gpu,
|
||||
struct ttm_mem_reg *new_mem)
|
||||
{
|
||||
struct ttm_bo_device *bdev = bo->bdev;
|
||||
struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
|
||||
struct ttm_tt *ttm = bo->ttm;
|
||||
struct ttm_mem_reg *old_mem = &bo->mem;
|
||||
struct ttm_mem_reg old_copy = *old_mem;
|
||||
void *old_iomap;
|
||||
void *new_iomap;
|
||||
int ret;
|
||||
unsigned long i;
|
||||
unsigned long page;
|
||||
unsigned long add = 0;
|
||||
int dir;
|
||||
|
||||
ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
|
||||
if (ret)
|
||||
return ret;
|
||||
ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
/*
|
||||
* Single TTM move. NOP.
|
||||
*/
|
||||
if (old_iomap == NULL && new_iomap == NULL)
|
||||
goto out2;
|
||||
|
||||
/*
|
||||
* Don't move nonexistent data. Clear destination instead.
|
||||
*/
|
||||
if (old_iomap == NULL &&
|
||||
(ttm == NULL || (ttm->state == tt_unpopulated &&
|
||||
!(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
|
||||
memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
|
||||
goto out2;
|
||||
}
|
||||
|
||||
/*
|
||||
* TTM might be null for moves within the same region.
|
||||
*/
|
||||
if (ttm && ttm->state == tt_unpopulated) {
|
||||
ret = ttm->bdev->driver->ttm_tt_populate(ttm);
|
||||
if (ret)
|
||||
goto out1;
|
||||
}
|
||||
|
||||
add = 0;
|
||||
dir = 1;
|
||||
|
||||
if ((old_mem->mem_type == new_mem->mem_type) &&
|
||||
(new_mem->start < old_mem->start + old_mem->size)) {
|
||||
dir = -1;
|
||||
add = new_mem->num_pages - 1;
|
||||
}
|
||||
|
||||
for (i = 0; i < new_mem->num_pages; ++i) {
|
||||
page = i * dir + add;
|
||||
if (old_iomap == NULL) {
|
||||
pgprot_t prot = ttm_io_prot(old_mem->placement,
|
||||
PAGE_KERNEL);
|
||||
ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
|
||||
prot);
|
||||
} else if (new_iomap == NULL) {
|
||||
pgprot_t prot = ttm_io_prot(new_mem->placement,
|
||||
PAGE_KERNEL);
|
||||
ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
|
||||
prot);
|
||||
} else
|
||||
ret = ttm_copy_io_page(new_iomap, old_iomap, page);
|
||||
if (ret)
|
||||
goto out1;
|
||||
}
|
||||
mb();
|
||||
out2:
|
||||
old_copy = *old_mem;
|
||||
*old_mem = *new_mem;
|
||||
new_mem->mm_node = NULL;
|
||||
|
||||
if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
|
||||
ttm_tt_unbind(ttm);
|
||||
ttm_tt_destroy(ttm);
|
||||
bo->ttm = NULL;
|
||||
}
|
||||
|
||||
out1:
|
||||
ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
|
||||
out:
|
||||
ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
|
||||
|
||||
/*
|
||||
* On error, keep the mm node!
|
||||
*/
|
||||
if (!ret)
|
||||
ttm_bo_mem_put(bo, &old_copy);
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL(ttm_bo_move_memcpy);
|
||||
|
||||
static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
|
||||
{
|
||||
kfree(bo);
|
||||
}
|
||||
|
||||
/**
|
||||
* ttm_buffer_object_transfer
|
||||
*
|
||||
* @bo: A pointer to a struct ttm_buffer_object.
|
||||
* @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
|
||||
* holding the data of @bo with the old placement.
|
||||
*
|
||||
* This is a utility function that may be called after an accelerated move
|
||||
* has been scheduled. A new buffer object is created as a placeholder for
|
||||
* the old data while it's being copied. When that buffer object is idle,
|
||||
* it can be destroyed, releasing the space of the old placement.
|
||||
* Returns:
|
||||
* !0: Failure.
|
||||
*/
|
||||
|
||||
static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
|
||||
struct ttm_buffer_object **new_obj)
|
||||
{
|
||||
struct ttm_buffer_object *fbo;
|
||||
int ret;
|
||||
|
||||
fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
|
||||
if (!fbo)
|
||||
return -ENOMEM;
|
||||
|
||||
*fbo = *bo;
|
||||
|
||||
/**
|
||||
* Fix up members that we shouldn't copy directly:
|
||||
* TODO: Explicit member copy would probably be better here.
|
||||
*/
|
||||
|
||||
INIT_LIST_HEAD(&fbo->ddestroy);
|
||||
INIT_LIST_HEAD(&fbo->lru);
|
||||
INIT_LIST_HEAD(&fbo->swap);
|
||||
INIT_LIST_HEAD(&fbo->io_reserve_lru);
|
||||
drm_vma_node_reset(&fbo->vma_node);
|
||||
atomic_set(&fbo->cpu_writers, 0);
|
||||
|
||||
kref_init(&fbo->list_kref);
|
||||
kref_init(&fbo->kref);
|
||||
fbo->destroy = &ttm_transfered_destroy;
|
||||
fbo->acc_size = 0;
|
||||
fbo->resv = &fbo->ttm_resv;
|
||||
reservation_object_init(fbo->resv);
|
||||
ret = ww_mutex_trylock(&fbo->resv->lock);
|
||||
WARN_ON(!ret);
|
||||
|
||||
*new_obj = fbo;
|
||||
return 0;
|
||||
}
|
||||
|
||||
pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
|
||||
{
|
||||
/* Cached mappings need no adjustment */
|
||||
if (caching_flags & TTM_PL_FLAG_CACHED)
|
||||
return tmp;
|
||||
|
||||
#if defined(__i386__) || defined(__x86_64__)
|
||||
if (caching_flags & TTM_PL_FLAG_WC)
|
||||
tmp = pgprot_writecombine(tmp);
|
||||
else if (boot_cpu_data.x86 > 3)
|
||||
tmp = pgprot_noncached(tmp);
|
||||
#endif
|
||||
#if defined(__ia64__) || defined(__arm__) || defined(__powerpc__)
|
||||
if (caching_flags & TTM_PL_FLAG_WC)
|
||||
tmp = pgprot_writecombine(tmp);
|
||||
else
|
||||
tmp = pgprot_noncached(tmp);
|
||||
#endif
|
||||
#if defined(__sparc__) || defined(__mips__)
|
||||
tmp = pgprot_noncached(tmp);
|
||||
#endif
|
||||
return tmp;
|
||||
}
|
||||
EXPORT_SYMBOL(ttm_io_prot);
|
||||
|
||||
static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
|
||||
unsigned long offset,
|
||||
unsigned long size,
|
||||
struct ttm_bo_kmap_obj *map)
|
||||
{
|
||||
struct ttm_mem_reg *mem = &bo->mem;
|
||||
|
||||
if (bo->mem.bus.addr) {
|
||||
map->bo_kmap_type = ttm_bo_map_premapped;
|
||||
map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
|
||||
} else {
|
||||
map->bo_kmap_type = ttm_bo_map_iomap;
|
||||
if (mem->placement & TTM_PL_FLAG_WC)
|
||||
map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
|
||||
size);
|
||||
else
|
||||
map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
|
||||
size);
|
||||
}
|
||||
return (!map->virtual) ? -ENOMEM : 0;
|
||||
}
|
||||
|
||||
static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
|
||||
unsigned long start_page,
|
||||
unsigned long num_pages,
|
||||
struct ttm_bo_kmap_obj *map)
|
||||
{
|
||||
struct ttm_mem_reg *mem = &bo->mem; pgprot_t prot;
|
||||
struct ttm_tt *ttm = bo->ttm;
|
||||
int ret;
|
||||
|
||||
BUG_ON(!ttm);
|
||||
|
||||
if (ttm->state == tt_unpopulated) {
|
||||
ret = ttm->bdev->driver->ttm_tt_populate(ttm);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
|
||||
/*
|
||||
* We're mapping a single page, and the desired
|
||||
* page protection is consistent with the bo.
|
||||
*/
|
||||
|
||||
map->bo_kmap_type = ttm_bo_map_kmap;
|
||||
map->page = ttm->pages[start_page];
|
||||
map->virtual = kmap(map->page);
|
||||
} else {
|
||||
/*
|
||||
* We need to use vmap to get the desired page protection
|
||||
* or to make the buffer object look contiguous.
|
||||
*/
|
||||
prot = ttm_io_prot(mem->placement, PAGE_KERNEL);
|
||||
map->bo_kmap_type = ttm_bo_map_vmap;
|
||||
map->virtual = vmap(ttm->pages + start_page, num_pages,
|
||||
0, prot);
|
||||
}
|
||||
return (!map->virtual) ? -ENOMEM : 0;
|
||||
}
|
||||
|
||||
int ttm_bo_kmap(struct ttm_buffer_object *bo,
|
||||
unsigned long start_page, unsigned long num_pages,
|
||||
struct ttm_bo_kmap_obj *map)
|
||||
{
|
||||
struct ttm_mem_type_manager *man =
|
||||
&bo->bdev->man[bo->mem.mem_type];
|
||||
unsigned long offset, size;
|
||||
int ret;
|
||||
|
||||
BUG_ON(!list_empty(&bo->swap));
|
||||
map->virtual = NULL;
|
||||
map->bo = bo;
|
||||
if (num_pages > bo->num_pages)
|
||||
return -EINVAL;
|
||||
if (start_page > bo->num_pages)
|
||||
return -EINVAL;
|
||||
#if 0
|
||||
if (num_pages > 1 && !capable(CAP_SYS_ADMIN))
|
||||
return -EPERM;
|
||||
#endif
|
||||
(void) ttm_mem_io_lock(man, false);
|
||||
ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
|
||||
ttm_mem_io_unlock(man);
|
||||
if (ret)
|
||||
return ret;
|
||||
if (!bo->mem.bus.is_iomem) {
|
||||
return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
|
||||
} else {
|
||||
offset = start_page << PAGE_SHIFT;
|
||||
size = num_pages << PAGE_SHIFT;
|
||||
return ttm_bo_ioremap(bo, offset, size, map);
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL(ttm_bo_kmap);
|
||||
|
||||
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
|
||||
{
|
||||
struct ttm_buffer_object *bo = map->bo;
|
||||
struct ttm_mem_type_manager *man =
|
||||
&bo->bdev->man[bo->mem.mem_type];
|
||||
|
||||
if (!map->virtual)
|
||||
return;
|
||||
switch (map->bo_kmap_type) {
|
||||
case ttm_bo_map_iomap:
|
||||
iounmap(map->virtual);
|
||||
break;
|
||||
case ttm_bo_map_vmap:
|
||||
vunmap(map->virtual);
|
||||
break;
|
||||
case ttm_bo_map_kmap:
|
||||
kunmap(map->page);
|
||||
break;
|
||||
case ttm_bo_map_premapped:
|
||||
break;
|
||||
default:
|
||||
BUG();
|
||||
}
|
||||
(void) ttm_mem_io_lock(man, false);
|
||||
ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
|
||||
ttm_mem_io_unlock(man);
|
||||
map->virtual = NULL;
|
||||
map->page = NULL;
|
||||
}
|
||||
EXPORT_SYMBOL(ttm_bo_kunmap);
|
||||
|
||||
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
|
||||
struct fence *fence,
|
||||
bool evict,
|
||||
bool no_wait_gpu,
|
||||
struct ttm_mem_reg *new_mem)
|
||||
{
|
||||
struct ttm_bo_device *bdev = bo->bdev;
|
||||
struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
|
||||
struct ttm_mem_reg *old_mem = &bo->mem;
|
||||
int ret;
|
||||
struct ttm_buffer_object *ghost_obj;
|
||||
|
||||
reservation_object_add_excl_fence(bo->resv, fence);
|
||||
if (evict) {
|
||||
ret = ttm_bo_wait(bo, false, false, false);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
|
||||
(bo->ttm != NULL)) {
|
||||
ttm_tt_unbind(bo->ttm);
|
||||
ttm_tt_destroy(bo->ttm);
|
||||
bo->ttm = NULL;
|
||||
}
|
||||
ttm_bo_free_old_node(bo);
|
||||
} else {
|
||||
/**
|
||||
* This should help pipeline ordinary buffer moves.
|
||||
*
|
||||
* Hang old buffer memory on a new buffer object,
|
||||
* and leave it to be released when the GPU
|
||||
* operation has completed.
|
||||
*/
|
||||
|
||||
set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
|
||||
|
||||
ret = ttm_buffer_object_transfer(bo, &ghost_obj);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
reservation_object_add_excl_fence(ghost_obj->resv, fence);
|
||||
|
||||
/**
|
||||
* If we're not moving to fixed memory, the TTM object
|
||||
* needs to stay alive. Otherwhise hang it on the ghost
|
||||
* bo to be unbound and destroyed.
|
||||
*/
|
||||
|
||||
if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
|
||||
ghost_obj->ttm = NULL;
|
||||
else
|
||||
bo->ttm = NULL;
|
||||
|
||||
ttm_bo_unreserve(ghost_obj);
|
||||
ttm_bo_unref(&ghost_obj);
|
||||
}
|
||||
|
||||
*old_mem = *new_mem;
|
||||
new_mem->mm_node = NULL;
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
|
||||
364
drivers/gpu/drm/ttm/ttm_bo_vm.c
Normal file
364
drivers/gpu/drm/ttm/ttm_bo_vm.c
Normal file
|
|
@ -0,0 +1,364 @@
|
|||
/**************************************************************************
|
||||
*
|
||||
* Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the
|
||||
* "Software"), to deal in the Software without restriction, including
|
||||
* without limitation the rights to use, copy, modify, merge, publish,
|
||||
* distribute, sub license, and/or sell copies of the Software, and to
|
||||
* permit persons to whom the Software is furnished to do so, subject to
|
||||
* the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the
|
||||
* next paragraph) shall be included in all copies or substantial portions
|
||||
* of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
|
||||
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
|
||||
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
|
||||
* USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
**************************************************************************/
|
||||
/*
|
||||
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
|
||||
*/
|
||||
|
||||
#define pr_fmt(fmt) "[TTM] " fmt
|
||||
|
||||
#include <ttm/ttm_module.h>
|
||||
#include <ttm/ttm_bo_driver.h>
|
||||
#include <ttm/ttm_placement.h>
|
||||
#include <drm/drm_vma_manager.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/rbtree.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/uaccess.h>
|
||||
|
||||
#define TTM_BO_VM_NUM_PREFAULT 16
|
||||
|
||||
static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
|
||||
struct vm_area_struct *vma,
|
||||
struct vm_fault *vmf)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
if (likely(!test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)))
|
||||
goto out_unlock;
|
||||
|
||||
/*
|
||||
* Quick non-stalling check for idle.
|
||||
*/
|
||||
ret = ttm_bo_wait(bo, false, false, true);
|
||||
if (likely(ret == 0))
|
||||
goto out_unlock;
|
||||
|
||||
/*
|
||||
* If possible, avoid waiting for GPU with mmap_sem
|
||||
* held.
|
||||
*/
|
||||
if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
|
||||
ret = VM_FAULT_RETRY;
|
||||
if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
|
||||
goto out_unlock;
|
||||
|
||||
up_read(&vma->vm_mm->mmap_sem);
|
||||
(void) ttm_bo_wait(bo, false, true, false);
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
/*
|
||||
* Ordinary wait.
|
||||
*/
|
||||
ret = ttm_bo_wait(bo, false, true, false);
|
||||
if (unlikely(ret != 0))
|
||||
ret = (ret != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
|
||||
VM_FAULT_NOPAGE;
|
||||
|
||||
out_unlock:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
|
||||
{
|
||||
struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
|
||||
vma->vm_private_data;
|
||||
struct ttm_bo_device *bdev = bo->bdev;
|
||||
unsigned long page_offset;
|
||||
unsigned long page_last;
|
||||
unsigned long pfn;
|
||||
struct ttm_tt *ttm = NULL;
|
||||
struct page *page;
|
||||
int ret;
|
||||
int i;
|
||||
unsigned long address = (unsigned long)vmf->virtual_address;
|
||||
int retval = VM_FAULT_NOPAGE;
|
||||
struct ttm_mem_type_manager *man =
|
||||
&bdev->man[bo->mem.mem_type];
|
||||
struct vm_area_struct cvma;
|
||||
|
||||
/*
|
||||
* Work around locking order reversal in fault / nopfn
|
||||
* between mmap_sem and bo_reserve: Perform a trylock operation
|
||||
* for reserve, and if it fails, retry the fault after waiting
|
||||
* for the buffer to become unreserved.
|
||||
*/
|
||||
ret = ttm_bo_reserve(bo, true, true, false, NULL);
|
||||
if (unlikely(ret != 0)) {
|
||||
if (ret != -EBUSY)
|
||||
return VM_FAULT_NOPAGE;
|
||||
|
||||
if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
|
||||
if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
|
||||
up_read(&vma->vm_mm->mmap_sem);
|
||||
(void) ttm_bo_wait_unreserved(bo);
|
||||
}
|
||||
|
||||
return VM_FAULT_RETRY;
|
||||
}
|
||||
|
||||
/*
|
||||
* If we'd want to change locking order to
|
||||
* mmap_sem -> bo::reserve, we'd use a blocking reserve here
|
||||
* instead of retrying the fault...
|
||||
*/
|
||||
return VM_FAULT_NOPAGE;
|
||||
}
|
||||
|
||||
/*
|
||||
* Refuse to fault imported pages. This should be handled
|
||||
* (if at all) by redirecting mmap to the exporter.
|
||||
*/
|
||||
if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) {
|
||||
retval = VM_FAULT_SIGBUS;
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
if (bdev->driver->fault_reserve_notify) {
|
||||
ret = bdev->driver->fault_reserve_notify(bo);
|
||||
switch (ret) {
|
||||
case 0:
|
||||
break;
|
||||
case -EBUSY:
|
||||
case -ERESTARTSYS:
|
||||
retval = VM_FAULT_NOPAGE;
|
||||
goto out_unlock;
|
||||
default:
|
||||
retval = VM_FAULT_SIGBUS;
|
||||
goto out_unlock;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Wait for buffer data in transit, due to a pipelined
|
||||
* move.
|
||||
*/
|
||||
ret = ttm_bo_vm_fault_idle(bo, vma, vmf);
|
||||
if (unlikely(ret != 0)) {
|
||||
retval = ret;
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
ret = ttm_mem_io_lock(man, true);
|
||||
if (unlikely(ret != 0)) {
|
||||
retval = VM_FAULT_NOPAGE;
|
||||
goto out_unlock;
|
||||
}
|
||||
ret = ttm_mem_io_reserve_vm(bo);
|
||||
if (unlikely(ret != 0)) {
|
||||
retval = VM_FAULT_SIGBUS;
|
||||
goto out_io_unlock;
|
||||
}
|
||||
|
||||
page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
|
||||
vma->vm_pgoff - drm_vma_node_start(&bo->vma_node);
|
||||
page_last = vma_pages(vma) + vma->vm_pgoff -
|
||||
drm_vma_node_start(&bo->vma_node);
|
||||
|
||||
if (unlikely(page_offset >= bo->num_pages)) {
|
||||
retval = VM_FAULT_SIGBUS;
|
||||
goto out_io_unlock;
|
||||
}
|
||||
|
||||
/*
|
||||
* Make a local vma copy to modify the page_prot member
|
||||
* and vm_flags if necessary. The vma parameter is protected
|
||||
* by mmap_sem in write mode.
|
||||
*/
|
||||
cvma = *vma;
|
||||
cvma.vm_page_prot = vm_get_page_prot(cvma.vm_flags);
|
||||
|
||||
if (bo->mem.bus.is_iomem) {
|
||||
cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
|
||||
cvma.vm_page_prot);
|
||||
} else {
|
||||
ttm = bo->ttm;
|
||||
cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
|
||||
cvma.vm_page_prot);
|
||||
|
||||
/* Allocate all page at once, most common usage */
|
||||
if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
|
||||
retval = VM_FAULT_OOM;
|
||||
goto out_io_unlock;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Speculatively prefault a number of pages. Only error on
|
||||
* first page.
|
||||
*/
|
||||
for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
|
||||
if (bo->mem.bus.is_iomem)
|
||||
pfn = ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT) + page_offset;
|
||||
else {
|
||||
page = ttm->pages[page_offset];
|
||||
if (unlikely(!page && i == 0)) {
|
||||
retval = VM_FAULT_OOM;
|
||||
goto out_io_unlock;
|
||||
} else if (unlikely(!page)) {
|
||||
break;
|
||||
}
|
||||
page->mapping = vma->vm_file->f_mapping;
|
||||
page->index = drm_vma_node_start(&bo->vma_node) +
|
||||
page_offset;
|
||||
pfn = page_to_pfn(page);
|
||||
}
|
||||
|
||||
if (vma->vm_flags & VM_MIXEDMAP)
|
||||
ret = vm_insert_mixed(&cvma, address, pfn);
|
||||
else
|
||||
ret = vm_insert_pfn(&cvma, address, pfn);
|
||||
|
||||
/*
|
||||
* Somebody beat us to this PTE or prefaulting to
|
||||
* an already populated PTE, or prefaulting error.
|
||||
*/
|
||||
|
||||
if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
|
||||
break;
|
||||
else if (unlikely(ret != 0)) {
|
||||
retval =
|
||||
(ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
|
||||
goto out_io_unlock;
|
||||
}
|
||||
|
||||
address += PAGE_SIZE;
|
||||
if (unlikely(++page_offset >= page_last))
|
||||
break;
|
||||
}
|
||||
out_io_unlock:
|
||||
ttm_mem_io_unlock(man);
|
||||
out_unlock:
|
||||
ttm_bo_unreserve(bo);
|
||||
return retval;
|
||||
}
|
||||
|
||||
static void ttm_bo_vm_open(struct vm_area_struct *vma)
|
||||
{
|
||||
struct ttm_buffer_object *bo =
|
||||
(struct ttm_buffer_object *)vma->vm_private_data;
|
||||
|
||||
WARN_ON(bo->bdev->dev_mapping != vma->vm_file->f_mapping);
|
||||
|
||||
(void)ttm_bo_reference(bo);
|
||||
}
|
||||
|
||||
static void ttm_bo_vm_close(struct vm_area_struct *vma)
|
||||
{
|
||||
struct ttm_buffer_object *bo = (struct ttm_buffer_object *)vma->vm_private_data;
|
||||
|
||||
ttm_bo_unref(&bo);
|
||||
vma->vm_private_data = NULL;
|
||||
}
|
||||
|
||||
static const struct vm_operations_struct ttm_bo_vm_ops = {
|
||||
.fault = ttm_bo_vm_fault,
|
||||
.open = ttm_bo_vm_open,
|
||||
.close = ttm_bo_vm_close
|
||||
};
|
||||
|
||||
static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev,
|
||||
unsigned long offset,
|
||||
unsigned long pages)
|
||||
{
|
||||
struct drm_vma_offset_node *node;
|
||||
struct ttm_buffer_object *bo = NULL;
|
||||
|
||||
drm_vma_offset_lock_lookup(&bdev->vma_manager);
|
||||
|
||||
node = drm_vma_offset_lookup_locked(&bdev->vma_manager, offset, pages);
|
||||
if (likely(node)) {
|
||||
bo = container_of(node, struct ttm_buffer_object, vma_node);
|
||||
if (!kref_get_unless_zero(&bo->kref))
|
||||
bo = NULL;
|
||||
}
|
||||
|
||||
drm_vma_offset_unlock_lookup(&bdev->vma_manager);
|
||||
|
||||
if (!bo)
|
||||
pr_err("Could not find buffer object to map\n");
|
||||
|
||||
return bo;
|
||||
}
|
||||
|
||||
int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
|
||||
struct ttm_bo_device *bdev)
|
||||
{
|
||||
struct ttm_bo_driver *driver;
|
||||
struct ttm_buffer_object *bo;
|
||||
int ret;
|
||||
|
||||
bo = ttm_bo_vm_lookup(bdev, vma->vm_pgoff, vma_pages(vma));
|
||||
if (unlikely(!bo))
|
||||
return -EINVAL;
|
||||
|
||||
driver = bo->bdev->driver;
|
||||
if (unlikely(!driver->verify_access)) {
|
||||
ret = -EPERM;
|
||||
goto out_unref;
|
||||
}
|
||||
ret = driver->verify_access(bo, filp);
|
||||
if (unlikely(ret != 0))
|
||||
goto out_unref;
|
||||
|
||||
vma->vm_ops = &ttm_bo_vm_ops;
|
||||
|
||||
/*
|
||||
* Note: We're transferring the bo reference to
|
||||
* vma->vm_private_data here.
|
||||
*/
|
||||
|
||||
vma->vm_private_data = bo;
|
||||
|
||||
/*
|
||||
* We'd like to use VM_PFNMAP on shared mappings, where
|
||||
* (vma->vm_flags & VM_SHARED) != 0, for performance reasons,
|
||||
* but for some reason VM_PFNMAP + x86 PAT + write-combine is very
|
||||
* bad for performance. Until that has been sorted out, use
|
||||
* VM_MIXEDMAP on all mappings. See freedesktop.org bug #75719
|
||||
*/
|
||||
vma->vm_flags |= VM_MIXEDMAP;
|
||||
vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
|
||||
return 0;
|
||||
out_unref:
|
||||
ttm_bo_unref(&bo);
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL(ttm_bo_mmap);
|
||||
|
||||
int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
|
||||
{
|
||||
if (vma->vm_pgoff != 0)
|
||||
return -EACCES;
|
||||
|
||||
vma->vm_ops = &ttm_bo_vm_ops;
|
||||
vma->vm_private_data = ttm_bo_reference(bo);
|
||||
vma->vm_flags |= VM_MIXEDMAP;
|
||||
vma->vm_flags |= VM_IO | VM_DONTEXPAND;
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(ttm_fbdev_mmap);
|
||||
206
drivers/gpu/drm/ttm/ttm_execbuf_util.c
Normal file
206
drivers/gpu/drm/ttm/ttm_execbuf_util.c
Normal file
|
|
@ -0,0 +1,206 @@
|
|||
/**************************************************************************
|
||||
*
|
||||
* Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the
|
||||
* "Software"), to deal in the Software without restriction, including
|
||||
* without limitation the rights to use, copy, modify, merge, publish,
|
||||
* distribute, sub license, and/or sell copies of the Software, and to
|
||||
* permit persons to whom the Software is furnished to do so, subject to
|
||||
* the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the
|
||||
* next paragraph) shall be included in all copies or substantial portions
|
||||
* of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
|
||||
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
|
||||
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
|
||||
* USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
**************************************************************************/
|
||||
|
||||
#include <drm/ttm/ttm_execbuf_util.h>
|
||||
#include <drm/ttm/ttm_bo_driver.h>
|
||||
#include <drm/ttm/ttm_placement.h>
|
||||
#include <linux/wait.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/module.h>
|
||||
|
||||
static void ttm_eu_backoff_reservation_reverse(struct list_head *list,
|
||||
struct ttm_validate_buffer *entry)
|
||||
{
|
||||
list_for_each_entry_continue_reverse(entry, list, head) {
|
||||
struct ttm_buffer_object *bo = entry->bo;
|
||||
|
||||
__ttm_bo_unreserve(bo);
|
||||
}
|
||||
}
|
||||
|
||||
static void ttm_eu_del_from_lru_locked(struct list_head *list)
|
||||
{
|
||||
struct ttm_validate_buffer *entry;
|
||||
|
||||
list_for_each_entry(entry, list, head) {
|
||||
struct ttm_buffer_object *bo = entry->bo;
|
||||
unsigned put_count = ttm_bo_del_from_lru(bo);
|
||||
|
||||
ttm_bo_list_ref_sub(bo, put_count, true);
|
||||
}
|
||||
}
|
||||
|
||||
void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
|
||||
struct list_head *list)
|
||||
{
|
||||
struct ttm_validate_buffer *entry;
|
||||
struct ttm_bo_global *glob;
|
||||
|
||||
if (list_empty(list))
|
||||
return;
|
||||
|
||||
entry = list_first_entry(list, struct ttm_validate_buffer, head);
|
||||
glob = entry->bo->glob;
|
||||
|
||||
spin_lock(&glob->lru_lock);
|
||||
list_for_each_entry(entry, list, head) {
|
||||
struct ttm_buffer_object *bo = entry->bo;
|
||||
|
||||
ttm_bo_add_to_lru(bo);
|
||||
__ttm_bo_unreserve(bo);
|
||||
}
|
||||
spin_unlock(&glob->lru_lock);
|
||||
|
||||
if (ticket)
|
||||
ww_acquire_fini(ticket);
|
||||
}
|
||||
EXPORT_SYMBOL(ttm_eu_backoff_reservation);
|
||||
|
||||
/*
|
||||
* Reserve buffers for validation.
|
||||
*
|
||||
* If a buffer in the list is marked for CPU access, we back off and
|
||||
* wait for that buffer to become free for GPU access.
|
||||
*
|
||||
* If a buffer is reserved for another validation, the validator with
|
||||
* the highest validation sequence backs off and waits for that buffer
|
||||
* to become unreserved. This prevents deadlocks when validating multiple
|
||||
* buffers in different orders.
|
||||
*/
|
||||
|
||||
int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
|
||||
struct list_head *list, bool intr)
|
||||
{
|
||||
struct ttm_bo_global *glob;
|
||||
struct ttm_validate_buffer *entry;
|
||||
int ret;
|
||||
|
||||
if (list_empty(list))
|
||||
return 0;
|
||||
|
||||
entry = list_first_entry(list, struct ttm_validate_buffer, head);
|
||||
glob = entry->bo->glob;
|
||||
|
||||
if (ticket)
|
||||
ww_acquire_init(ticket, &reservation_ww_class);
|
||||
|
||||
list_for_each_entry(entry, list, head) {
|
||||
struct ttm_buffer_object *bo = entry->bo;
|
||||
|
||||
ret = __ttm_bo_reserve(bo, intr, (ticket == NULL), true,
|
||||
ticket);
|
||||
if (!ret && unlikely(atomic_read(&bo->cpu_writers) > 0)) {
|
||||
__ttm_bo_unreserve(bo);
|
||||
|
||||
ret = -EBUSY;
|
||||
}
|
||||
|
||||
if (!ret) {
|
||||
if (!entry->shared)
|
||||
continue;
|
||||
|
||||
ret = reservation_object_reserve_shared(bo->resv);
|
||||
if (!ret)
|
||||
continue;
|
||||
}
|
||||
|
||||
/* uh oh, we lost out, drop every reservation and try
|
||||
* to only reserve this buffer, then start over if
|
||||
* this succeeds.
|
||||
*/
|
||||
ttm_eu_backoff_reservation_reverse(list, entry);
|
||||
|
||||
if (ret == -EDEADLK && intr) {
|
||||
ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
|
||||
ticket);
|
||||
} else if (ret == -EDEADLK) {
|
||||
ww_mutex_lock_slow(&bo->resv->lock, ticket);
|
||||
ret = 0;
|
||||
}
|
||||
|
||||
if (!ret && entry->shared)
|
||||
ret = reservation_object_reserve_shared(bo->resv);
|
||||
|
||||
if (unlikely(ret != 0)) {
|
||||
if (ret == -EINTR)
|
||||
ret = -ERESTARTSYS;
|
||||
if (ticket) {
|
||||
ww_acquire_done(ticket);
|
||||
ww_acquire_fini(ticket);
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* move this item to the front of the list,
|
||||
* forces correct iteration of the loop without keeping track
|
||||
*/
|
||||
list_del(&entry->head);
|
||||
list_add(&entry->head, list);
|
||||
}
|
||||
|
||||
if (ticket)
|
||||
ww_acquire_done(ticket);
|
||||
spin_lock(&glob->lru_lock);
|
||||
ttm_eu_del_from_lru_locked(list);
|
||||
spin_unlock(&glob->lru_lock);
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(ttm_eu_reserve_buffers);
|
||||
|
||||
void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
|
||||
struct list_head *list, struct fence *fence)
|
||||
{
|
||||
struct ttm_validate_buffer *entry;
|
||||
struct ttm_buffer_object *bo;
|
||||
struct ttm_bo_global *glob;
|
||||
struct ttm_bo_device *bdev;
|
||||
struct ttm_bo_driver *driver;
|
||||
|
||||
if (list_empty(list))
|
||||
return;
|
||||
|
||||
bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
|
||||
bdev = bo->bdev;
|
||||
driver = bdev->driver;
|
||||
glob = bo->glob;
|
||||
|
||||
spin_lock(&glob->lru_lock);
|
||||
|
||||
list_for_each_entry(entry, list, head) {
|
||||
bo = entry->bo;
|
||||
if (entry->shared)
|
||||
reservation_object_add_shared_fence(bo->resv, fence);
|
||||
else
|
||||
reservation_object_add_excl_fence(bo->resv, fence);
|
||||
ttm_bo_add_to_lru(bo);
|
||||
__ttm_bo_unreserve(bo);
|
||||
}
|
||||
spin_unlock(&glob->lru_lock);
|
||||
if (ticket)
|
||||
ww_acquire_fini(ticket);
|
||||
}
|
||||
EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);
|
||||
302
drivers/gpu/drm/ttm/ttm_lock.c
Normal file
302
drivers/gpu/drm/ttm/ttm_lock.c
Normal file
|
|
@ -0,0 +1,302 @@
|
|||
/**************************************************************************
|
||||
*
|
||||
* Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the
|
||||
* "Software"), to deal in the Software without restriction, including
|
||||
* without limitation the rights to use, copy, modify, merge, publish,
|
||||
* distribute, sub license, and/or sell copies of the Software, and to
|
||||
* permit persons to whom the Software is furnished to do so, subject to
|
||||
* the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the
|
||||
* next paragraph) shall be included in all copies or substantial portions
|
||||
* of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
|
||||
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
|
||||
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
|
||||
* USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
**************************************************************************/
|
||||
/*
|
||||
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
|
||||
*/
|
||||
|
||||
#include <drm/ttm/ttm_lock.h>
|
||||
#include <drm/ttm/ttm_module.h>
|
||||
#include <linux/atomic.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/wait.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/module.h>
|
||||
|
||||
#define TTM_WRITE_LOCK_PENDING (1 << 0)
|
||||
#define TTM_VT_LOCK_PENDING (1 << 1)
|
||||
#define TTM_SUSPEND_LOCK_PENDING (1 << 2)
|
||||
#define TTM_VT_LOCK (1 << 3)
|
||||
#define TTM_SUSPEND_LOCK (1 << 4)
|
||||
|
||||
void ttm_lock_init(struct ttm_lock *lock)
|
||||
{
|
||||
spin_lock_init(&lock->lock);
|
||||
init_waitqueue_head(&lock->queue);
|
||||
lock->rw = 0;
|
||||
lock->flags = 0;
|
||||
lock->kill_takers = false;
|
||||
lock->signal = SIGKILL;
|
||||
}
|
||||
EXPORT_SYMBOL(ttm_lock_init);
|
||||
|
||||
void ttm_read_unlock(struct ttm_lock *lock)
|
||||
{
|
||||
spin_lock(&lock->lock);
|
||||
if (--lock->rw == 0)
|
||||
wake_up_all(&lock->queue);
|
||||
spin_unlock(&lock->lock);
|
||||
}
|
||||
EXPORT_SYMBOL(ttm_read_unlock);
|
||||
|
||||
static bool __ttm_read_lock(struct ttm_lock *lock)
|
||||
{
|
||||
bool locked = false;
|
||||
|
||||
spin_lock(&lock->lock);
|
||||
if (unlikely(lock->kill_takers)) {
|
||||
send_sig(lock->signal, current, 0);
|
||||
spin_unlock(&lock->lock);
|
||||
return false;
|
||||
}
|
||||
if (lock->rw >= 0 && lock->flags == 0) {
|
||||
++lock->rw;
|
||||
locked = true;
|
||||
}
|
||||
spin_unlock(&lock->lock);
|
||||
return locked;
|
||||
}
|
||||
|
||||
int ttm_read_lock(struct ttm_lock *lock, bool interruptible)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
if (interruptible)
|
||||
ret = wait_event_interruptible(lock->queue,
|
||||
__ttm_read_lock(lock));
|
||||
else
|
||||
wait_event(lock->queue, __ttm_read_lock(lock));
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL(ttm_read_lock);
|
||||
|
||||
static bool __ttm_read_trylock(struct ttm_lock *lock, bool *locked)
|
||||
{
|
||||
bool block = true;
|
||||
|
||||
*locked = false;
|
||||
|
||||
spin_lock(&lock->lock);
|
||||
if (unlikely(lock->kill_takers)) {
|
||||
send_sig(lock->signal, current, 0);
|
||||
spin_unlock(&lock->lock);
|
||||
return false;
|
||||
}
|
||||
if (lock->rw >= 0 && lock->flags == 0) {
|
||||
++lock->rw;
|
||||
block = false;
|
||||
*locked = true;
|
||||
} else if (lock->flags == 0) {
|
||||
block = false;
|
||||
}
|
||||
spin_unlock(&lock->lock);
|
||||
|
||||
return !block;
|
||||
}
|
||||
|
||||
int ttm_read_trylock(struct ttm_lock *lock, bool interruptible)
|
||||
{
|
||||
int ret = 0;
|
||||
bool locked;
|
||||
|
||||
if (interruptible)
|
||||
ret = wait_event_interruptible
|
||||
(lock->queue, __ttm_read_trylock(lock, &locked));
|
||||
else
|
||||
wait_event(lock->queue, __ttm_read_trylock(lock, &locked));
|
||||
|
||||
if (unlikely(ret != 0)) {
|
||||
BUG_ON(locked);
|
||||
return ret;
|
||||
}
|
||||
|
||||
return (locked) ? 0 : -EBUSY;
|
||||
}
|
||||
|
||||
void ttm_write_unlock(struct ttm_lock *lock)
|
||||
{
|
||||
spin_lock(&lock->lock);
|
||||
lock->rw = 0;
|
||||
wake_up_all(&lock->queue);
|
||||
spin_unlock(&lock->lock);
|
||||
}
|
||||
EXPORT_SYMBOL(ttm_write_unlock);
|
||||
|
||||
static bool __ttm_write_lock(struct ttm_lock *lock)
|
||||
{
|
||||
bool locked = false;
|
||||
|
||||
spin_lock(&lock->lock);
|
||||
if (unlikely(lock->kill_takers)) {
|
||||
send_sig(lock->signal, current, 0);
|
||||
spin_unlock(&lock->lock);
|
||||
return false;
|
||||
}
|
||||
if (lock->rw == 0 && ((lock->flags & ~TTM_WRITE_LOCK_PENDING) == 0)) {
|
||||
lock->rw = -1;
|
||||
lock->flags &= ~TTM_WRITE_LOCK_PENDING;
|
||||
locked = true;
|
||||
} else {
|
||||
lock->flags |= TTM_WRITE_LOCK_PENDING;
|
||||
}
|
||||
spin_unlock(&lock->lock);
|
||||
return locked;
|
||||
}
|
||||
|
||||
int ttm_write_lock(struct ttm_lock *lock, bool interruptible)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
if (interruptible) {
|
||||
ret = wait_event_interruptible(lock->queue,
|
||||
__ttm_write_lock(lock));
|
||||
if (unlikely(ret != 0)) {
|
||||
spin_lock(&lock->lock);
|
||||
lock->flags &= ~TTM_WRITE_LOCK_PENDING;
|
||||
wake_up_all(&lock->queue);
|
||||
spin_unlock(&lock->lock);
|
||||
}
|
||||
} else
|
||||
wait_event(lock->queue, __ttm_read_lock(lock));
|
||||
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL(ttm_write_lock);
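/*
 * Illustrative sketch (not part of the original file): how a driver built
 * on ttm_lock typically brackets command submission with the read lock and
 * an exclusive state change with the write lock.  The my_drv_*() wrappers
 * are hypothetical; only the ttm_*_lock/unlock calls defined above are real.
 */
static int my_drv_submit(struct ttm_lock *lock)
{
        int ret;

        ret = ttm_read_lock(lock, true);        /* interruptible wait */
        if (unlikely(ret != 0))
                return ret;                     /* usually -ERESTARTSYS */

        /* ... build and fire a command buffer here ... */

        ttm_read_unlock(lock);
        return 0;
}

static int my_drv_master_set(struct ttm_lock *lock)
{
        int ret = ttm_write_lock(lock, true);

        if (unlikely(ret != 0))
                return ret;

        /* ... exclusive section: no readers or other writers ... */

        ttm_write_unlock(lock);
        return 0;
}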
|
||||
|
||||
static int __ttm_vt_unlock(struct ttm_lock *lock)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
spin_lock(&lock->lock);
|
||||
if (unlikely(!(lock->flags & TTM_VT_LOCK)))
|
||||
ret = -EINVAL;
|
||||
lock->flags &= ~TTM_VT_LOCK;
|
||||
wake_up_all(&lock->queue);
|
||||
spin_unlock(&lock->lock);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void ttm_vt_lock_remove(struct ttm_base_object **p_base)
|
||||
{
|
||||
struct ttm_base_object *base = *p_base;
|
||||
struct ttm_lock *lock = container_of(base, struct ttm_lock, base);
|
||||
int ret;
|
||||
|
||||
*p_base = NULL;
|
||||
ret = __ttm_vt_unlock(lock);
|
||||
BUG_ON(ret != 0);
|
||||
}
|
||||
|
||||
static bool __ttm_vt_lock(struct ttm_lock *lock)
|
||||
{
|
||||
bool locked = false;
|
||||
|
||||
spin_lock(&lock->lock);
|
||||
if (lock->rw == 0) {
|
||||
lock->flags &= ~TTM_VT_LOCK_PENDING;
|
||||
lock->flags |= TTM_VT_LOCK;
|
||||
locked = true;
|
||||
} else {
|
||||
lock->flags |= TTM_VT_LOCK_PENDING;
|
||||
}
|
||||
spin_unlock(&lock->lock);
|
||||
return locked;
|
||||
}
|
||||
|
||||
int ttm_vt_lock(struct ttm_lock *lock,
|
||||
bool interruptible,
|
||||
struct ttm_object_file *tfile)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
if (interruptible) {
|
||||
ret = wait_event_interruptible(lock->queue,
|
||||
__ttm_vt_lock(lock));
|
||||
if (unlikely(ret != 0)) {
|
||||
spin_lock(&lock->lock);
|
||||
lock->flags &= ~TTM_VT_LOCK_PENDING;
|
||||
wake_up_all(&lock->queue);
|
||||
spin_unlock(&lock->lock);
|
||||
return ret;
|
||||
}
|
||||
} else
|
||||
wait_event(lock->queue, __ttm_vt_lock(lock));
|
||||
|
||||
/*
|
||||
* Add a base-object, the destructor of which will
|
||||
* make sure the lock is released if the client dies
|
||||
* while holding it.
|
||||
*/
|
||||
|
||||
ret = ttm_base_object_init(tfile, &lock->base, false,
|
||||
ttm_lock_type, &ttm_vt_lock_remove, NULL);
|
||||
if (ret)
|
||||
(void)__ttm_vt_unlock(lock);
|
||||
else
|
||||
lock->vt_holder = tfile;
|
||||
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL(ttm_vt_lock);
|
||||
|
||||
int ttm_vt_unlock(struct ttm_lock *lock)
|
||||
{
|
||||
return ttm_ref_object_base_unref(lock->vt_holder,
|
||||
lock->base.hash.key, TTM_REF_USAGE);
|
||||
}
|
||||
EXPORT_SYMBOL(ttm_vt_unlock);
|
||||
|
||||
void ttm_suspend_unlock(struct ttm_lock *lock)
|
||||
{
|
||||
spin_lock(&lock->lock);
|
||||
lock->flags &= ~TTM_SUSPEND_LOCK;
|
||||
wake_up_all(&lock->queue);
|
||||
spin_unlock(&lock->lock);
|
||||
}
|
||||
EXPORT_SYMBOL(ttm_suspend_unlock);
|
||||
|
||||
static bool __ttm_suspend_lock(struct ttm_lock *lock)
|
||||
{
|
||||
bool locked = false;
|
||||
|
||||
spin_lock(&lock->lock);
|
||||
if (lock->rw == 0) {
|
||||
lock->flags &= ~TTM_SUSPEND_LOCK_PENDING;
|
||||
lock->flags |= TTM_SUSPEND_LOCK;
|
||||
locked = true;
|
||||
} else {
|
||||
lock->flags |= TTM_SUSPEND_LOCK_PENDING;
|
||||
}
|
||||
spin_unlock(&lock->lock);
|
||||
return locked;
|
||||
}
|
||||
|
||||
void ttm_suspend_lock(struct ttm_lock *lock)
|
||||
{
|
||||
wait_event(lock->queue, __ttm_suspend_lock(lock));
|
||||
}
|
||||
EXPORT_SYMBOL(ttm_suspend_lock);
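/*
 * Illustrative sketch with hypothetical my_drv_* wrappers: the suspend lock
 * is taken unconditionally (non-interruptibly) around power transitions so
 * that no read-locked submission is in flight while suspending.
 */
static void my_drv_suspend(struct ttm_lock *lock)
{
        ttm_suspend_lock(lock);         /* waits until lock->rw drops to 0 */
        /* ... quiesce and save hardware state ... */
}

static void my_drv_resume(struct ttm_lock *lock)
{
        /* ... restore hardware state ... */
        ttm_suspend_unlock(lock);       /* readers and writers may enter again */
}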
|
||||
602
drivers/gpu/drm/ttm/ttm_memory.c
Normal file
/**************************************************************************
|
||||
*
|
||||
* Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the
|
||||
* "Software"), to deal in the Software without restriction, including
|
||||
* without limitation the rights to use, copy, modify, merge, publish,
|
||||
* distribute, sub license, and/or sell copies of the Software, and to
|
||||
* permit persons to whom the Software is furnished to do so, subject to
|
||||
* the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the
|
||||
* next paragraph) shall be included in all copies or substantial portions
|
||||
* of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
|
||||
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
|
||||
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
|
||||
* USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
**************************************************************************/
|
||||
|
||||
#define pr_fmt(fmt) "[TTM] " fmt
|
||||
|
||||
#include <drm/ttm/ttm_memory.h>
|
||||
#include <drm/ttm/ttm_module.h>
|
||||
#include <drm/ttm/ttm_page_alloc.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/wait.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/slab.h>
|
||||
|
||||
#define TTM_MEMORY_ALLOC_RETRIES 4
|
||||
|
||||
struct ttm_mem_zone {
|
||||
struct kobject kobj;
|
||||
struct ttm_mem_global *glob;
|
||||
const char *name;
|
||||
uint64_t zone_mem;
|
||||
uint64_t emer_mem;
|
||||
uint64_t max_mem;
|
||||
uint64_t swap_limit;
|
||||
uint64_t used_mem;
|
||||
};
|
||||
|
||||
static struct attribute ttm_mem_sys = {
|
||||
.name = "zone_memory",
|
||||
.mode = S_IRUGO
|
||||
};
|
||||
static struct attribute ttm_mem_emer = {
|
||||
.name = "emergency_memory",
|
||||
.mode = S_IRUGO | S_IWUSR
|
||||
};
|
||||
static struct attribute ttm_mem_max = {
|
||||
.name = "available_memory",
|
||||
.mode = S_IRUGO | S_IWUSR
|
||||
};
|
||||
static struct attribute ttm_mem_swap = {
|
||||
.name = "swap_limit",
|
||||
.mode = S_IRUGO | S_IWUSR
|
||||
};
|
||||
static struct attribute ttm_mem_used = {
|
||||
.name = "used_memory",
|
||||
.mode = S_IRUGO
|
||||
};
|
||||
|
||||
static void ttm_mem_zone_kobj_release(struct kobject *kobj)
|
||||
{
|
||||
struct ttm_mem_zone *zone =
|
||||
container_of(kobj, struct ttm_mem_zone, kobj);
|
||||
|
||||
pr_info("Zone %7s: Used memory at exit: %llu kiB\n",
|
||||
zone->name, (unsigned long long)zone->used_mem >> 10);
|
||||
kfree(zone);
|
||||
}
|
||||
|
||||
static ssize_t ttm_mem_zone_show(struct kobject *kobj,
|
||||
struct attribute *attr,
|
||||
char *buffer)
|
||||
{
|
||||
struct ttm_mem_zone *zone =
|
||||
container_of(kobj, struct ttm_mem_zone, kobj);
|
||||
uint64_t val = 0;
|
||||
|
||||
spin_lock(&zone->glob->lock);
|
||||
if (attr == &ttm_mem_sys)
|
||||
val = zone->zone_mem;
|
||||
else if (attr == &ttm_mem_emer)
|
||||
val = zone->emer_mem;
|
||||
else if (attr == &ttm_mem_max)
|
||||
val = zone->max_mem;
|
||||
else if (attr == &ttm_mem_swap)
|
||||
val = zone->swap_limit;
|
||||
else if (attr == &ttm_mem_used)
|
||||
val = zone->used_mem;
|
||||
spin_unlock(&zone->glob->lock);
|
||||
|
||||
return snprintf(buffer, PAGE_SIZE, "%llu\n",
|
||||
(unsigned long long) val >> 10);
|
||||
}
|
||||
|
||||
static void ttm_check_swapping(struct ttm_mem_global *glob);
|
||||
|
||||
static ssize_t ttm_mem_zone_store(struct kobject *kobj,
|
||||
struct attribute *attr,
|
||||
const char *buffer,
|
||||
size_t size)
|
||||
{
|
||||
struct ttm_mem_zone *zone =
|
||||
container_of(kobj, struct ttm_mem_zone, kobj);
|
||||
int chars;
|
||||
unsigned long val;
|
||||
uint64_t val64;
|
||||
|
||||
chars = sscanf(buffer, "%lu", &val);
|
||||
if (chars == 0)
|
||||
return size;
|
||||
|
||||
val64 = val;
|
||||
val64 <<= 10;
|
||||
|
||||
spin_lock(&zone->glob->lock);
|
||||
if (val64 > zone->zone_mem)
|
||||
val64 = zone->zone_mem;
|
||||
if (attr == &ttm_mem_emer) {
|
||||
zone->emer_mem = val64;
|
||||
if (zone->max_mem > val64)
|
||||
zone->max_mem = val64;
|
||||
} else if (attr == &ttm_mem_max) {
|
||||
zone->max_mem = val64;
|
||||
if (zone->emer_mem < val64)
|
||||
zone->emer_mem = val64;
|
||||
} else if (attr == &ttm_mem_swap)
|
||||
zone->swap_limit = val64;
|
||||
spin_unlock(&zone->glob->lock);
|
||||
|
||||
ttm_check_swapping(zone->glob);
|
||||
|
||||
return size;
|
||||
}
|
||||
|
||||
static struct attribute *ttm_mem_zone_attrs[] = {
|
||||
&ttm_mem_sys,
|
||||
&ttm_mem_emer,
|
||||
&ttm_mem_max,
|
||||
&ttm_mem_swap,
|
||||
&ttm_mem_used,
|
||||
NULL
|
||||
};
|
||||
|
||||
static const struct sysfs_ops ttm_mem_zone_ops = {
|
||||
.show = &ttm_mem_zone_show,
|
||||
.store = &ttm_mem_zone_store
|
||||
};
|
||||
|
||||
static struct kobj_type ttm_mem_zone_kobj_type = {
|
||||
.release = &ttm_mem_zone_kobj_release,
|
||||
.sysfs_ops = &ttm_mem_zone_ops,
|
||||
.default_attrs = ttm_mem_zone_attrs,
|
||||
};
|
||||
|
||||
static void ttm_mem_global_kobj_release(struct kobject *kobj)
|
||||
{
|
||||
struct ttm_mem_global *glob =
|
||||
container_of(kobj, struct ttm_mem_global, kobj);
|
||||
|
||||
kfree(glob);
|
||||
}
|
||||
|
||||
static struct kobj_type ttm_mem_glob_kobj_type = {
|
||||
.release = &ttm_mem_global_kobj_release,
|
||||
};
|
||||
|
||||
static bool ttm_zones_above_swap_target(struct ttm_mem_global *glob,
|
||||
bool from_wq, uint64_t extra)
|
||||
{
|
||||
unsigned int i;
|
||||
struct ttm_mem_zone *zone;
|
||||
uint64_t target;
|
||||
|
||||
for (i = 0; i < glob->num_zones; ++i) {
|
||||
zone = glob->zones[i];
|
||||
|
||||
if (from_wq)
|
||||
target = zone->swap_limit;
|
||||
else if (capable(CAP_SYS_ADMIN))
|
||||
target = zone->emer_mem;
|
||||
else
|
||||
target = zone->max_mem;
|
||||
|
||||
target = (extra > target) ? 0ULL : target;
|
||||
|
||||
if (zone->used_mem > target)
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* At this point we only support a single shrink callback.
|
||||
* Extend this if needed, perhaps using a linked list of callbacks.
|
||||
* Note that this function is reentrant:
|
||||
* many threads may try to swap out at any given time.
|
||||
*/
|
||||
|
||||
static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq,
|
||||
uint64_t extra)
|
||||
{
|
||||
int ret;
|
||||
struct ttm_mem_shrink *shrink;
|
||||
|
||||
spin_lock(&glob->lock);
|
||||
if (glob->shrink == NULL)
|
||||
goto out;
|
||||
|
||||
while (ttm_zones_above_swap_target(glob, from_wq, extra)) {
|
||||
shrink = glob->shrink;
|
||||
spin_unlock(&glob->lock);
|
||||
ret = shrink->do_shrink(shrink);
|
||||
spin_lock(&glob->lock);
|
||||
if (unlikely(ret != 0))
|
||||
goto out;
|
||||
}
|
||||
out:
|
||||
spin_unlock(&glob->lock);
|
||||
}
|
||||
|
||||
|
||||
|
||||
static void ttm_shrink_work(struct work_struct *work)
|
||||
{
|
||||
struct ttm_mem_global *glob =
|
||||
container_of(work, struct ttm_mem_global, work);
|
||||
|
||||
ttm_shrink(glob, true, 0ULL);
|
||||
}
|
||||
|
||||
static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
|
||||
const struct sysinfo *si)
|
||||
{
|
||||
struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
|
||||
uint64_t mem;
|
||||
int ret;
|
||||
|
||||
if (unlikely(!zone))
|
||||
return -ENOMEM;
|
||||
|
||||
mem = si->totalram - si->totalhigh;
|
||||
mem *= si->mem_unit;
|
||||
|
||||
zone->name = "kernel";
|
||||
zone->zone_mem = mem;
|
||||
zone->max_mem = mem >> 1;
|
||||
zone->emer_mem = (mem >> 1) + (mem >> 2);
|
||||
zone->swap_limit = zone->max_mem - (mem >> 3);
|
||||
zone->used_mem = 0;
|
||||
zone->glob = glob;
|
||||
glob->zone_kernel = zone;
|
||||
ret = kobject_init_and_add(
|
||||
&zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
|
||||
if (unlikely(ret != 0)) {
|
||||
kobject_put(&zone->kobj);
|
||||
return ret;
|
||||
}
|
||||
glob->zones[glob->num_zones++] = zone;
|
||||
return 0;
|
||||
}
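/*
 * Worked example of the resulting limits (kernel zone, assuming 2 GiB of
 * lowmem purely for illustration):
 *
 *   zone_mem   = 2048 MiB
 *   max_mem    = 1024 MiB   (mem >> 1)
 *   emer_mem   = 1536 MiB   ((mem >> 1) + (mem >> 2))
 *   swap_limit =  768 MiB   (max_mem - (mem >> 3))
 *
 * Ordinary allocations stop at half of lowmem, CAP_SYS_ADMIN callers may
 * push on to three quarters, and the swap work is queued once used memory
 * exceeds the swap limit.
 */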
|
||||
|
||||
#ifdef CONFIG_HIGHMEM
|
||||
static int ttm_mem_init_highmem_zone(struct ttm_mem_global *glob,
|
||||
const struct sysinfo *si)
|
||||
{
|
||||
struct ttm_mem_zone *zone;
|
||||
uint64_t mem;
|
||||
int ret;
|
||||
|
||||
if (si->totalhigh == 0)
|
||||
return 0;
|
||||
|
||||
zone = kzalloc(sizeof(*zone), GFP_KERNEL);
|
||||
if (unlikely(!zone))
|
||||
return -ENOMEM;
|
||||
|
||||
mem = si->totalram;
|
||||
mem *= si->mem_unit;
|
||||
|
||||
zone->name = "highmem";
|
||||
zone->zone_mem = mem;
|
||||
zone->max_mem = mem >> 1;
|
||||
zone->emer_mem = (mem >> 1) + (mem >> 2);
|
||||
zone->swap_limit = zone->max_mem - (mem >> 3);
|
||||
zone->used_mem = 0;
|
||||
zone->glob = glob;
|
||||
glob->zone_highmem = zone;
|
||||
ret = kobject_init_and_add(
|
||||
&zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s",
|
||||
zone->name);
|
||||
if (unlikely(ret != 0)) {
|
||||
kobject_put(&zone->kobj);
|
||||
return ret;
|
||||
}
|
||||
glob->zones[glob->num_zones++] = zone;
|
||||
return 0;
|
||||
}
|
||||
#else
|
||||
static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
|
||||
const struct sysinfo *si)
|
||||
{
|
||||
struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
|
||||
uint64_t mem;
|
||||
int ret;
|
||||
|
||||
if (unlikely(!zone))
|
||||
return -ENOMEM;
|
||||
|
||||
mem = si->totalram;
|
||||
mem *= si->mem_unit;
|
||||
|
||||
/**
|
||||
* No special dma32 zone needed.
|
||||
*/
|
||||
|
||||
if (mem <= ((uint64_t) 1ULL << 32)) {
|
||||
kfree(zone);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Limit max dma32 memory to 4GB for now
|
||||
* until we can figure out how big this
|
||||
* zone really is.
|
||||
*/
|
||||
|
||||
mem = ((uint64_t) 1ULL << 32);
|
||||
zone->name = "dma32";
|
||||
zone->zone_mem = mem;
|
||||
zone->max_mem = mem >> 1;
|
||||
zone->emer_mem = (mem >> 1) + (mem >> 2);
|
||||
zone->swap_limit = zone->max_mem - (mem >> 3);
|
||||
zone->used_mem = 0;
|
||||
zone->glob = glob;
|
||||
glob->zone_dma32 = zone;
|
||||
ret = kobject_init_and_add(
|
||||
&zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
|
||||
if (unlikely(ret != 0)) {
|
||||
kobject_put(&zone->kobj);
|
||||
return ret;
|
||||
}
|
||||
glob->zones[glob->num_zones++] = zone;
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
int ttm_mem_global_init(struct ttm_mem_global *glob)
|
||||
{
|
||||
struct sysinfo si;
|
||||
int ret;
|
||||
int i;
|
||||
struct ttm_mem_zone *zone;
|
||||
|
||||
spin_lock_init(&glob->lock);
|
||||
glob->swap_queue = create_singlethread_workqueue("ttm_swap");
|
||||
INIT_WORK(&glob->work, ttm_shrink_work);
|
||||
ret = kobject_init_and_add(
|
||||
&glob->kobj, &ttm_mem_glob_kobj_type, ttm_get_kobj(), "memory_accounting");
|
||||
if (unlikely(ret != 0)) {
|
||||
kobject_put(&glob->kobj);
|
||||
return ret;
|
||||
}
|
||||
|
||||
si_meminfo(&si);
|
||||
|
||||
ret = ttm_mem_init_kernel_zone(glob, &si);
|
||||
if (unlikely(ret != 0))
|
||||
goto out_no_zone;
|
||||
#ifdef CONFIG_HIGHMEM
|
||||
ret = ttm_mem_init_highmem_zone(glob, &si);
|
||||
if (unlikely(ret != 0))
|
||||
goto out_no_zone;
|
||||
#else
|
||||
ret = ttm_mem_init_dma32_zone(glob, &si);
|
||||
if (unlikely(ret != 0))
|
||||
goto out_no_zone;
|
||||
#endif
|
||||
for (i = 0; i < glob->num_zones; ++i) {
|
||||
zone = glob->zones[i];
|
||||
pr_info("Zone %7s: Available graphics memory: %llu kiB\n",
|
||||
zone->name, (unsigned long long)zone->max_mem >> 10);
|
||||
}
|
||||
ttm_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
|
||||
ttm_dma_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
|
||||
return 0;
|
||||
out_no_zone:
|
||||
ttm_mem_global_release(glob);
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL(ttm_mem_global_init);
|
||||
|
||||
void ttm_mem_global_release(struct ttm_mem_global *glob)
|
||||
{
|
||||
unsigned int i;
|
||||
struct ttm_mem_zone *zone;
|
||||
|
||||
/* let the page allocator first stop the shrink work. */
|
||||
ttm_page_alloc_fini();
|
||||
ttm_dma_page_alloc_fini();
|
||||
|
||||
flush_workqueue(glob->swap_queue);
|
||||
destroy_workqueue(glob->swap_queue);
|
||||
glob->swap_queue = NULL;
|
||||
for (i = 0; i < glob->num_zones; ++i) {
|
||||
zone = glob->zones[i];
|
||||
kobject_del(&zone->kobj);
|
||||
kobject_put(&zone->kobj);
|
||||
}
|
||||
kobject_del(&glob->kobj);
|
||||
kobject_put(&glob->kobj);
|
||||
}
|
||||
EXPORT_SYMBOL(ttm_mem_global_release);
|
||||
|
||||
static void ttm_check_swapping(struct ttm_mem_global *glob)
|
||||
{
|
||||
bool needs_swapping = false;
|
||||
unsigned int i;
|
||||
struct ttm_mem_zone *zone;
|
||||
|
||||
spin_lock(&glob->lock);
|
||||
for (i = 0; i < glob->num_zones; ++i) {
|
||||
zone = glob->zones[i];
|
||||
if (zone->used_mem > zone->swap_limit) {
|
||||
needs_swapping = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
spin_unlock(&glob->lock);
|
||||
|
||||
if (unlikely(needs_swapping))
|
||||
(void)queue_work(glob->swap_queue, &glob->work);
|
||||
|
||||
}
|
||||
|
||||
static void ttm_mem_global_free_zone(struct ttm_mem_global *glob,
|
||||
struct ttm_mem_zone *single_zone,
|
||||
uint64_t amount)
|
||||
{
|
||||
unsigned int i;
|
||||
struct ttm_mem_zone *zone;
|
||||
|
||||
spin_lock(&glob->lock);
|
||||
for (i = 0; i < glob->num_zones; ++i) {
|
||||
zone = glob->zones[i];
|
||||
if (single_zone && zone != single_zone)
|
||||
continue;
|
||||
zone->used_mem -= amount;
|
||||
}
|
||||
spin_unlock(&glob->lock);
|
||||
}
|
||||
|
||||
void ttm_mem_global_free(struct ttm_mem_global *glob,
|
||||
uint64_t amount)
|
||||
{
|
||||
return ttm_mem_global_free_zone(glob, NULL, amount);
|
||||
}
|
||||
EXPORT_SYMBOL(ttm_mem_global_free);
|
||||
|
||||
static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
|
||||
struct ttm_mem_zone *single_zone,
|
||||
uint64_t amount, bool reserve)
|
||||
{
|
||||
uint64_t limit;
|
||||
int ret = -ENOMEM;
|
||||
unsigned int i;
|
||||
struct ttm_mem_zone *zone;
|
||||
|
||||
spin_lock(&glob->lock);
|
||||
for (i = 0; i < glob->num_zones; ++i) {
|
||||
zone = glob->zones[i];
|
||||
if (single_zone && zone != single_zone)
|
||||
continue;
|
||||
|
||||
limit = (capable(CAP_SYS_ADMIN)) ?
|
||||
zone->emer_mem : zone->max_mem;
|
||||
|
||||
if (zone->used_mem > limit)
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
if (reserve) {
|
||||
for (i = 0; i < glob->num_zones; ++i) {
|
||||
zone = glob->zones[i];
|
||||
if (single_zone && zone != single_zone)
|
||||
continue;
|
||||
zone->used_mem += amount;
|
||||
}
|
||||
}
|
||||
|
||||
ret = 0;
|
||||
out_unlock:
|
||||
spin_unlock(&glob->lock);
|
||||
ttm_check_swapping(glob);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
||||
static int ttm_mem_global_alloc_zone(struct ttm_mem_global *glob,
|
||||
struct ttm_mem_zone *single_zone,
|
||||
uint64_t memory,
|
||||
bool no_wait, bool interruptible)
|
||||
{
|
||||
int count = TTM_MEMORY_ALLOC_RETRIES;
|
||||
|
||||
while (unlikely(ttm_mem_global_reserve(glob,
|
||||
single_zone,
|
||||
memory, true)
|
||||
!= 0)) {
|
||||
if (no_wait)
|
||||
return -ENOMEM;
|
||||
if (unlikely(count-- == 0))
|
||||
return -ENOMEM;
|
||||
ttm_shrink(glob, false, memory + (memory >> 2) + 16);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
|
||||
bool no_wait, bool interruptible)
|
||||
{
|
||||
/**
|
||||
* Normal allocations of kernel memory are registered in
|
||||
* all zones.
|
||||
*/
|
||||
|
||||
return ttm_mem_global_alloc_zone(glob, NULL, memory, no_wait,
|
||||
interruptible);
|
||||
}
|
||||
EXPORT_SYMBOL(ttm_mem_global_alloc);
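/*
 * Illustrative sketch: accounting a driver-private allocation against the
 * global zones before doing the real allocation, and returning the quota if
 * the allocation fails.  my_obj_alloc() is hypothetical; the
 * ttm_mem_global_alloc/free and ttm_round_pot calls match the helpers in
 * this file.
 */
static void *my_obj_alloc(struct ttm_mem_global *glob, size_t size)
{
        size_t acc_size = ttm_round_pot(size);
        void *obj;

        if (ttm_mem_global_alloc(glob, acc_size, false, true))
                return NULL;                    /* over the zone limits */

        obj = kzalloc(size, GFP_KERNEL);
        if (!obj)
                ttm_mem_global_free(glob, acc_size);

        return obj;
}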
|
||||
|
||||
int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
|
||||
struct page *page,
|
||||
bool no_wait, bool interruptible)
|
||||
{
|
||||
|
||||
struct ttm_mem_zone *zone = NULL;
|
||||
|
||||
/**
|
||||
* Page allocations may be registered in a single zone
|
||||
* only if highmem or !dma32.
|
||||
*/
|
||||
|
||||
#ifdef CONFIG_HIGHMEM
|
||||
if (PageHighMem(page) && glob->zone_highmem != NULL)
|
||||
zone = glob->zone_highmem;
|
||||
#else
|
||||
if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
|
||||
zone = glob->zone_kernel;
|
||||
#endif
|
||||
return ttm_mem_global_alloc_zone(glob, zone, PAGE_SIZE, no_wait,
|
||||
interruptible);
|
||||
}
|
||||
|
||||
void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct page *page)
|
||||
{
|
||||
struct ttm_mem_zone *zone = NULL;
|
||||
|
||||
#ifdef CONFIG_HIGHMEM
|
||||
if (PageHighMem(page) && glob->zone_highmem != NULL)
|
||||
zone = glob->zone_highmem;
|
||||
#else
|
||||
if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
|
||||
zone = glob->zone_kernel;
|
||||
#endif
|
||||
ttm_mem_global_free_zone(glob, zone, PAGE_SIZE);
|
||||
}
|
||||
|
||||
|
||||
size_t ttm_round_pot(size_t size)
|
||||
{
|
||||
if ((size & (size - 1)) == 0)
|
||||
return size;
|
||||
else if (size > PAGE_SIZE)
|
||||
return PAGE_ALIGN(size);
|
||||
else {
|
||||
size_t tmp_size = 4;
|
||||
|
||||
while (tmp_size < size)
|
||||
tmp_size <<= 1;
|
||||
|
||||
return tmp_size;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(ttm_round_pot);
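/*
 * Worked examples for ttm_round_pot(), assuming PAGE_SIZE == 4096:
 *
 *   ttm_round_pot(100)  -> 128    (rounded up to the next power of two)
 *   ttm_round_pot(4096) -> 4096   (already a power of two)
 *   ttm_round_pot(5000) -> 8192   (larger than a page, so page aligned)
 *
 * Drivers typically feed sizeof() of their object structs through this
 * helper to get a stable size to pass to ttm_mem_global_alloc().
 */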
|
||||
102
drivers/gpu/drm/ttm/ttm_module.c
Normal file
/**************************************************************************
|
||||
*
|
||||
* Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the
|
||||
* "Software"), to deal in the Software without restriction, including
|
||||
* without limitation the rights to use, copy, modify, merge, publish,
|
||||
* distribute, sub license, and/or sell copies of the Software, and to
|
||||
* permit persons to whom the Software is furnished to do so, subject to
|
||||
* the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the
|
||||
* next paragraph) shall be included in all copies or substantial portions
|
||||
* of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
|
||||
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
|
||||
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
|
||||
* USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
**************************************************************************/
|
||||
/*
|
||||
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
|
||||
* Jerome Glisse
|
||||
*/
|
||||
#include <linux/module.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/sched.h>
|
||||
#include <drm/ttm/ttm_module.h>
|
||||
#include <drm/drm_sysfs.h>
|
||||
|
||||
static DECLARE_WAIT_QUEUE_HEAD(exit_q);
|
||||
static atomic_t device_released;
|
||||
|
||||
static struct device_type ttm_drm_class_type = {
|
||||
.name = "ttm",
|
||||
/**
|
||||
* Add pm ops here.
|
||||
*/
|
||||
};
|
||||
|
||||
static void ttm_drm_class_device_release(struct device *dev)
|
||||
{
|
||||
atomic_set(&device_released, 1);
|
||||
wake_up_all(&exit_q);
|
||||
}
|
||||
|
||||
static struct device ttm_drm_class_device = {
|
||||
.type = &ttm_drm_class_type,
|
||||
.release = &ttm_drm_class_device_release
|
||||
};
|
||||
|
||||
struct kobject *ttm_get_kobj(void)
|
||||
{
|
||||
struct kobject *kobj = &ttm_drm_class_device.kobj;
|
||||
BUG_ON(kobj == NULL);
|
||||
return kobj;
|
||||
}
|
||||
|
||||
static int __init ttm_init(void)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = dev_set_name(&ttm_drm_class_device, "ttm");
|
||||
if (unlikely(ret != 0))
|
||||
return ret;
|
||||
|
||||
atomic_set(&device_released, 0);
|
||||
ret = drm_class_device_register(&ttm_drm_class_device);
|
||||
if (unlikely(ret != 0))
|
||||
goto out_no_dev_reg;
|
||||
|
||||
return 0;
|
||||
out_no_dev_reg:
|
||||
atomic_set(&device_released, 1);
|
||||
wake_up_all(&exit_q);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void __exit ttm_exit(void)
|
||||
{
|
||||
drm_class_device_unregister(&ttm_drm_class_device);
|
||||
|
||||
/**
|
||||
* Refuse to unload until the TTM device is released.
|
||||
* Not sure this is 100% needed.
|
||||
*/
|
||||
|
||||
wait_event(exit_q, atomic_read(&device_released) == 1);
|
||||
}
|
||||
|
||||
module_init(ttm_init);
|
||||
module_exit(ttm_exit);
|
||||
|
||||
MODULE_AUTHOR("Thomas Hellstrom, Jerome Glisse");
|
||||
MODULE_DESCRIPTION("TTM memory manager subsystem (for DRM device)");
|
||||
MODULE_LICENSE("GPL and additional rights");
|
||||
760
drivers/gpu/drm/ttm/ttm_object.c
Normal file
/**************************************************************************
|
||||
*
|
||||
* Copyright (c) 2009-2013 VMware, Inc., Palo Alto, CA., USA
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the
|
||||
* "Software"), to deal in the Software without restriction, including
|
||||
* without limitation the rights to use, copy, modify, merge, publish,
|
||||
* distribute, sub license, and/or sell copies of the Software, and to
|
||||
* permit persons to whom the Software is furnished to do so, subject to
|
||||
* the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the
|
||||
* next paragraph) shall be included in all copies or substantial portions
|
||||
* of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
|
||||
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
|
||||
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
|
||||
* USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
**************************************************************************/
|
||||
/*
|
||||
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
|
||||
*
|
||||
* While no substantial code is shared, the prime code is inspired by
|
||||
* drm_prime.c, with
|
||||
* Authors:
|
||||
* Dave Airlie <airlied@redhat.com>
|
||||
* Rob Clark <rob.clark@linaro.org>
|
||||
*/
|
||||
/** @file ttm_ref_object.c
|
||||
*
|
||||
* Base- and reference object implementation for the various
|
||||
* ttm objects. Implements reference counting, minimal security checks
|
||||
* and release on file close.
|
||||
*/
|
||||
|
||||
|
||||
/**
|
||||
* struct ttm_object_file
|
||||
*
|
||||
* @tdev: Pointer to the ttm_object_device.
|
||||
*
|
||||
* @lock: Lock that protects the ref_list list and the
|
||||
* ref_hash hash tables.
|
||||
*
|
||||
* @ref_list: List of ttm_ref_objects to be destroyed at
|
||||
* file release.
|
||||
*
|
||||
* @ref_hash: Hash tables of ref objects, one per ttm_ref_type,
|
||||
* for fast lookup of ref objects given a base object.
|
||||
*/
|
||||
|
||||
#define pr_fmt(fmt) "[TTM] " fmt
|
||||
|
||||
#include <drm/ttm/ttm_object.h>
|
||||
#include <drm/ttm/ttm_module.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/atomic.h>
|
||||
|
||||
struct ttm_object_file {
|
||||
struct ttm_object_device *tdev;
|
||||
spinlock_t lock;
|
||||
struct list_head ref_list;
|
||||
struct drm_open_hash ref_hash[TTM_REF_NUM];
|
||||
struct kref refcount;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct ttm_object_device
|
||||
*
|
||||
* @object_lock: lock that protects the object_hash hash table.
|
||||
*
|
||||
* @object_hash: hash table for fast lookup of object global names.
|
||||
*
|
||||
* @object_count: Per device object count.
|
||||
*
|
||||
* This is the per-device data structure needed for ttm object management.
|
||||
*/
|
||||
|
||||
struct ttm_object_device {
|
||||
spinlock_t object_lock;
|
||||
struct drm_open_hash object_hash;
|
||||
atomic_t object_count;
|
||||
struct ttm_mem_global *mem_glob;
|
||||
struct dma_buf_ops ops;
|
||||
void (*dmabuf_release)(struct dma_buf *dma_buf);
|
||||
size_t dma_buf_size;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct ttm_ref_object
|
||||
*
|
||||
* @hash: Hash entry for the per-file object reference hash.
|
||||
*
|
||||
* @head: List entry for the per-file list of ref-objects.
|
||||
*
|
||||
* @kref: Ref count.
|
||||
*
|
||||
* @obj: Base object this ref object is referencing.
|
||||
*
|
||||
* @ref_type: Type of ref object.
|
||||
*
|
||||
* This is similar to an idr object, but it also has a hash table entry
|
||||
* that allows lookup with a pointer to the referenced object as a key. In
|
||||
* that way, one can easily detect whether a base object is referenced by
|
||||
* a particular ttm_object_file. It also carries a ref count to avoid creating
|
||||
* multiple ref objects if a ttm_object_file references the same base
|
||||
* object more than once.
|
||||
*/
|
||||
|
||||
struct ttm_ref_object {
|
||||
struct rcu_head rcu_head;
|
||||
struct drm_hash_item hash;
|
||||
struct list_head head;
|
||||
struct kref kref;
|
||||
enum ttm_ref_type ref_type;
|
||||
struct ttm_base_object *obj;
|
||||
struct ttm_object_file *tfile;
|
||||
};
|
||||
|
||||
static void ttm_prime_dmabuf_release(struct dma_buf *dma_buf);
|
||||
|
||||
static inline struct ttm_object_file *
|
||||
ttm_object_file_ref(struct ttm_object_file *tfile)
|
||||
{
|
||||
kref_get(&tfile->refcount);
|
||||
return tfile;
|
||||
}
|
||||
|
||||
static void ttm_object_file_destroy(struct kref *kref)
|
||||
{
|
||||
struct ttm_object_file *tfile =
|
||||
container_of(kref, struct ttm_object_file, refcount);
|
||||
|
||||
kfree(tfile);
|
||||
}
|
||||
|
||||
|
||||
static inline void ttm_object_file_unref(struct ttm_object_file **p_tfile)
|
||||
{
|
||||
struct ttm_object_file *tfile = *p_tfile;
|
||||
|
||||
*p_tfile = NULL;
|
||||
kref_put(&tfile->refcount, ttm_object_file_destroy);
|
||||
}
|
||||
|
||||
|
||||
int ttm_base_object_init(struct ttm_object_file *tfile,
|
||||
struct ttm_base_object *base,
|
||||
bool shareable,
|
||||
enum ttm_object_type object_type,
|
||||
void (*refcount_release) (struct ttm_base_object **),
|
||||
void (*ref_obj_release) (struct ttm_base_object *,
|
||||
enum ttm_ref_type ref_type))
|
||||
{
|
||||
struct ttm_object_device *tdev = tfile->tdev;
|
||||
int ret;
|
||||
|
||||
base->shareable = shareable;
|
||||
base->tfile = ttm_object_file_ref(tfile);
|
||||
base->refcount_release = refcount_release;
|
||||
base->ref_obj_release = ref_obj_release;
|
||||
base->object_type = object_type;
|
||||
kref_init(&base->refcount);
|
||||
spin_lock(&tdev->object_lock);
|
||||
ret = drm_ht_just_insert_please_rcu(&tdev->object_hash,
|
||||
&base->hash,
|
||||
(unsigned long)base, 31, 0, 0);
|
||||
spin_unlock(&tdev->object_lock);
|
||||
if (unlikely(ret != 0))
|
||||
goto out_err0;
|
||||
|
||||
ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
|
||||
if (unlikely(ret != 0))
|
||||
goto out_err1;
|
||||
|
||||
ttm_base_object_unref(&base);
|
||||
|
||||
return 0;
|
||||
out_err1:
|
||||
spin_lock(&tdev->object_lock);
|
||||
(void)drm_ht_remove_item_rcu(&tdev->object_hash, &base->hash);
|
||||
spin_unlock(&tdev->object_lock);
|
||||
out_err0:
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL(ttm_base_object_init);
|
||||
|
||||
static void ttm_release_base(struct kref *kref)
|
||||
{
|
||||
struct ttm_base_object *base =
|
||||
container_of(kref, struct ttm_base_object, refcount);
|
||||
struct ttm_object_device *tdev = base->tfile->tdev;
|
||||
|
||||
spin_lock(&tdev->object_lock);
|
||||
(void)drm_ht_remove_item_rcu(&tdev->object_hash, &base->hash);
|
||||
spin_unlock(&tdev->object_lock);
|
||||
|
||||
/*
|
||||
* Note: We don't use synchronize_rcu() here because it's far
|
||||
* too slow. It's up to the user to free the object using
|
||||
* call_rcu() or ttm_base_object_kfree().
|
||||
*/
|
||||
|
||||
ttm_object_file_unref(&base->tfile);
|
||||
if (base->refcount_release)
|
||||
base->refcount_release(&base);
|
||||
}
|
||||
|
||||
void ttm_base_object_unref(struct ttm_base_object **p_base)
|
||||
{
|
||||
struct ttm_base_object *base = *p_base;
|
||||
|
||||
*p_base = NULL;
|
||||
|
||||
kref_put(&base->refcount, ttm_release_base);
|
||||
}
|
||||
EXPORT_SYMBOL(ttm_base_object_unref);
|
||||
|
||||
struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
|
||||
uint32_t key)
|
||||
{
|
||||
struct ttm_base_object *base = NULL;
|
||||
struct drm_hash_item *hash;
|
||||
struct drm_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE];
|
||||
int ret;
|
||||
|
||||
rcu_read_lock();
|
||||
ret = drm_ht_find_item_rcu(ht, key, &hash);
|
||||
|
||||
if (likely(ret == 0)) {
|
||||
base = drm_hash_entry(hash, struct ttm_ref_object, hash)->obj;
|
||||
if (!kref_get_unless_zero(&base->refcount))
|
||||
base = NULL;
|
||||
}
|
||||
rcu_read_unlock();
|
||||
|
||||
return base;
|
||||
}
|
||||
EXPORT_SYMBOL(ttm_base_object_lookup);
|
||||
|
||||
struct ttm_base_object *
|
||||
ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint32_t key)
|
||||
{
|
||||
struct ttm_base_object *base = NULL;
|
||||
struct drm_hash_item *hash;
|
||||
struct drm_open_hash *ht = &tdev->object_hash;
|
||||
int ret;
|
||||
|
||||
rcu_read_lock();
|
||||
ret = drm_ht_find_item_rcu(ht, key, &hash);
|
||||
|
||||
if (likely(ret == 0)) {
|
||||
base = drm_hash_entry(hash, struct ttm_base_object, hash);
|
||||
if (!kref_get_unless_zero(&base->refcount))
|
||||
base = NULL;
|
||||
}
|
||||
rcu_read_unlock();
|
||||
|
||||
return base;
|
||||
}
|
||||
EXPORT_SYMBOL(ttm_base_object_lookup_for_ref);
|
||||
|
||||
/**
|
||||
* ttm_ref_object_exists - Check whether a caller has a valid ref object
|
||||
* (has opened) a base object.
|
||||
*
|
||||
* @tfile: Pointer to a struct ttm_object_file identifying the caller.
|
||||
* @base: Pointer to a struct base object.
|
||||
*
|
||||
* Checks whether the caller identified by @tfile has put a valid USAGE
|
||||
* reference object on the base object identified by @base.
|
||||
*/
|
||||
bool ttm_ref_object_exists(struct ttm_object_file *tfile,
|
||||
struct ttm_base_object *base)
|
||||
{
|
||||
struct drm_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE];
|
||||
struct drm_hash_item *hash;
|
||||
struct ttm_ref_object *ref;
|
||||
|
||||
rcu_read_lock();
|
||||
if (unlikely(drm_ht_find_item_rcu(ht, base->hash.key, &hash) != 0))
|
||||
goto out_false;
|
||||
|
||||
/*
|
||||
* Verify that the ref object is really pointing to our base object.
|
||||
* Our base object could actually be dead, and the ref object pointing
|
||||
* to another base object with the same handle.
|
||||
*/
|
||||
ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
|
||||
if (unlikely(base != ref->obj))
|
||||
goto out_false;
|
||||
|
||||
/*
|
||||
* Verify that the ref->obj pointer was actually valid!
|
||||
*/
|
||||
rmb();
|
||||
if (unlikely(atomic_read(&ref->kref.refcount) == 0))
|
||||
goto out_false;
|
||||
|
||||
rcu_read_unlock();
|
||||
return true;
|
||||
|
||||
out_false:
|
||||
rcu_read_unlock();
|
||||
return false;
|
||||
}
|
||||
EXPORT_SYMBOL(ttm_ref_object_exists);
|
||||
|
||||
int ttm_ref_object_add(struct ttm_object_file *tfile,
|
||||
struct ttm_base_object *base,
|
||||
enum ttm_ref_type ref_type, bool *existed)
|
||||
{
|
||||
struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
|
||||
struct ttm_ref_object *ref;
|
||||
struct drm_hash_item *hash;
|
||||
struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
|
||||
int ret = -EINVAL;
|
||||
|
||||
if (base->tfile != tfile && !base->shareable)
|
||||
return -EPERM;
|
||||
|
||||
if (existed != NULL)
|
||||
*existed = true;
|
||||
|
||||
while (ret == -EINVAL) {
|
||||
rcu_read_lock();
|
||||
ret = drm_ht_find_item_rcu(ht, base->hash.key, &hash);
|
||||
|
||||
if (ret == 0) {
|
||||
ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
|
||||
if (kref_get_unless_zero(&ref->kref)) {
|
||||
rcu_read_unlock();
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
rcu_read_unlock();
|
||||
ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref),
|
||||
false, false);
|
||||
if (unlikely(ret != 0))
|
||||
return ret;
|
||||
ref = kmalloc(sizeof(*ref), GFP_KERNEL);
|
||||
if (unlikely(ref == NULL)) {
|
||||
ttm_mem_global_free(mem_glob, sizeof(*ref));
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
ref->hash.key = base->hash.key;
|
||||
ref->obj = base;
|
||||
ref->tfile = tfile;
|
||||
ref->ref_type = ref_type;
|
||||
kref_init(&ref->kref);
|
||||
|
||||
spin_lock(&tfile->lock);
|
||||
ret = drm_ht_insert_item_rcu(ht, &ref->hash);
|
||||
|
||||
if (likely(ret == 0)) {
|
||||
list_add_tail(&ref->head, &tfile->ref_list);
|
||||
kref_get(&base->refcount);
|
||||
spin_unlock(&tfile->lock);
|
||||
if (existed != NULL)
|
||||
*existed = false;
|
||||
break;
|
||||
}
|
||||
|
||||
spin_unlock(&tfile->lock);
|
||||
BUG_ON(ret != -EINVAL);
|
||||
|
||||
ttm_mem_global_free(mem_glob, sizeof(*ref));
|
||||
kfree(ref);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL(ttm_ref_object_add);
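/*
 * Illustrative sketch of the usual "open handle" path in a driver ioctl:
 * look the base object up by its user-space handle, add a USAGE reference
 * for this client, and drop the lookup reference again.  my_drv_open_handle()
 * is hypothetical; the ttm_* calls are the ones defined in this file.
 */
static int my_drv_open_handle(struct ttm_object_file *tfile, uint32_t handle)
{
        struct ttm_base_object *base;
        int ret;

        base = ttm_base_object_lookup(tfile, handle);
        if (unlikely(base == NULL))
                return -ENOENT;

        ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
        ttm_base_object_unref(&base);

        /* On close: ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE); */
        return ret;
}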
|
||||
|
||||
static void ttm_ref_object_release(struct kref *kref)
|
||||
{
|
||||
struct ttm_ref_object *ref =
|
||||
container_of(kref, struct ttm_ref_object, kref);
|
||||
struct ttm_base_object *base = ref->obj;
|
||||
struct ttm_object_file *tfile = ref->tfile;
|
||||
struct drm_open_hash *ht;
|
||||
struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
|
||||
|
||||
ht = &tfile->ref_hash[ref->ref_type];
|
||||
(void)drm_ht_remove_item_rcu(ht, &ref->hash);
|
||||
list_del(&ref->head);
|
||||
spin_unlock(&tfile->lock);
|
||||
|
||||
if (ref->ref_type != TTM_REF_USAGE && base->ref_obj_release)
|
||||
base->ref_obj_release(base, ref->ref_type);
|
||||
|
||||
ttm_base_object_unref(&ref->obj);
|
||||
ttm_mem_global_free(mem_glob, sizeof(*ref));
|
||||
kfree_rcu(ref, rcu_head);
|
||||
spin_lock(&tfile->lock);
|
||||
}
|
||||
|
||||
int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
|
||||
unsigned long key, enum ttm_ref_type ref_type)
|
||||
{
|
||||
struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
|
||||
struct ttm_ref_object *ref;
|
||||
struct drm_hash_item *hash;
|
||||
int ret;
|
||||
|
||||
spin_lock(&tfile->lock);
|
||||
ret = drm_ht_find_item(ht, key, &hash);
|
||||
if (unlikely(ret != 0)) {
|
||||
spin_unlock(&tfile->lock);
|
||||
return -EINVAL;
|
||||
}
|
||||
ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
|
||||
kref_put(&ref->kref, ttm_ref_object_release);
|
||||
spin_unlock(&tfile->lock);
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(ttm_ref_object_base_unref);
|
||||
|
||||
void ttm_object_file_release(struct ttm_object_file **p_tfile)
|
||||
{
|
||||
struct ttm_ref_object *ref;
|
||||
struct list_head *list;
|
||||
unsigned int i;
|
||||
struct ttm_object_file *tfile = *p_tfile;
|
||||
|
||||
*p_tfile = NULL;
|
||||
spin_lock(&tfile->lock);
|
||||
|
||||
/*
|
||||
* Since we release the lock within the loop, we have to
|
||||
* restart it from the beginning each time.
|
||||
*/
|
||||
|
||||
while (!list_empty(&tfile->ref_list)) {
|
||||
list = tfile->ref_list.next;
|
||||
ref = list_entry(list, struct ttm_ref_object, head);
|
||||
ttm_ref_object_release(&ref->kref);
|
||||
}
|
||||
|
||||
for (i = 0; i < TTM_REF_NUM; ++i)
|
||||
drm_ht_remove(&tfile->ref_hash[i]);
|
||||
|
||||
spin_unlock(&tfile->lock);
|
||||
ttm_object_file_unref(&tfile);
|
||||
}
|
||||
EXPORT_SYMBOL(ttm_object_file_release);
|
||||
|
||||
struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev,
|
||||
unsigned int hash_order)
|
||||
{
|
||||
struct ttm_object_file *tfile = kmalloc(sizeof(*tfile), GFP_KERNEL);
|
||||
unsigned int i;
|
||||
unsigned int j = 0;
|
||||
int ret;
|
||||
|
||||
if (unlikely(tfile == NULL))
|
||||
return NULL;
|
||||
|
||||
spin_lock_init(&tfile->lock);
|
||||
tfile->tdev = tdev;
|
||||
kref_init(&tfile->refcount);
|
||||
INIT_LIST_HEAD(&tfile->ref_list);
|
||||
|
||||
for (i = 0; i < TTM_REF_NUM; ++i) {
|
||||
ret = drm_ht_create(&tfile->ref_hash[i], hash_order);
|
||||
if (ret) {
|
||||
j = i;
|
||||
goto out_err;
|
||||
}
|
||||
}
|
||||
|
||||
return tfile;
|
||||
out_err:
|
||||
for (i = 0; i < j; ++i)
|
||||
drm_ht_remove(&tfile->ref_hash[i]);
|
||||
|
||||
kfree(tfile);
|
||||
|
||||
return NULL;
|
||||
}
|
||||
EXPORT_SYMBOL(ttm_object_file_init);
|
||||
|
||||
struct ttm_object_device *
|
||||
ttm_object_device_init(struct ttm_mem_global *mem_glob,
|
||||
unsigned int hash_order,
|
||||
const struct dma_buf_ops *ops)
|
||||
{
|
||||
struct ttm_object_device *tdev = kmalloc(sizeof(*tdev), GFP_KERNEL);
|
||||
int ret;
|
||||
|
||||
if (unlikely(tdev == NULL))
|
||||
return NULL;
|
||||
|
||||
tdev->mem_glob = mem_glob;
|
||||
spin_lock_init(&tdev->object_lock);
|
||||
atomic_set(&tdev->object_count, 0);
|
||||
ret = drm_ht_create(&tdev->object_hash, hash_order);
|
||||
if (ret != 0)
|
||||
goto out_no_object_hash;
|
||||
|
||||
tdev->ops = *ops;
|
||||
tdev->dmabuf_release = tdev->ops.release;
|
||||
tdev->ops.release = ttm_prime_dmabuf_release;
|
||||
tdev->dma_buf_size = ttm_round_pot(sizeof(struct dma_buf)) +
|
||||
ttm_round_pot(sizeof(struct file));
|
||||
return tdev;
|
||||
|
||||
out_no_object_hash:
|
||||
kfree(tdev);
|
||||
return NULL;
|
||||
}
|
||||
EXPORT_SYMBOL(ttm_object_device_init);
|
||||
|
||||
void ttm_object_device_release(struct ttm_object_device **p_tdev)
|
||||
{
|
||||
struct ttm_object_device *tdev = *p_tdev;
|
||||
|
||||
*p_tdev = NULL;
|
||||
|
||||
spin_lock(&tdev->object_lock);
|
||||
drm_ht_remove(&tdev->object_hash);
|
||||
spin_unlock(&tdev->object_lock);
|
||||
|
||||
kfree(tdev);
|
||||
}
|
||||
EXPORT_SYMBOL(ttm_object_device_release);
|
||||
|
||||
/**
|
||||
* get_dma_buf_unless_doomed - get a dma_buf reference if possible.
|
||||
*
|
||||
* @dma_buf: Non-refcounted pointer to a struct dma-buf.
|
||||
*
|
||||
* Obtain a file reference from a lookup structure that doesn't refcount
|
||||
* the file, but synchronizes with its release method to make sure it has
|
||||
* not been freed yet. See for example kref_get_unless_zero documentation.
|
||||
* Returns true if refcounting succeeds, false otherwise.
|
||||
*
|
||||
* Nobody really wants this as a public API yet, so let it mature here
|
||||
* for some time...
|
||||
*/
|
||||
static bool __must_check get_dma_buf_unless_doomed(struct dma_buf *dmabuf)
|
||||
{
|
||||
return atomic_long_inc_not_zero(&dmabuf->file->f_count) != 0L;
|
||||
}
|
||||
|
||||
/**
|
||||
* ttm_prime_refcount_release - refcount release method for a prime object.
|
||||
*
|
||||
* @p_base: Pointer to ttm_base_object pointer.
|
||||
*
|
||||
* This is a wrapper that calls the refcount_release function of the
|
||||
* underlying object. At the same time it cleans up the prime object.
|
||||
* This function is called when all references to the base object we
|
||||
* derive from are gone.
|
||||
*/
|
||||
static void ttm_prime_refcount_release(struct ttm_base_object **p_base)
|
||||
{
|
||||
struct ttm_base_object *base = *p_base;
|
||||
struct ttm_prime_object *prime;
|
||||
|
||||
*p_base = NULL;
|
||||
prime = container_of(base, struct ttm_prime_object, base);
|
||||
BUG_ON(prime->dma_buf != NULL);
|
||||
mutex_destroy(&prime->mutex);
|
||||
if (prime->refcount_release)
|
||||
prime->refcount_release(&base);
|
||||
}
|
||||
|
||||
/**
|
||||
* ttm_prime_dmabuf_release - Release method for the dma-bufs we export
|
||||
*
|
||||
* @dma_buf:
|
||||
*
|
||||
* This function first calls the dma_buf release method the driver
|
||||
* provides. Then it cleans up our dma_buf pointer used for lookup,
|
||||
* and finally releases the reference the dma_buf has on our base
|
||||
* object.
|
||||
*/
|
||||
static void ttm_prime_dmabuf_release(struct dma_buf *dma_buf)
|
||||
{
|
||||
struct ttm_prime_object *prime =
|
||||
(struct ttm_prime_object *) dma_buf->priv;
|
||||
struct ttm_base_object *base = &prime->base;
|
||||
struct ttm_object_device *tdev = base->tfile->tdev;
|
||||
|
||||
if (tdev->dmabuf_release)
|
||||
tdev->dmabuf_release(dma_buf);
|
||||
mutex_lock(&prime->mutex);
|
||||
if (prime->dma_buf == dma_buf)
|
||||
prime->dma_buf = NULL;
|
||||
mutex_unlock(&prime->mutex);
|
||||
ttm_mem_global_free(tdev->mem_glob, tdev->dma_buf_size);
|
||||
ttm_base_object_unref(&base);
|
||||
}
|
||||
|
||||
/**
|
||||
* ttm_prime_fd_to_handle - Get a base object handle from a prime fd
|
||||
*
|
||||
* @tfile: A struct ttm_object_file identifying the caller.
|
||||
* @fd: The prime / dmabuf fd.
|
||||
* @handle: The returned handle.
|
||||
*
|
||||
* This function returns a handle to an object that previously exported
|
||||
* a dma-buf. Note that we don't handle imports yet, because we simply
|
||||
* have no consumers of that implementation.
|
||||
*/
|
||||
int ttm_prime_fd_to_handle(struct ttm_object_file *tfile,
|
||||
int fd, u32 *handle)
|
||||
{
|
||||
struct ttm_object_device *tdev = tfile->tdev;
|
||||
struct dma_buf *dma_buf;
|
||||
struct ttm_prime_object *prime;
|
||||
struct ttm_base_object *base;
|
||||
int ret;
|
||||
|
||||
dma_buf = dma_buf_get(fd);
|
||||
if (IS_ERR(dma_buf))
|
||||
return PTR_ERR(dma_buf);
|
||||
|
||||
if (dma_buf->ops != &tdev->ops)
|
||||
return -ENOSYS;
|
||||
|
||||
prime = (struct ttm_prime_object *) dma_buf->priv;
|
||||
base = &prime->base;
|
||||
*handle = base->hash.key;
|
||||
ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
|
||||
|
||||
dma_buf_put(dma_buf);
|
||||
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(ttm_prime_fd_to_handle);
|
||||
|
||||
/**
|
||||
* ttm_prime_handle_to_fd - Return a dma_buf fd from a ttm prime object
|
||||
*
|
||||
* @tfile: Struct ttm_object_file identifying the caller.
|
||||
* @handle: Handle to the object we're exporting from.
|
||||
* @flags: flags for dma-buf creation. We just pass them on.
|
||||
* @prime_fd: The returned file descriptor.
|
||||
*
|
||||
*/
|
||||
int ttm_prime_handle_to_fd(struct ttm_object_file *tfile,
|
||||
uint32_t handle, uint32_t flags,
|
||||
int *prime_fd)
|
||||
{
|
||||
struct ttm_object_device *tdev = tfile->tdev;
|
||||
struct ttm_base_object *base;
|
||||
struct dma_buf *dma_buf;
|
||||
struct ttm_prime_object *prime;
|
||||
int ret;
|
||||
|
||||
base = ttm_base_object_lookup(tfile, handle);
|
||||
if (unlikely(base == NULL ||
|
||||
base->object_type != ttm_prime_type)) {
|
||||
ret = -ENOENT;
|
||||
goto out_unref;
|
||||
}
|
||||
|
||||
prime = container_of(base, struct ttm_prime_object, base);
|
||||
if (unlikely(!base->shareable)) {
|
||||
ret = -EPERM;
|
||||
goto out_unref;
|
||||
}
|
||||
|
||||
ret = mutex_lock_interruptible(&prime->mutex);
|
||||
if (unlikely(ret != 0)) {
|
||||
ret = -ERESTARTSYS;
|
||||
goto out_unref;
|
||||
}
|
||||
|
||||
dma_buf = prime->dma_buf;
|
||||
if (!dma_buf || !get_dma_buf_unless_doomed(dma_buf)) {
|
||||
|
||||
/*
|
||||
* Need to create a new dma_buf, with memory accounting.
|
||||
*/
|
||||
ret = ttm_mem_global_alloc(tdev->mem_glob, tdev->dma_buf_size,
|
||||
false, true);
|
||||
if (unlikely(ret != 0)) {
|
||||
mutex_unlock(&prime->mutex);
|
||||
goto out_unref;
|
||||
}
|
||||
|
||||
dma_buf = dma_buf_export(prime, &tdev->ops,
|
||||
prime->size, flags, NULL);
|
||||
if (IS_ERR(dma_buf)) {
|
||||
ret = PTR_ERR(dma_buf);
|
||||
ttm_mem_global_free(tdev->mem_glob,
|
||||
tdev->dma_buf_size);
|
||||
mutex_unlock(&prime->mutex);
|
||||
goto out_unref;
|
||||
}
|
||||
|
||||
/*
|
||||
* dma_buf has taken the base object reference
|
||||
*/
|
||||
base = NULL;
|
||||
prime->dma_buf = dma_buf;
|
||||
}
|
||||
mutex_unlock(&prime->mutex);
|
||||
|
||||
ret = dma_buf_fd(dma_buf, flags);
|
||||
if (ret >= 0) {
|
||||
*prime_fd = ret;
|
||||
ret = 0;
|
||||
} else
|
||||
dma_buf_put(dma_buf);
|
||||
|
||||
out_unref:
|
||||
if (base)
|
||||
ttm_base_object_unref(&base);
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(ttm_prime_handle_to_fd);
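/*
 * Illustrative sketch of how a driver's PRIME export/import ioctls map onto
 * the two helpers above.  The my_drv_* wrappers and their arguments are
 * hypothetical; flags is forwarded unchanged to dma_buf_fd().
 */
static int my_drv_prime_export(struct ttm_object_file *tfile,
                               uint32_t handle, uint32_t flags, int *fd)
{
        return ttm_prime_handle_to_fd(tfile, handle, flags, fd);
}

static int my_drv_prime_import(struct ttm_object_file *tfile,
                               int fd, u32 *handle)
{
        return ttm_prime_fd_to_handle(tfile, fd, handle);
}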
|
||||
|
||||
/**
|
||||
* ttm_prime_object_init - Initialize a ttm_prime_object
|
||||
*
|
||||
* @tfile: struct ttm_object_file identifying the caller
|
||||
* @size: The size of the dma_bufs we export.
|
||||
* @prime: The object to be initialized.
|
||||
* @shareable: See ttm_base_object_init
|
||||
* @type: See ttm_base_object_init
|
||||
* @refcount_release: See ttm_base_object_init
|
||||
* @ref_obj_release: See ttm_base_object_init
|
||||
*
|
||||
* Initializes an object which is compatible with the drm_prime model
|
||||
* for data sharing between processes and devices.
|
||||
*/
|
||||
int ttm_prime_object_init(struct ttm_object_file *tfile, size_t size,
|
||||
struct ttm_prime_object *prime, bool shareable,
|
||||
enum ttm_object_type type,
|
||||
void (*refcount_release) (struct ttm_base_object **),
|
||||
void (*ref_obj_release) (struct ttm_base_object *,
|
||||
enum ttm_ref_type ref_type))
|
||||
{
|
||||
mutex_init(&prime->mutex);
|
||||
prime->size = PAGE_ALIGN(size);
|
||||
prime->real_type = type;
|
||||
prime->dma_buf = NULL;
|
||||
prime->refcount_release = refcount_release;
|
||||
return ttm_base_object_init(tfile, &prime->base, shareable,
|
||||
ttm_prime_type,
|
||||
ttm_prime_refcount_release,
|
||||
ref_obj_release);
|
||||
}
|
||||
EXPORT_SYMBOL(ttm_prime_object_init);
|
||||
940
drivers/gpu/drm/ttm/ttm_page_alloc.c
Normal file
/*
|
||||
* Copyright (c) Red Hat Inc.
|
||||
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sub license,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the
|
||||
* next paragraph) shall be included in all copies or substantial portions
|
||||
* of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors: Dave Airlie <airlied@redhat.com>
|
||||
* Jerome Glisse <jglisse@redhat.com>
|
||||
* Pauli Nieminen <suokkos@gmail.com>
|
||||
*/
|
||||
|
||||
/* simple list based uncached page pool
|
||||
* - Pool collects recently freed pages for reuse
|
||||
* - Use page->lru to keep a free list
|
||||
* - doesn't track currently in use pages
|
||||
*/
|
||||
|
||||
#define pr_fmt(fmt) "[TTM] " fmt
|
||||
|
||||
#include <linux/list.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/highmem.h>
|
||||
#include <linux/mm_types.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/seq_file.h> /* for seq_printf */
|
||||
#include <linux/slab.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
|
||||
#include <linux/atomic.h>
|
||||
|
||||
#include <drm/ttm/ttm_bo_driver.h>
|
||||
#include <drm/ttm/ttm_page_alloc.h>
|
||||
|
||||
#ifdef TTM_HAS_AGP
|
||||
#include <asm/agp.h>
|
||||
#endif
|
||||
|
||||
#define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *))
|
||||
#define SMALL_ALLOCATION 16
|
||||
#define FREE_ALL_PAGES (~0U)
|
||||
/* times are in msecs */
|
||||
#define PAGE_FREE_INTERVAL 1000
|
||||
|
||||
/**
|
||||
* struct ttm_page_pool - Pool to reuse recently allocated uc/wc pages.
|
||||
*
|
||||
* @lock: Protects the shared pool from concurrent access. Must be used with
|
||||
* irqsave/irqrestore variants because the pool allocator may be called from
|
||||
* delayed work.
|
||||
* @fill_lock: Prevent concurrent calls to fill.
|
||||
* @list: Pool of free uc/wc pages for fast reuse.
|
||||
* @gfp_flags: Flags to pass for alloc_page.
|
||||
* @npages: Number of pages in pool.
|
||||
*/
|
||||
struct ttm_page_pool {
|
||||
spinlock_t lock;
|
||||
bool fill_lock;
|
||||
struct list_head list;
|
||||
gfp_t gfp_flags;
|
||||
unsigned npages;
|
||||
char *name;
|
||||
unsigned long nfrees;
|
||||
unsigned long nrefills;
|
||||
};
|
||||
|
||||
/**
|
||||
* Limits for the pool. They are handled without locks because the only place where
|
||||
* they may change is in sysfs store. They won't have immediate effect anyway
|
||||
* so forcing serialization to access them is pointless.
|
||||
*/
|
||||
|
||||
struct ttm_pool_opts {
|
||||
unsigned alloc_size;
|
||||
unsigned max_size;
|
||||
unsigned small;
|
||||
};
|
||||
|
||||
#define NUM_POOLS 4
|
||||
|
||||
/**
|
||||
* struct ttm_pool_manager - Holds memory pools for fast allocation
|
||||
*
|
||||
* Manager is a read-only object for the pool code so it doesn't need locking.
|
||||
*
|
||||
* @free_interval: minimum number of jiffies between freeing pages from pool.
|
||||
* @page_alloc_inited: reference counting for pool allocation.
|
||||
* @work: Work that is used to shrink the pool. Work is only run when there are
|
||||
* some pages to free.
|
||||
* @small_allocation: Limit, in number of pages, below which an allocation counts as small.
|
||||
*
|
||||
* @pools: All pool objects in use.
|
||||
**/
|
||||
struct ttm_pool_manager {
|
||||
struct kobject kobj;
|
||||
struct shrinker mm_shrink;
|
||||
struct ttm_pool_opts options;
|
||||
|
||||
union {
|
||||
struct ttm_page_pool pools[NUM_POOLS];
|
||||
struct {
|
||||
struct ttm_page_pool wc_pool;
|
||||
struct ttm_page_pool uc_pool;
|
||||
struct ttm_page_pool wc_pool_dma32;
|
||||
struct ttm_page_pool uc_pool_dma32;
|
||||
} ;
|
||||
};
|
||||
};
|
||||
|
||||
static struct attribute ttm_page_pool_max = {
|
||||
.name = "pool_max_size",
|
||||
.mode = S_IRUGO | S_IWUSR
|
||||
};
|
||||
static struct attribute ttm_page_pool_small = {
|
||||
.name = "pool_small_allocation",
|
||||
.mode = S_IRUGO | S_IWUSR
|
||||
};
|
||||
static struct attribute ttm_page_pool_alloc_size = {
|
||||
.name = "pool_allocation_size",
|
||||
.mode = S_IRUGO | S_IWUSR
|
||||
};
|
||||
|
||||
static struct attribute *ttm_pool_attrs[] = {
|
||||
&ttm_page_pool_max,
|
||||
&ttm_page_pool_small,
|
||||
&ttm_page_pool_alloc_size,
|
||||
NULL
|
||||
};
|
||||
|
||||
static void ttm_pool_kobj_release(struct kobject *kobj)
|
||||
{
|
||||
struct ttm_pool_manager *m =
|
||||
container_of(kobj, struct ttm_pool_manager, kobj);
|
||||
kfree(m);
|
||||
}
|
||||
|
||||
static ssize_t ttm_pool_store(struct kobject *kobj,
|
||||
struct attribute *attr, const char *buffer, size_t size)
|
||||
{
|
||||
struct ttm_pool_manager *m =
|
||||
container_of(kobj, struct ttm_pool_manager, kobj);
|
||||
int chars;
|
||||
unsigned val;
|
||||
chars = sscanf(buffer, "%u", &val);
|
||||
if (chars == 0)
|
||||
return size;
|
||||
|
||||
/* Convert kb to number of pages */
|
||||
val = val / (PAGE_SIZE >> 10);
|
||||
|
||||
if (attr == &ttm_page_pool_max)
|
||||
m->options.max_size = val;
|
||||
else if (attr == &ttm_page_pool_small)
|
||||
m->options.small = val;
|
||||
else if (attr == &ttm_page_pool_alloc_size) {
|
||||
if (val > NUM_PAGES_TO_ALLOC*8) {
|
||||
pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
|
||||
NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
|
||||
NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
|
||||
return size;
|
||||
} else if (val > NUM_PAGES_TO_ALLOC) {
|
||||
pr_warn("Setting allocation size to larger than %lu is not recommended\n",
|
||||
NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
|
||||
}
|
||||
m->options.alloc_size = val;
|
||||
}
|
||||
|
||||
return size;
|
||||
}
|
||||
|
||||
static ssize_t ttm_pool_show(struct kobject *kobj,
|
||||
struct attribute *attr, char *buffer)
|
||||
{
|
||||
struct ttm_pool_manager *m =
|
||||
container_of(kobj, struct ttm_pool_manager, kobj);
|
||||
unsigned val = 0;
|
||||
|
||||
if (attr == &ttm_page_pool_max)
|
||||
val = m->options.max_size;
|
||||
else if (attr == &ttm_page_pool_small)
|
||||
val = m->options.small;
|
||||
else if (attr == &ttm_page_pool_alloc_size)
|
||||
val = m->options.alloc_size;
|
||||
|
||||
val = val * (PAGE_SIZE >> 10);
|
||||
|
||||
return snprintf(buffer, PAGE_SIZE, "%u\n", val);
|
||||
}
|
||||
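/*
 * Editor's sketch (not part of this driver): the sysfs store/show handlers
 * above convert between kilobytes and pages with PAGE_SIZE >> 10. This
 * standalone program replays that arithmetic for a 4096-byte page, which is
 * an assumption of the example, not something the driver hard-codes.
 */
#include <stdio.h>

#define DEMO_PAGE_SIZE 4096u

int main(void)
{
	unsigned kb = 8192;                             /* value written to sysfs     */
	unsigned pages = kb / (DEMO_PAGE_SIZE >> 10);   /* store path: kB -> 2048 pages */
	unsigned back = pages * (DEMO_PAGE_SIZE >> 10); /* show path: pages -> 8192 kB  */

	printf("%u kB -> %u pages -> %u kB\n", kb, pages, back);
	return 0;
}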
|
||||
static const struct sysfs_ops ttm_pool_sysfs_ops = {
|
||||
.show = &ttm_pool_show,
|
||||
.store = &ttm_pool_store,
|
||||
};
|
||||
|
||||
static struct kobj_type ttm_pool_kobj_type = {
|
||||
.release = &ttm_pool_kobj_release,
|
||||
.sysfs_ops = &ttm_pool_sysfs_ops,
|
||||
.default_attrs = ttm_pool_attrs,
|
||||
};
|
||||
|
||||
static struct ttm_pool_manager *_manager;
|
||||
|
||||
#ifndef CONFIG_X86
|
||||
static int set_pages_array_wb(struct page **pages, int addrinarray)
|
||||
{
|
||||
#ifdef TTM_HAS_AGP
|
||||
int i;
|
||||
|
||||
for (i = 0; i < addrinarray; i++)
|
||||
unmap_page_from_agp(pages[i]);
|
||||
#endif
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int set_pages_array_wc(struct page **pages, int addrinarray)
|
||||
{
|
||||
#ifdef TTM_HAS_AGP
|
||||
int i;
|
||||
|
||||
for (i = 0; i < addrinarray; i++)
|
||||
map_page_into_agp(pages[i]);
|
||||
#endif
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int set_pages_array_uc(struct page **pages, int addrinarray)
|
||||
{
|
||||
#ifdef TTM_HAS_AGP
|
||||
int i;
|
||||
|
||||
for (i = 0; i < addrinarray; i++)
|
||||
map_page_into_agp(pages[i]);
|
||||
#endif
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
/**
|
||||
* Select the right pool for the requested caching state and ttm flags. */
|
||||
static struct ttm_page_pool *ttm_get_pool(int flags,
|
||||
enum ttm_caching_state cstate)
|
||||
{
|
||||
int pool_index;
|
||||
|
||||
if (cstate == tt_cached)
|
||||
return NULL;
|
||||
|
||||
if (cstate == tt_wc)
|
||||
pool_index = 0x0;
|
||||
else
|
||||
pool_index = 0x1;
|
||||
|
||||
if (flags & TTM_PAGE_FLAG_DMA32)
|
||||
pool_index |= 0x2;
|
||||
|
||||
return &_manager->pools[pool_index];
|
||||
}
|
||||
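/*
 * Editor's sketch (not part of this driver): ttm_get_pool() above encodes
 * the pool index with bit 0 selecting uncached vs. write-combined and bit 1
 * selecting the DMA32 variants, matching the order of the pools union
 * earlier in this file. The loop variables below are made up for the example.
 */
#include <stdio.h>

int main(void)
{
	static const char *name[4] = {
		"wc_pool", "uc_pool", "wc_pool_dma32", "uc_pool_dma32"
	};
	int uncached, dma32;

	for (dma32 = 0; dma32 <= 1; dma32++)
		for (uncached = 0; uncached <= 1; uncached++) {
			int index = uncached | (dma32 << 1); /* bit 0: caching, bit 1: DMA32 */

			printf("uncached=%d dma32=%d -> pools[%d] (%s)\n",
			       uncached, dma32, index, name[index]);
		}
	return 0;
}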
|
||||
/* set memory back to wb and free the pages. */
|
||||
static void ttm_pages_put(struct page *pages[], unsigned npages)
|
||||
{
|
||||
unsigned i;
|
||||
if (set_pages_array_wb(pages, npages))
|
||||
pr_err("Failed to set %d pages to wb!\n", npages);
|
||||
for (i = 0; i < npages; ++i)
|
||||
__free_page(pages[i]);
|
||||
}
|
||||
|
||||
static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
|
||||
unsigned freed_pages)
|
||||
{
|
||||
pool->npages -= freed_pages;
|
||||
pool->nfrees += freed_pages;
|
||||
}
|
||||
|
||||
/**
|
||||
* Free pages from pool.
|
||||
*
|
||||
* To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
|
||||
* number of pages in one go.
|
||||
*
|
||||
* @pool: to free the pages from
|
||||
* @nr_free: number of pages to free; pass FREE_ALL_PAGES to free every page in the pool
|
||||
* @use_static: Safe to use static buffer
|
||||
**/
|
||||
static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
|
||||
bool use_static)
|
||||
{
|
||||
static struct page *static_buf[NUM_PAGES_TO_ALLOC];
|
||||
unsigned long irq_flags;
|
||||
struct page *p;
|
||||
struct page **pages_to_free;
|
||||
unsigned freed_pages = 0,
|
||||
npages_to_free = nr_free;
|
||||
|
||||
if (NUM_PAGES_TO_ALLOC < nr_free)
|
||||
npages_to_free = NUM_PAGES_TO_ALLOC;
|
||||
|
||||
if (use_static)
|
||||
pages_to_free = static_buf;
|
||||
else
|
||||
pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
|
||||
GFP_KERNEL);
|
||||
if (!pages_to_free) {
|
||||
pr_err("Failed to allocate memory for pool free operation\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
restart:
|
||||
spin_lock_irqsave(&pool->lock, irq_flags);
|
||||
|
||||
list_for_each_entry_reverse(p, &pool->list, lru) {
|
||||
if (freed_pages >= npages_to_free)
|
||||
break;
|
||||
|
||||
pages_to_free[freed_pages++] = p;
|
||||
/* We can only remove NUM_PAGES_TO_ALLOC at a time. */
|
||||
if (freed_pages >= NUM_PAGES_TO_ALLOC) {
|
||||
/* remove range of pages from the pool */
|
||||
__list_del(p->lru.prev, &pool->list);
|
||||
|
||||
ttm_pool_update_free_locked(pool, freed_pages);
|
||||
/**
|
||||
* Because changing page caching is costly
|
||||
* we unlock the pool to prevent stalling.
|
||||
*/
|
||||
spin_unlock_irqrestore(&pool->lock, irq_flags);
|
||||
|
||||
ttm_pages_put(pages_to_free, freed_pages);
|
||||
if (likely(nr_free != FREE_ALL_PAGES))
|
||||
nr_free -= freed_pages;
|
||||
|
||||
if (NUM_PAGES_TO_ALLOC >= nr_free)
|
||||
npages_to_free = nr_free;
|
||||
else
|
||||
npages_to_free = NUM_PAGES_TO_ALLOC;
|
||||
|
||||
freed_pages = 0;
|
||||
|
||||
/* free all so restart the processing */
|
||||
if (nr_free)
|
||||
goto restart;
|
||||
|
||||
/* Not allowed to fall through or break because
|
||||
* following context is inside spinlock while we are
|
||||
* outside here.
|
||||
*/
|
||||
goto out;
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
/* remove range of pages from the pool */
|
||||
if (freed_pages) {
|
||||
__list_del(&p->lru, &pool->list);
|
||||
|
||||
ttm_pool_update_free_locked(pool, freed_pages);
|
||||
nr_free -= freed_pages;
|
||||
}
|
||||
|
||||
spin_unlock_irqrestore(&pool->lock, irq_flags);
|
||||
|
||||
if (freed_pages)
|
||||
ttm_pages_put(pages_to_free, freed_pages);
|
||||
out:
|
||||
if (pages_to_free != static_buf)
|
||||
kfree(pages_to_free);
|
||||
return nr_free;
|
||||
}
|
||||
|
||||
/**
|
||||
* Callback for mm to request pool to reduce number of page held.
|
||||
*
|
||||
* XXX: (dchinner) Deadlock warning!
|
||||
*
|
||||
* This code is crying out for a shrinker per pool....
|
||||
*/
|
||||
static unsigned long
|
||||
ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
|
||||
{
|
||||
static DEFINE_MUTEX(lock);
|
||||
static unsigned start_pool;
|
||||
unsigned i;
|
||||
unsigned pool_offset;
|
||||
struct ttm_page_pool *pool;
|
||||
int shrink_pages = sc->nr_to_scan;
|
||||
unsigned long freed = 0;
|
||||
|
||||
if (!mutex_trylock(&lock))
|
||||
return SHRINK_STOP;
|
||||
pool_offset = ++start_pool % NUM_POOLS;
|
||||
/* select start pool in round robin fashion */
|
||||
for (i = 0; i < NUM_POOLS; ++i) {
|
||||
unsigned nr_free = shrink_pages;
|
||||
if (shrink_pages == 0)
|
||||
break;
|
||||
pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
|
||||
/* OK to use static buffer since global mutex is held. */
|
||||
shrink_pages = ttm_page_pool_free(pool, nr_free, true);
|
||||
freed += nr_free - shrink_pages;
|
||||
}
|
||||
mutex_unlock(&lock);
|
||||
return freed;
|
||||
}
|
||||
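/*
 * Editor's sketch (not part of this driver): the shrinker above starts from
 * a different pool on every invocation (round robin) so one pool is not
 * always drained first. This standalone loop prints the visit order for a
 * few successive calls; NUM_POOLS is assumed to be 4, as in this file.
 */
#include <stdio.h>

#define DEMO_NUM_POOLS 4

int main(void)
{
	unsigned start_pool = 0; /* stands in for the driver's static start_pool */
	int call, i;

	for (call = 0; call < 3; call++) {
		unsigned pool_offset = ++start_pool % DEMO_NUM_POOLS;

		printf("call %d visits pools:", call);
		for (i = 0; i < DEMO_NUM_POOLS; i++)
			printf(" %u", (i + pool_offset) % DEMO_NUM_POOLS);
		printf("\n");
	}
	return 0;
}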
|
||||
|
||||
static unsigned long
|
||||
ttm_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
|
||||
{
|
||||
unsigned i;
|
||||
unsigned long count = 0;
|
||||
|
||||
for (i = 0; i < NUM_POOLS; ++i)
|
||||
count += _manager->pools[i].npages;
|
||||
|
||||
return count;
|
||||
}
|
||||
|
||||
static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
|
||||
{
|
||||
manager->mm_shrink.count_objects = ttm_pool_shrink_count;
|
||||
manager->mm_shrink.scan_objects = ttm_pool_shrink_scan;
|
||||
manager->mm_shrink.seeks = 1;
|
||||
register_shrinker(&manager->mm_shrink);
|
||||
}
|
||||
|
||||
static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
|
||||
{
|
||||
unregister_shrinker(&manager->mm_shrink);
|
||||
}
|
||||
|
||||
static int ttm_set_pages_caching(struct page **pages,
|
||||
enum ttm_caching_state cstate, unsigned cpages)
|
||||
{
|
||||
int r = 0;
|
||||
/* Set page caching */
|
||||
switch (cstate) {
|
||||
case tt_uncached:
|
||||
r = set_pages_array_uc(pages, cpages);
|
||||
if (r)
|
||||
pr_err("Failed to set %d pages to uc!\n", cpages);
|
||||
break;
|
||||
case tt_wc:
|
||||
r = set_pages_array_wc(pages, cpages);
|
||||
if (r)
|
||||
pr_err("Failed to set %d pages to wc!\n", cpages);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
return r;
|
||||
}
|
||||
|
||||
/**
|
||||
* Free the pages that failed to change the caching state. If any pages have
* already changed their caching state, put them back into the pool.
|
||||
*/
|
||||
static void ttm_handle_caching_state_failure(struct list_head *pages,
|
||||
int ttm_flags, enum ttm_caching_state cstate,
|
||||
struct page **failed_pages, unsigned cpages)
|
||||
{
|
||||
unsigned i;
|
||||
/* Failed pages have to be freed */
|
||||
for (i = 0; i < cpages; ++i) {
|
||||
list_del(&failed_pages[i]->lru);
|
||||
__free_page(failed_pages[i]);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Allocate new pages with correct caching.
|
||||
*
|
||||
* This function is reentrant if caller updates count depending on number of
|
||||
* pages returned in pages array.
|
||||
*/
|
||||
static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
|
||||
int ttm_flags, enum ttm_caching_state cstate, unsigned count)
|
||||
{
|
||||
struct page **caching_array;
|
||||
struct page *p;
|
||||
int r = 0;
|
||||
unsigned i, cpages;
|
||||
unsigned max_cpages = min(count,
|
||||
(unsigned)(PAGE_SIZE/sizeof(struct page *)));
|
||||
|
||||
/* allocate array for page caching change */
|
||||
caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
|
||||
|
||||
if (!caching_array) {
|
||||
pr_err("Unable to allocate table for new pages\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
for (i = 0, cpages = 0; i < count; ++i) {
|
||||
p = alloc_page(gfp_flags);
|
||||
|
||||
if (!p) {
|
||||
pr_err("Unable to get page %u\n", i);
|
||||
|
||||
/* store already allocated pages in the pool after
|
||||
* setting the caching state */
|
||||
if (cpages) {
|
||||
r = ttm_set_pages_caching(caching_array,
|
||||
cstate, cpages);
|
||||
if (r)
|
||||
ttm_handle_caching_state_failure(pages,
|
||||
ttm_flags, cstate,
|
||||
caching_array, cpages);
|
||||
}
|
||||
r = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_HIGHMEM
|
||||
/* gfp flags of a highmem page should never be dma32, so we
* should be fine in such a case
*/
|
||||
if (!PageHighMem(p))
|
||||
#endif
|
||||
{
|
||||
caching_array[cpages++] = p;
|
||||
if (cpages == max_cpages) {
|
||||
|
||||
r = ttm_set_pages_caching(caching_array,
|
||||
cstate, cpages);
|
||||
if (r) {
|
||||
ttm_handle_caching_state_failure(pages,
|
||||
ttm_flags, cstate,
|
||||
caching_array, cpages);
|
||||
goto out;
|
||||
}
|
||||
cpages = 0;
|
||||
}
|
||||
}
|
||||
|
||||
list_add(&p->lru, pages);
|
||||
}
|
||||
|
||||
if (cpages) {
|
||||
r = ttm_set_pages_caching(caching_array, cstate, cpages);
|
||||
if (r)
|
||||
ttm_handle_caching_state_failure(pages,
|
||||
ttm_flags, cstate,
|
||||
caching_array, cpages);
|
||||
}
|
||||
out:
|
||||
kfree(caching_array);
|
||||
|
||||
return r;
|
||||
}
|
||||
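/*
 * Editor's sketch (not part of this driver): ttm_alloc_new_pages() above
 * changes caching attributes in batches of at most
 * PAGE_SIZE / sizeof(struct page *) pages. The figures below assume a
 * 4096-byte page and 8-byte pointers, which is an assumption of the example.
 */
#include <stdio.h>

int main(void)
{
	unsigned page_size = 4096, ptr_size = 8;
	unsigned batch = page_size / ptr_size;  /* 512 pages per caching batch */
	unsigned count = 1300;                  /* hypothetical request size   */

	printf("batch size: %u pages\n", batch);
	printf("a %u page request needs %u caching transition(s)\n",
	       count, (count + batch - 1) / batch); /* prints 3 */
	return 0;
}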
|
||||
/**
|
||||
* Fill the given pool if there aren't enough pages and the requested number of
|
||||
* pages is small.
|
||||
*/
|
||||
static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
|
||||
int ttm_flags, enum ttm_caching_state cstate, unsigned count,
|
||||
unsigned long *irq_flags)
|
||||
{
|
||||
struct page *p;
|
||||
int r;
|
||||
unsigned cpages = 0;
|
||||
/**
|
||||
* Only allow one pool fill operation at a time.
|
||||
* If the pool doesn't have enough pages for the allocation, new pages are
* allocated from outside of the pool.
|
||||
*/
|
||||
if (pool->fill_lock)
|
||||
return;
|
||||
|
||||
pool->fill_lock = true;
|
||||
|
||||
/* If the allocation request is small and there are not enough
* pages in the pool, fill the pool up first. */
|
||||
if (count < _manager->options.small
|
||||
&& count > pool->npages) {
|
||||
struct list_head new_pages;
|
||||
unsigned alloc_size = _manager->options.alloc_size;
|
||||
|
||||
/**
|
||||
* Can't change page caching if in irqsave context. We have to
|
||||
* drop the pool->lock.
|
||||
*/
|
||||
spin_unlock_irqrestore(&pool->lock, *irq_flags);
|
||||
|
||||
INIT_LIST_HEAD(&new_pages);
|
||||
r = ttm_alloc_new_pages(&new_pages, pool->gfp_flags, ttm_flags,
|
||||
cstate, alloc_size);
|
||||
spin_lock_irqsave(&pool->lock, *irq_flags);
|
||||
|
||||
if (!r) {
|
||||
list_splice(&new_pages, &pool->list);
|
||||
++pool->nrefills;
|
||||
pool->npages += alloc_size;
|
||||
} else {
|
||||
pr_err("Failed to fill pool (%p)\n", pool);
|
||||
/* If we have any pages left put them to the pool. */
|
||||
list_for_each_entry(p, &pool->list, lru) {
|
||||
++cpages;
|
||||
}
|
||||
list_splice(&new_pages, &pool->list);
|
||||
pool->npages += cpages;
|
||||
}
|
||||
|
||||
}
|
||||
pool->fill_lock = false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Cut 'count' number of pages from the pool and put them on the return list.
|
||||
*
|
||||
* @return count of pages still required to fulfill the request.
|
||||
*/
|
||||
static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
|
||||
struct list_head *pages,
|
||||
int ttm_flags,
|
||||
enum ttm_caching_state cstate,
|
||||
unsigned count)
|
||||
{
|
||||
unsigned long irq_flags;
|
||||
struct list_head *p;
|
||||
unsigned i;
|
||||
|
||||
spin_lock_irqsave(&pool->lock, irq_flags);
|
||||
ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count, &irq_flags);
|
||||
|
||||
if (count >= pool->npages) {
|
||||
/* take all pages from the pool */
|
||||
list_splice_init(&pool->list, pages);
|
||||
count -= pool->npages;
|
||||
pool->npages = 0;
|
||||
goto out;
|
||||
}
|
||||
/* Find the last page to include for the requested number of pages. Walk
* the list from whichever end is closer to halve the search space. */
|
||||
if (count <= pool->npages/2) {
|
||||
i = 0;
|
||||
list_for_each(p, &pool->list) {
|
||||
if (++i == count)
|
||||
break;
|
||||
}
|
||||
} else {
|
||||
i = pool->npages + 1;
|
||||
list_for_each_prev(p, &pool->list) {
|
||||
if (--i == count)
|
||||
break;
|
||||
}
|
||||
}
|
||||
/* Cut 'count' number of pages from the pool */
|
||||
list_cut_position(pages, &pool->list, p);
|
||||
pool->npages -= count;
|
||||
count = 0;
|
||||
out:
|
||||
spin_unlock_irqrestore(&pool->lock, irq_flags);
|
||||
return count;
|
||||
}
|
||||
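/*
 * Editor's sketch (not part of this driver): ttm_page_pool_get_pages() above
 * walks the pool list from the front when it needs at most half of the pages
 * and from the back otherwise, so the cut point is never far away. This
 * standalone helper reports which end would be used and an approximate step
 * count; the names and numbers are made up for the example.
 */
#include <stdio.h>

static unsigned steps_to_cut(unsigned npages, unsigned count, const char **end)
{
	if (count <= npages / 2) {
		*end = "front";
		return count;            /* forward walk visits about count nodes      */
	}
	*end = "back";
	return npages - count;           /* backward walk visits about npages - count  */
}

int main(void)
{
	const char *end;
	unsigned npages = 100, counts[] = { 10, 50, 90 }, i;

	for (i = 0; i < sizeof(counts) / sizeof(counts[0]); i++) {
		unsigned s = steps_to_cut(npages, counts[i], &end);

		printf("count=%u: walk from the %s, about %u step(s)\n",
		       counts[i], end, s);
	}
	return 0;
}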
|
||||
/* Put all pages in pages list to correct pool to wait for reuse */
|
||||
static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
|
||||
enum ttm_caching_state cstate)
|
||||
{
|
||||
unsigned long irq_flags;
|
||||
struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
|
||||
unsigned i;
|
||||
|
||||
if (pool == NULL) {
|
||||
/* No pool for this memory type so free the pages */
|
||||
for (i = 0; i < npages; i++) {
|
||||
if (pages[i]) {
|
||||
if (page_count(pages[i]) != 1)
|
||||
pr_err("Erroneous page count. Leaking pages.\n");
|
||||
__free_page(pages[i]);
|
||||
pages[i] = NULL;
|
||||
}
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
spin_lock_irqsave(&pool->lock, irq_flags);
|
||||
for (i = 0; i < npages; i++) {
|
||||
if (pages[i]) {
|
||||
if (page_count(pages[i]) != 1)
|
||||
pr_err("Erroneous page count. Leaking pages.\n");
|
||||
list_add_tail(&pages[i]->lru, &pool->list);
|
||||
pages[i] = NULL;
|
||||
pool->npages++;
|
||||
}
|
||||
}
|
||||
/* Check that we don't go over the pool limit */
|
||||
npages = 0;
|
||||
if (pool->npages > _manager->options.max_size) {
|
||||
npages = pool->npages - _manager->options.max_size;
|
||||
/* free at least NUM_PAGES_TO_ALLOC number of pages
|
||||
* to reduce calls to set_memory_wb */
|
||||
if (npages < NUM_PAGES_TO_ALLOC)
|
||||
npages = NUM_PAGES_TO_ALLOC;
|
||||
}
|
||||
spin_unlock_irqrestore(&pool->lock, irq_flags);
|
||||
if (npages)
|
||||
ttm_page_pool_free(pool, npages, false);
|
||||
}
|
||||
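/*
 * Editor's sketch (not part of this driver): when returning pages pushes a
 * pool past options.max_size, ttm_put_pages() above trims the excess, but
 * never fewer than NUM_PAGES_TO_ALLOC pages at a time, to amortize the cost
 * of switching memory back to write-back. The constants below are assumed
 * values chosen for the example.
 */
#include <stdio.h>

#define DEMO_NUM_PAGES_TO_ALLOC 512

static unsigned pages_to_trim(unsigned npages, unsigned max_size)
{
	unsigned excess;

	if (npages <= max_size)
		return 0;                /* under the limit: nothing to free */
	excess = npages - max_size;
	return excess < DEMO_NUM_PAGES_TO_ALLOC ? DEMO_NUM_PAGES_TO_ALLOC : excess;
}

int main(void)
{
	printf("%u\n", pages_to_trim(1000, 2000)); /* 0    */
	printf("%u\n", pages_to_trim(2100, 2000)); /* 512  */
	printf("%u\n", pages_to_trim(3000, 2000)); /* 1000 */
	return 0;
}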
|
||||
/*
|
||||
* On success pages list will hold count number of correctly
|
||||
* cached pages.
|
||||
*/
|
||||
static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
|
||||
enum ttm_caching_state cstate)
|
||||
{
|
||||
struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
|
||||
struct list_head plist;
|
||||
struct page *p = NULL;
|
||||
gfp_t gfp_flags = GFP_USER;
|
||||
unsigned count;
|
||||
int r;
|
||||
|
||||
/* set zero flag for page allocation if required */
|
||||
if (flags & TTM_PAGE_FLAG_ZERO_ALLOC)
|
||||
gfp_flags |= __GFP_ZERO;
|
||||
|
||||
/* No pool for cached pages */
|
||||
if (pool == NULL) {
|
||||
if (flags & TTM_PAGE_FLAG_DMA32)
|
||||
gfp_flags |= GFP_DMA32;
|
||||
else
|
||||
gfp_flags |= GFP_HIGHUSER;
|
||||
|
||||
for (r = 0; r < npages; ++r) {
|
||||
p = alloc_page(gfp_flags);
|
||||
if (!p) {
|
||||
|
||||
pr_err("Unable to allocate page\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
pages[r] = p;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* combine zero flag to pool flags */
|
||||
gfp_flags |= pool->gfp_flags;
|
||||
|
||||
/* First we take pages from the pool */
|
||||
INIT_LIST_HEAD(&plist);
|
||||
npages = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages);
|
||||
count = 0;
|
||||
list_for_each_entry(p, &plist, lru) {
|
||||
pages[count++] = p;
|
||||
}
|
||||
|
||||
/* clear the pages coming from the pool if requested */
|
||||
if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
|
||||
list_for_each_entry(p, &plist, lru) {
|
||||
if (PageHighMem(p))
|
||||
clear_highpage(p);
|
||||
else
|
||||
clear_page(page_address(p));
|
||||
}
|
||||
}
|
||||
|
||||
/* If pool didn't have enough pages allocate new one. */
|
||||
if (npages > 0) {
|
||||
/* ttm_alloc_new_pages doesn't reference pool so we can run
|
||||
* multiple requests in parallel.
|
||||
**/
|
||||
INIT_LIST_HEAD(&plist);
|
||||
r = ttm_alloc_new_pages(&plist, gfp_flags, flags, cstate, npages);
|
||||
list_for_each_entry(p, &plist, lru) {
|
||||
pages[count++] = p;
|
||||
}
|
||||
if (r) {
|
||||
/* If there are any pages in the list, put them back into
* the pool. */
|
||||
pr_err("Failed to allocate extra pages for large request\n");
|
||||
ttm_put_pages(pages, count, flags, cstate);
|
||||
return r;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, gfp_t flags,
|
||||
char *name)
|
||||
{
|
||||
spin_lock_init(&pool->lock);
|
||||
pool->fill_lock = false;
|
||||
INIT_LIST_HEAD(&pool->list);
|
||||
pool->npages = pool->nfrees = 0;
|
||||
pool->gfp_flags = flags;
|
||||
pool->name = name;
|
||||
}
|
||||
|
||||
int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
|
||||
{
|
||||
int ret;
|
||||
|
||||
WARN_ON(_manager);
|
||||
|
||||
pr_info("Initializing pool allocator\n");
|
||||
|
||||
_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
|
||||
|
||||
ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc");
|
||||
|
||||
ttm_page_pool_init_locked(&_manager->uc_pool, GFP_HIGHUSER, "uc");
|
||||
|
||||
ttm_page_pool_init_locked(&_manager->wc_pool_dma32,
|
||||
GFP_USER | GFP_DMA32, "wc dma");
|
||||
|
||||
ttm_page_pool_init_locked(&_manager->uc_pool_dma32,
|
||||
GFP_USER | GFP_DMA32, "uc dma");
|
||||
|
||||
_manager->options.max_size = max_pages;
|
||||
_manager->options.small = SMALL_ALLOCATION;
|
||||
_manager->options.alloc_size = NUM_PAGES_TO_ALLOC;
|
||||
|
||||
ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
|
||||
&glob->kobj, "pool");
|
||||
if (unlikely(ret != 0)) {
|
||||
kobject_put(&_manager->kobj);
|
||||
_manager = NULL;
|
||||
return ret;
|
||||
}
|
||||
|
||||
ttm_pool_mm_shrink_init(_manager);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void ttm_page_alloc_fini(void)
|
||||
{
|
||||
int i;
|
||||
|
||||
pr_info("Finalizing pool allocator\n");
|
||||
ttm_pool_mm_shrink_fini(_manager);
|
||||
|
||||
/* OK to use static buffer since global mutex is no longer used. */
|
||||
for (i = 0; i < NUM_POOLS; ++i)
|
||||
ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES, true);
|
||||
|
||||
kobject_put(&_manager->kobj);
|
||||
_manager = NULL;
|
||||
}
|
||||
|
||||
int ttm_pool_populate(struct ttm_tt *ttm)
|
||||
{
|
||||
struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
|
||||
unsigned i;
|
||||
int ret;
|
||||
|
||||
if (ttm->state != tt_unpopulated)
|
||||
return 0;
|
||||
|
||||
for (i = 0; i < ttm->num_pages; ++i) {
|
||||
ret = ttm_get_pages(&ttm->pages[i], 1,
|
||||
ttm->page_flags,
|
||||
ttm->caching_state);
|
||||
if (ret != 0) {
|
||||
ttm_pool_unpopulate(ttm);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
|
||||
false, false);
|
||||
if (unlikely(ret != 0)) {
|
||||
ttm_pool_unpopulate(ttm);
|
||||
return -ENOMEM;
|
||||
}
|
||||
}
|
||||
|
||||
if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
|
||||
ret = ttm_tt_swapin(ttm);
|
||||
if (unlikely(ret != 0)) {
|
||||
ttm_pool_unpopulate(ttm);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
ttm->state = tt_unbound;
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(ttm_pool_populate);
|
||||
|
||||
void ttm_pool_unpopulate(struct ttm_tt *ttm)
|
||||
{
|
||||
unsigned i;
|
||||
|
||||
for (i = 0; i < ttm->num_pages; ++i) {
|
||||
if (ttm->pages[i]) {
|
||||
ttm_mem_global_free_page(ttm->glob->mem_glob,
|
||||
ttm->pages[i]);
|
||||
ttm_put_pages(&ttm->pages[i], 1,
|
||||
ttm->page_flags,
|
||||
ttm->caching_state);
|
||||
}
|
||||
}
|
||||
ttm->state = tt_unpopulated;
|
||||
}
|
||||
EXPORT_SYMBOL(ttm_pool_unpopulate);
|
||||
|
||||
int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
|
||||
{
|
||||
struct ttm_page_pool *p;
|
||||
unsigned i;
|
||||
char *h[] = {"pool", "refills", "pages freed", "size"};
|
||||
if (!_manager) {
|
||||
seq_printf(m, "No pool allocator running.\n");
|
||||
return 0;
|
||||
}
|
||||
seq_printf(m, "%6s %12s %13s %8s\n",
|
||||
h[0], h[1], h[2], h[3]);
|
||||
for (i = 0; i < NUM_POOLS; ++i) {
|
||||
p = &_manager->pools[i];
|
||||
|
||||
seq_printf(m, "%6s %12ld %13ld %8d\n",
|
||||
p->name, p->nrefills,
|
||||
p->nfrees, p->npages);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(ttm_page_alloc_debugfs);
|
||||
1158
drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
Normal file
File diff suppressed because it is too large
402
drivers/gpu/drm/ttm/ttm_tt.c
Normal file
@@ -0,0 +1,402 @@
/**************************************************************************
|
||||
*
|
||||
* Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the
|
||||
* "Software"), to deal in the Software without restriction, including
|
||||
* without limitation the rights to use, copy, modify, merge, publish,
|
||||
* distribute, sub license, and/or sell copies of the Software, and to
|
||||
* permit persons to whom the Software is furnished to do so, subject to
|
||||
* the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the
|
||||
* next paragraph) shall be included in all copies or substantial portions
|
||||
* of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
|
||||
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
|
||||
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
|
||||
* USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
**************************************************************************/
|
||||
/*
|
||||
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
|
||||
*/
|
||||
|
||||
#define pr_fmt(fmt) "[TTM] " fmt
|
||||
|
||||
#include <linux/sched.h>
|
||||
#include <linux/highmem.h>
|
||||
#include <linux/pagemap.h>
|
||||
#include <linux/shmem_fs.h>
|
||||
#include <linux/file.h>
|
||||
#include <linux/swap.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/export.h>
|
||||
#include <drm/drm_cache.h>
|
||||
#include <drm/drm_mem_util.h>
|
||||
#include <drm/ttm/ttm_module.h>
|
||||
#include <drm/ttm/ttm_bo_driver.h>
|
||||
#include <drm/ttm/ttm_placement.h>
|
||||
#include <drm/ttm/ttm_page_alloc.h>
|
||||
|
||||
/**
|
||||
* Allocates storage for pointers to the pages that back the ttm.
|
||||
*/
|
||||
static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
|
||||
{
|
||||
ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(void*));
|
||||
}
|
||||
|
||||
static void ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
|
||||
{
|
||||
ttm->ttm.pages = drm_calloc_large(ttm->ttm.num_pages,
|
||||
sizeof(*ttm->ttm.pages) +
|
||||
sizeof(*ttm->dma_address) +
|
||||
sizeof(*ttm->cpu_address));
|
||||
ttm->cpu_address = (void *) (ttm->ttm.pages + ttm->ttm.num_pages);
|
||||
ttm->dma_address = (void *) (ttm->cpu_address + ttm->ttm.num_pages);
|
||||
}
|
||||
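/*
 * Editor's sketch (not part of this driver): ttm_dma_tt_alloc_page_directory()
 * above makes one allocation and carves it into three parallel arrays. This
 * standalone version does the same with calloc(); the element types are
 * stand-ins for struct page *, void * and dma_addr_t, chosen for the example.
 */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	unsigned long num_pages = 4;
	void **pages;                    /* stands in for struct page *[] */
	void **cpu_address;              /* stands in for void *[]        */
	unsigned long long *dma_address; /* stands in for dma_addr_t[]    */

	pages = calloc(num_pages,
		       sizeof(*pages) + sizeof(*cpu_address) + sizeof(*dma_address));
	if (!pages)
		return 1;

	/* carve the single buffer into the three arrays */
	cpu_address = (void **)(pages + num_pages);
	dma_address = (unsigned long long *)(cpu_address + num_pages);

	printf("pages=%p cpu=%p dma=%p\n",
	       (void *)pages, (void *)cpu_address, (void *)dma_address);
	free(pages);
	return 0;
}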
|
||||
#ifdef CONFIG_X86
|
||||
static inline int ttm_tt_set_page_caching(struct page *p,
|
||||
enum ttm_caching_state c_old,
|
||||
enum ttm_caching_state c_new)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
if (PageHighMem(p))
|
||||
return 0;
|
||||
|
||||
if (c_old != tt_cached) {
|
||||
/* p isn't in the default caching state, set it to
|
||||
* writeback first to free its current memtype. */
|
||||
|
||||
ret = set_pages_wb(p, 1);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (c_new == tt_wc)
|
||||
ret = set_memory_wc((unsigned long) page_address(p), 1);
|
||||
else if (c_new == tt_uncached)
|
||||
ret = set_pages_uc(p, 1);
|
||||
|
||||
return ret;
|
||||
}
|
||||
#else /* CONFIG_X86 */
|
||||
static inline int ttm_tt_set_page_caching(struct page *p,
|
||||
enum ttm_caching_state c_old,
|
||||
enum ttm_caching_state c_new)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
#endif /* CONFIG_X86 */
|
||||
|
||||
/*
|
||||
* Change caching policy for the linear kernel map
|
||||
* for a range of pages in a ttm.
|
||||
*/
|
||||
|
||||
static int ttm_tt_set_caching(struct ttm_tt *ttm,
|
||||
enum ttm_caching_state c_state)
|
||||
{
|
||||
int i, j;
|
||||
struct page *cur_page;
|
||||
int ret;
|
||||
|
||||
if (ttm->caching_state == c_state)
|
||||
return 0;
|
||||
|
||||
if (ttm->state == tt_unpopulated) {
|
||||
/* Change caching but don't populate */
|
||||
ttm->caching_state = c_state;
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (ttm->caching_state == tt_cached)
|
||||
drm_clflush_pages(ttm->pages, ttm->num_pages);
|
||||
|
||||
for (i = 0; i < ttm->num_pages; ++i) {
|
||||
cur_page = ttm->pages[i];
|
||||
if (likely(cur_page != NULL)) {
|
||||
ret = ttm_tt_set_page_caching(cur_page,
|
||||
ttm->caching_state,
|
||||
c_state);
|
||||
if (unlikely(ret != 0))
|
||||
goto out_err;
|
||||
}
|
||||
}
|
||||
|
||||
ttm->caching_state = c_state;
|
||||
|
||||
return 0;
|
||||
|
||||
out_err:
|
||||
for (j = 0; j < i; ++j) {
|
||||
cur_page = ttm->pages[j];
|
||||
if (likely(cur_page != NULL)) {
|
||||
(void)ttm_tt_set_page_caching(cur_page, c_state,
|
||||
ttm->caching_state);
|
||||
}
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
|
||||
{
|
||||
enum ttm_caching_state state;
|
||||
|
||||
if (placement & TTM_PL_FLAG_WC)
|
||||
state = tt_wc;
|
||||
else if (placement & TTM_PL_FLAG_UNCACHED)
|
||||
state = tt_uncached;
|
||||
else
|
||||
state = tt_cached;
|
||||
|
||||
return ttm_tt_set_caching(ttm, state);
|
||||
}
|
||||
EXPORT_SYMBOL(ttm_tt_set_placement_caching);
|
||||
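/*
 * Editor's sketch (not part of this driver): ttm_tt_set_placement_caching()
 * above picks write-combined before uncached before cached when several
 * placement flags are set. The flag values here are arbitrary stand-ins for
 * the example, not the real TTM_PL_FLAG_* values.
 */
#include <stdio.h>

#define DEMO_PL_WC       0x1
#define DEMO_PL_UNCACHED 0x2

static const char *demo_caching(unsigned placement)
{
	if (placement & DEMO_PL_WC)
		return "wc";
	if (placement & DEMO_PL_UNCACHED)
		return "uncached";
	return "cached";
}

int main(void)
{
	printf("%s\n", demo_caching(DEMO_PL_WC | DEMO_PL_UNCACHED)); /* wc       */
	printf("%s\n", demo_caching(DEMO_PL_UNCACHED));              /* uncached */
	printf("%s\n", demo_caching(0));                             /* cached   */
	return 0;
}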
|
||||
void ttm_tt_destroy(struct ttm_tt *ttm)
|
||||
{
|
||||
if (unlikely(ttm == NULL))
|
||||
return;
|
||||
|
||||
if (ttm->state == tt_bound) {
|
||||
ttm_tt_unbind(ttm);
|
||||
}
|
||||
|
||||
if (ttm->state == tt_unbound)
|
||||
ttm_tt_unpopulate(ttm);
|
||||
|
||||
if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
|
||||
ttm->swap_storage)
|
||||
fput(ttm->swap_storage);
|
||||
|
||||
ttm->swap_storage = NULL;
|
||||
ttm->func->destroy(ttm);
|
||||
}
|
||||
|
||||
int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
|
||||
unsigned long size, uint32_t page_flags,
|
||||
struct page *dummy_read_page)
|
||||
{
|
||||
ttm->bdev = bdev;
|
||||
ttm->glob = bdev->glob;
|
||||
ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
|
||||
ttm->caching_state = tt_cached;
|
||||
ttm->page_flags = page_flags;
|
||||
ttm->dummy_read_page = dummy_read_page;
|
||||
ttm->state = tt_unpopulated;
|
||||
ttm->swap_storage = NULL;
|
||||
|
||||
ttm_tt_alloc_page_directory(ttm);
|
||||
if (!ttm->pages) {
|
||||
ttm_tt_destroy(ttm);
|
||||
pr_err("Failed allocating page table\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(ttm_tt_init);
|
||||
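/*
 * Editor's sketch (not part of this driver): ttm_tt_init() above rounds the
 * object size up to whole pages with (size + PAGE_SIZE - 1) >> PAGE_SHIFT.
 * The 4 KiB page size below is an assumption of the example.
 */
#include <stdio.h>

#define DEMO_PAGE_SHIFT 12
#define DEMO_PAGE_SIZE  (1ul << DEMO_PAGE_SHIFT)

int main(void)
{
	unsigned long sizes[] = { 1, 4096, 4097, 10000 };
	unsigned i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("size %5lu -> %lu page(s)\n", sizes[i],
		       (sizes[i] + DEMO_PAGE_SIZE - 1) >> DEMO_PAGE_SHIFT);
	/* prints 1, 1, 2 and 3 pages respectively */
	return 0;
}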
|
||||
void ttm_tt_fini(struct ttm_tt *ttm)
|
||||
{
|
||||
drm_free_large(ttm->pages);
|
||||
ttm->pages = NULL;
|
||||
}
|
||||
EXPORT_SYMBOL(ttm_tt_fini);
|
||||
|
||||
int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
|
||||
unsigned long size, uint32_t page_flags,
|
||||
struct page *dummy_read_page)
|
||||
{
|
||||
struct ttm_tt *ttm = &ttm_dma->ttm;
|
||||
|
||||
ttm->bdev = bdev;
|
||||
ttm->glob = bdev->glob;
|
||||
ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
|
||||
ttm->caching_state = tt_cached;
|
||||
ttm->page_flags = page_flags;
|
||||
ttm->dummy_read_page = dummy_read_page;
|
||||
ttm->state = tt_unpopulated;
|
||||
ttm->swap_storage = NULL;
|
||||
|
||||
INIT_LIST_HEAD(&ttm_dma->pages_list);
|
||||
ttm_dma_tt_alloc_page_directory(ttm_dma);
|
||||
if (!ttm->pages) {
|
||||
ttm_tt_destroy(ttm);
|
||||
pr_err("Failed allocating page table\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(ttm_dma_tt_init);
|
||||
|
||||
void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
|
||||
{
|
||||
struct ttm_tt *ttm = &ttm_dma->ttm;
|
||||
|
||||
drm_free_large(ttm->pages);
|
||||
ttm->pages = NULL;
|
||||
ttm_dma->cpu_address = NULL;
|
||||
ttm_dma->dma_address = NULL;
|
||||
}
|
||||
EXPORT_SYMBOL(ttm_dma_tt_fini);
|
||||
|
||||
void ttm_tt_unbind(struct ttm_tt *ttm)
|
||||
{
|
||||
int ret;
|
||||
|
||||
if (ttm->state == tt_bound) {
|
||||
ret = ttm->func->unbind(ttm);
|
||||
BUG_ON(ret);
|
||||
ttm->state = tt_unbound;
|
||||
}
|
||||
}
|
||||
|
||||
int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
if (!ttm)
|
||||
return -EINVAL;
|
||||
|
||||
if (ttm->state == tt_bound)
|
||||
return 0;
|
||||
|
||||
ret = ttm->bdev->driver->ttm_tt_populate(ttm);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = ttm->func->bind(ttm, bo_mem);
|
||||
if (unlikely(ret != 0))
|
||||
return ret;
|
||||
|
||||
ttm->state = tt_bound;
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(ttm_tt_bind);
|
||||
|
||||
int ttm_tt_swapin(struct ttm_tt *ttm)
|
||||
{
|
||||
struct address_space *swap_space;
|
||||
struct file *swap_storage;
|
||||
struct page *from_page;
|
||||
struct page *to_page;
|
||||
int i;
|
||||
int ret = -ENOMEM;
|
||||
|
||||
swap_storage = ttm->swap_storage;
|
||||
BUG_ON(swap_storage == NULL);
|
||||
|
||||
swap_space = file_inode(swap_storage)->i_mapping;
|
||||
|
||||
for (i = 0; i < ttm->num_pages; ++i) {
|
||||
from_page = shmem_read_mapping_page(swap_space, i);
|
||||
if (IS_ERR(from_page)) {
|
||||
ret = PTR_ERR(from_page);
|
||||
goto out_err;
|
||||
}
|
||||
to_page = ttm->pages[i];
|
||||
if (unlikely(to_page == NULL))
|
||||
goto out_err;
|
||||
|
||||
copy_highpage(to_page, from_page);
|
||||
page_cache_release(from_page);
|
||||
}
|
||||
|
||||
if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP))
|
||||
fput(swap_storage);
|
||||
ttm->swap_storage = NULL;
|
||||
ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
|
||||
|
||||
return 0;
|
||||
out_err:
|
||||
return ret;
|
||||
}
|
||||
|
||||
int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
|
||||
{
|
||||
struct address_space *swap_space;
|
||||
struct file *swap_storage;
|
||||
struct page *from_page;
|
||||
struct page *to_page;
|
||||
int i;
|
||||
int ret = -ENOMEM;
|
||||
|
||||
BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
|
||||
BUG_ON(ttm->caching_state != tt_cached);
|
||||
|
||||
if (!persistent_swap_storage) {
|
||||
swap_storage = shmem_file_setup("ttm swap",
|
||||
ttm->num_pages << PAGE_SHIFT,
|
||||
0);
|
||||
if (unlikely(IS_ERR(swap_storage))) {
|
||||
pr_err("Failed allocating swap storage\n");
|
||||
return PTR_ERR(swap_storage);
|
||||
}
|
||||
} else
|
||||
swap_storage = persistent_swap_storage;
|
||||
|
||||
swap_space = file_inode(swap_storage)->i_mapping;
|
||||
|
||||
for (i = 0; i < ttm->num_pages; ++i) {
|
||||
from_page = ttm->pages[i];
|
||||
if (unlikely(from_page == NULL))
|
||||
continue;
|
||||
to_page = shmem_read_mapping_page(swap_space, i);
|
||||
if (unlikely(IS_ERR(to_page))) {
|
||||
ret = PTR_ERR(to_page);
|
||||
goto out_err;
|
||||
}
|
||||
copy_highpage(to_page, from_page);
|
||||
set_page_dirty(to_page);
|
||||
mark_page_accessed(to_page);
|
||||
page_cache_release(to_page);
|
||||
}
|
||||
|
||||
ttm_tt_unpopulate(ttm);
|
||||
ttm->swap_storage = swap_storage;
|
||||
ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
|
||||
if (persistent_swap_storage)
|
||||
ttm->page_flags |= TTM_PAGE_FLAG_PERSISTENT_SWAP;
|
||||
|
||||
return 0;
|
||||
out_err:
|
||||
if (!persistent_swap_storage)
|
||||
fput(swap_storage);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void ttm_tt_clear_mapping(struct ttm_tt *ttm)
|
||||
{
|
||||
pgoff_t i;
|
||||
struct page **page = ttm->pages;
|
||||
|
||||
if (ttm->page_flags & TTM_PAGE_FLAG_SG)
|
||||
return;
|
||||
|
||||
for (i = 0; i < ttm->num_pages; ++i) {
|
||||
(*page)->mapping = NULL;
|
||||
(*page++)->index = 0;
|
||||
}
|
||||
}
|
||||
|
||||
void ttm_tt_unpopulate(struct ttm_tt *ttm)
|
||||
{
|
||||
if (ttm->state == tt_unpopulated)
|
||||
return;
|
||||
|
||||
ttm_tt_clear_mapping(ttm);
|
||||
ttm->bdev->driver->ttm_tt_unpopulate(ttm);
|
||||
}
|
||||