/**************************************************************************
*
* Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/*
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
#include "drmP.h"
/*
* Locking may look a bit complicated but isn't really:
*
* The buffer usage atomic_t needs to be protected by dev->struct_mutex
* when there is a chance that it can be zero before or after the operation.
*
* dev->struct_mutex also protects all lists and list heads, as well as the
* hash tables and hash heads.
*
* bo->mutex protects the buffer object itself, excluding the usage field.
* bo->mutex also protects the buffer list heads, so manipulating those
* requires holding both bo->mutex and dev->struct_mutex.
*
* The locking order is bo->mutex, then dev->struct_mutex, which makes list
* traversal a bit involved: when dev->struct_mutex is released to grab
* bo->mutex, the traversal will, in general, need to be restarted.
*
*/
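/*
 * An illustration of the traversal pattern this lock order forces on us.
 * This is a sketch only, not part of this file; do_work() stands in for
 * whatever per-buffer operation needs bo->mutex, and reference counting
 * is simplified:
 *
 *      restart:
 *      mutex_lock(&dev->struct_mutex);
 *      list_for_each_entry(entry, &man->lru, lru) {
 *              if (mutex_trylock(&entry->mutex)) {
 *                      do_work(entry);                    -- fast path, list intact
 *                      mutex_unlock(&entry->mutex);
 *                      continue;
 *              }
 *              atomic_inc(&entry->usage);                 -- keep entry alive
 *              mutex_unlock(&dev->struct_mutex);          -- bo->mutex must come first
 *              mutex_lock(&entry->mutex);
 *              mutex_lock(&dev->struct_mutex);
 *              do_work(entry);
 *              mutex_unlock(&entry->mutex);
 *              atomic_dec(&entry->usage);
 *              mutex_unlock(&dev->struct_mutex);
 *              goto restart;                              -- list may have changed
 *      }
 *      mutex_unlock(&dev->struct_mutex);
 */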
static void drm_bo_destroy_locked(drm_buffer_object_t * bo);
static int drm_bo_setup_vm_locked(drm_buffer_object_t * bo);
static void drm_bo_takedown_vm_locked(drm_buffer_object_t * bo);
static void drm_bo_unmap_virtual(drm_buffer_object_t * bo);
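/*
 * Map a memory type index to its single-bit buffer flag: the DRM_BO_FLAG_MEM_*
 * flags start at bit 24, so this turns a DRM_BO_MEM_* index (e.g. 0 for
 * DRM_BO_MEM_LOCAL) into the corresponding flag bit.
 */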
static inline uint32_t drm_bo_type_flags(unsigned type)
{
return (1 << (24 + type));
}
/*
* Call with bo->mutex and dev->struct_mutex held.
*/
void drm_bo_add_to_pinned_lru(drm_buffer_object_t * bo)
{
drm_mem_type_manager_t *man;
man = &bo->dev->bm.man[bo->pinned_mem_type];
list_add_tail(&bo->pinned_lru, &man->pinned);
}
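/*
 * Put the buffer on its memory manager's eviction LRU, unless it is pinned
 * (DRM_BO_FLAG_NO_MOVE or DRM_BO_FLAG_NO_EVICT), in which case the lru head
 * is reinitialized to an empty list so that a later list_del remains safe.
 */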
void drm_bo_add_to_lru(drm_buffer_object_t * bo)
{
drm_mem_type_manager_t *man;
if (!(bo->mem.mask & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))) {
man = &bo->dev->bm.man[bo->mem.mem_type];
list_add_tail(&bo->lru, &man->lru);
} else {
INIT_LIST_HEAD(&bo->lru);
}
}
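/*
 * Tear down user-space virtual mappings of the buffer before a move that
 * involves a PCI-mappable placement. On DRM_ODD_MM_COMPAT kernels the
 * affected mappings are additionally locked across the move and put back
 * by drm_bo_vm_post_move() afterwards.
 */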
static int drm_bo_vm_pre_move(drm_buffer_object_t * bo, int old_is_pci)
{
#ifdef DRM_ODD_MM_COMPAT
int ret;
ret = drm_bo_lock_kmm(bo);
if (ret)
return ret;
drm_bo_unmap_virtual(bo);
if (old_is_pci)
drm_bo_finish_unmap(bo);
#else
drm_bo_unmap_virtual(bo);
#endif
return 0;
}
static void drm_bo_vm_post_move(drm_buffer_object_t * bo)
{
#ifdef DRM_ODD_MM_COMPAT
int ret;
ret = drm_bo_remap_bound(bo);
if (ret) {
DRM_ERROR("Failed to remap a bound buffer object.\n"
"\tThis might cause a sigbus later.\n");
}
drm_bo_unlock_kmm(bo);
#endif
}
/*
* Call with bo->mutex held.
*/
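/*
 * Give the buffer a ttm page backing when its type needs one: drm_bo_type_dc
 * buffers get a fresh ttm sized to the object, while user and fake buffers
 * carry no ttm at all.
 */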
static int drm_bo_add_ttm(drm_buffer_object_t * bo)
{
drm_device_t *dev = bo->dev;
int ret = 0;
bo->ttm = NULL;
switch (bo->type) {
case drm_bo_type_dc:
bo->ttm = drm_ttm_init(dev, bo->mem.num_pages << PAGE_SHIFT);
if (!bo->ttm)
ret = -ENOMEM;
break;
case drm_bo_type_user:
case drm_bo_type_fake:
break;
default:
DRM_ERROR("Illegal buffer object type\n");
ret = -EINVAL;
break;
}
return ret;
}
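/*
 * Move the buffer into the placement described by mem. The flow: unmap
 * virtual mappings when either placement is PCI-visible, create and bind a
 * ttm when the destination needs one, perform the actual transfer (a simple
 * placement update for a local, ttm-less buffer, a ttm move when neither
 * memory type is fixed, the driver's move hook when one is, or a memcpy as
 * last resort), let the driver invalidate caches for a buffer coming back
 * from eviction, and record the new offset. On error, a ttm that cannot
 * stay bound in fixed memory is destroyed again.
 */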
static int drm_bo_handle_move_mem(drm_buffer_object_t * bo,
drm_bo_mem_reg_t * mem,
int evict, int no_wait)
{
drm_device_t *dev = bo->dev;
drm_buffer_manager_t *bm = &dev->bm;
int old_is_pci = drm_mem_reg_is_pci(dev, &bo->mem);
int new_is_pci = drm_mem_reg_is_pci(dev, mem);
drm_mem_type_manager_t *old_man = &bm->man[bo->mem.mem_type];
drm_mem_type_manager_t *new_man = &bm->man[mem->mem_type];
int ret = 0;
if (old_is_pci || new_is_pci)
ret = drm_bo_vm_pre_move(bo, old_is_pci);
if (ret)
return ret;
/*
* Create and bind a ttm if required.
*/
if (!(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm == NULL)) {
ret = drm_bo_add_ttm(bo);
if (ret)
goto out_err;
if (mem->mem_type != DRM_BO_MEM_LOCAL) {
ret = drm_bind_ttm(bo->ttm, new_man->flags &
DRM_BO_FLAG_CACHED,
mem->mm_node->start);
if (ret)
goto out_err;
}
}
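/*
 * A local, ttm-less buffer only needs its placement record updated;
 * everything else goes through a ttm move, the driver's move hook, or a
 * memcpy fallback.
 */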
if ((bo->mem.mem_type == DRM_BO_MEM_LOCAL) && bo->ttm == NULL) {
drm_bo_mem_reg_t *old_mem = &bo->mem;
uint32_t save_flags = old_mem->flags;
uint32_t save_mask = old_mem->mask;
*old_mem = *mem;
mem->mm_node = NULL;
old_mem->mask = save_mask;
DRM_FLAG_MASKED(save_flags, mem->flags, DRM_BO_MASK_MEMTYPE);
} else if (!(old_man->flags & _DRM_FLAG_MEMTYPE_FIXED) &&
!(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED)) {
ret = drm_bo_move_ttm(bo, evict, no_wait, mem);
} else if (dev->driver->bo_driver->move) {
ret = dev->driver->bo_driver->move(bo, evict, no_wait, mem);
} else {
ret = drm_bo_move_memcpy(bo, evict, no_wait, mem);
}
if (ret)
goto out_err;
if (old_is_pci || new_is_pci)
drm_bo_vm_post_move(bo);
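/*
 * A buffer coming back from an eviction may have stale data in
 * driver-managed caches; let the driver invalidate them for the new
 * placement.
 */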
if (bo->priv_flags & _DRM_BO_FLAG_EVICTED) {
ret =
dev->driver->bo_driver->invalidate_caches(dev,
bo->mem.flags);
if (ret)
DRM_ERROR("Cannot flush read caches\n");
}
DRM_FLAG_MASKED(bo->priv_flags,
(evict) ? _DRM_BO_FLAG_EVICTED : 0,
_DRM_BO_FLAG_EVICTED);
if (bo->mem.mm_node)
bo->offset = bo->mem.mm_node->start << PAGE_SHIFT;
return 0;
out_err:
if (old_is_pci || new_is_pci)
drm_bo_vm_post_move(bo);
new_man = &bm->man[bo->mem.mem_type];
if ((new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && bo->ttm) {
drm_ttm_unbind(bo->ttm);
drm_destroy_ttm(bo->ttm);
bo->ttm = NULL;
}
return ret;
}
/*
* Call with bo->mutex held.
* Wait until the buffer is idle.
*/
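/*
 * Returns 0 once any attached fence has signaled (the fence is then
 * dereferenced and detached), -EBUSY if no_wait is set and the buffer is
 * still busy, or the error code from the fence wait.
 */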
int drm_bo_wait(drm_buffer_object_t * bo, int lazy, int ignore_signals,
int no_wait)
{
drm_fence_object_t *fence = bo->fence;
int ret;
if (fence) {
drm_device_t *dev = bo->dev;
if (drm_fence_object_signaled(fence, bo->fence_type)) {
drm_fence_usage_deref_unlocked(dev, fence);
bo->fence = NULL;
return 0;
}
if (no_wait) {
return -EBUSY;
}
ret =
drm_fence_object_wait(dev, fence, lazy, ignore_signals,
bo->fence_type);
if (ret)
return ret;
drm_fence_usage_deref_unlocked(dev, fence);
bo->fence = NULL;
}
return 0;
}
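/*
 * Make sure a lingering fence does not hold up buffer destruction forever:
 * with nice_mode set, the fence gets a grace period of 3 * DRM_HZ to signal
 * before the buffer is dealt with more forcefully.
 */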
static int drm_bo_expire_fence(drm_buffer_object_t * bo, int allow_errors)
{
drm_device_t *dev = bo->dev;
drm_buffer_manager_t *bm = &dev->bm;
if (bo->fence) {
if (bm->nice_mode) {
unsigned long _end = jiffies + 3 * DRM_HZ;