/**************************************************************************
*
* Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*
**************************************************************************/
/*
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
#include "drmP.h"
/*
* Buffer object locking policy:
* Lock dev->struct_mutex;
* Increase usage
* Unlock dev->struct_mutex;
* Lock buffer->mutex;
* Do whatever you want;
* Unlock buffer->mutex;
* Decrease usage. Call destruction if zero.
*
 * Making a buffer visible as a user object increases the usage count only
 * once, since user objects have their own refcounting.
*
* Destruction:
* lock dev->struct_mutex;
* Verify that usage is zero. Otherwise unlock and continue.
* Destroy object.
* unlock dev->struct_mutex;
*
* Mutex and spinlock locking orders:
* 1.) Buffer mutex
* 2.) Refer to ttm locking orders.
*/
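/*
 * Illustrative sketch of the policy above (not part of the driver; error
 * handling and the lookup that produced "bo" are omitted):
 *
 *	mutex_lock(&dev->struct_mutex);
 *	atomic_inc(&bo->usage);
 *	mutex_unlock(&dev->struct_mutex);
 *
 *	mutex_lock(&bo->mutex);
 *	... operate on the buffer ...
 *	mutex_unlock(&bo->mutex);
 *
 *	drm_bo_usage_deref_unlocked(dev, bo);
 */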
/*
 * Copy the bits selected by _mask from _new into _old, leaving the
 * remaining bits of _old untouched.
 */
#define DRM_FLAG_MASKED(_old, _new, _mask) do {		\
	(_old) ^= (((_old) ^ (_new)) & (_mask));	\
} while (0)
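/*
 * Convert a memory type (DRM_BO_MEM_*) to its DRM_BO_FLAG_MEM_* bit;
 * the memory type flags occupy bits 24 and up of the buffer flags.
 */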
static inline uint32_t drm_bo_type_flags(unsigned type)
{
return (1 << (24 + type));
}
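/*
 * A buffer object may be linked on two LRU lists: lru_ttm for LOCAL/TT
 * placements and lru_card for VRAM placements, with matching memory
 * manager nodes node_ttm and node_card. The helpers below select the
 * member that corresponds to a given memory type.
 */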
static inline drm_buffer_object_t *drm_bo_entry(struct list_head *list,
unsigned type)
{
switch (type) {
case DRM_BO_MEM_LOCAL:
case DRM_BO_MEM_TT:
return list_entry(list, drm_buffer_object_t, lru_ttm);
case DRM_BO_MEM_VRAM:
case DRM_BO_MEM_VRAM_NM:
return list_entry(list, drm_buffer_object_t, lru_card);
default:
BUG_ON(1);
}
return NULL;
}
static inline drm_mm_node_t *drm_bo_mm_node(drm_buffer_object_t * bo,
unsigned type)
{
switch (type) {
case DRM_BO_MEM_LOCAL:
case DRM_BO_MEM_TT:
return bo->node_ttm;
case DRM_BO_MEM_VRAM:
case DRM_BO_MEM_VRAM_NM:
return bo->node_card;
default:
BUG_ON(1);
}
return NULL;
}
/*
 * Call with bo->mutex and dev->struct_mutex held.
 */
static void drm_bo_add_to_lru(drm_buffer_object_t * buf,
drm_buffer_manager_t * bm)
{
struct list_head *list;
unsigned mem_type;
	if (buf->flags & DRM_BO_FLAG_MEM_TT) {
		mem_type = DRM_BO_MEM_TT;
		list = (buf->flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) ?
		    &bm->pinned[mem_type] : &bm->lru[mem_type];
		list_add_tail(&buf->lru_ttm, list);
	} else {
		mem_type = DRM_BO_MEM_LOCAL;
		list = (buf->flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) ?
		    &bm->pinned[mem_type] : &bm->lru[mem_type];
		list_add_tail(&buf->lru_ttm, list);
	}
	if (buf->flags & DRM_BO_FLAG_MEM_VRAM) {
		mem_type = DRM_BO_MEM_VRAM;
		list = (buf->flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) ?
		    &bm->pinned[mem_type] : &bm->lru[mem_type];
		list_add_tail(&buf->lru_card, list);
	}
}
/*
 * Call with bo->mutex held. Moves the buffer out of the TT aperture back to
 * local, cached memory. The TT memory manager node is released unless
 * DRM_BO_FLAG_NO_MOVE is set and force_no_move is zero.
 */
static int drm_move_tt_to_local(drm_buffer_object_t * buf, int evict,
int force_no_move)
{
drm_device_t *dev = buf->dev;
int ret;
if (buf->node_ttm) {
mutex_lock(&dev->struct_mutex);
if (evict)
ret = drm_evict_ttm(buf->ttm);
else
ret = drm_unbind_ttm(buf->ttm);
if (ret) {
mutex_unlock(&dev->struct_mutex);
if (ret == -EAGAIN)
schedule();
return ret;
}
if (!(buf->flags & DRM_BO_FLAG_NO_MOVE) || force_no_move) {
drm_mm_put_block(buf->node_ttm);
buf->node_ttm = NULL;
}
mutex_unlock(&dev->struct_mutex);
}
buf->flags &= ~DRM_BO_FLAG_MEM_TT;
buf->flags |= DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED;
return 0;
}
/*
 * Call with dev->struct_mutex held. Frees the buffer if it is idle and
 * unreferenced; a buffer that still carries an unsignaled fence is put on
 * the delayed-destroy list instead.
 */
static void drm_bo_destroy_locked(drm_device_t * dev, drm_buffer_object_t * bo)
{
drm_buffer_manager_t *bm = &dev->bm;
DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
	/*
	 * Someone might try to access us through the still active BM lists.
	 */
if (atomic_read(&bo->usage) != 0)
return;
if (!list_empty(&bo->ddestroy))
return;
if (bo->fence) {
if (!drm_fence_object_signaled(bo->fence, bo->fence_type)) {
drm_fence_object_flush(dev, bo->fence, bo->fence_type);
list_add_tail(&bo->ddestroy, &bm->ddestroy);
			schedule_delayed_work(&bm->wq,
					      ((DRM_HZ / 100) < 1) ?
					      1 : DRM_HZ / 100);
return;
} else {
drm_fence_usage_deref_locked(dev, bo->fence);
bo->fence = NULL;
}
}
	/*
	 * Remove the buffer from the LRU lists.
	 */
list_del_init(&bo->lru_ttm);
list_del_init(&bo->lru_card);
if (bo->ttm) {
unsigned long _end = jiffies + DRM_HZ;
int ret;
/*
* This temporarily unlocks struct_mutex.
*/
do {
ret = drm_unbind_ttm(bo->ttm);
if (ret == -EAGAIN) {
mutex_unlock(&dev->struct_mutex);
schedule();
mutex_lock(&dev->struct_mutex);
}
} while (ret == -EAGAIN && !time_after_eq(jiffies, _end));
if (ret) {
DRM_ERROR("Couldn't unbind buffer. "
"Bad. Continuing anyway\n");
}
}
if (bo->node_ttm) {
drm_mm_put_block(bo->node_ttm);
bo->node_ttm = NULL;
}
if (bo->node_card) {
drm_mm_put_block(bo->node_card);
bo->node_card = NULL;
}
if (bo->ttm_object) {
drm_ttm_object_deref_locked(dev, bo->ttm_object);
}
atomic_dec(&bm->count);
drm_ctl_free(bo, sizeof(*bo), DRM_MEM_BUFOBJ);
}
/*
 * Call with bo->mutex held.
 * Wait until the buffer is idle.
 */
static int drm_bo_wait(drm_buffer_object_t * bo, int lazy, int ignore_signals,
int no_wait)
{
drm_fence_object_t *fence = bo->fence;
int ret;
if (fence) {
drm_device_t *dev = bo->dev;
if (drm_fence_object_signaled(fence, bo->fence_type)) {
drm_fence_usage_deref_unlocked(dev, fence);
bo->fence = NULL;
return 0;
}
if (no_wait) {
return -EBUSY;
}
		ret = drm_fence_object_wait(dev, fence, lazy, ignore_signals,
					    bo->fence_type);
if (ret)
return ret;
drm_fence_usage_deref_unlocked(dev, fence);
bo->fence = NULL;
}
return 0;
}
/*
 * Call with dev->struct_mutex held. Frees buffers on the delayed-destroy
 * list whose fences have signaled; with remove_all set, waits for the
 * remaining fences as well.
 */
static void drm_bo_delayed_delete(drm_device_t * dev, int remove_all)
{
drm_buffer_manager_t *bm = &dev->bm;
drm_buffer_object_t *entry, *nentry;
struct list_head *list, *next;
drm_fence_object_t *fence;
list_for_each_safe(list, next, &bm->ddestroy) {
entry = list_entry(list, drm_buffer_object_t, ddestroy);
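		/*
		 * Take a usage reference; if somebody else still holds a
		 * reference the buffer cannot be destroyed yet, so skip it.
		 */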
atomic_inc(&entry->usage);
if (atomic_read(&entry->usage) != 1) {
atomic_dec(&entry->usage);
continue;
}
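		/*
		 * Hold a usage reference on the next entry so it cannot be
		 * destroyed and unlinked while struct_mutex is dropped below.
		 */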
nentry = NULL;
if (next != &bm->ddestroy) {
nentry = list_entry(next, drm_buffer_object_t,
ddestroy);
atomic_inc(&nentry->usage);
}
mutex_unlock(&dev->struct_mutex);
mutex_lock(&entry->mutex);
fence = entry->fence;
if (fence && drm_fence_object_signaled(fence,
entry->fence_type)) {
drm_fence_usage_deref_locked(dev, fence);
entry->fence = NULL;
}
if (entry->fence && remove_all) {
if (bm->nice_mode) {
unsigned long _end = jiffies + 3 * DRM_HZ;
int ret;
do {
ret = drm_bo_wait(entry, 0, 1, 0);
} while (ret && !time_after_eq(jiffies, _end));
if (entry->fence) {
bm->nice_mode = 0;
DRM_ERROR("Detected GPU lockup or "
"fence driver was taken down. "
"Evicting waiting buffers.\n");
}
}
if (entry->fence) {
drm_fence_usage_deref_unlocked(dev,
entry->fence);
entry->fence = NULL;
}
}
mutex_lock(&dev->struct_mutex);
mutex_unlock(&entry->mutex);
if (atomic_dec_and_test(&entry->usage) && (!entry->fence)) {
list_del_init(&entry->ddestroy);
drm_bo_destroy_locked(dev, entry);
}
if (nentry) {
atomic_dec(&nentry->usage);
}
}
}
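/*
 * Delayed-destroy work handler. Buffers that still carried an unsignaled
 * fence when they were destroyed sit on bm->ddestroy; this handler retries
 * freeing them and reschedules itself while the list is non-empty. The two
 * prototypes below follow the work_struct callback change in kernel 2.6.20.
 */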
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
static void drm_bo_delayed_workqueue(void *data)
#else
static void drm_bo_delayed_workqueue(struct work_struct *work)
#endif
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
drm_device_t *dev = (drm_device_t *) data;
drm_buffer_manager_t *bm = &dev->bm;
#else
drm_buffer_manager_t *bm = container_of(work, drm_buffer_manager_t, wq.work);
drm_device_t *dev = container_of(bm, drm_device_t, bm);
#endif
DRM_DEBUG("Delayed delete Worker\n");
mutex_lock(&dev->struct_mutex);
if (!bm->initialized) {
mutex_unlock(&dev->struct_mutex);
return;
}
drm_bo_delayed_delete(dev, 0);
if (bm->initialized && !list_empty(&bm->ddestroy)) {
schedule_delayed_work(&bm->wq,
((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
}
mutex_unlock(&dev->struct_mutex);
}
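/*
 * Drop a usage reference with dev->struct_mutex already held.
 */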
void drm_bo_usage_deref_locked(drm_device_t * dev, drm_buffer_object_t * bo)
{
if (atomic_dec_and_test(&bo->usage)) {
drm_bo_destroy_locked(dev, bo);
}
}
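/*
 * Release callback for the buffer's user object: drops the single usage
 * reference that user-object visibility added.
 */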
static void drm_bo_base_deref_locked(drm_file_t * priv, drm_user_object_t * uo)
{
drm_bo_usage_deref_locked(priv->head->dev,
drm_user_object_entry(uo, drm_buffer_object_t,
base));
}
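/*
 * Drop a usage reference without holding dev->struct_mutex. The count is
 * re-checked under the mutex since another thread may have taken a new
 * reference before the mutex was acquired.
 */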
void drm_bo_usage_deref_unlocked(drm_device_t * dev, drm_buffer_object_t * bo)
{
if (atomic_dec_and_test(&bo->usage)) {
mutex_lock(&dev->struct_mutex);
if (atomic_read(&bo->usage) == 0)
drm_bo_destroy_locked(dev, bo);
mutex_unlock(&dev->struct_mutex);
}
}
/*
* Note. The caller has to register (if applicable)
* and deregister fence object usage.
*/