/**
 * \file ati_pcigart.c
 * ATI PCI GART support
 *
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Wed Dec 13 21:52:19 2000 by gareth@valinux.com
 *
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"

#define ATI_PCIGART_PAGE_SIZE		4096	/**< PCI GART page size */

static int drm_ati_alloc_pcigart_table(struct drm_device *dev,
				       struct drm_ati_pcigart_info *gart_info)
{
	gart_info->table_handle = drm_pci_alloc(dev, gart_info->table_size,
						PAGE_SIZE,
						gart_info->table_mask);
	if (gart_info->table_handle == NULL)
		return -ENOMEM;

	return 0;
}

static void drm_ati_free_pcigart_table(struct drm_device *dev,
				       struct drm_ati_pcigart_info *gart_info)
{
	drm_pci_free(dev, gart_info->table_handle);
	gart_info->table_handle = NULL;
}

int drm_ati_pcigart_cleanup(struct drm_device *dev,
			    struct drm_ati_pcigart_info *gart_info)
{
	struct drm_sg_mem *entry = dev->sg;
	unsigned long pages;
	int i;
	int max_pages;

	/* we need to support large memory configurations */
	if (!entry) {
		DRM_ERROR("no scatter/gather memory!\n");
		return 0;
	}

	if (gart_info->bus_addr) {

		max_pages = gart_info->table_size / sizeof(u32);
		pages = (entry->pages <= max_pages) ? entry->pages : max_pages;

		for (i = 0; i < pages; i++) {
			if (!entry->busaddr[i])
				break;
			pci_unmap_single(dev->pdev, entry->busaddr[i],
					 PAGE_SIZE, PCI_DMA_TODEVICE);
		}

		if (gart_info->gart_table_location == DRM_ATI_GART_MAIN)
			gart_info->bus_addr = 0;
	}

	if (gart_info->gart_table_location == DRM_ATI_GART_MAIN &&
	    gart_info->table_handle)
		drm_ati_free_pcigart_table(dev, gart_info);

	return 1;
}
EXPORT_SYMBOL(drm_ati_pcigart_cleanup);

int drm_ati_pcigart_init(struct drm_device *dev,
			 struct drm_ati_pcigart_info *gart_info)
{
	struct drm_sg_mem *entry = dev->sg;
	void *address = NULL;
	unsigned long pages;
	u32 *pci_gart, page_base;
	dma_addr_t bus_address = 0;
	int i, j, ret = 0;
	int max_pages;

	if (!entry) {
		DRM_ERROR("no scatter/gather memory!\n");
		goto done;
	}

	if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) {
		DRM_DEBUG("PCI: no table in VRAM: using normal RAM\n");

		ret = drm_ati_alloc_pcigart_table(dev, gart_info);
		if (ret) {
			DRM_ERROR("cannot allocate PCI GART page!\n");
			goto done;
		}

		address = gart_info->table_handle->vaddr;
		bus_address = gart_info->table_handle->busaddr;
	} else {
		address = gart_info->addr;
		bus_address = gart_info->bus_addr;
		DRM_DEBUG("PCI: Gart Table: VRAM %08X mapped at %08lX\n",
			  bus_address, (unsigned long)address);
	}

	pci_gart = (u32 *) address;

	max_pages = gart_info->table_size / sizeof(u32);
	pages = (entry->pages <= max_pages) ? entry->pages : max_pages;

	memset(pci_gart, 0, max_pages * sizeof(u32));

	for (i = 0; i < pages; i++) {
		/* we need to support large memory configurations */
		entry->busaddr[i] = pci_map_single(dev->pdev,
						   page_address(entry->pagelist[i]),
						   PAGE_SIZE, PCI_DMA_TODEVICE);
		if (entry->busaddr[i] == 0) {
			DRM_ERROR("unable to map PCIGART pages!\n");
			drm_ati_pcigart_cleanup(dev, gart_info);
			address = NULL;
			bus_address = 0;
			goto done;
		}
		page_base = (u32) entry->busaddr[i];

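		/*
		 * Write one GART entry per ATI page; PAGE_SIZE may cover
		 * several 4K GART pages.  PCIE entries store the bus
		 * address shifted right by 8, IGP and PCIE entries also
		 * set the low flag bits (0xc); plain PCI entries are the
		 * bare bus address.
		 */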
		for (j = 0; j < (PAGE_SIZE / ATI_PCIGART_PAGE_SIZE); j++) {
			switch(gart_info->gart_reg_if) {
			case DRM_ATI_GART_IGP:
				*pci_gart = cpu_to_le32((page_base) | 0xc);
				break;
			case DRM_ATI_GART_PCIE:
				*pci_gart = cpu_to_le32((page_base >> 8) | 0xc);
				break;
			default:
			case DRM_ATI_GART_PCI:
				*pci_gart = cpu_to_le32(page_base);
				break;
			}
			pci_gart++;
			page_base += ATI_PCIGART_PAGE_SIZE;
		}
	}

	ret = 1;

#if defined(__i386__) || defined(__x86_64__)
	wbinvd();
#else
	mb();
#endif

done:
	gart_info->addr = address;
	gart_info->bus_addr = bus_address;
	return ret;
}
EXPORT_SYMBOL(drm_ati_pcigart_init);
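
/*
 * Illustrative sketch (not part of the original file): how a driver
 * might fill in drm_ati_pcigart_info before calling
 * drm_ati_pcigart_init(), with drm_ati_pcigart_cleanup() undoing the
 * mapping on teardown.  The function name, table size, and DMA mask
 * below are assumed values for the example.
 */
#if 0
static int example_pcigart_enable(struct drm_device *dev,
				  struct drm_ati_pcigart_info *gart_info)
{
	/* Keep the GART table in normal RAM, plain PCI entry format. */
	gart_info->gart_table_location = DRM_ATI_GART_MAIN;
	gart_info->gart_reg_if = DRM_ATI_GART_PCI;
	gart_info->table_size = 32768;		/* assumed, in bytes */
	gart_info->table_mask = 0xffffffffULL;	/* 32-bit DMA mask */

	/* Note: returns 1 on success and 0 on failure, not 0/-errno. */
	if (!drm_ati_pcigart_init(dev, gart_info))
		return -ENOMEM;
	return 0;
}
#endif
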
/**************************************************************************
 *
 * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include "drmP.h"


/*
 * Convenience function to be called by fence::wait methods that
 * need polling.
 */

int drm_fence_wait_polling(struct drm_fence_object *fence, int lazy,
			   int interruptible, uint32_t mask, 
			   unsigned long end_jiffies)
{
	struct drm_device *dev = fence->dev;
	struct drm_fence_manager *fm = &dev->fm;
	struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class];
	uint32_t count = 0;
	int ret;

	DECLARE_WAITQUEUE(entry, current);
	add_wait_queue(&fc->fence_queue, &entry);

	ret = 0;
	
	for (;;) {
		__set_current_state((interruptible) ? 
				    TASK_INTERRUPTIBLE :
				    TASK_UNINTERRUPTIBLE);
		if (drm_fence_object_signaled(fence, mask))
			break;
		if (time_after_eq(jiffies, end_jiffies)) {
			ret = -EBUSY;
			break;
		}
		if (lazy)
			schedule_timeout(1);
		else if ((++count & 0x0F) == 0){
			__set_current_state(TASK_RUNNING);
			schedule();
			__set_current_state((interruptible) ? 
					    TASK_INTERRUPTIBLE :
					    TASK_UNINTERRUPTIBLE);
		}			
		if (interruptible && signal_pending(current)) {
			ret = -EAGAIN;
			break;
		}
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&fc->fence_queue, &entry);
	return ret;
}
EXPORT_SYMBOL(drm_fence_wait_polling);
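
/*
 * Illustrative sketch (not part of the original file): a driver's
 * fence::wait hook that simply defers to the polling helper above.
 * The function name is made up; the three-second deadline mirrors
 * drm_fence_object_wait() below.
 */
#if 0
static int example_fence_wait(struct drm_fence_object *fence, int lazy,
			      int interruptible, uint32_t mask)
{
	return drm_fence_wait_polling(fence, lazy, interruptible, mask,
				      jiffies + 3 * DRM_HZ);
}
#endif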

/*
 * Typically called by the IRQ handler.
 */

void drm_fence_handler(struct drm_device *dev, uint32_t fence_class,
		       uint32_t sequence, uint32_t type, uint32_t error)
{
	int wake = 0;
	uint32_t diff;
	uint32_t relevant_type;
	uint32_t new_type;
	struct drm_fence_manager *fm = &dev->fm;
	struct drm_fence_class_manager *fc = &fm->fence_class[fence_class];
	struct drm_fence_driver *driver = dev->driver->fence_driver;
	struct list_head *head;
	struct drm_fence_object *fence, *next;
	int found = 0;

	if (list_empty(&fc->ring))
		return;

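	/*
	 * Find the first fence in the ring that the reported sequence
	 * number has not yet reached, using wrap-safe arithmetic.
	 */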
	list_for_each_entry(fence, &fc->ring, ring) {
		diff = (sequence - fence->sequence) & driver->sequence_mask;
		if (diff > driver->wrap_diff) {
			found = 1;
			break;
		}
	}

	fc->waiting_types &= ~type;
	head = (found) ? &fence->ring : &fc->ring;

	list_for_each_entry_safe_reverse(fence, next, head, ring) {
		if (&fence->ring == &fc->ring)
			break;

		if (error) {
			fence->error = error;
			fence->signaled_types = fence->type;
			list_del_init(&fence->ring);
			wake = 1;
			break;
		}

		if (type & DRM_FENCE_TYPE_EXE)
			type |= fence->native_types;

		relevant_type = type & fence->type;
		new_type = (fence->signaled_types | relevant_type) ^
			fence->signaled_types;

		if (new_type) {
			fence->signaled_types |= new_type;
			DRM_DEBUG("Fence 0x%08lx signaled 0x%08x\n",
				  fence->base.hash.key, fence->signaled_types);

			if (driver->needed_flush)
				fc->pending_flush |= driver->needed_flush(fence);

			if (new_type & fence->waiting_types)
				wake = 1;
		}

		fc->waiting_types |= fence->waiting_types & ~fence->signaled_types;

		if (!(fence->type & ~fence->signaled_types)) {
			DRM_DEBUG("Fence completely signaled 0x%08lx\n",
				  fence->base.hash.key);
			list_del_init(&fence->ring);
		}
	}

	/*
	 * Reinstate lost waiting types.
	 */

	if ((fc->waiting_types & type) != type) {
		head = head->prev;
		list_for_each_entry(fence, head, ring) {
			if (&fence->ring == &fc->ring)
				break;
			diff = (fc->highest_waiting_sequence - fence->sequence) &
				driver->sequence_mask;
			if (diff > driver->wrap_diff)
				break;
			
			fc->waiting_types |= fence->waiting_types & ~fence->signaled_types;
		}
	}

	if (wake) 
		wake_up_all(&fc->fence_queue);
}
EXPORT_SYMBOL(drm_fence_handler);
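
/*
 * Illustrative sketch (not part of the original file): an interrupt
 * path feeding a completed sequence number into drm_fence_handler().
 * The breadcrumb read is hypothetical; drm_fence_handler() touches the
 * ring without locking, so the caller must write-hold the fence
 * manager lock.
 */
#if 0
static void example_fence_irq(struct drm_device *dev, uint32_t fence_class)
{
	struct drm_fence_manager *fm = &dev->fm;
	unsigned long flags;
	uint32_t sequence;

	write_lock_irqsave(&fm->lock, flags);
	sequence = example_read_breadcrumb(dev);	/* hypothetical */
	drm_fence_handler(dev, fence_class, sequence,
			  DRM_FENCE_TYPE_EXE, 0 /* no error */);
	write_unlock_irqrestore(&fm->lock, flags);
}
#endif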

static void drm_fence_unring(struct drm_device *dev, struct list_head *ring)
{
	struct drm_fence_manager *fm = &dev->fm;
	unsigned long flags;

	write_lock_irqsave(&fm->lock, flags);
	list_del_init(ring);
	write_unlock_irqrestore(&fm->lock, flags);
}

void drm_fence_usage_deref_locked(struct drm_fence_object **fence)
{
	struct drm_fence_object *tmp_fence = *fence;
	struct drm_device *dev = tmp_fence->dev;
	struct drm_fence_manager *fm = &dev->fm;

	DRM_ASSERT_LOCKED(&dev->struct_mutex);
	*fence = NULL;
	if (atomic_dec_and_test(&tmp_fence->usage)) {
		drm_fence_unring(dev, &tmp_fence->ring);
		DRM_DEBUG("Destroyed a fence object 0x%08lx\n",
			  tmp_fence->base.hash.key);
		atomic_dec(&fm->count);
		BUG_ON(!list_empty(&tmp_fence->base.list));
		drm_ctl_free(tmp_fence, sizeof(*tmp_fence), DRM_MEM_FENCE);
	}
}
EXPORT_SYMBOL(drm_fence_usage_deref_locked);

void drm_fence_usage_deref_unlocked(struct drm_fence_object **fence)
{
	struct drm_fence_object *tmp_fence = *fence;
	struct drm_device *dev = tmp_fence->dev;
	struct drm_fence_manager *fm = &dev->fm;

	*fence = NULL;
	if (atomic_dec_and_test(&tmp_fence->usage)) {
		mutex_lock(&dev->struct_mutex);
		if (atomic_read(&tmp_fence->usage) == 0) {
			drm_fence_unring(dev, &tmp_fence->ring);
			atomic_dec(&fm->count);
			BUG_ON(!list_empty(&tmp_fence->base.list));
			drm_ctl_free(tmp_fence, sizeof(*tmp_fence), DRM_MEM_FENCE);
		}
		mutex_unlock(&dev->struct_mutex);
	}
}
EXPORT_SYMBOL(drm_fence_usage_deref_unlocked);

struct drm_fence_object
*drm_fence_reference_locked(struct drm_fence_object *src)
{
	DRM_ASSERT_LOCKED(&src->dev->struct_mutex);

	atomic_inc(&src->usage);
	return src;
}

void drm_fence_reference_unlocked(struct drm_fence_object **dst,
				  struct drm_fence_object *src)
{
	mutex_lock(&src->dev->struct_mutex);
	*dst = src;
	atomic_inc(&src->usage);
	mutex_unlock(&src->dev->struct_mutex);
}
EXPORT_SYMBOL(drm_fence_reference_unlocked);

static void drm_fence_object_destroy(struct drm_file *priv,
				     struct drm_user_object *base)
{
	struct drm_fence_object *fence =
	    drm_user_object_entry(base, struct drm_fence_object, base);

	drm_fence_usage_deref_locked(&fence);
}

int drm_fence_object_signaled(struct drm_fence_object *fence, uint32_t mask)
{
	unsigned long flags;
	int signaled;
	struct drm_device *dev = fence->dev;
	struct drm_fence_manager *fm = &dev->fm;
	struct drm_fence_driver *driver = dev->driver->fence_driver;
	
	mask &= fence->type;
	read_lock_irqsave(&fm->lock, flags);
	signaled = (mask & fence->signaled_types) == mask;
	read_unlock_irqrestore(&fm->lock, flags);
	if (!signaled && driver->poll) {
		write_lock_irqsave(&fm->lock, flags);
		driver->poll(dev, fence->fence_class, mask);
		signaled = (mask & fence->signaled_types) == mask;
		write_unlock_irqrestore(&fm->lock, flags);
	}
	return signaled;
}
EXPORT_SYMBOL(drm_fence_object_signaled);
int drm_fence_object_flush(struct drm_fence_object *fence,
			   uint32_t type)
{
	struct drm_device *dev = fence->dev;
	struct drm_fence_manager *fm = &dev->fm;
	struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class];
	struct drm_fence_driver *driver = dev->driver->fence_driver;
	unsigned long irq_flags;
	uint32_t saved_pending_flush;
	uint32_t diff;
	int call_flush;

	if (type & ~fence->type) {
		DRM_ERROR("Flush trying to extend fence type, "
			  "0x%x, 0x%x\n", type, fence->type);
		return -EINVAL;
	}

	write_lock_irqsave(&fm->lock, irq_flags);
	fence->waiting_types |= type;
	fc->waiting_types |= fence->waiting_types;
	diff = (fence->sequence - fc->highest_waiting_sequence) & 
		driver->sequence_mask;

	if (diff < driver->wrap_diff)
		fc->highest_waiting_sequence = fence->sequence;

	/*
	 * fence->waiting_types has changed. Determine whether
	 * we need to initiate some kind of flush as a result of this.
	 */

	saved_pending_flush = fc->pending_flush;
	if (driver->needed_flush) 
		fc->pending_flush |= driver->needed_flush(fence);

	if (driver->poll)
		driver->poll(dev, fence->fence_class, fence->waiting_types);

	call_flush = fc->pending_flush;
	write_unlock_irqrestore(&fm->lock, irq_flags);

	if (call_flush && driver->flush)
		driver->flush(dev, fence->fence_class);

	return 0;
}
EXPORT_SYMBOL(drm_fence_object_flush);

/*
 * Make sure old fence objects are signaled before their fence sequences are
 * wrapped around and reused.
 */

void drm_fence_flush_old(struct drm_device *dev, uint32_t fence_class,
			 uint32_t sequence)
{
	struct drm_fence_manager *fm = &dev->fm;
	struct drm_fence_class_manager *fc = &fm->fence_class[fence_class];
	struct drm_fence_object *fence;
	unsigned long irq_flags;
	struct drm_fence_driver *driver = dev->driver->fence_driver;
	int call_flush;

	uint32_t diff;

	write_lock_irqsave(&fm->lock, irq_flags);

	list_for_each_entry_reverse(fence, &fc->ring, ring) {
		diff = (sequence - fence->sequence) & driver->sequence_mask;
		if (diff <= driver->flush_diff)
			break;

		fence->waiting_types = fence->type;
		fc->waiting_types |= fence->type;

		if (driver->needed_flush)
			fc->pending_flush |= driver->needed_flush(fence);
	}

	if (driver->poll)
		driver->poll(dev, fence_class, fc->waiting_types);

	call_flush = fc->pending_flush;
	write_unlock_irqrestore(&fm->lock, irq_flags);

	if (call_flush && driver->flush)
		driver->flush(dev, fence_class);

	/*
	 * FIXME: Should we implement a wait here for really old fences?
	 */

}
EXPORT_SYMBOL(drm_fence_flush_old);

int drm_fence_object_wait(struct drm_fence_object *fence,
			  int lazy, int ignore_signals, uint32_t mask)
{
	struct drm_device *dev = fence->dev;
	struct drm_fence_driver *driver = dev->driver->fence_driver;
	struct drm_fence_manager *fm = &dev->fm;
	struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class];
	int ret = 0;
	unsigned long _end = jiffies + 3 * DRM_HZ;

	if (mask & ~fence->type) {
		DRM_ERROR("Wait trying to extend fence type"
			  " 0x%08x 0x%08x\n", mask, fence->type);
		BUG();
		return -EINVAL;
	}

	if (driver->wait)
		return driver->wait(fence, lazy, !ignore_signals, mask);
	drm_fence_object_flush(fence, mask);
	if (driver->has_irq(dev, fence->fence_class, mask)) {
		if (!ignore_signals)
			ret = wait_event_interruptible_timeout
				(fc->fence_queue, 
				 drm_fence_object_signaled(fence, mask), 
				 3 * DRM_HZ);
		else 
			ret = wait_event_timeout
				(fc->fence_queue, 
				 drm_fence_object_signaled(fence, mask), 
				 3 * DRM_HZ);

		if (unlikely(ret == -ERESTARTSYS))
			return -EAGAIN;

		if (unlikely(ret == 0))
			return -EBUSY;

		return 0;
	}

	return drm_fence_wait_polling(fence, lazy, !ignore_signals, mask,
				      _end);
}
EXPORT_SYMBOL(drm_fence_object_wait);
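
/*
 * Illustrative sketch (not part of the original file): creating a
 * fence that is emitted immediately, waiting for it to signal, then
 * dropping the reference.  The function name and the class/type
 * choices are assumptions for the example.
 */
#if 0
static int example_sync(struct drm_device *dev)
{
	struct drm_fence_object *fence;
	int ret;

	ret = drm_fence_object_create(dev, 0 /* fence_class */,
				      DRM_FENCE_TYPE_EXE,
				      DRM_FENCE_FLAG_EMIT, &fence);
	if (ret)
		return ret;

	/* Lazy, signal-aware wait for the EXE type to signal. */
	ret = drm_fence_object_wait(fence, 1, 0, DRM_FENCE_TYPE_EXE);
	drm_fence_usage_deref_unlocked(&fence);
	return ret;
}
#endif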
int drm_fence_object_emit(struct drm_fence_object *fence, uint32_t fence_flags,
			  uint32_t fence_class, uint32_t type)
{
	struct drm_device *dev = fence->dev;
	struct drm_fence_manager *fm = &dev->fm;
	struct drm_fence_driver *driver = dev->driver->fence_driver;
	struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class];
	unsigned long flags;
	uint32_t sequence;
	uint32_t native_types;
	int ret;

	drm_fence_unring(dev, &fence->ring);
	ret = driver->emit(dev, fence_class, fence_flags, &sequence,
			   &native_types);
	if (ret)
		return ret;

	write_lock_irqsave(&fm->lock, flags);
	fence->fence_class = fence_class;
	fence->type = type;
	fence->waiting_types = 0;
	fence->signaled_types = 0;
	fence->error = 0;
	fence->sequence = sequence;
	fence->native_types = native_types;
	if (list_empty(&fc->ring))
		fc->highest_waiting_sequence = sequence - 1;
	list_add_tail(&fence->ring, &fc->ring);
	fc->latest_queued_sequence = sequence;
	write_unlock_irqrestore(&fm->lock, flags);
	return 0;
}
EXPORT_SYMBOL(drm_fence_object_emit);

static int drm_fence_object_init(struct drm_device *dev, uint32_t fence_class,
				 uint32_t type,
				 uint32_t fence_flags,
				 struct drm_fence_object *fence)
{
	int ret = 0;
	unsigned long flags;
	struct drm_fence_manager *fm = &dev->fm;

	mutex_lock(&dev->struct_mutex);
	atomic_set(&fence->usage, 1);
	mutex_unlock(&dev->struct_mutex);

	write_lock_irqsave(&fm->lock, flags);
	INIT_LIST_HEAD(&fence->ring);

	/*
	 *  Avoid hitting BUG() for kernel-only fence objects.
	 */

	INIT_LIST_HEAD(&fence->base.list);
	fence->fence_class = fence_class;
	fence->type = type;
	fence->signaled_types = 0;
	fence->waiting_types = 0;
	fence->sequence = 0;
	fence->error = 0;
	fence->dev = dev;
	write_unlock_irqrestore(&fm->lock, flags);
	if (fence_flags & DRM_FENCE_FLAG_EMIT) {
		ret = drm_fence_object_emit(fence, fence_flags,
					    fence->fence_class, type);
	}
	return ret;
}

int drm_fence_add_user_object(struct drm_file *priv,
			      struct drm_fence_object *fence, int shareable)
{
	struct drm_device *dev = priv->minor->dev;
	int ret;

	mutex_lock(&dev->struct_mutex);
	ret = drm_add_user_object(priv, &fence->base, shareable);
	if (ret)
		goto out;
	atomic_inc(&fence->usage);
	fence->base.type = drm_fence_type;
	fence->base.remove = &drm_fence_object_destroy;
	DRM_DEBUG("Fence 0x%08lx created\n", fence->base.hash.key);
out:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
EXPORT_SYMBOL(drm_fence_add_user_object);

int drm_fence_object_create(struct drm_device *dev, uint32_t fence_class,
			    uint32_t type, unsigned flags,
			    struct drm_fence_object **c_fence)
{
	struct drm_fence_object *fence;
	int ret;
	struct drm_fence_manager *fm = &dev->fm;

	fence = drm_ctl_calloc(1, sizeof(*fence), DRM_MEM_FENCE);
	if (!fence) {
		DRM_ERROR("Out of memory creating fence object\n");
		return -ENOMEM;
	}
	ret = drm_fence_object_init(dev, fence_class, type, flags, fence);
	if (ret) {
		drm_fence_usage_deref_unlocked(&fence);
		return ret;
	}
	*c_fence = fence;
	atomic_inc(&fm->count);

	return 0;
}
EXPORT_SYMBOL(drm_fence_object_create);

void drm_fence_manager_init(struct drm_device *dev)
{
	struct drm_fence_manager *fm = &dev->fm;
	struct drm_fence_class_manager *fence_class;
	struct drm_fence_driver *fed = dev->driver->fence_driver;
	int i;
	unsigned long flags;

	rwlock_init(&fm->lock);
	write_lock_irqsave(&fm->lock, flags);
	fm->initialized = 0;
	if (!fed)
		goto out_unlock;

	fm->initialized = 1;
	fm->num_classes = fed->num_classes;
	BUG_ON(fm->num_classes > _DRM_FENCE_CLASSES);

	for (i = 0; i < fm->num_classes; ++i) {
		fence_class = &fm->fence_class[i];

		memset(fence_class, 0, sizeof(*fence_class));
		INIT_LIST_HEAD(&fence_class->ring);
		DRM_INIT_WAITQUEUE(&fence_class->fence_queue);
	}

	atomic_set(&fm->count, 0);
 out_unlock:
	write_unlock_irqrestore(&fm->lock, flags);
}

void drm_fence_fill_arg(struct drm_fence_object *fence,
			struct drm_fence_arg *arg)
{
	struct drm_device *dev = fence->dev;
	struct drm_fence_manager *fm = &dev->fm;
	unsigned long irq_flags;

	read_lock_irqsave(&fm->lock, irq_flags);
	arg->handle = fence->base.hash.key;
	arg->fence_class = fence->fence_class;
	arg->type = fence->type;
	arg->signaled = fence->signaled_types;
	arg->error = fence->error;
	arg->sequence = fence->sequence;
	read_unlock_irqrestore(&fm->lock, irq_flags);
}
EXPORT_SYMBOL(drm_fence_fill_arg);

void drm_fence_manager_takedown(struct drm_device *dev)
{
}

struct drm_fence_object *drm_lookup_fence_object(struct drm_file *priv,
						 uint32_t handle)
{
	struct drm_device *dev = priv->minor->dev;
	struct drm_user_object *uo;
	struct drm_fence_object *fence;

	mutex_lock(&dev->struct_mutex);
	uo = drm_lookup_user_object(priv, handle);
	if (!uo || (uo->type != drm_fence_type)) {
		mutex_unlock(&dev->struct_mutex);
		return NULL;
	}
	fence = drm_fence_reference_locked(drm_user_object_entry(uo, struct drm_fence_object, base));
	mutex_unlock(&dev->struct_mutex);
	return fence;
}

int drm_fence_create_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_fence_manager *fm = &dev->fm;
	struct drm_fence_arg *arg = data;
	struct drm_fence_object *fence;
	int ret = 0;

	if (!fm->initialized) {
		DRM_ERROR("The DRM driver does not support fencing.\n");
		return -EINVAL;
	}

	if (arg->flags & DRM_FENCE_FLAG_EMIT)