/****************************************************************************
 * Copyright (C) 2003-2006 by XGI Technology, Taiwan.
 *
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation on the rights to use, copy, modify, merge,
 * publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.  IN NO EVENT SHALL
 * XGI AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 ***************************************************************************/

#include "xgi_drv.h"
#include "xgi_regs.h"

#include <linux/delay.h>

/*
 * irq functions
 */
#define STALL_INTERRUPT_RESET_THRESHOLD 0xffff

static unsigned int s_invalid_begin = 0;

static bool xgi_validate_signal(struct drm_map * map)
{
	if (le32_to_cpu(DRM_READ32(map, 0x2800)) & 0x001c0000) {
		u16 check;

		/* Check Read back status */
		DRM_WRITE8(map, 0x235c, 0x80);
		check = le16_to_cpu(DRM_READ16(map, 0x2360));

		if ((check & 0x3f) != ((check & 0x3f00) >> 8)) {
			return FALSE;
		}

		/* Check RO channel */
		DRM_WRITE8(map, 0x235c, 0x83);
		check = le16_to_cpu(DRM_READ16(map, 0x2360));
		if ((check & 0x0f) != ((check & 0xf0) >> 4)) {
			return FALSE;
		}

		/* Check RW channel */
		DRM_WRITE8(map, 0x235c, 0x88);
		check = le16_to_cpu(DRM_READ16(map, 0x2360));
		if ((check & 0x0f) != ((check & 0xf0) >> 4)) {
			return FALSE;
		}

		/* Check RO channel outstanding */
		DRM_WRITE8(map, 0x235c, 0x8f);
		check = le16_to_cpu(DRM_READ16(map, 0x2360));
		if (0 != (check & 0x3ff)) {
			return FALSE;
		}

		/* Check RW channel outstanding */
		DRM_WRITE8(map, 0x235c, 0x90);
		check = le16_to_cpu(DRM_READ16(map, 0x2360));
		if (0 != (check & 0x3ff)) {
			return FALSE;
		}

		/* No pending PCIE request. GE stall. */
	}

	return TRUE;
}


static void xgi_ge_hang_reset(struct drm_map * map)
{
	int time_out = 0xffff;

	DRM_WRITE8(map, 0xb057, 8);
	while (0 != (le32_to_cpu(DRM_READ32(map, 0x2800)) & 0xf0000000)) {
		while (0 != ((--time_out) & 0xfff)) 
			/* empty */ ;

		if (0 == time_out) {
			u8 old_3ce;
			u8 old_3cf;
			u8 old_index;
			u8 old_36;

			DRM_INFO("Can not reset back 0x%x!\n",
				 le32_to_cpu(DRM_READ32(map, 0x2800)));

			DRM_WRITE8(map, 0xb057, 0);

			/* Have to use 3x5.36 to reset. */
			/* Save and close dynamic gating */

			old_3ce = DRM_READ8(map, 0x3ce);
			DRM_WRITE8(map, 0x3ce, 0x2a);
			old_3cf = DRM_READ8(map, 0x3cf);
			DRM_WRITE8(map, 0x3cf, old_3cf & 0xfe);

			/* Reset GE */
			old_index = DRM_READ8(map, 0x3d4);
			DRM_WRITE8(map, 0x3d4, 0x36);
			old_36 = DRM_READ8(map, 0x3d5);
			DRM_WRITE8(map, 0x3d5, old_36 | 0x10);
			
			while (0 != ((--time_out) & 0xfff)) 
				/* empty */ ;

			DRM_WRITE8(map, 0x3d5, old_36);
			DRM_WRITE8(map, 0x3d4, old_index);

			/* Restore dynamic gating */
			DRM_WRITE8(map, 0x3cf, old_3cf);
			DRM_WRITE8(map, 0x3ce, old_3ce);
			break;
		}
	}

	DRM_WRITE8(map, 0xb057, 0);
}

	
bool xgi_ge_irq_handler(struct xgi_info * info)
{
	const u32 int_status = le32_to_cpu(DRM_READ32(info->mmio_map, 0x2810));
	bool is_support_auto_reset = FALSE;

	/* Check GE on/off */
	if (0 == (0xffffc0f0 & int_status)) {
		if (0 != (0x1000 & int_status)) {
			/* We got GE stall interrupt. 
			 */
			DRM_WRITE32(info->mmio_map, 0x2810,
				    cpu_to_le32(int_status | 0x04000000));

			if (is_support_auto_reset) {
				static cycles_t last_tick;
				static unsigned continue_int_count = 0;

				/* OE II is busy. */

				if (!xgi_validate_signal(info->mmio_map)) {
					/* Signal check failed; just skip
					 * this interrupt. */
				} else if (0 == continue_int_count++) {
					last_tick = get_cycles();
				} else {
					const cycles_t new_tick = get_cycles();
					if ((new_tick - last_tick) >
					    STALL_INTERRUPT_RESET_THRESHOLD) {
						continue_int_count = 0;
					} else if (continue_int_count >= 3) {
						continue_int_count = 0;

						/* The GE is hung; reset it. */
						DRM_INFO("Reset GE!\n");

						xgi_ge_hang_reset(info->mmio_map);
					}
				}
			}
		} else if (0 != (0x1 & int_status)) {
			s_invalid_begin++;
			DRM_WRITE32(info->mmio_map, 0x2810,
				    cpu_to_le32((int_status & ~0x01) | 0x04000000));
		}

		return TRUE;
	}

	return FALSE;
}

bool xgi_crt_irq_handler(struct xgi_info * info)
{
	bool ret = FALSE;
	u8 save_3ce = DRM_READ8(info->mmio_map, 0x3ce);

	/* CRT1 interrupt just happened
	 */
	if (IN3CFB(info->mmio_map, 0x37) & 0x01) {
		u8 op3cf_3d;
		u8 op3cf_37;

		/* What happened?
		 */
		op3cf_37 = IN3CFB(info->mmio_map, 0x37);

		/* Clear CRT interrupt
		 */
		op3cf_3d = IN3CFB(info->mmio_map, 0x3d);
		OUT3CFB(info->mmio_map, 0x3d, (op3cf_3d | 0x04));
		OUT3CFB(info->mmio_map, 0x3d, (op3cf_3d & ~0x04));
		ret = TRUE;
	}
	DRM_WRITE8(info->mmio_map, 0x3ce, save_3ce);

	return (ret);
}

bool xgi_dvi_irq_handler(struct xgi_info * info)
{
	bool ret = FALSE;
	const u8 save_3ce = DRM_READ8(info->mmio_map, 0x3ce);

	/* DVI interrupt just happened
	 */
	if (IN3CFB(info->mmio_map, 0x38) & 0x20) {
		const u8 save_3x4 = DRM_READ8(info->mmio_map, 0x3d4);
		u8 op3cf_39;
		u8 op3cf_37;
		u8 op3x5_5a;

		/* What happened?
		 */
		op3cf_37 = IN3CFB(info->mmio_map, 0x37);

		/* Notify BIOS that DVI plug/unplug happened
		 */
		op3x5_5a = IN3X5B(info->mmio_map, 0x5a);
		OUT3X5B(info->mmio_map, 0x5a, op3x5_5a & 0xf7);

		DRM_WRITE8(info->mmio_map, 0x3d4, save_3x4);

		/* Clear DVI interrupt
		 */
		op3cf_39 = IN3CFB(info->mmio_map, 0x39);
		OUT3CFB(info->mmio_map, 0x39, (op3cf_39 & ~0x01));
		OUT3CFB(info->mmio_map, 0x39, (op3cf_39 | 0x01));

		ret = TRUE;
	}
	DRM_WRITE8(info->mmio_map, 0x3ce, save_3ce);

	return (ret);
}


static void dump_reg_header(unsigned regbase)
{
	printk("\n=====xgi_dump_register========0x%x===============\n",
	       regbase);
	printk("    0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f\n");
}


static void dump_indexed_reg(struct xgi_info * info, unsigned regbase)
{
	unsigned i, j;
	u8 temp;


	dump_reg_header(regbase);
	for (i = 0; i < 0x10; i++) {
		printk("%1x ", i);

		for (j = 0; j < 0x10; j++) {
			DRM_WRITE8(info->mmio_map, regbase - 1,
				   (i * 0x10) + j);
			temp = DRM_READ8(info->mmio_map, regbase);
			printk("%3x", temp);
		}
		printk("\n");
	}
}


static void dump_reg(struct xgi_info * info, unsigned regbase, unsigned range)
{
	unsigned i, j;


	dump_reg_header(regbase);
	for (i = 0; i < range; i++) {
		printk("%1x ", i);

		for (j = 0; j < 0x10; j++) {
			u8 temp = DRM_READ8(info->mmio_map, 
					    regbase + (i * 0x10) + j);
			printk("%3x", temp);
		}
		printk("\n");
	}
}


void xgi_dump_register(struct xgi_info * info)
{
	dump_indexed_reg(info, 0x3c5);
	dump_indexed_reg(info, 0x3d5);
	dump_indexed_reg(info, 0x3cf);

	dump_reg(info, 0xB000, 0x05);
	dump_reg(info, 0x2200, 0x0B);
	dump_reg(info, 0x2300, 0x07);
	dump_reg(info, 0x2400, 0x10);
	dump_reg(info, 0x2800, 0x10);
}


#define WHOLD_GE_STATUS             0x2800

/* Test everything except the "whole GE busy" bit, the "master engine busy"
 * bit, and the reserved bits [26:21].
 */
#define IDLE_MASK                   ~((1U<<31) | (1U<<28) | (0x3f<<21))
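
/* For reference (not in the original source): with the bit positions above,
 * IDLE_MASK evaluates to ~0x97e00000 == 0x681fffff, so bits 31, 28 and 26:21
 * are ignored and every remaining status bit must read back as zero before
 * the engine is treated as idle by xgi_waitfor_pci_idle() below.
 */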

void xgi_waitfor_pci_idle(struct xgi_info * info)
{
	unsigned int idleCount = 0;
	u32 old_status = 0;
	unsigned int same_count = 0;

	while (idleCount < 5) {
		const u32 status = DRM_READ32(info->mmio_map, WHOLD_GE_STATUS)
			& IDLE_MASK;

		if (status == old_status) {
			same_count++;

			if ((same_count % 100) == 0) {
				DRM_ERROR("GE status stuck at 0x%08x for %u iterations!\n",
					  old_status, same_count);
			}
		} else {
			old_status = status;
			same_count = 0;
		}

		if (status != 0) {
			msleep(1);
			idleCount = 0;
		} else {
			idleCount++;
		}
	}
}


void xgi_enable_mmio(struct xgi_info * info)
{
	u8 protect = 0;
	u8 temp;

	/* Unprotect registers */
	DRM_WRITE8(info->mmio_map, 0x3C4, 0x11);
	protect = DRM_READ8(info->mmio_map, 0x3C5);
	DRM_WRITE8(info->mmio_map, 0x3C5, 0x92);

	DRM_WRITE8(info->mmio_map, 0x3D4, 0x3A);
	temp = DRM_READ8(info->mmio_map, 0x3D5);
	DRM_WRITE8(info->mmio_map, 0x3D5, temp | 0x20);

	/* Enable MMIO */
	DRM_WRITE8(info->mmio_map, 0x3D4, 0x39);
	temp = DRM_READ8(info->mmio_map, 0x3D5);
	DRM_WRITE8(info->mmio_map, 0x3D5, temp | 0x01);

	/* Protect registers */
	OUT3C5B(info->mmio_map, 0x11, protect);
}


void xgi_disable_mmio(struct xgi_info * info)
{
	u8 protect = 0;
	u8 temp;

	/* Unprotect registers */
	DRM_WRITE8(info->mmio_map, 0x3C4, 0x11);
	protect = DRM_READ8(info->mmio_map, 0x3C5);
	DRM_WRITE8(info->mmio_map, 0x3C5, 0x92);

	/* Disable MMIO access */
	DRM_WRITE8(info->mmio_map, 0x3D4, 0x39);
	temp = DRM_READ8(info->mmio_map, 0x3D5);
	DRM_WRITE8(info->mmio_map, 0x3D5, temp & 0xFE);

	/* Protect registers */
	OUT3C5B(info->mmio_map, 0x11, protect);
}


void xgi_enable_ge(struct xgi_info * info)
{
	u8 bOld3cf2a;
	int wait = 0;

	OUT3C5B(info->mmio_map, 0x11, 0x92);

	/* Save and close dynamic gating
	 */
	bOld3cf2a = IN3CFB(info->mmio_map, XGI_MISC_CTRL);
	OUT3CFB(info->mmio_map, XGI_MISC_CTRL, bOld3cf2a & ~EN_GEPWM);

	/* Enable 2D and 3D GE
	 */
	OUT3X5B(info->mmio_map, XGI_GE_CNTL, (GE_ENABLE | GE_ENABLE_3D));
	wait = 10;
	while (wait--) {
		DRM_READ8(info->mmio_map, 0x36);
	}

	/* Reset both 3D and 2D engine
	 */
	OUT3X5B(info->mmio_map, XGI_GE_CNTL,
		(GE_ENABLE | GE_RESET | GE_ENABLE_3D));
	wait = 10;
	while (wait--) {
		DRM_READ8(info->mmio_map, 0x36);
	}

	OUT3X5B(info->mmio_map, XGI_GE_CNTL, (GE_ENABLE | GE_ENABLE_3D));
	wait = 10;
	while (wait--) {
		DRM_READ8(info->mmio_map, 0x36);
	}

	/* Enable 2D engine only
	 */
	OUT3X5B(info->mmio_map, XGI_GE_CNTL, GE_ENABLE);

	/* Enable 2D+3D engine
	 */
	OUT3X5B(info->mmio_map, XGI_GE_CNTL, (GE_ENABLE | GE_ENABLE_3D));

	/* Restore dynamic gating
	 */
	OUT3CFB(info->mmio_map, XGI_MISC_CTRL, bOld3cf2a);
}


void xgi_disable_ge(struct xgi_info * info)
{
	int wait = 0;

	OUT3X5B(info->mmio_map, XGI_GE_CNTL, (GE_ENABLE | GE_ENABLE_3D));

	wait = 10;
	while (wait--) {
		DRM_READ8(info->mmio_map, 0x36);
	}

	/* Reset both 3D and 2D engine
	 */
	OUT3X5B(info->mmio_map, XGI_GE_CNTL,
		(GE_ENABLE | GE_RESET | GE_ENABLE_3D));

	wait = 10;
	while (wait--) {
		DRM_READ8(info->mmio_map, 0x36);
	}
	OUT3X5B(info->mmio_map, XGI_GE_CNTL, (GE_ENABLE | GE_ENABLE_3D));

	wait = 10;
	while (wait--) {
		DRM_READ8(info->mmio_map, 0x36);
	}

	/* Disable 2D engine and 3D engine.
	 */
	OUT3X5B(info->mmio_map, XGI_GE_CNTL, 0);
}
/**************************************************************************
 *
 * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include "drmP.h"

/*
 * Locking may look a bit complicated but isn't really:
 *
 * The buffer usage atomic_t needs to be protected by dev->struct_mutex
 * when there is a chance that it can be zero before or after the operation.
 *
 * dev->struct_mutex also protects all lists and list heads, hash tables
 * and hash heads.
 *
 * bo->mutex protects the buffer object itself, excluding the usage field.
 * bo->mutex also protects the buffer list heads, so to manipulate those we
 * need both bo->mutex and dev->struct_mutex.
 *
 * The locking order is bo->mutex, then dev->struct_mutex, which makes list
 * traversal a bit complicated: when dev->struct_mutex is released to grab
 * bo->mutex, the list traversal will, in general, need to be restarted.
 */
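
/*
 * A minimal sketch (not part of the original code) of the ordering described
 * above, e.g. for taking a buffer off its LRU list:
 *
 *	mutex_lock(&bo->mutex);                  take the buffer mutex first
 *	mutex_lock(&bo->dev->struct_mutex);      then the device mutex
 *	list_del_init(&bo->lru);
 *	mutex_unlock(&bo->dev->struct_mutex);
 *	mutex_unlock(&bo->mutex);
 *
 * Any code that drops dev->struct_mutex mid-traversal in order to take
 * bo->mutex has to assume the list may have changed and restart from the
 * list head.
 */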

static void drm_bo_destroy_locked(struct drm_buffer_object *bo);
static int drm_bo_setup_vm_locked(struct drm_buffer_object *bo);
static void drm_bo_unmap_virtual(struct drm_buffer_object *bo);

static inline uint64_t drm_bo_type_flags(unsigned type)
{
	return (1ULL << (24 + type));
}

/*
 * bo locked. dev->struct_mutex locked.
 */

void drm_bo_add_to_pinned_lru(struct drm_buffer_object *bo)
{
	struct drm_mem_type_manager *man;

	DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
	DRM_ASSERT_LOCKED(&bo->mutex);

	man = &bo->dev->bm.man[bo->pinned_mem_type];
	list_add_tail(&bo->pinned_lru, &man->pinned);
}

void drm_bo_add_to_lru(struct drm_buffer_object *bo)
{
	struct drm_mem_type_manager *man;

	DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);

	if (!(bo->mem.proposed_flags & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))
	    || bo->mem.mem_type != bo->pinned_mem_type) {
		man = &bo->dev->bm.man[bo->mem.mem_type];
		list_add_tail(&bo->lru, &man->lru);
	} else {
		INIT_LIST_HEAD(&bo->lru);
	}
}

static int drm_bo_vm_pre_move(struct drm_buffer_object *bo, int old_is_pci)
{
#ifdef DRM_ODD_MM_COMPAT
	int ret;

	if (!bo->map_list.map)
		return 0;

	ret = drm_bo_lock_kmm(bo);
	if (ret)
		return ret;
	drm_bo_unmap_virtual(bo);
	if (old_is_pci)
		drm_bo_finish_unmap(bo);
#else
	if (!bo->map_list.map)
		return 0;

	drm_bo_unmap_virtual(bo);
#endif
	return 0;
}

static void drm_bo_vm_post_move(struct drm_buffer_object *bo)
{
#ifdef DRM_ODD_MM_COMPAT
	int ret;

	if (!bo->map_list.map)
		return;

	ret = drm_bo_remap_bound(bo);
	if (ret) {
		DRM_ERROR("Failed to remap a bound buffer object.\n"
			  "\tThis might cause a sigbus later.\n");
	}
	drm_bo_unlock_kmm(bo);
#endif
}

/*
 * Call bo->mutex locked.
 */

static int drm_bo_add_ttm(struct drm_buffer_object *bo)
{
	struct drm_device *dev = bo->dev;
	int ret = 0;
	uint32_t page_flags = 0;

	DRM_ASSERT_LOCKED(&bo->mutex);
	bo->ttm = NULL;

	if (bo->mem.proposed_flags & DRM_BO_FLAG_WRITE)
		page_flags |= DRM_TTM_PAGE_WRITE;

	switch (bo->type) {
	case drm_bo_type_device:
	case drm_bo_type_kernel:
		bo->ttm = drm_ttm_create(dev, bo->num_pages << PAGE_SHIFT, 
					 page_flags, dev->bm.dummy_read_page);
		if (!bo->ttm)
			ret = -ENOMEM;
		break;
	case drm_bo_type_user:
		bo->ttm = drm_ttm_create(dev, bo->num_pages << PAGE_SHIFT,
					 page_flags | DRM_TTM_PAGE_USER,
					 dev->bm.dummy_read_page);
		if (!bo->ttm) {
			ret = -ENOMEM;
			break;
		}

		ret = drm_ttm_set_user(bo->ttm, current,
				       bo->buffer_start,
				       bo->num_pages);
		if (ret)
			return ret;

		break;
	default:
		DRM_ERROR("Illegal buffer object type\n");
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int drm_bo_handle_move_mem(struct drm_buffer_object *bo,
				  struct drm_bo_mem_reg *mem,
				  int evict, int no_wait)
{
	struct drm_device *dev = bo->dev;
	struct drm_buffer_manager *bm = &dev->bm;
	int old_is_pci = drm_mem_reg_is_pci(dev, &bo->mem);
	int new_is_pci = drm_mem_reg_is_pci(dev, mem);
	struct drm_mem_type_manager *old_man = &bm->man[bo->mem.mem_type];
	struct drm_mem_type_manager *new_man = &bm->man[mem->mem_type];
	int ret = 0;

	if (old_is_pci || new_is_pci ||
	    ((mem->flags ^ bo->mem.flags) & DRM_BO_FLAG_CACHED))
		ret = drm_bo_vm_pre_move(bo, old_is_pci);
	if (ret)
		return ret;

	/*
	 * Create and bind a ttm if required.
	 */

	if (!(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm == NULL)) {
		ret = drm_bo_add_ttm(bo);
		if (ret)
			goto out_err;

		if (mem->mem_type != DRM_BO_MEM_LOCAL) {
			ret = drm_ttm_bind(bo->ttm, mem);
			if (ret)
				goto out_err;
		}

		if (bo->mem.mem_type == DRM_BO_MEM_LOCAL) {
			
			struct drm_bo_mem_reg *old_mem = &bo->mem;
			uint64_t save_flags = old_mem->flags;
			uint64_t save_proposed_flags = old_mem->proposed_flags;
			
			*old_mem = *mem;
			mem->mm_node = NULL;
			old_mem->proposed_flags = save_proposed_flags;
			DRM_FLAG_MASKED(save_flags, mem->flags,
					DRM_BO_MASK_MEMTYPE);
			goto moved;
		}
		
	}

	if (!(old_man->flags & _DRM_FLAG_MEMTYPE_FIXED) &&
	    !(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED))		
		ret = drm_bo_move_ttm(bo, evict, no_wait, mem);
	else if (dev->driver->bo_driver->move) 
		ret = dev->driver->bo_driver->move(bo, evict, no_wait, mem);
	else
		ret = drm_bo_move_memcpy(bo, evict, no_wait, mem);

	if (ret)
		goto out_err;

moved:
	if (old_is_pci || new_is_pci)
		drm_bo_vm_post_move(bo);

	if (bo->priv_flags & _DRM_BO_FLAG_EVICTED) {
		ret =
		    dev->driver->bo_driver->invalidate_caches(dev,
							      bo->mem.flags);
		if (ret)
			DRM_ERROR("Can not flush read caches\n");
	}

	DRM_FLAG_MASKED(bo->priv_flags,
			(evict) ? _DRM_BO_FLAG_EVICTED : 0,
			_DRM_BO_FLAG_EVICTED);

	if (bo->mem.mm_node)
		bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) +
			bm->man[bo->mem.mem_type].gpu_offset;


	return 0;

out_err:
	if (old_is_pci || new_is_pci)
		drm_bo_vm_post_move(bo);

	new_man = &bm->man[bo->mem.mem_type];
	if ((new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && bo->ttm) {
		drm_ttm_unbind(bo->ttm);
		drm_ttm_destroy(bo->ttm);
		bo->ttm = NULL;
	}

	return ret;
}

/*
 * Call bo->mutex locked.
 * Returns -EBUSY if the buffer is currently rendered to or from. 0 otherwise.
 */

static int drm_bo_busy(struct drm_buffer_object *bo, int check_unfenced)
{
	struct drm_fence_object *fence = bo->fence;

	if (check_unfenced && (bo->priv_flags & _DRM_BO_FLAG_UNFENCED))
		return -EBUSY;

	if (fence) {
		if (drm_fence_object_signaled(fence, bo->fence_type)) {
			drm_fence_usage_deref_unlocked(&bo->fence);
			return 0;
		}
		drm_fence_object_flush(fence, DRM_FENCE_TYPE_EXE);
		if (drm_fence_object_signaled(fence, bo->fence_type)) {
			drm_fence_usage_deref_unlocked(&bo->fence);
			return 0;
		}
		return -EBUSY;
	}
	return 0;
}

static int drm_bo_check_unfenced(struct drm_buffer_object *bo)
{
	int ret;

	mutex_lock(&bo->mutex);
	ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
	mutex_unlock(&bo->mutex);
	return ret;
}


/*
 * Call bo->mutex locked.
 * Wait until the buffer is idle.
 */

int drm_bo_wait(struct drm_buffer_object *bo, int lazy, int interruptible,
		int no_wait, int check_unfenced)
{
	int ret;

	DRM_ASSERT_LOCKED(&bo->mutex);
	while(unlikely(drm_bo_busy(bo, check_unfenced))) {
		if (no_wait)
			return -EBUSY;

		if (check_unfenced &&  (bo->priv_flags & _DRM_BO_FLAG_UNFENCED)) {
			mutex_unlock(&bo->mutex);
			wait_event(bo->event_queue, !drm_bo_check_unfenced(bo));
			mutex_lock(&bo->mutex);
			bo->priv_flags |= _DRM_BO_FLAG_UNLOCKED;
		}

		if (bo->fence) {
			struct drm_fence_object *fence;
			uint32_t fence_type = bo->fence_type;

			drm_fence_reference_unlocked(&fence, bo->fence);
			mutex_unlock(&bo->mutex);

			ret = drm_fence_object_wait(fence, lazy, !interruptible,
						    fence_type);

			drm_fence_usage_deref_unlocked(&fence);
			mutex_lock(&bo->mutex);
			bo->priv_flags |= _DRM_BO_FLAG_UNLOCKED;
			if (ret)
				return ret;
		}

	}
	return 0;
}
EXPORT_SYMBOL(drm_bo_wait);

static int drm_bo_expire_fence(struct drm_buffer_object *bo, int allow_errors)
{
	struct drm_device *dev = bo->dev;
	struct drm_buffer_manager *bm = &dev->bm;

	if (bo->fence) {
		if (bm->nice_mode) {
			unsigned long _end = jiffies + 3 * DRM_HZ;
			int ret;
			do {
				ret = drm_bo_wait(bo, 0, 0, 0, 0);
				if (ret && allow_errors)
					return ret;

			} while (ret && !time_after_eq(jiffies, _end));

			if (bo->fence) {
				bm->nice_mode = 0;
				DRM_ERROR("Detected GPU lockup or "
					  "fence driver was taken down. "
					  "Evicting buffer.\n");
			}
		}
		if (bo->fence)
			drm_fence_usage_deref_unlocked(&bo->fence);
	}
	return 0;
}

/*
 * Call dev->struct_mutex locked.
 * Attempts to remove all private references to a buffer by expiring its
 * fence object and removing from lru lists and memory managers.
 */

static void drm_bo_cleanup_refs(struct drm_buffer_object *bo, int remove_all)
{
	struct drm_device *dev = bo->dev;
	struct drm_buffer_manager *bm = &dev->bm;

	DRM_ASSERT_LOCKED(&dev->struct_mutex);

	atomic_inc(&bo->usage);
	mutex_unlock(&dev->struct_mutex);
	mutex_lock(&bo->mutex);

	DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);

	if (bo->fence && drm_fence_object_signaled(bo->fence,
						   bo->fence_type))
		drm_fence_usage_deref_unlocked(&bo->fence);

	if (bo->fence && remove_all)
		(void)drm_bo_expire_fence(bo, 0);

	mutex_lock(&dev->struct_mutex);

	if (!atomic_dec_and_test(&bo->usage))
		goto out;

	if (!bo->fence) {
		list_del_init(&bo->lru);
		if (bo->mem.mm_node) {
			drm_mm_put_block(bo->mem.mm_node);
			if (bo->pinned_node == bo->mem.mm_node)
				bo->pinned_node = NULL;
			bo->mem.mm_node = NULL;
		}
		list_del_init(&bo->pinned_lru);
		if (bo->pinned_node) {
			drm_mm_put_block(bo->pinned_node);
			bo->pinned_node = NULL;
		}
		list_del_init(&bo->ddestroy);
		mutex_unlock(&bo->mutex);
		drm_bo_destroy_locked(bo);
		return;
	}

	if (list_empty(&bo->ddestroy)) {
		drm_fence_object_flush(bo->fence, bo->fence_type);
		list_add_tail(&bo->ddestroy, &bm->ddestroy);
		schedule_delayed_work(&bm->wq,
				      ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
	}

out:
	mutex_unlock(&bo->mutex);
	return;
}

/*
 * Verify that refcount is 0 and that there are no internal references
 * to the buffer object. Then destroy it.
 */

static void drm_bo_destroy_locked(struct drm_buffer_object *bo)
{
	struct drm_device *dev = bo->dev;
	struct drm_buffer_manager *bm = &dev->bm;

	DRM_ASSERT_LOCKED(&dev->struct_mutex);

	DRM_DEBUG("freeing %p\n", bo);
	if (list_empty(&bo->lru) && bo->mem.mm_node == NULL &&
	    list_empty(&bo->pinned_lru) && bo->pinned_node == NULL &&
	    list_empty(&bo->ddestroy) && atomic_read(&bo->usage) == 0) {
		if (bo->fence != NULL) {
			DRM_ERROR("Fence was non-zero.\n");
			drm_bo_cleanup_refs(bo, 0);
			return;
		}

#ifdef DRM_ODD_MM_COMPAT
		BUG_ON(!list_empty(&bo->vma_list));
		BUG_ON(!list_empty(&bo->p_mm_list));
#endif

		if (bo->ttm) {
			drm_ttm_unbind(bo->ttm);
			drm_ttm_destroy(bo->ttm);
			bo->ttm = NULL;
		}

		atomic_dec(&bm->count);

		drm_ctl_free(bo, sizeof(*bo), DRM_MEM_BUFOBJ);

		return;
	}

	/*
	 * Some stuff is still trying to reference the buffer object.
	 * Get rid of those references.
	 */

	drm_bo_cleanup_refs(bo, 0);

	return;
}

/*
 * Call dev->struct_mutex locked.
 */

static void drm_bo_delayed_delete(struct drm_device *dev, int remove_all)
{
	struct drm_buffer_manager *bm = &dev->bm;

	struct drm_buffer_object *entry, *nentry;
	struct list_head *list, *next;

	list_for_each_safe(list, next, &bm->ddestroy) {
		entry = list_entry(list, struct drm_buffer_object, ddestroy);

		nentry = NULL;
		if (next != &bm->ddestroy) {
			nentry = list_entry(next, struct drm_buffer_object,
					    ddestroy);
			atomic_inc(&nentry->usage);
		}

		drm_bo_cleanup_refs(entry, remove_all);

		if (nentry)
			atomic_dec(&nentry->usage);
	}
}

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
static void drm_bo_delayed_workqueue(void *data)
#else
static void drm_bo_delayed_workqueue(struct work_struct *work)
#endif
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
	struct drm_device *dev = (struct drm_device *) data;
	struct drm_buffer_manager *bm = &dev->bm;
#else
	struct drm_buffer_manager *bm =
	    container_of(work, struct drm_buffer_manager, wq.work);
	struct drm_device *dev = container_of(bm, struct drm_device, bm);
#endif

	DRM_DEBUG("Delayed delete Worker\n");

	mutex_lock(&dev->struct_mutex);
	if (!bm->initialized) {
		mutex_unlock(&dev->struct_mutex);
		return;
	}
	drm_bo_delayed_delete(dev, 0);
	if (bm->initialized && !list_empty(&bm->ddestroy)) {
		schedule_delayed_work(&bm->wq,
				      ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
	}
	mutex_unlock(&dev->struct_mutex);
}

void drm_bo_usage_deref_locked(struct drm_buffer_object **bo)
{
	struct drm_buffer_object *tmp_bo = *bo;
	*bo = NULL;

	DRM_ASSERT_LOCKED(&tmp_bo->dev->struct_mutex);

	if (atomic_dec_and_test(&tmp_bo->usage))
		drm_bo_destroy_locked(tmp_bo);
}
EXPORT_SYMBOL(drm_bo_usage_deref_locked);

void drm_bo_usage_deref_unlocked(struct drm_buffer_object **bo)
{
	struct drm_buffer_object *tmp_bo = *bo;
	struct drm_device *dev = tmp_bo->dev;

	*bo = NULL;
	if (atomic_dec_and_test(&tmp_bo->usage)) {
		mutex_lock(&dev->struct_mutex);
		if (atomic_read(&tmp_bo->usage) == 0)
			drm_bo_destroy_locked(tmp_bo);
		mutex_unlock(&dev->struct_mutex);
	}
}
EXPORT_SYMBOL(drm_bo_usage_deref_unlocked);

void drm_putback_buffer_objects(struct drm_device *dev)
{
	struct drm_buffer_manager *bm = &dev->bm;
	struct list_head *list = &bm->unfenced;
	struct drm_buffer_object *entry, *next;

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry_safe(entry, next, list, lru) {
		atomic_inc(&entry->usage);
		mutex_unlock(&dev->struct_mutex);

		mutex_lock(&entry->mutex);
		BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
		mutex_lock(&dev->struct_mutex);

		list_del_init(&entry->lru);
		DRM_FLAG_MASKED(entry->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
		wake_up_all(&entry->event_queue);

		/*
		 * FIXME: Might want to put back on head of list
		 * instead of tail here.
		 */

		drm_bo_add_to_lru(entry);
		mutex_unlock(&entry->mutex);
		drm_bo_usage_deref_locked(&entry);
	}
	mutex_unlock(&dev->struct_mutex);
}
EXPORT_SYMBOL(drm_putback_buffer_objects);

/*
 * Note. The caller has to register (if applicable)
 * and deregister fence object usage.
 */

int drm_fence_buffer_objects(struct drm_device *dev,
			     struct list_head *list,
			     uint32_t fence_flags,
			     struct drm_fence_object *fence,
			     struct drm_fence_object **used_fence)
{
	struct drm_buffer_manager *bm = &dev->bm;
	struct drm_buffer_object *entry;
	uint32_t fence_type = 0;
	uint32_t fence_class = ~0;
	int count = 0;
	int ret = 0;
	struct list_head *l;

	mutex_lock(&dev->struct_mutex);

	if (!list)
		list = &bm->unfenced;

	if (fence)
		fence_class = fence->fence_class;

	list_for_each_entry(entry, list, lru) {
		BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
		fence_type |= entry->new_fence_type;
		if (fence_class == ~0)
			fence_class = entry->new_fence_class;
		else if (entry->new_fence_class != fence_class) {
			DRM_ERROR("Unmatching fence classes on unfenced list: "
				  "%d and %d.\n",
				  fence_class,
				  entry->new_fence_class);
			ret = -EINVAL;
			goto out;
		}
		count++;
	}

	if (!count) {
		ret = -EINVAL;
		goto out;
	}

	if (fence) {
		if ((fence_type & fence->type) != fence_type ||
		    (fence->fence_class != fence_class)) {
			DRM_ERROR("Given fence doesn't match buffers "
				  "on unfenced list.\n");
			ret = -EINVAL;
			goto out;
		}
	} else {
		mutex_unlock(&dev->struct_mutex);
		ret = drm_fence_object_create(dev, fence_class, fence_type,
					      fence_flags | DRM_FENCE_FLAG_EMIT,
					      &fence);
		mutex_lock(&dev->struct_mutex);
		if (ret)
			goto out;
	}

	count = 0;
	l = list->next;
	while (l != list) {
		prefetch(l->next);
		entry = list_entry(l, struct drm_buffer_object, lru);
		atomic_inc(&entry->usage);
		mutex_unlock(&dev->struct_mutex);
		mutex_lock(&entry->mutex);
		mutex_lock(&dev->struct_mutex);
		list_del_init(l);
		if (entry->priv_flags & _DRM_BO_FLAG_UNFENCED) {
			count++;
			if (entry->fence)
				drm_fence_usage_deref_locked(&entry->fence);
			entry->fence = drm_fence_reference_locked(fence);
			entry->fence_class = entry->new_fence_class;
			entry->fence_type = entry->new_fence_type;
			DRM_FLAG_MASKED(entry->priv_flags, 0,
					_DRM_BO_FLAG_UNFENCED);
			wake_up_all(&entry->event_queue);
			drm_bo_add_to_lru(entry);
		}
		mutex_unlock(&entry->mutex);
		drm_bo_usage_deref_locked(&entry);
		l = list->next;
	}
	DRM_DEBUG("Fenced %d buffers\n", count);
out:
	mutex_unlock(&dev->struct_mutex);
	*used_fence = fence;
	return ret;
}
EXPORT_SYMBOL(drm_fence_buffer_objects);

/*
 * bo->mutex locked
 */

static int drm_bo_evict(struct drm_buffer_object *bo, unsigned mem_type,
			int no_wait)
{
	int ret = 0;
	struct drm_device *dev = bo->dev;
	struct drm_bo_mem_reg evict_mem;

	/*
	 * Someone might have modified the buffer before we took the
	 * buffer mutex.
	 */

	do {
		bo->priv_flags &= ~_DRM_BO_FLAG_UNLOCKED;

		if (unlikely(bo->mem.flags &
			     (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT)))
			goto out_unlock;
		if (unlikely(bo->priv_flags & _DRM_BO_FLAG_UNFENCED))
			goto out_unlock;
		if (unlikely(bo->mem.mem_type != mem_type))
			goto out_unlock;
		ret = drm_bo_wait(bo, 0, 1, no_wait, 0);
		if (ret)
			goto out_unlock;

	} while(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED);

	evict_mem = bo->mem;
	evict_mem.mm_node = NULL;
	evict_mem.proposed_flags = dev->driver->bo_driver->evict_flags(bo);

	mutex_lock(&dev->struct_mutex);
	list_del_init(&bo->lru);
	mutex_unlock(&dev->struct_mutex);

	ret = drm_bo_mem_space(bo, &evict_mem, no_wait);

	if (ret) {
		if (ret != -EAGAIN)
			DRM_ERROR("Failed to find memory space for "
				  "buffer 0x%p eviction.\n", bo);
		goto out;
	}

	ret = drm_bo_handle_move_mem(bo, &evict_mem, 1, no_wait);

	if (ret) {
		if (ret != -EAGAIN)
			DRM_ERROR("Buffer eviction failed\n");
		goto out;
	}

	DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_EVICTED,
			_DRM_BO_FLAG_EVICTED);

out:
	mutex_lock(&dev->struct_mutex);
	if (evict_mem.mm_node) {
		if (evict_mem.mm_node != bo->pinned_node)
			drm_mm_put_block(evict_mem.mm_node);
		evict_mem.mm_node = NULL;
	}
	drm_bo_add_to_lru(bo);
	BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED);
out_unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

/**
 * Repeatedly evict memory from the LRU for @mem_type until we create enough
 * space, or we've evicted everything and there isn't enough space.
 */
static int drm_bo_mem_force_space(struct drm_device *dev,
				  struct drm_bo_mem_reg *mem,