path: root/shared-core/i915_drm.h
blob: ab13cd4a6e078696b744df20f8435d7f8307660b (plain)
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _I915_DRM_H_
#define _I915_DRM_H_

/* Please note that modifications to all structs defined here are
 * subject to backwards-compatibility constraints.
 */

#include "drm.h"

/* Each region is a minimum of 16k, and there are at most 255 of them.
 */
#define I915_NR_TEX_REGIONS 255	/* table size 2k - maximum due to use
				 * of chars for next/prev indices */
#define I915_LOG_MIN_TEX_REGION_SIZE 14
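
/* Worked out (illustrative, not part of the ABI): the minimum region size is
 * 1 << I915_LOG_MIN_TEX_REGION_SIZE = 16 KiB, and texList[] in the sarea below
 * holds I915_NR_TEX_REGIONS + 1 = 256 entries, so the next/prev indices fit in
 * an unsigned char, which is what bounds the table at the 2k noted above.
 */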

typedef struct drm_i915_init {
	enum {
		I915_INIT_DMA = 0x01,
		I915_CLEANUP_DMA = 0x02,
		I915_RESUME_DMA = 0x03,

		/* Since this struct isn't versioned, just use a new
		 * 'func' code to indicate the presence of dri2 sarea
		 * info. */
		I915_INIT_DMA2 = 0x04
	} func;
	unsigned int mmio_offset;
	int sarea_priv_offset;
	unsigned int ring_start;
	unsigned int ring_end;
	unsigned int ring_size;
	unsigned int front_offset;
	unsigned int back_offset;
	unsigned int depth_offset;
	unsigned int w;
	unsigned int h;
	unsigned int pitch;
	unsigned int pitch_bits;
	unsigned int back_pitch;
	unsigned int depth_pitch;
	unsigned int cpp;
	unsigned int chipset;
	unsigned int sarea_handle;
} drm_i915_init_t;

typedef struct drm_i915_sarea {
	struct drm_tex_region texList[I915_NR_TEX_REGIONS + 1];
	int last_upload;	/* last time texture was uploaded */
	int last_enqueue;	/* last time a buffer was enqueued */
	int last_dispatch;	/* age of the most recently dispatched buffer */
	int ctxOwner;		/* last context to upload state */
	int texAge;
	int pf_enabled;		/* is pageflipping allowed? */
	int pf_active;
	int pf_current_page;	/* which buffer is being displayed? */
	int perf_boxes;		/* performance boxes to be displayed */
	int width, height;      /* screen size in pixels */

	drm_handle_t front_handle;
	int front_offset;
	int front_size;

	drm_handle_t back_handle;
	int back_offset;
	int back_size;

	drm_handle_t depth_handle;
	int depth_offset;
	int depth_size;

	drm_handle_t tex_handle;
	int tex_offset;
	int tex_size;
	int log_tex_granularity;
	int pitch;
	int rotation;           /* 0, 90, 180 or 270 */
	int rotated_offset;
	int rotated_size;
	int rotated_pitch;
	int virtualX, virtualY;

	unsigned int front_tiled;
	unsigned int back_tiled;
	unsigned int depth_tiled;
	unsigned int rotated_tiled;
	unsigned int rotated2_tiled;

	int planeA_x;
	int planeA_y;
	int planeA_w;
	int planeA_h;
	int planeB_x;
	int planeB_y;
	int planeB_w;
	int planeB_h;

	/* Triple buffering */
	drm_handle_t third_handle;
	int third_offset;
	int third_size;
	unsigned int third_tiled;

	/* buffer object handles for the static buffers.  May change
	 * over the lifetime of the client, though it doesn't in our current
	 * implementation.
	 */
	unsigned int front_bo_handle;
	unsigned int back_bo_handle;
	unsigned int third_bo_handle;
	unsigned int depth_bo_handle;
} drm_i915_sarea_t;

/* Driver specific fence types and classes.
 */

/* The only fence class we support */
#define DRM_I915_FENCE_CLASS_ACCEL 0
/* Fence type that guarantees read-write flush */
#define DRM_I915_FENCE_TYPE_RW 2
/* MI_FLUSH programmed just before the fence */
#define DRM_I915_FENCE_FLAG_FLUSHED 0x01000000

/* Flags for perf_boxes
 */
#define I915_BOX_RING_EMPTY    0x1
#define I915_BOX_FLIP          0x2
#define I915_BOX_WAIT          0x4
#define I915_BOX_TEXTURE_LOAD  0x8
#define I915_BOX_LOST_CONTEXT  0x10

/* I915 specific ioctls
 * The device specific ioctl range is 0x40 to 0x79.
 */
#define DRM_I915_INIT		0x00
#define DRM_I915_FLUSH		0x01
#define DRM_I915_FLIP		0x02
#define DRM_I915_BATCHBUFFER	0x03
#define DRM_I915_IRQ_EMIT	0x04
#define DRM_I915_IRQ_WAIT	0x05
#define DRM_I915_GETPARAM	0x06
#define DRM_I915_SETPARAM	0x07
#define DRM_I915_ALLOC		0x08
#define DRM_I915_FREE		0x09
#define DRM_I915_INIT_HEAP	0x0a
#define DRM_I915_CMDBUFFER	0x0b
#define DRM_I915_DESTROY_HEAP	0x0c
#define DRM_I915_SET_VBLANK_PIPE	0x0d
#define DRM_I915_GET_VBLANK_PIPE	0x0e
#define DRM_I915_VBLANK_SWAP	0x0f
#define DRM_I915_MMIO		0x10
#define DRM_I915_HWS_ADDR	0x11
#define DRM_I915_EXECBUFFER	0x12
#define DRM_I915_GEM_INIT	0x13
#define DRM_I915_GEM_EXECBUFFER	0x14
#define DRM_I915_GEM_PIN	0x15
#define DRM_I915_GEM_UNPIN	0x16
#define DRM_I915_GEM_BUSY	0x17
#define DRM_I915_GEM_THROTTLE	0x18
#define DRM_I915_GEM_ENTERVT	0x19
#define DRM_I915_GEM_LEAVEVT	0x1a
#define DRM_I915_GEM_CREATE	0x1b
#define DRM_I915_GEM_PREAD	0x1c
#define DRM_I915_GEM_PWRITE	0x1d
#define DRM_I915_GEM_MMAP	0x1e
#define DRM_I915_GEM_SET_DOMAIN	0x1f
#define DRM_I915_GEM_SW_FINISH	0x20
#define DRM_I915_GEM_SET_TILING	0x21
#define DRM_I915_GEM_GET_TILING	0x22

#define DRM_IOCTL_I915_INIT		DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
#define DRM_IOCTL_I915_FLUSH		DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
#define DRM_IOCTL_I915_FLIP		DRM_IOW( DRM_COMMAND_BASE + DRM_I915_FLIP, drm_i915_flip_t)
#define DRM_IOCTL_I915_BATCHBUFFER	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_BATCHBUFFER, drm_i915_batchbuffer_t)
#define DRM_IOCTL_I915_IRQ_EMIT         DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_IRQ_EMIT, drm_i915_irq_emit_t)
#define DRM_IOCTL_I915_IRQ_WAIT         DRM_IOW( DRM_COMMAND_BASE + DRM_I915_IRQ_WAIT, drm_i915_irq_wait_t)
#define DRM_IOCTL_I915_GETPARAM         DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GETPARAM, drm_i915_getparam_t)
#define DRM_IOCTL_I915_SETPARAM         DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SETPARAM, drm_i915_setparam_t)
#define DRM_IOCTL_I915_ALLOC            DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_ALLOC, drm_i915_mem_alloc_t)
#define DRM_IOCTL_I915_FREE             DRM_IOW( DRM_COMMAND_BASE + DRM_I915_FREE, drm_i915_mem_free_t)
#define DRM_IOCTL_I915_INIT_HEAP        DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT_HEAP, drm_i915_mem_init_heap_t)
#define DRM_IOCTL_I915_CMDBUFFER	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_CMDBUFFER, drm_i915_cmdbuffer_t)
#define DRM_IOCTL_I915_DESTROY_HEAP	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_DESTROY_HEAP, drm_i915_mem_destroy_heap_t)
#define DRM_IOCTL_I915_SET_VBLANK_PIPE	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
#define DRM_IOCTL_I915_GET_VBLANK_PIPE	DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
#define DRM_IOCTL_I915_VBLANK_SWAP	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
#define DRM_IOCTL_I915_MMIO             DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_MMIO, drm_i915_mmio)
#define DRM_IOCTL_I915_EXECBUFFER	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_EXECBUFFER, struct drm_i915_execbuffer)
#define DRM_IOCTL_I915_GEM_INIT		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init)
#define DRM_IOCTL_I915_GEM_EXECBUFFER	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer)
#define DRM_IOCTL_I915_GEM_PIN		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin)
#define DRM_IOCTL_I915_GEM_UNPIN	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin)
#define DRM_IOCTL_I915_GEM_BUSY		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
#define DRM_IOCTL_I915_GEM_THROTTLE	DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE)
#define DRM_IOCTL_I915_GEM_ENTERVT	DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT)
#define DRM_IOCTL_I915_GEM_LEAVEVT	DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT)
#define DRM_IOCTL_I915_GEM_CREATE	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create)
#define DRM_IOCTL_I915_GEM_PREAD	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread)
#define DRM_IOCTL_I915_GEM_PWRITE	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite)
#define DRM_IOCTL_I915_GEM_MMAP		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
#define DRM_IOCTL_I915_GEM_SET_DOMAIN	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain)
#define DRM_IOCTL_I915_GEM_SW_FINISH	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish)
#define DRM_IOCTL_I915_GEM_SET_TILING	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling)
#define DRM_IOCTL_I915_GEM_GET_TILING	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling)

/* Asynchronous page flipping:
 */
typedef struct drm_i915_flip {
	/*
	 * This is really talking about planes, and we could rename it
	 * except for the fact that some of the duplicated i915_drm.h files
	 * out there check for HAVE_I915_FLIP and so might pick up this
	 * version.
	 */
	int pipes;
} drm_i915_flip_t;

/* Allow drivers to submit batchbuffers directly to hardware, relying
 * on the security mechanisms provided by hardware.
 */
typedef struct drm_i915_batchbuffer {
	int start;		/* agp offset */
	int used;		/* nr bytes in use */
	int DR1;		/* hw flags for GFX_OP_DRAWRECT_INFO */
	int DR4;		/* window origin for GFX_OP_DRAWRECT_INFO */
	int num_cliprects;	/* multipass with multiple cliprects? */
	struct drm_clip_rect __user *cliprects;	/* pointer to userspace cliprects */
} drm_i915_batchbuffer_t;

/* As above, but pass a pointer to userspace buffer which can be
 * validated by the kernel prior to sending to hardware.
 */
typedef struct drm_i915_cmdbuffer {
	char __user *buf;	/* pointer to userspace command buffer */
	int sz;			/* nr bytes in buf */
	int DR1;		/* hw flags for GFX_OP_DRAWRECT_INFO */
	int DR4;		/* window origin for GFX_OP_DRAWRECT_INFO */
	int num_cliprects;	/* multipass with multiple cliprects? */
	struct drm_clip_rect __user *cliprects;	/* pointer to userspace cliprects */
} drm_i915_cmdbuffer_t;

/* Userspace can request & wait on irq's:
 */
typedef struct drm_i915_irq_emit {
	int __user *irq_seq;
} drm_i915_irq_emit_t;

typedef struct drm_i915_irq_wait {
	int irq_seq;
} drm_i915_irq_wait_t;

/* Ioctl to query kernel params:
 */
#define I915_PARAM_IRQ_ACTIVE            1
#define I915_PARAM_ALLOW_BATCHBUFFER     2
#define I915_PARAM_LAST_DISPATCH         3
#define I915_PARAM_CHIPSET_ID            4
#define I915_PARAM_HAS_GEM               5

typedef struct drm_i915_getparam {
	int param;
	int __user *value;
} drm_i915_getparam_t;
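
/* Minimal usage sketch (illustrative only, not part of this header): querying
 * whether the running kernel supports GEM.  Assumes an already-open DRM file
 * descriptor and libdrm's drmIoctl() wrapper.
 *
 *	int has_gem = 0;
 *	drm_i915_getparam_t gp = {
 *		.param = I915_PARAM_HAS_GEM,
 *		.value = &has_gem,
 *	};
 *	int gem_available = (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0)
 *			    && has_gem;
 */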

/* Ioctl to set kernel params:
 */
#define I915_SETPARAM_USE_MI_BATCHBUFFER_START            1
#define I915_SETPARAM_TEX_LRU_LOG_GRANULARITY             2
#define I915_SETPARAM_ALLOW_BATCHBUFFER                   3

typedef struct drm_i915_setparam {
	int param;
	int value;
} drm_i915_setparam_t;

/* A memory manager for regions of shared memory:
 */
#define I915_MEM_REGION_AGP 1

typedef struct drm_i915_mem_alloc {
	int region;
	int alignment;
	int size;
	int __user *region_offset;	/* offset from start of fb or agp */
} drm_i915_mem_alloc_t;

typedef struct drm_i915_mem_free {
	int region;
	int region_offset;
} drm_i915_mem_free_t;

typedef struct drm_i915_mem_init_heap {
	int region;
	int size;
	int start;
} drm_i915_mem_init_heap_t;

/* Allow memory manager to be torn down and re-initialized (e.g. on
 * rotate):
 */
typedef struct drm_i915_mem_destroy_heap {
	int region;
} drm_i915_mem_destroy_heap_t;

/* Allow X server to configure which pipes to monitor for vblank signals
 */
#define	DRM_I915_VBLANK_PIPE_A	1
#define	DRM_I915_VBLANK_PIPE_B	2

typedef struct drm_i915_vblank_pipe {
	int pipe;
} drm_i915_vblank_pipe_t;

/* Schedule buffer swap at given vertical blank:
 */
typedef struct drm_i915_vblank_swap {
	drm_drawable_t drawable;
	enum drm_vblank_seq_type seqtype;
	unsigned int sequence;
} drm_i915_vblank_swap_t;

#define I915_MMIO_READ	0
#define I915_MMIO_WRITE 1

#define I915_MMIO_MAY_READ	0x1
#define I915_MMIO_MAY_WRITE	0x2

#define MMIO_REGS_IA_PRIMATIVES_COUNT		0
#define MMIO_REGS_IA_VERTICES_COUNT		1
#define MMIO_REGS_VS_INVOCATION_COUNT		2
#define MMIO_REGS_GS_PRIMITIVES_COUNT		3
#define MMIO_REGS_GS_INVOCATION_COUNT		4
#define MMIO_REGS_CL_PRIMITIVES_COUNT		5
#define MMIO_REGS_CL_INVOCATION_COUNT		6
#define MMIO_REGS_PS_INVOCATION_COUNT		7
#define MMIO_REGS_PS_DEPTH_COUNT		8
#define MMIO_REGS_DOVSTA			9
#define MMIO_REGS_GAMMA				10
#define MMIO_REGS_FENCE				11
#define MMIO_REGS_FENCE_NEW			12

typedef struct drm_i915_mmio_entry {
	unsigned int flag;
	unsigned int offset;
	unsigned int size;
} drm_i915_mmio_entry_t;

typedef struct drm_i915_mmio {
	unsigned int read_write:1;
	unsigned int reg:31;
	void __user *data;
} drm_i915_mmio_t;

typedef struct drm_i915_hws_addr {
	uint64_t addr;
} drm_i915_hws_addr_t;

struct drm_i915_gem_init {
	/**
	 * Beginning offset in the GTT to be managed by the DRM memory
	 * manager.
	 */
	uint64_t gtt_start;
	/**
	 * Ending offset in the GTT to be managed by the DRM memory
	 * manager.
	 */
	uint64_t gtt_end;
};

struct drm_i915_gem_create {
	/**
	 * Requested size for the object.
	 *
	 * The (page-aligned) allocated size for the object will be returned.
	 */
	uint64_t size;
	/**
	 * Returned handle for the object.
	 *
	 * Object handles are nonzero.
	 */
	uint32_t handle;
	uint32_t pad;
};
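
/* Minimal usage sketch (illustrative only): creating a 4 KiB GEM object.
 * Assumes an open DRM fd and libdrm's drmIoctl().  On success the kernel has
 * filled in the page-aligned size and a nonzero handle.
 *
 *	struct drm_i915_gem_create create = { .size = 4096 };
 *	uint32_t handle = 0;
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create) == 0)
 *		handle = create.handle;
 */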

struct drm_i915_gem_pread {
	/** Handle for the object being read. */
	uint32_t handle;
	uint32_t pad;
	/** Offset into the object to read from */
	uint64_t offset;
	/** Length of data to read */
	uint64_t size;
	/** Pointer to write the data into. */
	uint64_t data_ptr;	/* void *, but pointers are not 32/64 compatible */
};

struct drm_i915_gem_pwrite {
	/** Handle for the object being written to. */
	uint32_t handle;
	uint32_t pad;
	/** Offset into the object to write to */
	uint64_t offset;
	/** Length of data to write */
	uint64_t size;
	/** Pointer to read the data from. */
	uint64_t data_ptr;	/* void *, but pointers are not 32/64 compatible */
};

struct drm_i915_gem_mmap {
	/** Handle for the object being mapped. */
	uint32_t handle;
	uint32_t pad;
	/** Offset in the object to map. */
	uint64_t offset;
	/**
	 * Length of data to map.
	 *
	 * The value will be page-aligned.
	 */
	uint64_t size;
	/** Returned pointer the data was mapped at */
	uint64_t addr_ptr;	/* void *, but pointers are not 32/64 compatible */
};

struct drm_i915_gem_set_domain {
	/** Handle for the object */
	uint32_t handle;

	/** New read domains */
	uint32_t read_domains;

	/** New write domain */
	uint32_t write_domain;
};

struct drm_i915_gem_sw_finish {
	/** Handle for the object */
	uint32_t handle;
};

struct drm_i915_gem_relocation_entry {
	/**
	 * Handle of the buffer being pointed to by this relocation entry.
	 *
	 * It's appealing to make this an index into the mm_validate_entry
	 * list to refer to the buffer, but using a handle lets the driver
	 * create a relocation list for state buffers once and not re-write
	 * it per exec using the buffer.
	 */
	uint32_t target_handle;

	/**
	 * Value to be added to the offset of the target buffer to make up
	 * the relocation entry.
	 */
	uint32_t delta;

	/** Offset in the buffer the relocation entry will be written into */
	uint64_t offset;

	/**
	 * Offset value of the target buffer that the relocation entry was last
	 * written as.
	 *
	 * If the buffer has the same offset as last time, we can skip syncing
	 * and writing the relocation.  This value is written back out by
	 * the execbuffer ioctl when the relocation is written.
	 */
	uint64_t presumed_offset;

	/**
	 * Target memory domains read by this operation.
	 */
	uint32_t read_domains;

	/**
	 * Target memory domains written by this operation.
	 *
	 * Note that only one domain may be written by the whole
	 * execbuffer operation, so that where there are conflicts,
	 * the application will get -EINVAL back.
	 */
	uint32_t write_domain;
};

/** @{
 * Intel memory domains
 *
 * Most of these just align with the various caches in
 * the system and are used to flush and invalidate as
 * objects end up cached in different domains.
 */
/** CPU cache */
#define I915_GEM_DOMAIN_CPU		0x00000001
/** Render cache, used by 2D and 3D drawing */
#define I915_GEM_DOMAIN_RENDER		0x00000002
/** Sampler cache, used by texture engine */
#define I915_GEM_DOMAIN_SAMPLER		0x00000004
/** Command queue, used to load batch buffers */
#define I915_GEM_DOMAIN_COMMAND		0x00000008
/** Instruction cache, used by shader programs */
#define I915_GEM_DOMAIN_INSTRUCTION	0x00000010
/** Vertex address cache */
#define I915_GEM_DOMAIN_VERTEX		0x00000020
/** GTT domain - aperture and scanout */
#define I915_GEM_DOMAIN_GTT		0x00000040
/** @} */
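
/* Usage sketch (illustrative only): before the CPU accesses an object through
 * a mapping obtained with DRM_IOCTL_I915_GEM_MMAP, userspace typically moves
 * the object to the CPU domain so the kernel can flush the right caches.
 * Assumes an open DRM fd, libdrm's drmIoctl() and an existing object handle.
 *
 *	struct drm_i915_gem_set_domain sd = {
 *		.handle = handle,
 *		.read_domains = I915_GEM_DOMAIN_CPU,
 *		.write_domain = I915_GEM_DOMAIN_CPU,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd);
 */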

struct drm_i915_gem_exec_object {
	/**
	 * User's handle for a buffer to be bound into the GTT for this
	 * operation.
	 */
	uint32_t handle;

	/** Number of relocations to be performed on this buffer */
	uint32_t relocation_count;
	/**
	 * Pointer to array of struct drm_i915_gem_relocation_entry containing
	 * the relocations to be performed in this buffer.
	 */
	uint64_t relocs_ptr;

	/** Required alignment in graphics aperture */
	uint64_t alignment;

	/**
	 * Returned value of the updated offset of the object, for future
	 * presumed_offset writes.
	 */
	uint64_t offset;
};

struct drm_i915_gem_execbuffer {
	/**
	 * List of buffers to be validated with their relocations to be
	 * performed on them.
	 *
	 * This is a pointer to an array of struct drm_i915_gem_exec_object.
	 *
	 * These buffers must be listed in an order such that all relocations
	 * a buffer is performing refer to buffers that have already appeared
	 * in the validate list.
	 */
	uint64_t buffers_ptr;
	uint32_t buffer_count;

	/** Offset in the batchbuffer to start execution from. */
	uint32_t batch_start_offset;
	/** Bytes used in batchbuffer from batch_start_offset */
	uint32_t batch_len;
	uint32_t DR1;
	uint32_t DR4;
	uint32_t num_cliprects;
	uint64_t cliprects_ptr;	/* struct drm_clip_rect *cliprects */
};
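
/* Usage sketch (illustrative only): one relocation patching a batch buffer
 * with the GTT offset of a target buffer, plus the matching exec list.  Note
 * the ordering rule documented above: the target object appears in the
 * buffers_ptr array before the object that relocates against it, and the
 * batch buffer itself is conventionally last.  Assumes an open DRM fd,
 * libdrm's drmIoctl(), previously created handles target_handle and
 * batch_handle, and a batch of batch_len bytes whose dword at byte offset 4
 * needs patching.
 *
 *	struct drm_i915_gem_relocation_entry reloc = {
 *		.target_handle = target_handle,
 *		.delta = 0,
 *		.offset = 4,
 *		.presumed_offset = 0,
 *		.read_domains = I915_GEM_DOMAIN_RENDER,
 *		.write_domain = 0,
 *	};
 *	struct drm_i915_gem_exec_object objs[2] = {
 *		{ .handle = target_handle },
 *		{ .handle = batch_handle, .relocation_count = 1,
 *		  .relocs_ptr = (uintptr_t)&reloc },
 *	};
 *	struct drm_i915_gem_execbuffer execbuf = {
 *		.buffers_ptr = (uintptr_t)objs,
 *		.buffer_count = 2,
 *		.batch_len = batch_len,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER, &execbuf);
 */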

struct drm_i915_gem_pin {
	/** Handle of the buffer to be pinned. */
	uint32_t handle;
	uint32_t pad;

	/** alignment required within the aperture */
	uint64_t alignment;

	/** Returned GTT offset of the buffer. */
	uint64_t offset;
};

struct drm_i915_gem_unpin {
	/** Handle of the buffer to be unpinned. */
	uint32_t handle;
	uint32_t pad;
};

struct drm_i915_gem_busy {
	/** Handle of the buffer to check for busy */
	uint32_t handle;

	/** Return busy status (1 if busy, 0 if idle) */
	uint32_t busy;
};

#define I915_TILING_NONE	0
#define I915_TILING_X		1
#define I915_TILING_Y		2

#define I915_BIT_6_SWIZZLE_NONE		0
#define I915_BIT_6_SWIZZLE_9		1
#define I915_BIT_6_SWIZZLE_9_10		2
#define I915_BIT_6_SWIZZLE_9_11		3
#define I915_BIT_6_SWIZZLE_9_10_11	4
/* Not seen by userland */
#define I915_BIT_6_SWIZZLE_UNKNOWN	5

struct drm_i915_gem_set_tiling {
	/** Handle of the buffer to have its tiling state updated */
	uint32_t handle;

	/**
	 * Tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
	 * I915_TILING_Y).
	 *
	 * This value is to be set on request, and will be updated by the
	 * kernel on successful return with the actual chosen tiling layout.
	 *
	 * The tiling mode may be demoted to I915_TILING_NONE when the system
	 * has bit 6 swizzling that can't be managed correctly by GEM.
	 *
	 * Buffer contents become undefined when changing tiling_mode.
	 */
	uint32_t tiling_mode;

	/**
	 * Stride in bytes for the object when in I915_TILING_X or
	 * I915_TILING_Y.
	 */
	uint32_t stride;

	/**
	 * Returned address bit 6 swizzling required for CPU access through
	 * mmap mapping.
	 */
	uint32_t swizzle_mode;
};
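
/* Usage sketch (illustrative only): requesting X tiling with a 512-byte
 * stride.  The kernel may demote the request to I915_TILING_NONE and reports
 * the bit-6 swizzling userspace must honour for CPU access, so both fields
 * should be re-read after the call.  Assumes an open DRM fd, libdrm's
 * drmIoctl() and an existing object handle.
 *
 *	struct drm_i915_gem_set_tiling st = {
 *		.handle = handle,
 *		.tiling_mode = I915_TILING_X,
 *		.stride = 512,
 *	};
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &st) == 0) {
 *		uint32_t actual_tiling = st.tiling_mode;
 *		uint32_t swizzle = st.swizzle_mode;
 *	}
 */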

struct drm_i915_gem_get_tiling {
	/** Handle of the buffer to get tiling state for. */
	uint32_t handle;

	/**
	 * Current tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
	 * I915_TILING_Y).
	 */
	uint32_t tiling_mode;

	/**
	 * Returned address bit 6 swizzling required for CPU access through
	 * mmap mapping.
	 */
	uint32_t swizzle_mode;
};

#endif				/* _I915_DRM_H_ */
id='n1933' href='#n1933'>1933 1934 1935 1936 1937 1938 1939 1940 1941 1942 1943 1944 1945 1946 1947 1948 1949 1950 1951 1952 1953 1954 1955 1956 1957 1958 1959 1960 1961 1962 1963 1964 1965 1966 1967 1968 1969 1970 1971 1972 1973 1974 1975 1976 1977 1978 1979 1980 1981 1982 1983 1984 1985 1986 1987 1988 1989 1990 1991 1992 1993 1994 1995 1996 1997 1998 1999 2000 2001 2002 2003 2004 2005 2006 2007 2008 2009 2010 2011 2012 2013 2014 2015 2016 2017 2018 2019 2020 2021 2022 2023 2024 2025 2026 2027 2028 2029 2030 2031 2032 2033 2034 2035 2036 2037 2038 2039 2040 2041 2042 2043 2044 2045 2046 2047 2048 2049 2050 2051 2052 2053 2054 2055 2056 2057 2058 2059 2060 2061 2062 2063 2064 2065 2066 2067 2068 2069 2070 2071 2072 2073 2074 2075 2076 2077 2078 2079 2080 2081 2082 2083 2084 2085 2086 2087 2088 2089 2090 2091 2092 2093 2094 2095 2096 2097 2098 2099 2100 2101 2102 2103 2104 2105 2106 2107 2108 2109 2110 2111 2112 2113 2114 2115 2116 2117 2118 2119 2120 2121 2122 2123 2124 2125 2126 2127 2128 2129 2130 2131 2132 2133 2134 2135 2136 2137 2138 2139 2140 2141 2142 2143 2144 2145 2146 2147 2148 2149 2150 2151 2152 2153 2154 2155 2156 2157 2158 2159 2160 2161 2162 2163 2164 2165 2166 2167 2168 2169 2170 2171 2172 2173 2174 2175 2176 2177 2178 2179 2180 2181 2182 2183 2184 2185 2186 2187 2188 2189 2190 2191 2192 2193 2194 2195 2196 2197 2198 2199 2200 2201 2202 2203 2204 2205 2206 2207 2208 2209 2210 2211 2212 2213 2214 2215 2216 2217 2218 2219 2220 2221 2222 2223 2224 2225 2226 2227 2228 2229 2230 2231 2232 2233 2234 2235 2236 2237 2238 2239 2240 2241 2242 2243 2244 2245 2246 2247 2248 2249 2250 2251 2252 2253 2254 2255 2256 2257 2258 2259 2260 2261 2262 2263 2264 2265 2266 2267 2268 2269 2270 2271 2272 2273 2274 2275 2276 2277 2278 2279 2280 2281 2282 2283 2284 2285 2286 2287 2288 2289 2290 2291 2292 2293 2294 2295 2296 2297 2298 2299 2300 2301 2302 2303 2304 2305 2306 2307 2308 2309 2310 2311 2312 2313 2314 2315 2316 2317 2318 2319 2320 2321 2322 2323 2324 2325 2326 2327 2328 2329 2330 2331 2332 2333 2334 2335 2336 2337 2338 2339 2340 2341 2342 2343 2344 2345 2346 2347 2348 2349 2350 2351 2352 2353 2354 2355 2356 2357 2358 2359 2360 2361 2362 2363 2364 2365 2366 2367 2368 2369 2370 2371 2372 2373 2374 2375 2376 2377 2378 2379 2380 2381 2382 2383 2384 2385 2386 2387 2388 2389 2390 2391 2392 2393 2394 2395 2396 2397 2398 2399 2400 2401 2402 2403 2404 2405 2406 2407 2408 2409 2410 2411 2412 2413 2414 2415 2416 2417 2418 2419 2420 2421 2422 2423 2424 2425 2426 2427 2428 2429 2430 2431 2432 2433 2434 2435 2436 2437 2438 2439 2440 2441 2442 2443 2444 2445 2446 2447 2448 2449 2450 2451 2452 2453 2454 2455 2456 2457 2458 2459 2460 2461 2462 2463 2464 2465 2466 2467 2468 2469 2470 2471 2472 2473 2474 2475 2476 2477 2478 2479 2480 2481 2482 2483 2484 2485 2486 2487 2488 2489 2490 2491 2492 2493 2494 2495 2496 2497 2498 2499 2500 2501 2502 2503 2504 2505 2506 2507 2508 2509 2510 2511 2512 2513 2514 2515 2516 2517 2518 2519 2520 2521 2522 2523 2524 2525 2526 2527 2528 2529 2530 2531 2532 2533 2534 2535 2536 2537 2538 2539 2540 2541 2542 2543 2544 2545 2546 2547 2548 2549 2550 2551 2552 2553 2554 2555 2556 2557 2558 2559 2560 2561 2562 2563 2564 2565 2566 2567 2568 2569 2570 2571 2572 2573 2574 2575 2576 2577 2578 2579 2580 2581 2582 2583 2584 2585 2586 2587 2588 2589 2590 2591 2592 2593 2594 2595 2596 2597 2598 2599 2600 2601 2602 2603 2604 2605 2606
/**************************************************************************
 *
 * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include "drmP.h"

/*
 * Locking may look a bit complicated but isn't really:
 *
 * The buffer usage atomic_t needs to be protected by dev->struct_mutex
 * when there is a chance that it can be zero before or after the operation.
 *
 * dev->struct_mutex also protects all lists and list heads, as well as hash
 * tables and hash heads.
 *
 * bo->mutex protects the buffer object itself, excluding the usage field.
 * bo->mutex also protects the buffer list heads, so to manipulate those we need
 * both bo->mutex and dev->struct_mutex.
 *
 * Locking order is bo->mutex, dev->struct_mutex. Therefore list traversal is a bit
 * complicated. When dev->struct_mutex is released to grab bo->mutex, the list
 * traversal will, in general, need to be restarted.
 *
 */
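
/*
 * Illustrative sketch (not real code in this file) of the traversal-restart
 * pattern the locking order above forces, modelled on
 * drm_bo_mem_force_space() below: bo->mutex may not be taken while
 * dev->struct_mutex is held, so struct_mutex is dropped around per-buffer
 * work and the list is re-examined afterwards.
 *
 *	mutex_lock(&dev->struct_mutex);
 *	while (!list_empty(&man->lru)) {
 *		bo = list_entry(man->lru.next, struct drm_buffer_object, lru);
 *		atomic_inc(&bo->usage);
 *		mutex_unlock(&dev->struct_mutex);
 *		mutex_lock(&bo->mutex);
 *		... operate on bo ...
 *		mutex_unlock(&bo->mutex);
 *		drm_bo_usage_deref_unlocked(&bo);
 *		mutex_lock(&dev->struct_mutex);
 *	}
 *	mutex_unlock(&dev->struct_mutex);
 */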

static void drm_bo_destroy_locked(struct drm_buffer_object * bo);
static int drm_bo_setup_vm_locked(struct drm_buffer_object * bo);
static void drm_bo_takedown_vm_locked(struct drm_buffer_object * bo);
static void drm_bo_unmap_virtual(struct drm_buffer_object * bo);

static inline uint32_t drm_bo_type_flags(unsigned type)
{
	return (1 << (24 + type));
}

/*
 * bo locked. dev->struct_mutex locked.
 */

void drm_bo_add_to_pinned_lru(struct drm_buffer_object * bo)
{
	struct drm_mem_type_manager *man;

	DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
	DRM_ASSERT_LOCKED(&bo->mutex);

	man = &bo->dev->bm.man[bo->pinned_mem_type];
	list_add_tail(&bo->pinned_lru, &man->pinned);
}

void drm_bo_add_to_lru(struct drm_buffer_object * bo)
{
	struct drm_mem_type_manager *man;

	DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);

	if (!(bo->mem.mask & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))
	    || bo->mem.mem_type != bo->pinned_mem_type) {
		man = &bo->dev->bm.man[bo->mem.mem_type];
		list_add_tail(&bo->lru, &man->lru);
	} else {
		INIT_LIST_HEAD(&bo->lru);
	}
}

static int drm_bo_vm_pre_move(struct drm_buffer_object * bo, int old_is_pci)
{
#ifdef DRM_ODD_MM_COMPAT
	int ret;

	if (!bo->map_list.map)
		return 0;

	ret = drm_bo_lock_kmm(bo);
	if (ret)
		return ret;
	drm_bo_unmap_virtual(bo);
	if (old_is_pci)
		drm_bo_finish_unmap(bo);
#else
	if (!bo->map_list.map)
		return 0;

	drm_bo_unmap_virtual(bo);
#endif
	return 0;
}

static void drm_bo_vm_post_move(struct drm_buffer_object * bo)
{
#ifdef DRM_ODD_MM_COMPAT
	int ret;

	if (!bo->map_list.map)
		return;

	ret = drm_bo_remap_bound(bo);
	if (ret) {
		DRM_ERROR("Failed to remap a bound buffer object.\n"
			  "\tThis might cause a sigbus later.\n");
	}
	drm_bo_unlock_kmm(bo);
#endif
}

/*
 * Call bo->mutex locked.
 */

static int drm_bo_add_ttm(struct drm_buffer_object * bo)
{
	struct drm_device *dev = bo->dev;
	int ret = 0;
	bo->ttm = NULL;

	DRM_ASSERT_LOCKED(&bo->mutex);

	switch (bo->type) {
	case drm_bo_type_dc:
		bo->ttm = drm_ttm_init(dev, bo->mem.num_pages << PAGE_SHIFT);
		if (!bo->ttm)
			ret = -ENOMEM;
		break;
	case drm_bo_type_kernel:
		bo->ttm = drm_ttm_init(dev, bo->mem.num_pages << PAGE_SHIFT);
		if (!bo->ttm)
			ret = -ENOMEM;
		break;
	case drm_bo_type_user:
	case drm_bo_type_fake:
		break;
	default:
		DRM_ERROR("Illegal buffer object type\n");
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int drm_bo_handle_move_mem(struct drm_buffer_object * bo,
				  struct drm_bo_mem_reg * mem,
				  int evict, int no_wait)
{
	struct drm_device *dev = bo->dev;
	struct drm_buffer_manager *bm = &dev->bm;
	int old_is_pci = drm_mem_reg_is_pci(dev, &bo->mem);
	int new_is_pci = drm_mem_reg_is_pci(dev, mem);
	struct drm_mem_type_manager *old_man = &bm->man[bo->mem.mem_type];
	struct drm_mem_type_manager *new_man = &bm->man[mem->mem_type];
	int ret = 0;

	if (old_is_pci || new_is_pci)
		ret = drm_bo_vm_pre_move(bo, old_is_pci);
	if (ret)
		return ret;

	/*
	 * Create and bind a ttm if required.
	 */

	if (!(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm == NULL)) {
		ret = drm_bo_add_ttm(bo);
		if (ret)
			goto out_err;

		if (mem->mem_type != DRM_BO_MEM_LOCAL) {
			ret = drm_bind_ttm(bo->ttm, new_man->flags &
					   DRM_BO_FLAG_CACHED,
					   mem->mm_node->start);
			if (ret)
				goto out_err;
		}
	}

	if ((bo->mem.mem_type == DRM_BO_MEM_LOCAL) && bo->ttm == NULL) {

		struct drm_bo_mem_reg *old_mem = &bo->mem;
		uint64_t save_flags = old_mem->flags;
		uint64_t save_mask = old_mem->mask;

		*old_mem = *mem;
		mem->mm_node = NULL;
		old_mem->mask = save_mask;
		DRM_FLAG_MASKED(save_flags, mem->flags, DRM_BO_MASK_MEMTYPE);

	} else if (!(old_man->flags & _DRM_FLAG_MEMTYPE_FIXED) &&
		   !(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED)) {

		ret = drm_bo_move_ttm(bo, evict, no_wait, mem);

	} else if (dev->driver->bo_driver->move) {
		ret = dev->driver->bo_driver->move(bo, evict, no_wait, mem);

	} else {

		ret = drm_bo_move_memcpy(bo, evict, no_wait, mem);

	}

	if (ret)
		goto out_err;

	if (old_is_pci || new_is_pci)
		drm_bo_vm_post_move(bo);

	if (bo->priv_flags & _DRM_BO_FLAG_EVICTED) {
		ret =
		    dev->driver->bo_driver->invalidate_caches(dev,
							      bo->mem.flags);
		if (ret)
			DRM_ERROR("Can not flush read caches\n");
	}

	DRM_FLAG_MASKED(bo->priv_flags,
			(evict) ? _DRM_BO_FLAG_EVICTED : 0,
			_DRM_BO_FLAG_EVICTED);

	if (bo->mem.mm_node)
		bo->offset = bo->mem.mm_node->start << PAGE_SHIFT;

	return 0;

      out_err:
	if (old_is_pci || new_is_pci)
		drm_bo_vm_post_move(bo);

	new_man = &bm->man[bo->mem.mem_type];
	if ((new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && bo->ttm) {
		drm_ttm_unbind(bo->ttm);
		drm_destroy_ttm(bo->ttm);
		bo->ttm = NULL;
	}

	return ret;
}

/*
 * Call bo->mutex locked.
 * Wait until the buffer is idle.
 */

int drm_bo_wait(struct drm_buffer_object * bo, int lazy, int ignore_signals,
		int no_wait)
{
	int ret;

	DRM_ASSERT_LOCKED(&bo->mutex);

	if (bo->fence) {
		if (drm_fence_object_signaled(bo->fence, bo->fence_type, 0)) {
			drm_fence_usage_deref_unlocked(&bo->fence);
			return 0;
		}
		if (no_wait) {
			return -EBUSY;
		}
		ret =
		    drm_fence_object_wait(bo->fence, lazy, ignore_signals,
					  bo->fence_type);
		if (ret)
			return ret;

		drm_fence_usage_deref_unlocked(&bo->fence);
	}
	return 0;
}

static int drm_bo_expire_fence(struct drm_buffer_object * bo, int allow_errors)
{
	struct drm_device *dev = bo->dev;
	struct drm_buffer_manager *bm = &dev->bm;

	if (bo->fence) {
		if (bm->nice_mode) {
			unsigned long _end = jiffies + 3 * DRM_HZ;
			int ret;
			do {
				ret = drm_bo_wait(bo, 0, 1, 0);
				if (ret && allow_errors)
					return ret;

			} while (ret && !time_after_eq(jiffies, _end));

			if (bo->fence) {
				bm->nice_mode = 0;
				DRM_ERROR("Detected GPU lockup or "
					  "fence driver was taken down. "
					  "Evicting buffer.\n");
			}
		}
		if (bo->fence)
			drm_fence_usage_deref_unlocked(&bo->fence);
	}
	return 0;
}

/*
 * Call dev->struct_mutex locked.
 * Attempts to remove all private references to a buffer by expiring its
 * fence object and removing from lru lists and memory managers.
 */

static void drm_bo_cleanup_refs(struct drm_buffer_object * bo, int remove_all)
{
	struct drm_device *dev = bo->dev;
	struct drm_buffer_manager *bm = &dev->bm;

	DRM_ASSERT_LOCKED(&dev->struct_mutex);

	atomic_inc(&bo->usage);
	mutex_unlock(&dev->struct_mutex);
	mutex_lock(&bo->mutex);

	DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);

	if (bo->fence && drm_fence_object_signaled(bo->fence,
						   bo->fence_type, 0))
		drm_fence_usage_deref_unlocked(&bo->fence);

	if (bo->fence && remove_all)
		(void)drm_bo_expire_fence(bo, 0);

	mutex_lock(&dev->struct_mutex);

	if (!atomic_dec_and_test(&bo->usage)) {
		goto out;
	}

	if (!bo->fence) {
		list_del_init(&bo->lru);
		if (bo->mem.mm_node) {
			drm_mm_put_block(bo->mem.mm_node);
			if (bo->pinned_node == bo->mem.mm_node)
				bo->pinned_node = NULL;
			bo->mem.mm_node = NULL;
		}
		list_del_init(&bo->pinned_lru);
		if (bo->pinned_node) {
			drm_mm_put_block(bo->pinned_node);
			bo->pinned_node = NULL;
		}
		list_del_init(&bo->ddestroy);
		mutex_unlock(&bo->mutex);
		drm_bo_destroy_locked(bo);
		return;
	}

	if (list_empty(&bo->ddestroy)) {
		drm_fence_object_flush(bo->fence, bo->fence_type);
		list_add_tail(&bo->ddestroy, &bm->ddestroy);
		schedule_delayed_work(&bm->wq,
				      ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
	}

      out:
	mutex_unlock(&bo->mutex);
	return;
}

/*
 * Verify that refcount is 0 and that there are no internal references
 * to the buffer object. Then destroy it.
 */

static void drm_bo_destroy_locked(struct drm_buffer_object * bo)
{
	struct drm_device *dev = bo->dev;
	struct drm_buffer_manager *bm = &dev->bm;

	DRM_ASSERT_LOCKED(&dev->struct_mutex);

	if (list_empty(&bo->lru) && bo->mem.mm_node == NULL &&
	    list_empty(&bo->pinned_lru) && bo->pinned_node == NULL &&
	    list_empty(&bo->ddestroy) && atomic_read(&bo->usage) == 0) {
		if (bo->fence != NULL) {
			DRM_ERROR("Fence was non-zero.\n");
			drm_bo_cleanup_refs(bo, 0);
			return;
		}

#ifdef DRM_ODD_MM_COMPAT
		BUG_ON(!list_empty(&bo->vma_list));
		BUG_ON(!list_empty(&bo->p_mm_list));
#endif

		if (bo->ttm) {
			drm_ttm_unbind(bo->ttm);
			drm_destroy_ttm(bo->ttm);
			bo->ttm = NULL;
		}

		atomic_dec(&bm->count);

		BUG_ON(!list_empty(&bo->base.list));
		drm_ctl_free(bo, sizeof(*bo), DRM_MEM_BUFOBJ);

		return;
	}

	/*
	 * Some stuff is still trying to reference the buffer object.
	 * Get rid of those references.
	 */

	drm_bo_cleanup_refs(bo, 0);

	return;
}

/*
 * Call dev->struct_mutex locked.
 */

static void drm_bo_delayed_delete(struct drm_device * dev, int remove_all)
{
	struct drm_buffer_manager *bm = &dev->bm;

	struct drm_buffer_object *entry, *nentry;
	struct list_head *list, *next;

	list_for_each_safe(list, next, &bm->ddestroy) {
		entry = list_entry(list, struct drm_buffer_object, ddestroy);

		nentry = NULL;
		if (next != &bm->ddestroy) {
			nentry = list_entry(next, struct drm_buffer_object,
					    ddestroy);
			atomic_inc(&nentry->usage);
		}

		drm_bo_cleanup_refs(entry, remove_all);

		if (nentry) {
			atomic_dec(&nentry->usage);
		}
	}
}

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
static void drm_bo_delayed_workqueue(void *data)
#else
static void drm_bo_delayed_workqueue(struct work_struct *work)
#endif
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
	struct drm_device *dev = (struct drm_device *) data;
	struct drm_buffer_manager *bm = &dev->bm;
#else
	struct drm_buffer_manager *bm =
	    container_of(work, struct drm_buffer_manager, wq.work);
	struct drm_device *dev = container_of(bm, struct drm_device, bm);
#endif

	DRM_DEBUG("Delayed delete Worker\n");

	mutex_lock(&dev->struct_mutex);
	if (!bm->initialized) {
		mutex_unlock(&dev->struct_mutex);
		return;
	}
	drm_bo_delayed_delete(dev, 0);
	if (bm->initialized && !list_empty(&bm->ddestroy)) {
		schedule_delayed_work(&bm->wq,
				      ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
	}
	mutex_unlock(&dev->struct_mutex);
}

void drm_bo_usage_deref_locked(struct drm_buffer_object ** bo)
{
	struct drm_buffer_object *tmp_bo = *bo;
	*bo = NULL;

	DRM_ASSERT_LOCKED(&tmp_bo->dev->struct_mutex);

	if (atomic_dec_and_test(&tmp_bo->usage)) {
		drm_bo_destroy_locked(tmp_bo);
	}
}

static void drm_bo_base_deref_locked(struct drm_file * file_priv,
				     struct drm_user_object * uo)
{
	struct drm_buffer_object *bo =
	    drm_user_object_entry(uo, struct drm_buffer_object, base);

	DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);

	drm_bo_takedown_vm_locked(bo);
	drm_bo_usage_deref_locked(&bo);
}

void drm_bo_usage_deref_unlocked(struct drm_buffer_object ** bo)
{
	struct drm_buffer_object *tmp_bo = *bo;
	struct drm_device *dev = tmp_bo->dev;

	*bo = NULL;
	if (atomic_dec_and_test(&tmp_bo->usage)) {
		mutex_lock(&dev->struct_mutex);
		if (atomic_read(&tmp_bo->usage) == 0)
			drm_bo_destroy_locked(tmp_bo);
		mutex_unlock(&dev->struct_mutex);
	}
}
EXPORT_SYMBOL(drm_bo_usage_deref_unlocked);

/*
 * Note: the caller has to register (if applicable)
 * and deregister fence object usage.
 */

int drm_fence_buffer_objects(struct drm_file * file_priv,
			     struct list_head *list,
			     uint32_t fence_flags,
			     struct drm_fence_object * fence,
			     struct drm_fence_object ** used_fence)
{
	struct drm_device *dev = file_priv->head->dev;
	struct drm_buffer_manager *bm = &dev->bm;

	struct drm_buffer_object *entry;
	uint32_t fence_type = 0;
	int count = 0;
	int ret = 0;
	struct list_head *l;
	LIST_HEAD(f_list);

	mutex_lock(&dev->struct_mutex);

	if (!list)
		list = &bm->unfenced;

	list_for_each_entry(entry, list, lru) {
		BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
		fence_type |= entry->fence_type;
		if (entry->fence_class != 0) {
			DRM_ERROR("Fence class %d is not implemented yet.\n",
				  entry->fence_class);
			ret = -EINVAL;
			goto out;
		}
		count++;
	}

	if (!count) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * Transfer to a local list before we release the dev->struct_mutex;
	 * This is so we don't get any new unfenced objects while fencing
	 * the ones we already have.
	 */

	list_splice_init(list, &f_list);

	if (fence) {
		if ((fence_type & fence->type) != fence_type) {
			DRM_ERROR("Given fence doesn't match buffers "
				  "on unfenced list.\n");
			ret = -EINVAL;
			goto out;
		}
	} else {
		mutex_unlock(&dev->struct_mutex);
		ret = drm_fence_object_create(dev, 0, fence_type,
					      fence_flags | DRM_FENCE_FLAG_EMIT,
					      &fence);
		mutex_lock(&dev->struct_mutex);
		if (ret)
			goto out;
	}

	count = 0;
	l = f_list.next;
	while (l != &f_list) {
		prefetch(l->next);
		entry = list_entry(l, struct drm_buffer_object, lru);
		atomic_inc(&entry->usage);
		mutex_unlock(&dev->struct_mutex);
		mutex_lock(&entry->mutex);
		mutex_lock(&dev->struct_mutex);
		list_del_init(l);
		if (entry->priv_flags & _DRM_BO_FLAG_UNFENCED) {
			count++;
			if (entry->fence)
				drm_fence_usage_deref_locked(&entry->fence);
			entry->fence = drm_fence_reference_locked(fence);
			DRM_FLAG_MASKED(entry->priv_flags, 0,
					_DRM_BO_FLAG_UNFENCED);
			DRM_WAKEUP(&entry->event_queue);
			drm_bo_add_to_lru(entry);
		}
		mutex_unlock(&entry->mutex);
		drm_bo_usage_deref_locked(&entry);
		l = f_list.next;
	}
	DRM_DEBUG("Fenced %d buffers\n", count);
      out:
	mutex_unlock(&dev->struct_mutex);
	*used_fence = fence;
	return ret;
}

EXPORT_SYMBOL(drm_fence_buffer_objects);

/*
 * bo->mutex locked
 */

static int drm_bo_evict(struct drm_buffer_object * bo, unsigned mem_type,
			int no_wait)
{
	int ret = 0;
	struct drm_device *dev = bo->dev;
	struct drm_bo_mem_reg evict_mem;

	/*
	 * Someone might have modified the buffer before we took the buffer mutex.
	 */

	if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED)
		goto out;
	if (bo->mem.mem_type != mem_type)
		goto out;

	ret = drm_bo_wait(bo, 0, 0, no_wait);

	if (ret && ret != -EAGAIN) {
		DRM_ERROR("Failed to expire fence before "
			  "buffer eviction.\n");
		goto out;
	}

	evict_mem = bo->mem;
	evict_mem.mm_node = NULL;

	if (bo->type == drm_bo_type_fake) {
		bo->mem.mem_type = DRM_BO_MEM_LOCAL;
		bo->mem.mm_node = NULL;
		goto out1;
	}

	evict_mem = bo->mem;
	evict_mem.mask = dev->driver->bo_driver->evict_mask(bo);
	ret = drm_bo_mem_space(bo, &evict_mem, no_wait);

	if (ret) {
		if (ret != -EAGAIN)
			DRM_ERROR("Failed to find memory space for "
				  "buffer 0x%p eviction.\n", bo);
		goto out;
	}

	ret = drm_bo_handle_move_mem(bo, &evict_mem, 1, no_wait);

	if (ret) {
		if (ret != -EAGAIN)
			DRM_ERROR("Buffer eviction failed\n");
		goto out;
	}

      out1:
	mutex_lock(&dev->struct_mutex);
	if (evict_mem.mm_node) {
		if (evict_mem.mm_node != bo->pinned_node)
			drm_mm_put_block(evict_mem.mm_node);
		evict_mem.mm_node = NULL;
	}
	list_del(&bo->lru);
	drm_bo_add_to_lru(bo);
	mutex_unlock(&dev->struct_mutex);

	DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_EVICTED,
			_DRM_BO_FLAG_EVICTED);

      out:
	return ret;
}

static int drm_bo_mem_force_space(struct drm_device * dev,
				  struct drm_bo_mem_reg * mem,
				  uint32_t mem_type, int no_wait)
{
	struct drm_mm_node *node;
	struct drm_buffer_manager *bm = &dev->bm;
	struct drm_buffer_object *entry;
	struct drm_mem_type_manager *man = &bm->man[mem_type];
	struct list_head *lru;
	unsigned long num_pages = mem->num_pages;
	int ret;

	mutex_lock(&dev->struct_mutex);
	do {
		node = drm_mm_search_free(&man->manager, num_pages,
					  mem->page_alignment, 1);
		if (node)
			break;

		lru = &man->lru;
		if (lru->next == lru)
			break;

		entry = list_entry(lru->next, struct drm_buffer_object, lru);
		atomic_inc(&entry->usage);
		mutex_unlock(&dev->struct_mutex);
		mutex_lock(&entry->mutex);
		BUG_ON(entry->mem.flags & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT));

		ret = drm_bo_evict(entry, mem_type, no_wait);
		mutex_unlock(&entry->mutex);
		drm_bo_usage_deref_unlocked(&entry);
		if (ret)
			return ret;
		mutex_lock(&dev->struct_mutex);
	} while (1);

	if (!node) {
		mutex_unlock(&dev->struct_mutex);
		return -ENOMEM;
	}

	node = drm_mm_get_block(node, num_pages, mem->page_alignment);
	mutex_unlock(&dev->struct_mutex);
	mem->mm_node = node;
	mem->mem_type = mem_type;
	return 0;
}

static int drm_bo_mt_compatible(struct drm_mem_type_manager * man,
				uint32_t mem_type,
				uint32_t mask, uint32_t * res_mask)
{
	uint32_t cur_flags = drm_bo_type_flags(mem_type);
	uint32_t flag_diff;

	if (man->flags & _DRM_FLAG_MEMTYPE_CACHED)
		cur_flags |= DRM_BO_FLAG_CACHED;
	if (man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE)
		cur_flags |= DRM_BO_FLAG_MAPPABLE;
	if (man->flags & _DRM_FLAG_MEMTYPE_CSELECT)
		DRM_FLAG_MASKED(cur_flags, mask, DRM_BO_FLAG_CACHED);

	if ((cur_flags & mask & DRM_BO_MASK_MEM) == 0)
		return 0;

	if (mem_type == DRM_BO_MEM_LOCAL) {
		*res_mask = cur_flags;
		return 1;
	}

	flag_diff = (mask ^ cur_flags);
	if ((flag_diff & DRM_BO_FLAG_CACHED) &&
	    (!(mask & DRM_BO_FLAG_CACHED) ||
	     (mask & DRM_BO_FLAG_FORCE_CACHING)))
		return 0;

	if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
	    ((mask & DRM_BO_FLAG_MAPPABLE) ||
	     (mask & DRM_BO_FLAG_FORCE_MAPPABLE)) )
		return 0;

	*res_mask = cur_flags;
	return 1;
}

int drm_bo_mem_space(struct drm_buffer_object * bo,
		     struct drm_bo_mem_reg * mem, int no_wait)
{
	struct drm_device *dev = bo->dev;
	struct drm_buffer_manager *bm = &dev->bm;
	struct drm_mem_type_manager *man;

	uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio;
	const uint32_t *prios = dev->driver->bo_driver->mem_type_prio;
	uint32_t i;
	uint32_t mem_type = DRM_BO_MEM_LOCAL;
	uint32_t cur_flags;
	int type_found = 0;
	int type_ok = 0;
	int has_eagain = 0;
	struct drm_mm_node *node = NULL;
	int ret;

	mem->mm_node = NULL;
	for (i = 0; i < num_prios; ++i) {
		mem_type = prios[i];
		man = &bm->man[mem_type];

		type_ok = drm_bo_mt_compatible(man, mem_type, mem->mask,
					       &cur_flags);

		if (!type_ok)
			continue;

		if (mem_type == DRM_BO_MEM_LOCAL)
			break;

		if ((mem_type == bo->pinned_mem_type) &&
		    (bo->pinned_node != NULL)) {
			node = bo->pinned_node;
			break;
		}

		mutex_lock(&dev->struct_mutex);
		if (man->has_type && man->use_type) {
			type_found = 1;
			node = drm_mm_search_free(&man->manager, mem->num_pages,
						  mem->page_alignment, 1);
			if (node)
				node = drm_mm_get_block(node, mem->num_pages,
							mem->page_alignment);
		}
		mutex_unlock(&dev->struct_mutex);
		if (node)
			break;
	}

	if ((type_ok && (mem_type == DRM_BO_MEM_LOCAL)) || node) {
		mem->mm_node = node;
		mem->mem_type = mem_type;
		mem->flags = cur_flags;
		return 0;
	}

	if (!type_found)
		return -EINVAL;

	num_prios = dev->driver->bo_driver->num_mem_busy_prio;
	prios = dev->driver->bo_driver->mem_busy_prio;

	for (i = 0; i < num_prios; ++i) {
		mem_type = prios[i];
		man = &bm->man[mem_type];

		if (!man->has_type)
			continue;

		if (!drm_bo_mt_compatible(man, mem_type, mem->mask, &cur_flags))
			continue;

		ret = drm_bo_mem_force_space(dev, mem, mem_type, no_wait);

		if (ret == 0) {
			mem->flags = cur_flags;
			return 0;
		}

		if (ret == -EAGAIN)
			has_eagain = 1;
	}

	ret = (has_eagain) ? -EAGAIN : -ENOMEM;
	return ret;
}

EXPORT_SYMBOL(drm_bo_mem_space);

static int drm_bo_new_mask(struct drm_buffer_object * bo,
			   uint64_t new_mask, uint32_t hint)
{
	uint32_t new_props;

	if (bo->type == drm_bo_type_user) {
		DRM_ERROR("User buffers are not supported yet\n");
		return -EINVAL;
	}
	if (bo->type == drm_bo_type_fake &&
	    !(new_mask & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))) {
		DRM_ERROR("Fake buffers must be pinned.\n");
		return -EINVAL;
	}

	if ((new_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) {
		DRM_ERROR
		    ("DRM_BO_FLAG_NO_EVICT is only available to priviliged "
		     "processes\n");
		return -EPERM;
	}

	new_props = new_mask & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE |
				DRM_BO_FLAG_READ);

	if (!new_props) {
		DRM_ERROR("Invalid buffer object rwx properties\n");
		return -EINVAL;
	}

	bo->mem.mask = new_mask;
	return 0;
}

/*
 * Call dev->struct_mutex locked.
 */

struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file *file_priv,
					      uint32_t handle, int check_owner)
{
	struct drm_user_object *uo;
	struct drm_buffer_object *bo;

	uo = drm_lookup_user_object(file_priv, handle);

	if (!uo || (uo->type != drm_buffer_type)) {
		DRM_ERROR("Could not find buffer object 0x%08x\n", handle);
		return NULL;
	}

	if (check_owner && file_priv != uo->owner) {
		if (!drm_lookup_ref_object(file_priv, uo, _DRM_REF_USE))
			return NULL;
	}

	bo = drm_user_object_entry(uo, struct drm_buffer_object, base);
	atomic_inc(&bo->usage);
	return bo;
}

/*
 * Call bo->mutex locked.
 * Returns 1 if the buffer is currently rendered to or from, 0 otherwise.
 * Unlike drm_bo_busy(), this does not do any fence flushing.
 */

static int drm_bo_quick_busy(struct drm_buffer_object * bo)
{
	struct drm_fence_object *fence = bo->fence;

	BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
	if (fence) {
		if (drm_fence_object_signaled(fence, bo->fence_type, 0)) {
			drm_fence_usage_deref_unlocked(&bo->fence);
			return 0;
		}
		return 1;
	}
	return 0;
}

/*
 * Call bo->mutex locked.
 * Returns 1 if the buffer is currently rendered to or from, 0 otherwise.
 */

static int drm_bo_busy(struct drm_buffer_object * bo)
{
	struct drm_fence_object *fence = bo->fence;

	BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
	if (fence) {
		if (drm_fence_object_signaled(fence, bo->fence_type, 0)) {
			drm_fence_usage_deref_unlocked(&bo->fence);
			return 0;
		}
		drm_fence_object_flush(fence, DRM_FENCE_TYPE_EXE);
		if (drm_fence_object_signaled(fence, bo->fence_type, 0)) {
			drm_fence_usage_deref_unlocked(&bo->fence);
			return 0;
		}
		return 1;
	}
	return 0;
}

static int drm_bo_read_cached(struct drm_buffer_object * bo)
{
	int ret = 0;

	BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
	if (bo->mem.mm_node)
		ret = drm_bo_evict(bo, DRM_BO_MEM_TT, 1);
	return ret;
}

/*
 * Wait until a buffer is unmapped.
 */

static int drm_bo_wait_unmapped(struct drm_buffer_object * bo, int no_wait)
{
	int ret = 0;

	if ((atomic_read(&bo->mapped) >= 0) && no_wait)
		return -EBUSY;

	DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
		    atomic_read(&bo->mapped) == -1);

	if (ret == -EINTR)
		ret = -EAGAIN;

	return ret;
}

static int drm_bo_check_unfenced(struct drm_buffer_object * bo)
{
	int ret;

	mutex_lock(&bo->mutex);
	ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
	mutex_unlock(&bo->mutex);
	return ret;
}

/*
 * Wait until a buffer, scheduled to be fenced moves off the unfenced list.
 * Until then, we cannot really do anything with it except delete it.
 * The unfenced list is a PITA, and the operations
 * 1) validating
 * 2) submitting commands
 * 3) fencing
 * should really be a single atomic operation.
 * We now "solve" this problem by keeping
 * the buffer "unfenced" after validating, but before fencing.
 */
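
/*
 * Illustrative timeline (not code) of the flags involved:
 *
 *	validate:  _DRM_BO_FLAG_UNFENCED is set and the buffer is moved to
 *	           bm->unfenced (see drm_bo_move_buffer()).
 *	submit:    commands referencing the buffer are emitted by the driver.
 *	fence:     drm_fence_buffer_objects() attaches the fence, clears
 *	           _DRM_BO_FLAG_UNFENCED, wakes bo->event_queue and puts the
 *	           buffer back on its LRU list.
 */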

static int drm_bo_wait_unfenced(struct drm_buffer_object * bo, int no_wait,
				int eagain_if_wait)
{
	int ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);

	if (ret && no_wait)
		return -EBUSY;
	else if (!ret)
		return 0;

	ret = 0;
	mutex_unlock(&bo->mutex);
	DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
		    !drm_bo_check_unfenced(bo));
	mutex_lock(&bo->mutex);
	if (ret == -EINTR)
		return -EAGAIN;
	ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
	if (ret) {
		DRM_ERROR("Timeout waiting for buffer to become fenced\n");
		return -EBUSY;
	}
	if (eagain_if_wait)
		return -EAGAIN;

	return 0;
}

/*
 * Fill in the ioctl reply argument with buffer info.
 * Bo locked.
 */

static void drm_bo_fill_rep_arg(struct drm_buffer_object * bo,
				struct drm_bo_info_rep *rep)
{
	rep->handle = bo->base.hash.key;
	rep->flags = bo->mem.flags;
	rep->size = bo->mem.num_pages * PAGE_SIZE;
	rep->offset = bo->offset;
	rep->arg_handle = bo->map_list.user_token;
	rep->mask = bo->mem.mask;
	rep->buffer_start = bo->buffer_start;
	rep->fence_flags = bo->fence_type;
	rep->rep_flags = 0;
	rep->page_alignment = bo->mem.page_alignment;

	if ((bo->priv_flags & _DRM_BO_FLAG_UNFENCED) || drm_bo_quick_busy(bo)) {
		DRM_FLAG_MASKED(rep->rep_flags, DRM_BO_REP_BUSY,
				DRM_BO_REP_BUSY);
	}
}

/*
 * Wait for buffer idle and register that we've mapped the buffer.
 * Mapping is registered as a drm_ref_object with type _DRM_REF_TYPE1,
 * so that if the client dies, the mapping is automatically
 * unregistered.
 */

static int drm_buffer_object_map(struct drm_file *file_priv, uint32_t handle,
				 uint32_t map_flags, unsigned hint,
				 struct drm_bo_info_rep *rep)
{
	struct drm_buffer_object *bo;
	struct drm_device *dev = file_priv->head->dev;
	int ret = 0;
	int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;

	mutex_lock(&dev->struct_mutex);
	bo = drm_lookup_buffer_object(file_priv, handle, 1);
	mutex_unlock(&dev->struct_mutex);

	if (!bo)
		return -EINVAL;

	mutex_lock(&bo->mutex);
	if (!(hint & DRM_BO_HINT_ALLOW_UNFENCED_MAP)) {
		ret = drm_bo_wait_unfenced(bo, no_wait, 0);
		if (ret)
			goto out;
	}

	/*
	 * bo->mapped is -1 while the buffer is unmapped, so if
	 * atomic_inc_and_test() below returns true we were unmapped and are
	 * now the first mapper.  We need to do this test because unmapping
	 * can be done without the bo->mutex held.
	 */

	while (1) {
		if (atomic_inc_and_test(&bo->mapped)) {
			if (no_wait && drm_bo_busy(bo)) {
				atomic_dec(&bo->mapped);
				ret = -EBUSY;
				goto out;
			}
			ret = drm_bo_wait(bo, 0, 0, no_wait);
			if (ret) {
				atomic_dec(&bo->mapped);
				goto out;
			}

			if ((map_flags & DRM_BO_FLAG_READ) &&
			    (bo->mem.flags & DRM_BO_FLAG_READ_CACHED) &&
			    (!(bo->mem.flags & DRM_BO_FLAG_CACHED))) {
				drm_bo_read_cached(bo);
			}
			break;
		} else if ((map_flags & DRM_BO_FLAG_READ) &&
			   (bo->mem.flags & DRM_BO_FLAG_READ_CACHED) &&
			   (!(bo->mem.flags & DRM_BO_FLAG_CACHED))) {

			/*
			 * We are already mapped with different flags;
			 * we need to wait for unmap.
			 */

			ret = drm_bo_wait_unmapped(bo, no_wait);
			if (ret)
				goto out;

			continue;
		}
		break;
	}

	mutex_lock(&dev->struct_mutex);
	ret = drm_add_ref_object(file_priv, &bo->base, _DRM_REF_TYPE1);
	mutex_unlock(&dev->struct_mutex);
	if (ret) {
		if (atomic_add_negative(-1, &bo->mapped))
			DRM_WAKEUP(&bo->event_queue);

	} else
		drm_bo_fill_rep_arg(bo, rep);
      out:
	mutex_unlock(&bo->mutex);
	drm_bo_usage_deref_unlocked(&bo);
	return ret;
}

static int drm_buffer_object_unmap(struct drm_file *file_priv, uint32_t handle)
{
	struct drm_device *dev = file_priv->head->dev;
	struct drm_buffer_object *bo;
	struct drm_ref_object *ro;
	int ret = 0;

	mutex_lock(&dev->struct_mutex);

	bo = drm_lookup_buffer_object(file_priv, handle, 1);
	if (!bo) {
		ret = -EINVAL;
		goto out;
	}

	ro = drm_lookup_ref_object(file_priv, &bo->base, _DRM_REF_TYPE1);
	if (!ro) {
		ret = -EINVAL;
		goto out;
	}

	drm_remove_ref_object(file_priv, ro);
	drm_bo_usage_deref_locked(&bo);
      out:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/*
 * Call dev->struct_mutex locked.
 */

static void drm_buffer_user_object_unmap(struct drm_file *file_priv,
					 struct drm_user_object * uo,
					 enum drm_ref_type action)
{
	struct drm_buffer_object *bo =
	    drm_user_object_entry(uo, struct drm_buffer_object, base);

	/*
	 * We DON'T want to take bo->mutex here, because the waiter in
	 * drm_bo_wait_unmapped() holds it while waiting for the buffer to
	 * become unmapped.
	 */

	BUG_ON(action != _DRM_REF_TYPE1);

	if (atomic_add_negative(-1, &bo->mapped))
		DRM_WAKEUP(&bo->event_queue);
}

/*
 * bo->mutex locked.
 * Note that new_mem_flags are NOT transferred to the bo->mem.mask.
 */

int drm_bo_move_buffer(struct drm_buffer_object * bo, uint32_t new_mem_flags,
		       int no_wait, int move_unfenced)
{
	struct drm_device *dev = bo->dev;
	struct drm_buffer_manager *bm = &dev->bm;
	int ret = 0;
	struct drm_bo_mem_reg mem;
	/*
	 * Flush outstanding fences.
	 */

	drm_bo_busy(bo);

	/*
	 * Wait for outstanding fences.
	 */

	ret = drm_bo_wait(bo, 0, 0, no_wait);
	if (ret)
		return ret;

	mem.num_pages = bo->mem.num_pages;
	mem.size = mem.num_pages << PAGE_SHIFT;
	mem.mask = new_mem_flags;
	mem.page_alignment = bo->mem.page_alignment;

	mutex_lock(&bm->evict_mutex);
	mutex_lock(&dev->struct_mutex);
	list_del(&bo->lru);
	list_add_tail(&bo->lru, &bm->unfenced);
	DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
			_DRM_BO_FLAG_UNFENCED);
	mutex_unlock(&dev->struct_mutex);

	/*
	 * Determine where to move the buffer.
	 */
	ret = drm_bo_mem_space(bo, &mem, no_wait);
	if (ret)
		goto out_unlock;

	ret = drm_bo_handle_move_mem(bo, &mem, 0, no_wait);

 out_unlock:
	if (ret || !move_unfenced) {
		mutex_lock(&dev->struct_mutex);
		if (mem.mm_node) {
			if (mem.mm_node != bo->pinned_node)
				drm_mm_put_block(mem.mm_node);
			mem.mm_node = NULL;
		}
		DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
		DRM_WAKEUP(&bo->event_queue);
		list_del(&bo->lru);
		drm_bo_add_to_lru(bo);
		mutex_unlock(&dev->struct_mutex);
	}

	mutex_unlock(&bm->evict_mutex);
	return ret;
}

static int drm_bo_mem_compat(struct drm_bo_mem_reg * mem)
{
	uint32_t flag_diff = (mem->mask ^ mem->flags);

	if ((mem->mask & mem->flags & DRM_BO_MASK_MEM) == 0)
		return 0;
	if ((flag_diff & DRM_BO_FLAG_CACHED) &&
	    (!(mem->mask & DRM_BO_FLAG_CACHED) ||
	     (mem->mask & DRM_BO_FLAG_FORCE_CACHING))) {
		return 0;
	}
	if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
	    ((mem->mask & DRM_BO_FLAG_MAPPABLE) ||
	     (mem->mask & DRM_BO_FLAG_FORCE_MAPPABLE)))
		return 0;
	return 1;
}

static int drm_bo_check_fake(struct drm_device * dev, struct drm_bo_mem_reg * mem)
{
	struct drm_buffer_manager *bm = &dev->bm;
	struct drm_mem_type_manager *man;
	uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio;
	const uint32_t *prios = dev->driver->bo_driver->mem_type_prio;
	uint32_t i;
	int type_ok = 0;
	uint32_t mem_type = 0;
	uint32_t cur_flags;

	if (drm_bo_mem_compat(mem))
		return 0;

	BUG_ON(mem->mm_node);

	for (i = 0; i < num_prios; ++i) {
		mem_type = prios[i];
		man = &bm->man[mem_type];
		type_ok = drm_bo_mt_compatible(man, mem_type, mem->mask,
					       &cur_flags);
		if (type_ok)
			break;
	}

	if (type_ok) {
		mem->mm_node = NULL;
		mem->mem_type = mem_type;
		mem->flags = cur_flags;
		DRM_FLAG_MASKED(mem->flags, mem->mask, ~DRM_BO_MASK_MEMTYPE);
		return 0;
	}

	DRM_ERROR("Illegal fake buffer flags 0x%016llx\n",
		  (unsigned long long) mem->mask);
	return -EINVAL;
}

/*
 * bo locked.
 */

static int drm_buffer_object_validate(struct drm_buffer_object * bo,
				      uint32_t fence_class,
				      int move_unfenced, int no_wait)
{
	struct drm_device *dev = bo->dev;
	struct drm_buffer_manager *bm = &dev->bm;
	struct drm_bo_driver *driver = dev->driver->bo_driver;
	uint32_t ftype;
	int ret;

	DRM_DEBUG("New flags 0x%016llx, Old flags 0x%016llx\n",
		  (unsigned long long) bo->mem.mask,
		  (unsigned long long) bo->mem.flags);

	ret = driver->fence_type(bo, &ftype);

	if (ret) {
		DRM_ERROR("Driver did not support given buffer permissions\n");
		return ret;
	}

	/*
	 * We're switching command submission mechanism,
	 * or cannot simply rely on the hardware serializing for us.
	 *
	 * Wait for buffer idle.
	 */

	if ((fence_class != bo->fence_class) ||
	    ((ftype ^ bo->fence_type) & bo->fence_type)) {

		ret = drm_bo_wait(bo, 0, 0, no_wait);

		if (ret)
			return ret;

	}
	
	bo->fence_class = fence_class;
	bo->fence_type = ftype;
	ret = drm_bo_wait_unmapped(bo, no_wait);
	if (ret)
		return ret;

	if (bo->type == drm_bo_type_fake) {
		ret = drm_bo_check_fake(dev, &bo->mem);
		if (ret)
			return ret;
	}

	/*
	 * Check whether we need to move buffer.
	 */

	if (!drm_bo_mem_compat(&bo->mem)) {
		ret = drm_bo_move_buffer(bo, bo->mem.mask, no_wait,
					 move_unfenced);
		if (ret) {
			if (ret != -EAGAIN)
				DRM_ERROR("Failed moving buffer.\n");
			return ret;
		}
	}

	/*
	 * Pinned buffers.
	 */