path: root/linux/drm_context.h
blob: 474b5ba019f3f8c8113ae594221396e7e3c5ae0b
/**
 * \file drm_context.h 
 * IOCTLs for generic contexts
 * 
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Fri Nov 24 18:31:37 2000 by gareth@valinux.com
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

/*
 * ChangeLog:
 *  2001-11-16	Torsten Duwe <duwe@caldera.de>
 *		added context constructor/destructor hooks,
 *		needed by SiS driver's memory management.
 */

#define __NO_VERSION__
#include "drmP.h"

#if !__HAVE_CTX_BITMAP
#error "__HAVE_CTX_BITMAP must be defined"
#endif
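
/*
 * Illustrative sketch (not part of this file): a driver that wants the
 * context constructor/destructor hooks mentioned in the ChangeLog above
 * defines DRIVER_CTX_CTOR/DRIVER_CTX_DTOR before this template is included;
 * DRM(addctx) and DRM(rmctx) below invoke them.  The sis_ctx_alloc() and
 * sis_ctx_free() names are hypothetical placeholders.
 */
#if 0
#define DRIVER_CTX_CTOR( handle )	sis_ctx_alloc( handle )
#define DRIVER_CTX_DTOR( handle )	sis_ctx_free( handle )
#endif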


/******************************************************************/
/** \name Context bitmap support */
/*@{*/

/**
 * Free a handle from the context bitmap.
 *
 * \param dev DRM device.
 * \param ctx_handle context handle.
 *
 * Clears the bit specified by \p ctx_handle in drm_device::ctx_bitmap and the entry
 * in drm_device::context_sareas, while holding the drm_device::struct_sem
 * lock.
 */
void DRM(ctxbitmap_free)( drm_device_t *dev, int ctx_handle )
{
	if ( ctx_handle < 0 ) goto failed;
	if ( !dev->ctx_bitmap ) goto failed;

	if ( ctx_handle < DRM_MAX_CTXBITMAP ) {
		down(&dev->struct_sem);
		clear_bit( ctx_handle, dev->ctx_bitmap );
		dev->context_sareas[ctx_handle] = NULL;
		up(&dev->struct_sem);
		return;
	}
failed:
       	DRM_ERROR( "Attempt to free invalid context handle: %d\n",
		   ctx_handle );
       	return;
}

/** 
 * Context bitmap allocation.
 *
 * \param dev DRM device.
 * \return (non-negative) context handle on success or a negative number on failure.
 *
 * Finds the first zero bit in drm_device::ctx_bitmap and (re)allocates
 * drm_device::context_sareas to accommodate the new entry, while holding the
 * drm_device::struct_sem lock.
 */
int DRM(ctxbitmap_next)( drm_device_t *dev )
{
	int bit;

	if(!dev->ctx_bitmap) return -1;

	down(&dev->struct_sem);
	bit = find_first_zero_bit( dev->ctx_bitmap, DRM_MAX_CTXBITMAP );
	if ( bit < DRM_MAX_CTXBITMAP ) {
		set_bit( bit, dev->ctx_bitmap );
	   	DRM_DEBUG( "drm_ctxbitmap_next bit : %d\n", bit );
		if((bit+1) > dev->max_context) {
			dev->max_context = (bit+1);
			if(dev->context_sareas) {
				drm_map_t **ctx_sareas;

				ctx_sareas = DRM(realloc)(dev->context_sareas,
						(dev->max_context - 1) * 
						sizeof(*dev->context_sareas),
						dev->max_context * 
						sizeof(*dev->context_sareas),
						DRM_MEM_MAPS);
				if(!ctx_sareas) {
					clear_bit(bit, dev->ctx_bitmap);
					up(&dev->struct_sem);
					return -1;
				}
				dev->context_sareas = ctx_sareas;
				dev->context_sareas[bit] = NULL;
			} else {
				/* max_context == 1 at this point */
				dev->context_sareas = DRM(alloc)(
						dev->max_context * 
						sizeof(*dev->context_sareas),
						DRM_MEM_MAPS);
				if(!dev->context_sareas) {
					clear_bit(bit, dev->ctx_bitmap);
					up(&dev->struct_sem);
					return -1;
				}
				dev->context_sareas[bit] = NULL;
			}
		}
		up(&dev->struct_sem);
		return bit;
	}
	up(&dev->struct_sem);
	return -1;
}

/**
 * Context bitmap initialization.
 *
 * \param dev DRM device.
 *
 * Allocates and initializes drm_device::ctx_bitmap and drm_device::context_sareas,
 * while holding the drm_device::struct_sem lock.
 */
int DRM(ctxbitmap_init)( drm_device_t *dev )
{
	int i;
   	int temp;

	down(&dev->struct_sem);
	dev->ctx_bitmap = (unsigned long *) DRM(alloc)( PAGE_SIZE,
							DRM_MEM_CTXBITMAP );
	if ( dev->ctx_bitmap == NULL ) {
		up(&dev->struct_sem);
		return -ENOMEM;
	}
	memset( (void *)dev->ctx_bitmap, 0, PAGE_SIZE );
	dev->context_sareas = NULL;
	dev->max_context = -1;
	up(&dev->struct_sem);

	for ( i = 0 ; i < DRM_RESERVED_CONTEXTS ; i++ ) {
		temp = DRM(ctxbitmap_next)( dev );
	   	DRM_DEBUG( "drm_ctxbitmap_init : %d\n", temp );
	}

	return 0;
}

/**
 * Context bitmap cleanup.
 *
 * \param dev DRM device.
 *
 * Frees drm_device::ctx_bitmap and drm_device::context_sareas, while holding
 * the drm_device::struct_sem lock.
 */
void DRM(ctxbitmap_cleanup)( drm_device_t *dev )
{
	down(&dev->struct_sem);
	if( dev->context_sareas ) DRM(free)( dev->context_sareas,
					     sizeof(*dev->context_sareas) * 
					     dev->max_context,
					     DRM_MEM_MAPS );
	DRM(free)( (void *)dev->ctx_bitmap, PAGE_SIZE, DRM_MEM_CTXBITMAP );
	up(&dev->struct_sem);
}

/*@}*/

/******************************************************************/
/** \name Per Context SAREA Support */
/*@{*/

/**
 * Get per-context SAREA.
 * 
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg user argument pointing to a drm_ctx_priv_map structure.
 * \return zero on success or a negative number on failure.
 *
 * Looks up the map in drm_device::context_sareas for the given context ID and
 * returns its handle.
 */
int DRM(getsareactx)(struct inode *inode, struct file *filp,
		     unsigned int cmd, unsigned long arg)
{
	drm_file_t	*priv	= filp->private_data;
	drm_device_t	*dev	= priv->dev;
	drm_ctx_priv_map_t request;
	drm_map_t *map;

	if (copy_from_user(&request,
			   (drm_ctx_priv_map_t *)arg,
			   sizeof(request)))
		return -EFAULT;

	down(&dev->struct_sem);
	if (dev->max_context < 0 || request.ctx_id >= (unsigned) dev->max_context) {
		up(&dev->struct_sem);
		return -EINVAL;
	}

	map = dev->context_sareas[request.ctx_id];
	if (!map) {
		up(&dev->struct_sem);
		return -EINVAL;
	}
	up(&dev->struct_sem);

	request.handle = map->handle;
	if (copy_to_user((drm_ctx_priv_map_t *)arg, &request, sizeof(request)))
		return -EFAULT;
	return 0;
}

/**
 * Set per-context SAREA.
 * 
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg user argument pointing to a drm_ctx_priv_map structure.
 * \return zero on success or a negative number on failure.
 *
 * Searches for the mapping specified in \p arg and updates the corresponding
 * entry in drm_device::context_sareas.
 */
int DRM(setsareactx)(struct inode *inode, struct file *filp,
		     unsigned int cmd, unsigned long arg)
{
	drm_file_t	*priv	= filp->private_data;
	drm_device_t	*dev	= priv->dev;
	drm_ctx_priv_map_t request;
	drm_map_t *map = NULL;
	drm_map_list_t *r_list = NULL;
	struct list_head *list;

	if (copy_from_user(&request,
			   (drm_ctx_priv_map_t *)arg,
			   sizeof(request)))
		return -EFAULT;

	down(&dev->struct_sem);
	list_for_each(list, &dev->maplist->head) {
		r_list = list_entry(list, drm_map_list_t, head);
		if(r_list->map &&
		   r_list->map->handle == request.handle)
			goto found;
	}
bad:
	up(&dev->struct_sem);
	return -EINVAL;

found:
	map = r_list->map;
	if (!map) goto bad;
	if (dev->max_context < 0)
		goto bad;
	if (request.ctx_id >= (unsigned) dev->max_context)
		goto bad;
	dev->context_sareas[request.ctx_id] = map;
	up(&dev->struct_sem);
	return 0;
}
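
/*
 * Illustrative userspace sketch (not part of this file): calling the SAREA
 * ioctls above through the DRM_IOCTL_GET_SAREA_CTX request code that drm.h is
 * assumed to route to DRM(getsareactx).  Error handling is reduced to a NULL
 * return; fd is an already-opened DRM device node.
 */
#if 0
#include <sys/ioctl.h>
#include "drm.h"

static void *get_ctx_sarea( int fd, unsigned int ctx_id )
{
	drm_ctx_priv_map_t request;

	request.ctx_id = ctx_id;
	request.handle = NULL;
	if ( ioctl( fd, DRM_IOCTL_GET_SAREA_CTX, &request ) != 0 )
		return NULL;	/* kernel returned -EINVAL or -EFAULT */
	return request.handle;
}
#endif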

/*@}*/

/******************************************************************/
/** \name The actual DRM context handling routines */
/*@{*/

/**
 * Switch context.
 *
 * \param dev DRM device.
 * \param old old context handle.
 * \param new new context handle.
 * \return zero on success or a negative number on failure.
 *
 * Attempts to set drm_device::context_flag.
 */
int DRM(context_switch)( drm_device_t *dev, int old, int new )
{
	if ( test_and_set_bit( 0, &dev->context_flag ) ) {
		DRM_ERROR( "Reentering -- FIXME\n" );
		return -EBUSY;
	}

	DRM_DEBUG( "Context switch from %d to %d\n", old, new );

	if ( new == dev->last_context ) {
		clear_bit( 0, &dev->context_flag );
		return 0;
	}

	return 0;
}

/**
 * Complete context switch.
 *
 * \param dev DRM device.
 * \param new new context handle.
 * \return zero on success or a negative number on failure.
 *
 * Updates drm_device::last_context and drm_device::last_switch. Verifies the
 * hardware lock is held, clears the drm_device::context_flag and wakes up
 * drm_device::context_wait.
 */
int DRM(context_switch_complete)( drm_device_t *dev, int new )
{
	dev->last_context = new;  /* PRE/POST: This is the _only_ writer. */
	dev->last_switch  = jiffies;

	if ( !_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) ) {
		DRM_ERROR( "Lock isn't held after context switch\n" );
	}

	/* If a context switch is ever initiated when the kernel holds the
	 * lock, release that lock here.
	 */
	clear_bit( 0, &dev->context_flag );
	wake_up( &dev->context_wait );

	return 0;
}

/**
 * Reserve contexts.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg user argument pointing to a drm_ctx_res structure.
 * \return zero on success or a negative number on failure.
 */
int DRM(resctx)( struct inode *inode, struct file *filp,
		 unsigned int cmd, unsigned long arg )
{
	drm_ctx_res_t res;
	drm_ctx_t ctx;
	int i;

	if ( copy_from_user( &res, (drm_ctx_res_t *)arg, sizeof(res) ) )
		return -EFAULT;

	if ( res.count >= DRM_RESERVED_CONTEXTS ) {
		memset( &ctx, 0, sizeof(ctx) );
		for ( i = 0 ; i < DRM_RESERVED_CONTEXTS ; i++ ) {
			ctx.handle = i;
			if ( copy_to_user( &res.contexts[i],
					   &ctx, sizeof(ctx) ) )
				return -EFAULT;
		}
	}
	res.count = DRM_RESERVED_CONTEXTS;

	if ( copy_to_user( (drm_ctx_res_t *)arg, &res, sizeof(res) ) )
		return -EFAULT;
	return 0;
}

/**
 * Add context.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg user argument pointing to a drm_ctx structure.
 * \return zero on success or a negative number on failure.
 *
 * Gets a new handle for the context and copies it back to userspace.
 */
int DRM(addctx)( struct inode *inode, struct file *filp,
		 unsigned int cmd, unsigned long arg )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_ctx_list_t * ctx_entry;
	drm_ctx_t ctx;

	if ( copy_from_user( &ctx, (drm_ctx_t *)arg, sizeof(ctx) ) )
		return -EFAULT;

	ctx.handle = DRM(ctxbitmap_next)( dev );
	if ( ctx.handle == DRM_KERNEL_CONTEXT ) {
				/* Skip kernel's context and get a new one. */
		ctx.handle = DRM(ctxbitmap_next)( dev );
	}
	DRM_DEBUG( "%d\n", ctx.handle );
	if ( ctx.handle == -1 ) {
		DRM_DEBUG( "Not enough free contexts.\n" );
				/* Should this return -EBUSY instead? */
		return -ENOMEM;
	}
#ifdef DRIVER_CTX_CTOR
	if ( ctx.handle != DRM_KERNEL_CONTEXT )
		DRIVER_CTX_CTOR(ctx.handle); /* XXX: also pass dev ? */
#endif
	ctx_entry = DRM(alloc)( sizeof(*ctx_entry), DRM_MEM_CTXLIST );
	if ( !ctx_entry ) {
		DRM_DEBUG("out of memory\n");
		return -ENOMEM;
	}

	INIT_LIST_HEAD( &ctx_entry->head );
	ctx_entry->handle = ctx.handle;
	ctx_entry->tag = priv;

	down( &dev->ctxlist_sem );
	list_add( &ctx_entry->head, &dev->ctxlist->head );
	++dev->ctx_count;
	up( &dev->ctxlist_sem );

	if ( copy_to_user( (drm_ctx_t *)arg, &ctx, sizeof(ctx) ) )
		return -EFAULT;
	return 0;
}
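
/*
 * Illustrative userspace sketch (not part of this file): allocating and later
 * releasing a context through the ioctls implemented here.  DRM_IOCTL_ADD_CTX
 * and DRM_IOCTL_RM_CTX are the drm.h request codes assumed to be routed to
 * DRM(addctx) and DRM(rmctx).
 */
#if 0
#include <string.h>
#include <sys/ioctl.h>
#include "drm.h"

static int run_with_context( int fd )
{
	drm_ctx_t ctx;

	memset( &ctx, 0, sizeof(ctx) );
	if ( ioctl( fd, DRM_IOCTL_ADD_CTX, &ctx ) != 0 )
		return -1;		/* no free context handles */

	/* ... submit work against ctx.handle via driver-specific ioctls ... */

	if ( ioctl( fd, DRM_IOCTL_RM_CTX, &ctx ) != 0 )
		return -1;
	return 0;
}
#endif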

int DRM(modctx)( struct inode *inode, struct file *filp,
		 unsigned int cmd, unsigned long arg )
{
	/* This does nothing */
	return 0;
}

/**
 * Get context.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg user argument pointing to a drm_ctx structure.
 * \return zero on success or a negative number on failure.
 */
int DRM(getctx)( struct inode *inode, struct file *filp,
		 unsigned int cmd, unsigned long arg )
{
	drm_ctx_t ctx;

	if ( copy_from_user( &ctx, (drm_ctx_t*)arg, sizeof(ctx) ) )
		return -EFAULT;

	/* This is 0, because we don't handle any context flags */
	ctx.flags = 0;

	if ( copy_to_user( (drm_ctx_t*)arg, &ctx, sizeof(ctx) ) )
		return -EFAULT;
	return 0;
}

/**
 * Switch context.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg user argument pointing to a drm_ctx structure.
 * \return zero on success or a negative number on failure.
 *
 * Calls context_switch().
 */
int DRM(switchctx)( struct inode *inode, struct file *filp,
		    unsigned int cmd, unsigned long arg )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_ctx_t ctx;

	if ( copy_from_user( &ctx, (drm_ctx_t *)arg, sizeof(ctx) ) )
		return -EFAULT;

	DRM_DEBUG( "%d\n", ctx.handle );
	return DRM(context_switch)( dev, dev->last_context, ctx.handle );
}

/**
 * New context.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg user argument pointing to a drm_ctx structure.
 * \return zero on success or a negative number on failure.
 *
 * Calls context_switch_complete().
 */
int DRM(newctx)( struct inode *inode, struct file *filp,
		 unsigned int cmd, unsigned long arg )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_ctx_t ctx;

	if ( copy_from_user( &ctx, (drm_ctx_t *)arg, sizeof(ctx) ) )
		return -EFAULT;

	DRM_DEBUG( "%d\n", ctx.handle );
	DRM(context_switch_complete)( dev, ctx.handle );

	return 0;
}

/**
 * Remove context.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg user argument pointing to a drm_ctx structure.
 * \return zero on success or a negative number on failure.
 *
 * If not the special kernel context, calls ctxbitmap_free() to free the specified
 * context; the matching entry is also removed from drm_device::ctxlist.
 */
int DRM(rmctx)( struct inode *inode, struct file *filp,
		unsigned int cmd, unsigned long arg )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_ctx_t ctx;

	if ( copy_from_user( &ctx, (drm_ctx_t *)arg, sizeof(ctx) ) )
		return -EFAULT;

	DRM_DEBUG( "%d\n", ctx.handle );
	if ( ctx.handle == DRM_KERNEL_CONTEXT + 1 ) {
		priv->remove_auth_on_close = 1;
	}
	if ( ctx.handle != DRM_KERNEL_CONTEXT ) {
#ifdef DRIVER_CTX_DTOR
		DRIVER_CTX_DTOR(ctx.handle); /* XXX: also pass dev ? */
#endif
		DRM(ctxbitmap_free)( dev, ctx.handle );
	}

	down( &dev->ctxlist_sem );
	if ( !list_empty( &dev->ctxlist->head ) ) {
		drm_ctx_list_t *pos, *n;

		list_for_each_entry_safe( pos, n, &dev->ctxlist->head, head ) {
			if ( pos->handle == ctx.handle ) {
				list_del( &pos->head );
				DRM(free)( pos, sizeof(*pos), DRM_MEM_CTXLIST );
				--dev->ctx_count;
			}
		}
	}
	up( &dev->ctxlist_sem );

	return 0;
}

/*@}*/
">== I915_TILING_NONE) return size; /* 965+ just need multiples of page size for tiling */ if (IS_I965G(bufmgr_gem)) return ROUND_UP_TO(size, 4096); /* Older chips need powers of two, of at least 512k or 1M */ if (IS_I9XX(bufmgr_gem)) { min_size = 1024*1024; max_size = 128*1024*1024; } else { min_size = 512*1024; max_size = 64*1024*1024; } if (size > max_size) { *tiling_mode = I915_TILING_NONE; return size; } for (i = min_size; i < size; i <<= 1) ; return i; } /* * Round a given pitch up to the minimum required for X tiling on a * given chip. We use 512 as the minimum to allow for a later tiling * change. */ static unsigned long drm_intel_gem_bo_tile_pitch(drm_intel_bufmgr_gem *bufmgr_gem, unsigned long pitch, uint32_t tiling_mode) { unsigned long tile_width = 512; unsigned long i; if (tiling_mode == I915_TILING_NONE) return ROUND_UP_TO(pitch, tile_width); /* 965 is flexible */ if (IS_I965G(bufmgr_gem)) return ROUND_UP_TO(pitch, tile_width); /* Pre-965 needs power of two tile width */ for (i = tile_width; i < pitch; i <<= 1) ; return i; } static struct drm_intel_gem_bo_bucket * drm_intel_gem_bo_bucket_for_size(drm_intel_bufmgr_gem *bufmgr_gem, unsigned long size) { int i; for (i = 0; i < DRM_INTEL_GEM_BO_BUCKETS; i++) { struct drm_intel_gem_bo_bucket *bucket = &bufmgr_gem->cache_bucket[i]; if (bucket->size >= size) { return bucket; } } return NULL; } static void drm_intel_gem_dump_validation_list(drm_intel_bufmgr_gem *bufmgr_gem) { int i, j; for (i = 0; i < bufmgr_gem->exec_count; i++) { drm_intel_bo *bo = bufmgr_gem->exec_bos[i]; drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; if (bo_gem->relocs == NULL) { DBG("%2d: %d (%s)\n", i, bo_gem->gem_handle, bo_gem->name); continue; } for (j = 0; j < bo_gem->reloc_count; j++) { drm_intel_bo *target_bo = bo_gem->reloc_target_bo[j]; drm_intel_bo_gem *target_gem = (drm_intel_bo_gem *) target_bo; DBG("%2d: %d (%s)@0x%08llx -> " "%d (%s)@0x%08lx + 0x%08x\n", i, bo_gem->gem_handle, bo_gem->name, (unsigned long long)bo_gem->relocs[j].offset, target_gem->gem_handle, target_gem->name, target_bo->offset, bo_gem->relocs[j].delta); } } } static void drm_intel_gem_bo_reference(drm_intel_bo *bo) { drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; assert(atomic_read(&bo_gem->refcount) > 0); atomic_inc(&bo_gem->refcount); } /** * Adds the given buffer to the list of buffers to be validated (moved into the * appropriate memory type) with the next batch submission. * * If a buffer is validated multiple times in a batch submission, it ends up * with the intersection of the memory type flags and the union of the * access flags. */ static void drm_intel_add_validate_buffer(drm_intel_bo *bo) { drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr; drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; int index; if (bo_gem->validate_index != -1) return; /* Extend the array of validation entries as necessary. 
*/ if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) { int new_size = bufmgr_gem->exec_size * 2; if (new_size == 0) new_size = 5; bufmgr_gem->exec_objects = realloc(bufmgr_gem->exec_objects, sizeof(*bufmgr_gem->exec_objects) * new_size); bufmgr_gem->exec_bos = realloc(bufmgr_gem->exec_bos, sizeof(*bufmgr_gem->exec_bos) * new_size); bufmgr_gem->exec_size = new_size; } index = bufmgr_gem->exec_count; bo_gem->validate_index = index; /* Fill in array entry */ bufmgr_gem->exec_objects[index].handle = bo_gem->gem_handle; bufmgr_gem->exec_objects[index].relocation_count = bo_gem->reloc_count; bufmgr_gem->exec_objects[index].relocs_ptr = (uintptr_t) bo_gem->relocs; bufmgr_gem->exec_objects[index].alignment = 0; bufmgr_gem->exec_objects[index].offset = 0; bufmgr_gem->exec_bos[index] = bo; bufmgr_gem->exec_count++; } #define RELOC_BUF_SIZE(x) ((I915_RELOC_HEADER + x * I915_RELOC0_STRIDE) * \ sizeof(uint32_t)) static void drm_intel_bo_gem_set_in_aperture_size(drm_intel_bufmgr_gem *bufmgr_gem, drm_intel_bo_gem *bo_gem) { int size; assert(!bo_gem->used_as_reloc_target); /* The older chipsets are far-less flexible in terms of tiling, * and require tiled buffer to be size aligned in the aperture. * This means that in the worst possible case we will need a hole * twice as large as the object in order for it to fit into the * aperture. Optimal packing is for wimps. */ size = bo_gem->bo.size; if (!IS_I965G(bufmgr_gem) && bo_gem->tiling_mode != I915_TILING_NONE) size *= 2; bo_gem->reloc_tree_size = size; } static int drm_intel_setup_reloc_list(drm_intel_bo *bo) { drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr; unsigned int max_relocs = bufmgr_gem->max_relocs; if (bo->size / 4 < max_relocs) max_relocs = bo->size / 4; bo_gem->relocs = malloc(max_relocs * sizeof(struct drm_i915_gem_relocation_entry)); bo_gem->reloc_target_bo = malloc(max_relocs * sizeof(drm_intel_bo *)); return 0; } static int drm_intel_gem_bo_busy(drm_intel_bo *bo) { drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr; drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; struct drm_i915_gem_busy busy; int ret; memset(&busy, 0, sizeof(busy)); busy.handle = bo_gem->gem_handle; do { ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy); } while (ret == -1 && errno == EINTR); return (ret == 0 && busy.busy); } static int drm_intel_gem_bo_madvise_internal(drm_intel_bufmgr_gem *bufmgr_gem, drm_intel_bo_gem *bo_gem, int state) { struct drm_i915_gem_madvise madv; madv.handle = bo_gem->gem_handle; madv.madv = state; madv.retained = 1; ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv); return madv.retained; } static int drm_intel_gem_bo_madvise(drm_intel_bo *bo, int madv) { return drm_intel_gem_bo_madvise_internal ((drm_intel_bufmgr_gem *) bo->bufmgr, (drm_intel_bo_gem *) bo, madv); } /* drop the oldest entries that have been purged by the kernel */ static void drm_intel_gem_bo_cache_purge_bucket(drm_intel_bufmgr_gem *bufmgr_gem, struct drm_intel_gem_bo_bucket *bucket) { while (!DRMLISTEMPTY(&bucket->head)) { drm_intel_bo_gem *bo_gem; bo_gem = DRMLISTENTRY(drm_intel_bo_gem, bucket->head.next, head); if (drm_intel_gem_bo_madvise_internal (bufmgr_gem, bo_gem, I915_MADV_DONTNEED)) break; DRMLISTDEL(&bo_gem->head); drm_intel_gem_bo_free(&bo_gem->bo); } } static drm_intel_bo * drm_intel_gem_bo_alloc_internal(drm_intel_bufmgr *bufmgr, const char *name, unsigned long size, unsigned long flags) { drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem 
*) bufmgr; drm_intel_bo_gem *bo_gem; unsigned int page_size = getpagesize(); int ret; struct drm_intel_gem_bo_bucket *bucket; int alloc_from_cache; unsigned long bo_size; int for_render = 0; if (flags & BO_ALLOC_FOR_RENDER) for_render = 1; /* Round the allocated size up to a power of two number of pages. */ bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, size); /* If we don't have caching at this size, don't actually round the * allocation up. */ if (bucket == NULL) { bo_size = size; if (bo_size < page_size) bo_size = page_size; } else { bo_size = bucket->size; } pthread_mutex_lock(&bufmgr_gem->lock); /* Get a buffer out of the cache if available */ retry: alloc_from_cache = 0; if (bucket != NULL && !DRMLISTEMPTY(&bucket->head)) { if (for_render) { /* Allocate new render-target BOs from the tail (MRU) * of the list, as it will likely be hot in the GPU * cache and in the aperture for us. */ bo_gem = DRMLISTENTRY(drm_intel_bo_gem, bucket->head.prev, head); DRMLISTDEL(&bo_gem->head); alloc_from_cache = 1; } else { /* For non-render-target BOs (where we're probably * going to map it first thing in order to fill it * with data), check if the last BO in the cache is * unbusy, and only reuse in that case. Otherwise, * allocating a new buffer is probably faster than * waiting for the GPU to finish. */ bo_gem = DRMLISTENTRY(drm_intel_bo_gem, bucket->head.next, head); if (!drm_intel_gem_bo_busy(&bo_gem->bo)) { alloc_from_cache = 1; DRMLISTDEL(&bo_gem->head); } } if (alloc_from_cache) { if (!drm_intel_gem_bo_madvise_internal (bufmgr_gem, bo_gem, I915_MADV_WILLNEED)) { drm_intel_gem_bo_free(&bo_gem->bo); drm_intel_gem_bo_cache_purge_bucket(bufmgr_gem, bucket); goto retry; } } } pthread_mutex_unlock(&bufmgr_gem->lock); if (!alloc_from_cache) { struct drm_i915_gem_create create; bo_gem = calloc(1, sizeof(*bo_gem)); if (!bo_gem) return NULL; bo_gem->bo.size = bo_size; memset(&create, 0, sizeof(create)); create.size = bo_size; do { ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CREATE, &create); } while (ret == -1 && errno == EINTR); bo_gem->gem_handle = create.handle; bo_gem->bo.handle = bo_gem->gem_handle; if (ret != 0) { free(bo_gem); return NULL; } bo_gem->bo.bufmgr = bufmgr; } bo_gem->name = name; atomic_set(&bo_gem->refcount, 1); bo_gem->validate_index = -1; bo_gem->reloc_tree_fences = 0; bo_gem->used_as_reloc_target = 0; bo_gem->tiling_mode = I915_TILING_NONE; bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE; bo_gem->reusable = 1; drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem); DBG("bo_create: buf %d (%s) %ldb\n", bo_gem->gem_handle, bo_gem->name, size); return &bo_gem->bo; } static drm_intel_bo * drm_intel_gem_bo_alloc_for_render(drm_intel_bufmgr *bufmgr, const char *name, unsigned long size, unsigned int alignment) { return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, BO_ALLOC_FOR_RENDER); } static drm_intel_bo * drm_intel_gem_bo_alloc(drm_intel_bufmgr *bufmgr, const char *name, unsigned long size, unsigned int alignment) { return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, 0); } static drm_intel_bo * drm_intel_gem_bo_alloc_tiled(drm_intel_bufmgr *bufmgr, const char *name, int x, int y, int cpp, uint32_t *tiling_mode, unsigned long *pitch, unsigned long flags) { drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr; drm_intel_bo *bo; unsigned long size, stride, aligned_y = y; int ret; if (*tiling_mode == I915_TILING_NONE) aligned_y = ALIGN(y, 2); else if (*tiling_mode == I915_TILING_X) aligned_y = ALIGN(y, 8); else if (*tiling_mode == I915_TILING_Y) 
aligned_y = ALIGN(y, 32); stride = x * cpp; stride = drm_intel_gem_bo_tile_pitch(bufmgr_gem, stride, *tiling_mode); size = stride * aligned_y; size = drm_intel_gem_bo_tile_size(bufmgr_gem, size, tiling_mode); bo = drm_intel_gem_bo_alloc_internal(bufmgr, name, size, flags); if (!bo) return NULL; ret = drm_intel_gem_bo_set_tiling(bo, tiling_mode, stride); if (ret != 0) { drm_intel_gem_bo_unreference(bo); return NULL; } *pitch = stride; return bo; } /** * Returns a drm_intel_bo wrapping the given buffer object handle. * * This can be used when one application needs to pass a buffer object * to another. */ drm_intel_bo * drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr, const char *name, unsigned int handle) { drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr; drm_intel_bo_gem *bo_gem; int ret; struct drm_gem_open open_arg; struct drm_i915_gem_get_tiling get_tiling; bo_gem = calloc(1, sizeof(*bo_gem)); if (!bo_gem) return NULL; memset(&open_arg, 0, sizeof(open_arg)); open_arg.name = handle; do { ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_OPEN, &open_arg); } while (ret == -1 && errno == EINTR); if (ret != 0) { fprintf(stderr, "Couldn't reference %s handle 0x%08x: %s\n", name, handle, strerror(errno)); free(bo_gem); return NULL; } bo_gem->bo.size = open_arg.size; bo_gem->bo.offset = 0; bo_gem->bo.virtual = NULL; bo_gem->bo.bufmgr = bufmgr; bo_gem->name = name; atomic_set(&bo_gem->refcount, 1); bo_gem->validate_index = -1; bo_gem->gem_handle = open_arg.handle; bo_gem->global_name = handle; bo_gem->reusable = 0; memset(&get_tiling, 0, sizeof(get_tiling)); get_tiling.handle = bo_gem->gem_handle; ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling); if (ret != 0) { drm_intel_gem_bo_unreference(&bo_gem->bo); return NULL; } bo_gem->tiling_mode = get_tiling.tiling_mode; bo_gem->swizzle_mode = get_tiling.swizzle_mode; if (bo_gem->tiling_mode == I915_TILING_NONE) bo_gem->reloc_tree_fences = 0; else bo_gem->reloc_tree_fences = 1; drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem); DBG("bo_create_from_handle: %d (%s)\n", handle, bo_gem->name); return &bo_gem->bo; } static void drm_intel_gem_bo_free(drm_intel_bo *bo) { drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr; drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; struct drm_gem_close close; int ret; if (bo_gem->mem_virtual) munmap(bo_gem->mem_virtual, bo_gem->bo.size); if (bo_gem->gtt_virtual) munmap(bo_gem->gtt_virtual, bo_gem->bo.size); free(bo_gem->reloc_target_bo); free(bo_gem->relocs); /* Close this object */ memset(&close, 0, sizeof(close)); close.handle = bo_gem->gem_handle; ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close); if (ret != 0) { fprintf(stderr, "DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n", bo_gem->gem_handle, bo_gem->name, strerror(errno)); } free(bo); } /** Frees all cached buffers significantly older than @time. 
*/ static void drm_intel_gem_cleanup_bo_cache(drm_intel_bufmgr_gem *bufmgr_gem, time_t time) { int i; for (i = 0; i < DRM_INTEL_GEM_BO_BUCKETS; i++) { struct drm_intel_gem_bo_bucket *bucket = &bufmgr_gem->cache_bucket[i]; while (!DRMLISTEMPTY(&bucket->head)) { drm_intel_bo_gem *bo_gem; bo_gem = DRMLISTENTRY(drm_intel_bo_gem, bucket->head.next, head); if (time - bo_gem->free_time <= 1) break; DRMLISTDEL(&bo_gem->head); drm_intel_gem_bo_free(&bo_gem->bo); } } } static void drm_intel_gem_bo_unreference_final(drm_intel_bo *bo, time_t time) { drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr; drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; struct drm_intel_gem_bo_bucket *bucket; uint32_t tiling_mode; int i; /* Unreference all the target buffers */ for (i = 0; i < bo_gem->reloc_count; i++) { drm_intel_gem_bo_unreference_locked_timed(bo_gem-> reloc_target_bo[i], time); } bo_gem->reloc_count = 0; bo_gem->used_as_reloc_target = 0; DBG("bo_unreference final: %d (%s)\n", bo_gem->gem_handle, bo_gem->name); bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, bo->size); /* Put the buffer into our internal cache for reuse if we can. */ tiling_mode = I915_TILING_NONE; if (bufmgr_gem->bo_reuse && bo_gem->reusable && bucket != NULL && drm_intel_gem_bo_set_tiling(bo, &tiling_mode, 0) == 0 && drm_intel_gem_bo_madvise_internal(bufmgr_gem, bo_gem, I915_MADV_DONTNEED)) { bo_gem->free_time = time; bo_gem->name = NULL; bo_gem->validate_index = -1; DRMLISTADDTAIL(&bo_gem->head, &bucket->head); drm_intel_gem_cleanup_bo_cache(bufmgr_gem, time); } else { drm_intel_gem_bo_free(bo); } } static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo, time_t time) { drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; assert(atomic_read(&bo_gem->refcount) > 0); if (atomic_dec_and_test(&bo_gem->refcount)) drm_intel_gem_bo_unreference_final(bo, time); } static void drm_intel_gem_bo_unreference(drm_intel_bo *bo) { drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; assert(atomic_read(&bo_gem->refcount) > 0); if (atomic_dec_and_test(&bo_gem->refcount)) { drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr; struct timespec time; clock_gettime(CLOCK_MONOTONIC, &time); pthread_mutex_lock(&bufmgr_gem->lock); drm_intel_gem_bo_unreference_final(bo, time.tv_sec); pthread_mutex_unlock(&bufmgr_gem->lock); } } static int drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable) { drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr; drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; struct drm_i915_gem_set_domain set_domain; int ret; pthread_mutex_lock(&bufmgr_gem->lock); /* Allow recursive mapping. Mesa may recursively map buffers with * nested display loops. 
*/ if (!bo_gem->mem_virtual) { struct drm_i915_gem_mmap mmap_arg; DBG("bo_map: %d (%s)\n", bo_gem->gem_handle, bo_gem->name); memset(&mmap_arg, 0, sizeof(mmap_arg)); mmap_arg.handle = bo_gem->gem_handle; mmap_arg.offset = 0; mmap_arg.size = bo->size; do { ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg); } while (ret == -1 && errno == EINTR); if (ret != 0) { fprintf(stderr, "%s:%d: Error mapping buffer %d (%s): %s .\n", __FILE__, __LINE__, bo_gem->gem_handle, bo_gem->name, strerror(errno)); pthread_mutex_unlock(&bufmgr_gem->lock); return ret; } bo_gem->mem_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr; } DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name, bo_gem->mem_virtual); bo->virtual = bo_gem->mem_virtual; set_domain.handle = bo_gem->gem_handle; set_domain.read_domains = I915_GEM_DOMAIN_CPU; if (write_enable) set_domain.write_domain = I915_GEM_DOMAIN_CPU; else set_domain.write_domain = 0; do { ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain); } while (ret == -1 && errno == EINTR); if (ret != 0) { fprintf(stderr, "%s:%d: Error setting to CPU domain %d: %s\n", __FILE__, __LINE__, bo_gem->gem_handle, strerror(errno)); pthread_mutex_unlock(&bufmgr_gem->lock); return ret; } pthread_mutex_unlock(&bufmgr_gem->lock); return 0; } int drm_intel_gem_bo_map_gtt(drm_intel_bo *bo) { drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr; drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; struct drm_i915_gem_set_domain set_domain; int ret; pthread_mutex_lock(&bufmgr_gem->lock); /* Get a mapping of the buffer if we haven't before. */ if (bo_gem->gtt_virtual == NULL) { struct drm_i915_gem_mmap_gtt mmap_arg; DBG("bo_map_gtt: mmap %d (%s)\n", bo_gem->gem_handle, bo_gem->name); memset(&mmap_arg, 0, sizeof(mmap_arg)); mmap_arg.handle = bo_gem->gem_handle; /* Get the fake offset back... 
*/ do { ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_arg); } while (ret == -1 && errno == EINTR); if (ret != 0) { fprintf(stderr, "%s:%d: Error preparing buffer map %d (%s): %s .\n", __FILE__, __LINE__, bo_gem->gem_handle, bo_gem->name, strerror(errno)); pthread_mutex_unlock(&bufmgr_gem->lock); return ret; } /* and mmap it */ bo_gem->gtt_virtual = mmap(0, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED, bufmgr_gem->fd, mmap_arg.offset); if (bo_gem->gtt_virtual == MAP_FAILED) { fprintf(stderr, "%s:%d: Error mapping buffer %d (%s): %s .\n", __FILE__, __LINE__, bo_gem->gem_handle, bo_gem->name, strerror(errno)); pthread_mutex_unlock(&bufmgr_gem->lock); return errno; } } bo->virtual = bo_gem->gtt_virtual; DBG("bo_map_gtt: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name, bo_gem->gtt_virtual); /* Now move it to the GTT domain so that the CPU caches are flushed */ set_domain.handle = bo_gem->gem_handle; set_domain.read_domains = I915_GEM_DOMAIN_GTT; set_domain.write_domain = I915_GEM_DOMAIN_GTT; do { ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain); } while (ret == -1 && errno == EINTR); if (ret != 0) { fprintf(stderr, "%s:%d: Error setting domain %d: %s\n", __FILE__, __LINE__, bo_gem->gem_handle, strerror(errno)); } pthread_mutex_unlock(&bufmgr_gem->lock); return ret; } int drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo) { drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr; drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; int ret = 0; if (bo == NULL) return 0; assert(bo_gem->gtt_virtual != NULL); pthread_mutex_lock(&bufmgr_gem->lock); bo->virtual = NULL; pthread_mutex_unlock(&bufmgr_gem->lock); return ret; } static int drm_intel_gem_bo_unmap(drm_intel_bo *bo) { drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr; drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; struct drm_i915_gem_sw_finish sw_finish; int ret; if (bo == NULL) return 0; assert(bo_gem->mem_virtual != NULL); pthread_mutex_lock(&bufmgr_gem->lock); /* Cause a flush to happen if the buffer's pinned for scanout, so the * results show up in a timely manner. 
*/ sw_finish.handle = bo_gem->gem_handle; do { ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SW_FINISH, &sw_finish); } while (ret == -1 && errno == EINTR); bo->virtual = NULL; pthread_mutex_unlock(&bufmgr_gem->lock); return 0; } static int drm_intel_gem_bo_subdata(drm_intel_bo *bo, unsigned long offset, unsigned long size, const void *data) { drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr; drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; struct drm_i915_gem_pwrite pwrite; int ret; memset(&pwrite, 0, sizeof(pwrite)); pwrite.handle = bo_gem->gem_handle; pwrite.offset = offset; pwrite.size = size; pwrite.data_ptr = (uint64_t) (uintptr_t) data; do { ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite); } while (ret == -1 && errno == EINTR); if (ret != 0) { fprintf(stderr, "%s:%d: Error writing data to buffer %d: (%d %d) %s .\n", __FILE__, __LINE__, bo_gem->gem_handle, (int)offset, (int)size, strerror(errno)); } return 0; } static int drm_intel_gem_get_pipe_from_crtc_id(drm_intel_bufmgr *bufmgr, int crtc_id) { drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr; struct drm_i915_get_pipe_from_crtc_id get_pipe_from_crtc_id; int ret; get_pipe_from_crtc_id.crtc_id = crtc_id; ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID, &get_pipe_from_crtc_id); if (ret != 0) { /* We return -1 here to signal that we don't * know which pipe is associated with this crtc. * This lets the caller know that this information * isn't available; using the wrong pipe for * vblank waiting can cause the chipset to lock up */ return -1; } return get_pipe_from_crtc_id.pipe; } static int drm_intel_gem_bo_get_subdata(drm_intel_bo *bo, unsigned long offset, unsigned long size, void *data) { drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr; drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; struct drm_i915_gem_pread pread; int ret; memset(&pread, 0, sizeof(pread)); pread.handle = bo_gem->gem_handle; pread.offset = offset; pread.size = size; pread.data_ptr = (uint64_t) (uintptr_t) data; do { ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_PREAD, &pread); } while (ret == -1 && errno == EINTR); if (ret != 0) { fprintf(stderr, "%s:%d: Error reading data from buffer %d: (%d %d) %s .\n", __FILE__, __LINE__, bo_gem->gem_handle, (int)offset, (int)size, strerror(errno)); } return 0; } /** Waits for all GPU rendering to the object to have completed. */ static void drm_intel_gem_bo_wait_rendering(drm_intel_bo *bo) { drm_intel_gem_bo_start_gtt_access(bo, 0); } /** * Sets the object to the GTT read and possibly write domain, used by the X * 2D driver in the absence of kernel support to do drm_intel_gem_bo_map_gtt(). * * In combination with drm_intel_gem_bo_pin() and manual fence management, we * can do tiled pixmaps this way. */ void drm_intel_gem_bo_start_gtt_access(drm_intel_bo *bo, int write_enable) { drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr; drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; struct drm_i915_gem_set_domain set_domain; int ret; set_domain.handle = bo_gem->gem_handle; set_domain.read_domains = I915_GEM_DOMAIN_GTT; set_domain.write_domain = write_enable ? 
I915_GEM_DOMAIN_GTT : 0; do { ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain); } while (ret == -1 && errno == EINTR); if (ret != 0) { fprintf(stderr, "%s:%d: Error setting memory domains %d (%08x %08x): %s .\n", __FILE__, __LINE__, bo_gem->gem_handle, set_domain.read_domains, set_domain.write_domain, strerror(errno)); } } static void drm_intel_bufmgr_gem_destroy(drm_intel_bufmgr *bufmgr) { drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr; int i; free(bufmgr_gem->exec_objects); free(bufmgr_gem->exec_bos); pthread_mutex_destroy(&bufmgr_gem->lock); /* Free any cached buffer objects we were going to reuse */ for (i = 0; i < DRM_INTEL_GEM_BO_BUCKETS; i++) { struct drm_intel_gem_bo_bucket *bucket = &bufmgr_gem->cache_bucket[i]; drm_intel_bo_gem *bo_gem; while (!DRMLISTEMPTY(&bucket->head)) { bo_gem = DRMLISTENTRY(drm_intel_bo_gem, bucket->head.next, head); DRMLISTDEL(&bo_gem->head); drm_intel_gem_bo_free(&bo_gem->bo); } } free(bufmgr); } /** * Adds the target buffer to the validation list and adds the relocation * to the reloc_buffer's relocation list. * * The relocation entry at the given offset must already contain the * precomputed relocation value, because the kernel will optimize out * the relocation entry write when the buffer hasn't moved from the * last known offset in target_bo. */ static int drm_intel_gem_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset, drm_intel_bo *target_bo, uint32_t target_offset, uint32_t read_domains, uint32_t write_domain) { drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr; drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo; pthread_mutex_lock(&bufmgr_gem->lock); /* Create a new relocation list if needed */ if (bo_gem->relocs == NULL) drm_intel_setup_reloc_list(bo); /* Check overflow */ assert(bo_gem->reloc_count < bufmgr_gem->max_relocs); /* Check args */ assert(offset <= bo->size - 4); assert((write_domain & (write_domain - 1)) == 0); /* Make sure that we're not adding a reloc to something whose size has * already been accounted for. */ assert(!bo_gem->used_as_reloc_target); bo_gem->reloc_tree_size += target_bo_gem->reloc_tree_size; bo_gem->reloc_tree_fences += target_bo_gem->reloc_tree_fences; /* Flag the target to disallow further relocations in it. */ target_bo_gem->used_as_reloc_target = 1; bo_gem->relocs[bo_gem->reloc_count].offset = offset; bo_gem->relocs[bo_gem->reloc_count].delta = target_offset; bo_gem->relocs[bo_gem->reloc_count].target_handle = target_bo_gem->gem_handle; bo_gem->relocs[bo_gem->reloc_count].read_domains = read_domains; bo_gem->relocs[bo_gem->reloc_count].write_domain = write_domain; bo_gem->relocs[bo_gem->reloc_count].presumed_offset = target_bo->offset; bo_gem->reloc_target_bo[bo_gem->reloc_count] = target_bo; drm_intel_gem_bo_reference(target_bo); bo_gem->reloc_count++; pthread_mutex_unlock(&bufmgr_gem->lock); return 0; } /** * Walk the tree of relocations rooted at BO and accumulate the list of * validations to be performed and update the relocation buffers with * index values into the validation list. */ static void drm_intel_gem_bo_process_reloc(drm_intel_bo *bo) { drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; int i; if (bo_gem->relocs == NULL) return; for (i = 0; i < bo_gem->reloc_count; i++) { drm_intel_bo *target_bo = bo_gem->reloc_target_bo[i]; /* Continue walking the tree depth-first. 
*/ drm_intel_gem_bo_process_reloc(target_bo); /* Add the target to the validate list */ drm_intel_add_validate_buffer(target_bo); } } static void drm_intel_update_buffer_offsets(drm_intel_bufmgr_gem *bufmgr_gem) { int i; for (i = 0; i < bufmgr_gem->exec_count; i++) { drm_intel_bo *bo = bufmgr_gem->exec_bos[i]; drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; /* Update the buffer offset */ if (bufmgr_gem->exec_objects[i].offset != bo->offset) { DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n", bo_gem->gem_handle, bo_gem->name, bo->offset, (unsigned long long)bufmgr_gem->exec_objects[i]. offset); bo->offset = bufmgr_gem->exec_objects[i].offset; } } } static int drm_intel_gem_bo_exec(drm_intel_bo *bo, int used, drm_clip_rect_t * cliprects, int num_cliprects, int DR4) { drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr; struct drm_i915_gem_execbuffer execbuf; int ret, i; pthread_mutex_lock(&bufmgr_gem->lock); /* Update indices and set up the validate list. */ drm_intel_gem_bo_process_reloc(bo); /* Add the batch buffer to the validation list. There are no * relocations pointing to it. */ drm_intel_add_validate_buffer(bo); execbuf.buffers_ptr = (uintptr_t) bufmgr_gem->exec_objects; execbuf.buffer_count = bufmgr_gem->exec_count; execbuf.batch_start_offset = 0; execbuf.batch_len = used; execbuf.cliprects_ptr = (uintptr_t) cliprects; execbuf.num_cliprects = num_cliprects; execbuf.DR1 = 0; execbuf.DR4 = DR4; do { ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_EXECBUFFER, &execbuf); } while (ret != 0 && errno == EAGAIN); if (ret != 0 && errno == ENOMEM) { fprintf(stderr, "Execbuffer fails to pin. " "Estimate: %u. Actual: %u. Available: %u\n", drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos, bufmgr_gem-> exec_count), drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos, bufmgr_gem-> exec_count), (unsigned int)bufmgr_gem->gtt_size); } drm_intel_update_buffer_offsets(bufmgr_gem); if (bufmgr_gem->bufmgr.debug) drm_intel_gem_dump_validation_list(bufmgr_gem); for (i = 0; i < bufmgr_gem->exec_count; i++) { drm_intel_bo *bo = bufmgr_gem->exec_bos[i]; drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; /* Disconnect the buffer from the validate list */ bo_gem->validate_index = -1; bufmgr_gem->exec_bos[i] = NULL; } bufmgr_gem->exec_count = 0; pthread_mutex_unlock(&bufmgr_gem->lock); return 0; } static int drm_intel_gem_bo_pin(drm_intel_bo *bo, uint32_t alignment) { drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr; drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; struct drm_i915_gem_pin pin; int ret; memset(&pin, 0, sizeof(pin)); pin.handle = bo_gem->gem_handle; pin.alignment = alignment; do { ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_PIN, &pin); } while (ret == -1 && errno == EINTR); if (ret != 0) return -errno; bo->offset = pin.offset; return 0; } static int drm_intel_gem_bo_unpin(drm_intel_bo *bo) { drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr; drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; struct drm_i915_gem_unpin unpin; int ret; memset(&unpin, 0, sizeof(unpin)); unpin.handle = bo_gem->gem_handle; ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_UNPIN, &unpin); if (ret != 0) return -errno; return 0; } static int drm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode, uint32_t stride) { drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr; drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; struct drm_i915_gem_set_tiling set_tiling; int ret; if (bo_gem->global_name == 0 && 
*tiling_mode == bo_gem->tiling_mode) return 0; /* If we're going from non-tiling to tiling, bump fence count */ if (bo_gem->tiling_mode == I915_TILING_NONE) bo_gem->reloc_tree_fences++; memset(&set_tiling, 0, sizeof(set_tiling)); set_tiling.handle = bo_gem->gem_handle; set_tiling.tiling_mode = *tiling_mode; set_tiling.stride = stride; do { ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SET_TILING, &set_tiling); } while (ret == -1 && errno == EINTR); if (ret != 0) { *tiling_mode = bo_gem->tiling_mode; return -errno; } bo_gem->tiling_mode = set_tiling.tiling_mode; bo_gem->swizzle_mode = set_tiling.swizzle_mode; /* If we're going from tiling to non-tiling, drop fence count */ if (bo_gem->tiling_mode == I915_TILING_NONE) bo_gem->reloc_tree_fences--; drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem); *tiling_mode = bo_gem->tiling_mode; return 0; } static int drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode, uint32_t * swizzle_mode) { drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; *tiling_mode = bo_gem->tiling_mode; *swizzle_mode = bo_gem->swizzle_mode; return 0; } static int drm_intel_gem_bo_flink(drm_intel_bo *bo, uint32_t * name) { drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr; drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; struct drm_gem_flink flink; int ret; if (!bo_gem->global_name) { memset(&flink, 0, sizeof(flink)); flink.handle = bo_gem->gem_handle; ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_FLINK, &flink); if (ret != 0) return -errno; bo_gem->global_name = flink.name; bo_gem->reusable = 0; } *name = bo_gem->global_name; return 0; } /** * Enables unlimited caching of buffer objects for reuse. * * This is potentially very memory expensive, as the cache at each bucket * size is only bounded by how many buffers of that size we've managed to have * in flight at once. */ void drm_intel_bufmgr_gem_enable_reuse(drm_intel_bufmgr *bufmgr) { drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr; bufmgr_gem->bo_reuse = 1; } /** * Return the additional aperture space required by the tree of buffer objects * rooted at bo. */ static int drm_intel_gem_bo_get_aperture_space(drm_intel_bo *bo) { drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; int i; int total = 0; if (bo == NULL || bo_gem->included_in_check_aperture) return 0; total += bo->size; bo_gem->included_in_check_aperture = 1; for (i = 0; i < bo_gem->reloc_count; i++) total += drm_intel_gem_bo_get_aperture_space(bo_gem-> reloc_target_bo[i]); return total; } /** * Count the number of buffers in this list that need a fence reg * * If the count is greater than the number of available regs, we'll have * to ask the caller to resubmit a batch with fewer tiled buffers. * * This function over-counts if the same buffer is used multiple times. */ static unsigned int drm_intel_gem_total_fences(drm_intel_bo ** bo_array, int count) { int i; unsigned int total = 0; for (i = 0; i < count; i++) { drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i]; if (bo_gem == NULL) continue; total += bo_gem->reloc_tree_fences; } return total; } /** * Clear the flag set by drm_intel_gem_bo_get_aperture_space() so we're ready * for the next drm_intel_bufmgr_check_aperture_space() call. 
*/ static void drm_intel_gem_bo_clear_aperture_space_flag(drm_intel_bo *bo) { drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; int i; if (bo == NULL || !bo_gem->included_in_check_aperture) return; bo_gem->included_in_check_aperture = 0; for (i = 0; i < bo_gem->reloc_count; i++) drm_intel_gem_bo_clear_aperture_space_flag(bo_gem-> reloc_target_bo[i]); } /** * Return a conservative estimate for the amount of aperture required * for a collection of buffers. This may double-count some buffers. */ static unsigned int drm_intel_gem_estimate_batch_space(drm_intel_bo **bo_array, int count) { int i; unsigned int total = 0; for (i = 0; i < count; i++) { drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i]; if (bo_gem != NULL) total += bo_gem->reloc_tree_size; } return total; } /** * Return the amount of aperture needed for a collection of buffers. * This avoids double counting any buffers, at the cost of looking * at every buffer in the set. */ static unsigned int drm_intel_gem_compute_batch_space(drm_intel_bo **bo_array, int count) { int i; unsigned int total = 0; for (i = 0; i < count; i++) { total += drm_intel_gem_bo_get_aperture_space(bo_array[i]); /* For the first buffer object in the array, we get an * accurate count back for its reloc_tree size (since nothing * had been flagged as being counted yet). We can save that * value out as a more conservative reloc_tree_size that * avoids double-counting target buffers. Since the first * buffer happens to usually be the batch buffer in our * callers, this can pull us back from doing the tree * walk on every new batch emit. */ if (i == 0) { drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i]; bo_gem->reloc_tree_size = total; } } for (i = 0; i < count; i++) drm_intel_gem_bo_clear_aperture_space_flag(bo_array[i]); return total; } /** * Return -1 if the batchbuffer should be flushed before attempting to * emit rendering referencing the buffers pointed to by bo_array. * * This is required because if we try to emit a batchbuffer with relocations * to a tree of buffers that won't simultaneously fit in the aperture, * the rendering will return an error at a point where the software is not * prepared to recover from it. * * However, we also want to emit the batchbuffer significantly before we reach * the limit, as a series of batchbuffers each of which references buffers * covering almost all of the aperture means that at each emit we end up * waiting to evict a buffer from the last rendering, and we get synchronous * performance. By emitting smaller batchbuffers, we eat some CPU overhead to * get better parallelism. 
*/ static int drm_intel_gem_check_aperture_space(drm_intel_bo **bo_array, int count) { drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo_array[0]->bufmgr; unsigned int total = 0; unsigned int threshold = bufmgr_gem->gtt_size * 3 / 4; int total_fences; /* Check for fence reg constraints if necessary */ if (bufmgr_gem->available_fences) { total_fences = drm_intel_gem_total_fences(bo_array, count); if (total_fences > bufmgr_gem->available_fences) return -1; } total = drm_intel_gem_estimate_batch_space(bo_array, count); if (total > threshold) total = drm_intel_gem_compute_batch_space(bo_array, count); if (total > threshold) { DBG("check_space: overflowed available aperture, " "%dkb vs %dkb\n", total / 1024, (int)bufmgr_gem->gtt_size / 1024); return -1; } else { DBG("drm_check_space: total %dkb vs bufgr %dkb\n", total / 1024, (int)bufmgr_gem->gtt_size / 1024); return 0; } } /* * Disable buffer reuse for objects which are shared with the kernel * as scanout buffers */ static int drm_intel_gem_bo_disable_reuse(drm_intel_bo *bo) { drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; bo_gem->reusable = 0; return 0; } static int _drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo) { drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; int i; for (i = 0; i < bo_gem->reloc_count; i++) { if (bo_gem->reloc_target_bo[i] == target_bo) return 1; if (_drm_intel_gem_bo_references(bo_gem->reloc_target_bo[i], target_bo)) return 1; } return 0; } /** Return true if target_bo is referenced by bo's relocation tree. */ static int drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo) { drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo; if (bo == NULL || target_bo == NULL) return 0; if (target_bo_gem->used_as_reloc_target) return _drm_intel_gem_bo_references(bo, target_bo); return 0; } /** * Initializes the GEM buffer manager, which uses the kernel to allocate, map, * and manage map buffer objections. * * \param fd File descriptor of the opened DRM device. 
*/ drm_intel_bufmgr * drm_intel_bufmgr_gem_init(int fd, int batch_size) { drm_intel_bufmgr_gem *bufmgr_gem; struct drm_i915_gem_get_aperture aperture; drm_i915_getparam_t gp; int ret, i; unsigned long size; bufmgr_gem = calloc(1, sizeof(*bufmgr_gem)); bufmgr_gem->fd = fd; if (pthread_mutex_init(&bufmgr_gem->lock, NULL) != 0) { free(bufmgr_gem); return NULL; } ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aperture); if (ret == 0) bufmgr_gem->gtt_size = aperture.aper_available_size; else { fprintf(stderr, "DRM_IOCTL_I915_GEM_APERTURE failed: %s\n", strerror(errno)); bufmgr_gem->gtt_size = 128 * 1024 * 1024; fprintf(stderr, "Assuming %dkB available aperture size.\n" "May lead to reduced performance or incorrect " "rendering.\n", (int)bufmgr_gem->gtt_size / 1024); } gp.param = I915_PARAM_CHIPSET_ID; gp.value = &bufmgr_gem->pci_device; ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp); if (ret) { fprintf(stderr, "get chip id failed: %d [%d]\n", ret, errno); fprintf(stderr, "param: %d, val: %d\n", gp.param, *gp.value); } if (!IS_I965G(bufmgr_gem)) { gp.param = I915_PARAM_NUM_FENCES_AVAIL; gp.value = &bufmgr_gem->available_fences; ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp); if (ret) { fprintf(stderr, "get fences failed: %d [%d]\n", ret, errno); fprintf(stderr, "param: %d, val: %d\n", gp.param, *gp.value); bufmgr_gem->available_fences = 0; } } /* Let's go with one relocation per every 2 dwords (but round down a bit * since a power of two will mean an extra page allocation for the reloc * buffer). * * Every 4 was too few for the blender benchmark. */ bufmgr_gem->max_relocs = batch_size / sizeof(uint32_t) / 2 - 2; bufmgr_gem->bufmgr.bo_alloc = drm_intel_gem_bo_alloc; bufmgr_gem->bufmgr.bo_alloc_for_render = drm_intel_gem_bo_alloc_for_render; bufmgr_gem->bufmgr.bo_alloc_tiled = drm_intel_gem_bo_alloc_tiled; bufmgr_gem->bufmgr.bo_reference = drm_intel_gem_bo_reference; bufmgr_gem->bufmgr.bo_unreference = drm_intel_gem_bo_unreference; bufmgr_gem->bufmgr.bo_map = drm_intel_gem_bo_map; bufmgr_gem->bufmgr.bo_unmap = drm_intel_gem_bo_unmap; bufmgr_gem->bufmgr.bo_subdata = drm_intel_gem_bo_subdata; bufmgr_gem->bufmgr.bo_get_subdata = drm_intel_gem_bo_get_subdata; bufmgr_gem->bufmgr.bo_wait_rendering = drm_intel_gem_bo_wait_rendering; bufmgr_gem->bufmgr.bo_emit_reloc = drm_intel_gem_bo_emit_reloc; bufmgr_gem->bufmgr.bo_pin = drm_intel_gem_bo_pin; bufmgr_gem->bufmgr.bo_unpin = drm_intel_gem_bo_unpin; bufmgr_gem->bufmgr.bo_get_tiling = drm_intel_gem_bo_get_tiling; bufmgr_gem->bufmgr.bo_set_tiling = drm_intel_gem_bo_set_tiling; bufmgr_gem->bufmgr.bo_flink = drm_intel_gem_bo_flink; bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec; bufmgr_gem->bufmgr.bo_busy = drm_intel_gem_bo_busy; bufmgr_gem->bufmgr.bo_madvise = drm_intel_gem_bo_madvise; bufmgr_gem->bufmgr.destroy = drm_intel_bufmgr_gem_destroy; bufmgr_gem->bufmgr.debug = 0; bufmgr_gem->bufmgr.check_aperture_space = drm_intel_gem_check_aperture_space; bufmgr_gem->bufmgr.bo_disable_reuse = drm_intel_gem_bo_disable_reuse; bufmgr_gem->bufmgr.get_pipe_from_crtc_id = drm_intel_gem_get_pipe_from_crtc_id; bufmgr_gem->bufmgr.bo_references = drm_intel_gem_bo_references; /* Initialize the linked lists for BO reuse cache. */ for (i = 0, size = 4096; i < DRM_INTEL_GEM_BO_BUCKETS; i++, size *= 2) { DRMINITLISTHEAD(&bufmgr_gem->cache_bucket[i].head); bufmgr_gem->cache_bucket[i].size = size; } return &bufmgr_gem->bufmgr; }