/**
 * \file drm_context.c
 * IOCTLs for generic contexts
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Fri Nov 24 18:31:37 2000 by gareth@valinux.com
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

/*
 * ChangeLog:
 *  2001-11-16	Torsten Duwe <duwe@caldera.de>
 *		added context constructor/destructor hooks,
 *		needed by SiS driver's memory management.
 */

#include "drmP.h"

/******************************************************************/
/** \name Context bitmap support */
/*@{*/

/**
 * Free a handle from the context bitmap.
 *
 * \param dev DRM device.
 * \param ctx_handle context handle.
 *
 * Removes the entry with handle \p ctx_handle from drm_device::ctx_idr
 * while holding the drm_device::struct_mutex lock.
 */
void drm_ctxbitmap_free(struct drm_device *dev, int ctx_handle)
{
	mutex_lock(&dev->struct_mutex);
	idr_remove(&dev->ctx_idr, ctx_handle);
	mutex_unlock(&dev->struct_mutex);
}

/**
 * Context bitmap allocation.
 *
 * \param dev DRM device.
 * \return (non-negative) context handle on success or a negative number on failure.
 *
 * Allocates a new id in drm_device::ctx_idr, above DRM_RESERVED_CONTEXTS,
 * while holding the drm_device::struct_mutex lock.
 */
static int drm_ctxbitmap_next(struct drm_device *dev)
{
	int new_id;
	int ret;

again:
	if (idr_pre_get(&dev->ctx_idr, GFP_KERNEL) == 0) {
		DRM_ERROR("Out of memory expanding drawable idr\n");
		return -ENOMEM;
	}
	mutex_lock(&dev->struct_mutex);
	ret = idr_get_new_above(&dev->ctx_idr, NULL,
				DRM_RESERVED_CONTEXTS, &new_id);
	if (ret == -EAGAIN) {
		mutex_unlock(&dev->struct_mutex);
		goto again;
	}

	mutex_unlock(&dev->struct_mutex);
	return new_id;
}
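
/*
 * A note on the pattern above: this kernel generation uses the two-step
 * idr API.  idr_pre_get() preallocates an idr layer with GFP_KERNEL
 * outside the lock, then idr_get_new_above() performs the actual
 * allocation under drm_device::struct_mutex and returns -EAGAIN when a
 * concurrent allocator consumed the preallocated layer.  A minimal
 * sketch of the same retry loop for a hypothetical idr/lock pair
 * (my_idr, my_lock and my_ptr are illustrative names only):
 *
 * \code
 *	int id, ret;
 * again:
 *	if (idr_pre_get(&my_idr, GFP_KERNEL) == 0)
 *		return -ENOMEM;
 *	spin_lock(&my_lock);
 *	ret = idr_get_new_above(&my_idr, my_ptr, 1, &id);
 *	spin_unlock(&my_lock);
 *	if (ret == -EAGAIN)
 *		goto again;
 * \endcode
 */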

/**
 * Context bitmap initialization.
 *
 * \param dev DRM device.
 *
 * Initialise drm_device::ctx_idr.
 */
int drm_ctxbitmap_init(struct drm_device *dev)
{
	idr_init(&dev->ctx_idr);
	return 0;
}

/**
 * Context bitmap cleanup.
 *
 * \param dev DRM device.
 *
 * Remove all idr entries with idr_remove_all() while holding the
 * drm_device::struct_mutex lock.
 */
void drm_ctxbitmap_cleanup(struct drm_device *dev)
{
	mutex_lock(&dev->struct_mutex);
	idr_remove_all(&dev->ctx_idr);
	mutex_unlock(&dev->struct_mutex);
}

/*@}*/

/******************************************************************/
/** \name Per Context SAREA Support */
/*@{*/

/**
 * Get per-context SAREA.
 *
 * \param dev DRM device.
 * \param data user argument pointing to a drm_ctx_priv_map structure.
 * \param file_priv DRM file private.
 * \return zero on success or a negative number on failure.
 *
 * Looks up the map in drm_device::ctx_idr under the given context handle
 * and copies the map's user token back to userspace.
 */
int drm_getsareactx(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_ctx_priv_map *request = data;
	struct drm_map *map;
	struct drm_map_list *_entry;

	mutex_lock(&dev->struct_mutex);

	map = idr_find(&dev->ctx_idr, request->ctx_id);
	if (!map) {
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	mutex_unlock(&dev->struct_mutex);

	request->handle = NULL;
	list_for_each_entry(_entry, &dev->maplist, head) {
		if (_entry->map == map) {
			request->handle =
			    (void *)(unsigned long)_entry->user_token;
			break;
		}
	}
	if (request->handle == NULL)
		return -EINVAL;

	return 0;
}

/**
 * Set per-context SAREA.
 *
 * \param dev DRM device.
 * \param data user argument pointing to a drm_ctx_priv_map structure.
 * \param file_priv DRM file private.
 * \return zero on success or a negative number on failure.
 *
 * Searches for the mapping specified in \p data and updates the entry in
 * drm_device::ctx_idr with it.
 */
int drm_setsareactx(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_ctx_priv_map *request = data;
	struct drm_map *map = NULL;
	struct drm_map_list *r_list = NULL;

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(r_list, &dev->maplist, head) {
		if (r_list->map
		    && r_list->user_token == (unsigned long) request->handle)
			goto found;
	}
      bad:
	mutex_unlock(&dev->struct_mutex);
	return -EINVAL;

      found:
	map = r_list->map;
	if (!map)
		goto bad;

	if (IS_ERR(idr_replace(&dev->ctx_idr, map, request->ctx_id)))
		goto bad;

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
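
/*
 * For illustration only: userspace normally reaches the two ioctls above
 * through libdrm.  The wrapper names below are assumptions based on the
 * xf86drm API of this era, not something this file defines; sarea_handle
 * would be the user token returned when the SAREA map was created with
 * drmAddMap().
 *
 * \code
 *	drm_handle_t handle;
 *
 *	drmAddContextPrivateMapping(fd, ctx_id, sarea_handle);
 *	drmGetContextPrivateMapping(fd, ctx_id, &handle);
 * \endcode
 */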

/*@}*/

/******************************************************************/
/** \name The actual DRM context handling routines */
/*@{*/

/**
 * Switch context.
 *
 * \param dev DRM device.
 * \param old old context handle.
 * \param new new context handle.
 * \return zero on success or a negative number on failure.
 *
 * Attempt to set drm_device::context_flag.
 */
static int drm_context_switch(struct drm_device *dev, int old, int new)
{
	if (test_and_set_bit(0, &dev->context_flag)) {
		DRM_ERROR("Reentering -- FIXME\n");
		return -EBUSY;
	}

	DRM_DEBUG("Context switch from %d to %d\n", old, new);

	if (new == dev->last_context) {
		clear_bit(0, &dev->context_flag);
		return 0;
	}

	return 0;
}

/**
 * Complete context switch.
 *
 * \param dev DRM device.
 * \param new new context handle.
 * \return zero on success or a negative number on failure.
 *
 * Updates drm_device::last_context and drm_device::last_switch. Verifies the
 * hardware lock is held, clears the drm_device::context_flag and wakes up
 * drm_device::context_wait.
 */
static int drm_context_switch_complete(struct drm_device *dev, int new)
{
	dev->last_context = new;	/* PRE/POST: This is the _only_ writer. */
	dev->last_switch = jiffies;

	if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
		DRM_ERROR("Lock isn't held after context switch\n");
	}

	/* If a context switch is ever initiated
	   when the kernel holds the lock, release
	   that lock here. */
	clear_bit(0, &dev->context_flag);
	wake_up(&dev->context_wait);

	return 0;
}
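
/*
 * Taken together, drm_context_switch() and drm_context_switch_complete()
 * form a two-phase handshake with userspace: DRM_IOCTL_SWITCH_CTX sets
 * drm_device::context_flag to mark a switch in progress, and once the
 * incoming context holds the hardware lock, userspace issues
 * DRM_IOCTL_NEW_CTX, which lands in drm_context_switch_complete() to
 * record the new context and wake any waiters on context_wait.
 */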

/**
 * Reserve contexts.
 *
 * \param dev DRM device.
 * \param data user argument pointing to a drm_ctx_res structure.
 * \param file_priv DRM file private.
 * \return zero on success or a negative number on failure.
 * \return zero on success or a negative number on failure.
 */
int drm_resctx(struct drm_device *dev, void *data,
	       struct drm_file *file_priv)
{
	struct drm_ctx_res *res = data;
	struct drm_ctx ctx;
	int i;

	if (res->count >= DRM_RESERVED_CONTEXTS) {
		memset(&ctx, 0, sizeof(ctx));
		for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
			ctx.handle = i;
			if (copy_to_user(&res->contexts[i], &ctx, sizeof(ctx)))
				return -EFAULT;
		}
	}
	res->count = DRM_RESERVED_CONTEXTS;

	return 0;
}
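
/*
 * Illustrative userspace counterpart (wrapper names assumed from the
 * xf86drm API of this era): the reserved handles are normally fetched
 * with drmGetReservedContextList().
 *
 * \code
 *	int i, count;
 *	drm_context_t *list = drmGetReservedContextList(fd, &count);
 *
 *	for (i = 0; i < count; i++)
 *		printf("reserved context %d\n", (int)list[i]);
 *	drmFreeReservedContextList(list);
 * \endcode
 */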

/**
 * Add context.
 *
 * \param dev DRM device.
 * \param data user argument pointing to a drm_ctx structure.
 * \param file_priv DRM file private.
 * \return zero on success or a negative number on failure.
 * \return zero on success or a negative number on failure.
 *
 * Get a new handle for the context and copy to userspace.
 */
int drm_addctx(struct drm_device *dev, void *data,
	       struct drm_file *file_priv)
{
	struct drm_ctx_list *ctx_entry;
	struct drm_ctx *ctx = data;
	int tmp_handle;

	/*
	 * drm_ctxbitmap_next() returns a negative errno on failure, but
	 * ctx->handle is unsigned; keep the result in a signed temporary
	 * until it has been checked.
	 */
	tmp_handle = drm_ctxbitmap_next(dev);
	if (tmp_handle == DRM_KERNEL_CONTEXT) {
		/* Skip kernel's context and get a new one. */
		tmp_handle = drm_ctxbitmap_next(dev);
	}
	DRM_DEBUG("%d\n", tmp_handle);
	if (tmp_handle < 0) {
		DRM_DEBUG("Not enough free contexts.\n");
		/* Should this return -EBUSY instead? */
		return tmp_handle;
	}
	ctx->handle = tmp_handle;

	if (ctx->handle != DRM_KERNEL_CONTEXT) {
		if (dev->driver->context_ctor)
			if (!dev->driver->context_ctor(dev, ctx->handle)) {
				DRM_DEBUG("Running out of ctxs or memory.\n");
				return -ENOMEM;
			}
	}

	ctx_entry = drm_alloc(sizeof(*ctx_entry), DRM_MEM_CTXLIST);
	if (!ctx_entry) {
		DRM_DEBUG("out of memory\n");
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&ctx_entry->head);
	ctx_entry->handle = ctx->handle;
	ctx_entry->tag = file_priv;

	mutex_lock(&dev->ctxlist_mutex);
	list_add(&ctx_entry->head, &dev->ctxlist);
	++dev->ctx_count;
	mutex_unlock(&dev->ctxlist_mutex);

	return 0;
}
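
/*
 * Illustrative userspace counterpart (wrapper name assumed from the
 * xf86drm API of this era): DRM_IOCTL_ADD_CTX is normally issued through
 * drmCreateContext(), which fills in the new handle on success.
 *
 * \code
 *	drm_context_t ctx;
 *
 *	if (drmCreateContext(fd, &ctx))
 *		fprintf(stderr, "context creation failed\n");
 * \endcode
 */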

int drm_modctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	/* This does nothing */
	return 0;
}

/**
 * Get context.
 *
 * \param dev DRM device.
 * \param data user argument pointing to a drm_ctx structure.
 * \param file_priv DRM file private.
 * \return zero on success or a negative number on failure.
 * \return zero on success or a negative number on failure.
 */
int drm_getctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct drm_ctx *ctx = data;

	/* This is 0, because we don't handle any context flags */
	ctx->flags = 0;

	return 0;
}

/**
 * Switch context.
 *
 * \param dev DRM device.
 * \param data user argument pointing to a drm_ctx structure.
 * \param file_priv DRM file private.
 * \return zero on success or a negative number on failure.
 *
 * Calls drm_context_switch().
 */
int drm_switchctx(struct drm_device *dev, void *data,
		  struct drm_file *file_priv)
{
	struct drm_ctx *ctx = data;

	DRM_DEBUG("%d\n", ctx->handle);
	return drm_context_switch(dev, dev->last_context, ctx->handle);
}

/**
 * New context.
 *
 * \param dev DRM device.
 * \param data user argument pointing to a drm_ctx structure.
 * \param file_priv DRM file private.
 * \return zero on success or a negative number on failure.
 *
 * Calls drm_context_switch_complete().
 */
int drm_newctx(struct drm_device *dev, void *data,
	       struct drm_file *file_priv)
{
	struct drm_ctx *ctx = data;

	DRM_DEBUG("%d\n", ctx->handle);
	drm_context_switch_complete(dev, ctx->handle);

	return 0;
}

/**
 * Remove context.
 *
 * \param dev DRM device.
 * \param data user argument pointing to a drm_ctx structure.
 * \param file_priv DRM file private.
 * \return zero on success or a negative number on failure.
 *
 * If not the special kernel context, calls drm_ctxbitmap_free() to free the
 * specified context.
 */
int drm_rmctx(struct drm_device *dev, void *data,
	      struct drm_file *file_priv)
{
	struct drm_ctx *ctx = data;

	DRM_DEBUG("%d\n", ctx->handle);
	if (ctx->handle == DRM_KERNEL_CONTEXT + 1) {
		file_priv->remove_auth_on_close = 1;
	}
	if (ctx->handle != DRM_KERNEL_CONTEXT) {
		if (dev->driver->context_dtor)
			dev->driver->context_dtor(dev, ctx->handle);
		drm_ctxbitmap_free(dev, ctx->handle);
	}

	mutex_lock(&dev->ctxlist_mutex);
	if (!list_empty(&dev->ctxlist)) {
		struct drm_ctx_list *pos, *n;

		list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
			if (pos->handle == ctx->handle) {
				list_del(&pos->head);
				drm_free(pos, sizeof(*pos), DRM_MEM_CTXLIST);
				--dev->ctx_count;
			}
		}
	}
	mutex_unlock(&dev->ctxlist_mutex);

	return 0;
}
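
/*
 * Illustrative userspace counterpart (wrapper name assumed from the
 * xf86drm API of this era): a context created with drmCreateContext() is
 * released through drmDestroyContext(), which issues DRM_IOCTL_RM_CTX.
 *
 * \code
 *	drmDestroyContext(fd, ctx);
 * \endcode
 */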

/*@}*/
an class="hl opt">; mutex_lock(&bo->mutex); if (type) *type = VM_FAULT_MINOR; if (address > vma->vm_end) { page = NOPAGE_SIGBUS; goto out_unlock; } dev = bo->dev; if (drm_mem_reg_is_pci(dev, &bo->mem)) { DRM_ERROR("Invalid compat nopage.\n"); page = NOPAGE_SIGBUS; goto out_unlock; } ttm = bo->ttm; drm_ttm_fixup_caching(ttm); page_offset = (address - vma->vm_start) >> PAGE_SHIFT; page = drm_ttm_get_page(ttm, page_offset); if (!page) { page = NOPAGE_OOM; goto out_unlock; } get_page(page); out_unlock: mutex_unlock(&bo->mutex); return page; } int drm_bo_map_bound(struct vm_area_struct *vma) { struct drm_buffer_object *bo = (struct drm_buffer_object *)vma->vm_private_data; int ret = 0; unsigned long bus_base; unsigned long bus_offset; unsigned long bus_size; ret = drm_bo_pci_offset(bo->dev, &bo->mem, &bus_base, &bus_offset, &bus_size); BUG_ON(ret); if (bus_size) { struct drm_mem_type_manager *man = &bo->dev->bm.man[bo->mem.mem_type]; unsigned long pfn = (bus_base + bus_offset) >> PAGE_SHIFT; pgprot_t pgprot = drm_io_prot(man->drm_bus_maptype, vma); ret = io_remap_pfn_range(vma, vma->vm_start, pfn, vma->vm_end - vma->vm_start, pgprot); } return ret; } int drm_bo_add_vma(struct drm_buffer_object * bo, struct vm_area_struct *vma) { p_mm_entry_t *entry, *n_entry; vma_entry_t *v_entry; struct mm_struct *mm = vma->vm_mm; v_entry = drm_ctl_alloc(sizeof(*v_entry), DRM_MEM_BUFOBJ); if (!v_entry) { DRM_ERROR("Allocation of vma pointer entry failed\n"); return -ENOMEM; } v_entry->vma = vma; list_add_tail(&v_entry->head, &bo->vma_list); list_for_each_entry(entry, &bo->p_mm_list, head) { if (mm == entry->mm) { atomic_inc(&entry->refcount); return 0; } else if ((unsigned long)mm < (unsigned long)entry->mm) ; } n_entry = drm_ctl_alloc(sizeof(*n_entry), DRM_MEM_BUFOBJ); if (!n_entry) { DRM_ERROR("Allocation of process mm pointer entry failed\n"); return -ENOMEM; } INIT_LIST_HEAD(&n_entry->head); n_entry->mm = mm; n_entry->locked = 0; atomic_set(&n_entry->refcount, 0); list_add_tail(&n_entry->head, &entry->head); return 0; } void drm_bo_delete_vma(struct drm_buffer_object * bo, struct vm_area_struct *vma) { p_mm_entry_t *entry, *n; vma_entry_t *v_entry, *v_n; int found = 0; struct mm_struct *mm = vma->vm_mm; list_for_each_entry_safe(v_entry, v_n, &bo->vma_list, head) { if (v_entry->vma == vma) { found = 1; list_del(&v_entry->head); drm_ctl_free(v_entry, sizeof(*v_entry), DRM_MEM_BUFOBJ); break; } } BUG_ON(!found); list_for_each_entry_safe(entry, n, &bo->p_mm_list, head) { if (mm == entry->mm) { if (atomic_add_negative(-1, &entry->refcount)) { list_del(&entry->head); BUG_ON(entry->locked); drm_ctl_free(entry, sizeof(*entry), DRM_MEM_BUFOBJ); } return; } } BUG_ON(1); } int drm_bo_lock_kmm(struct drm_buffer_object * bo) { p_mm_entry_t *entry; int lock_ok = 1; list_for_each_entry(entry, &bo->p_mm_list, head) { BUG_ON(entry->locked); if (!down_write_trylock(&entry->mm->mmap_sem)) { lock_ok = 0; break; } entry->locked = 1; } if (lock_ok) return 0; list_for_each_entry(entry, &bo->p_mm_list, head) { if (!entry->locked) break; up_write(&entry->mm->mmap_sem); entry->locked = 0; } /* * Possible deadlock. Try again. Our callers should handle this * and restart. 
*/ return -EAGAIN; } void drm_bo_unlock_kmm(struct drm_buffer_object * bo) { p_mm_entry_t *entry; list_for_each_entry(entry, &bo->p_mm_list, head) { BUG_ON(!entry->locked); up_write(&entry->mm->mmap_sem); entry->locked = 0; } } int drm_bo_remap_bound(struct drm_buffer_object *bo) { vma_entry_t *v_entry; int ret = 0; if (drm_mem_reg_is_pci(bo->dev, &bo->mem)) { list_for_each_entry(v_entry, &bo->vma_list, head) { ret = drm_bo_map_bound(v_entry->vma); if (ret) break; } } return ret; } void drm_bo_finish_unmap(struct drm_buffer_object *bo) { vma_entry_t *v_entry; list_for_each_entry(v_entry, &bo->vma_list, head) { v_entry->vma->vm_flags &= ~VM_PFNMAP; } } #endif #ifdef DRM_IDR_COMPAT_FN /* only called when idp->lock is held */ static void __free_layer(struct idr *idp, struct idr_layer *p) { p->ary[0] = idp->id_free; idp->id_free = p; idp->id_free_cnt++; } static void free_layer(struct idr *idp, struct idr_layer *p) { unsigned long flags; /* * Depends on the return element being zeroed. */ spin_lock_irqsave(&idp->lock, flags); __free_layer(idp, p); spin_unlock_irqrestore(&idp->lock, flags); } /** * idr_for_each - iterate through all stored pointers * @idp: idr handle * @fn: function to be called for each pointer * @data: data passed back to callback function * * Iterate over the pointers registered with the given idr. The * callback function will be called for each pointer currently * registered, passing the id, the pointer and the data pointer passed * to this function. It is not safe to modify the idr tree while in * the callback, so functions such as idr_get_new and idr_remove are * not allowed. * * We check the return of @fn each time. If it returns anything other * than 0, we break out and return that value. * * The caller must serialize idr_find() vs idr_get_new() and idr_remove(). */ int idr_for_each(struct idr *idp, int (*fn)(int id, void *p, void *data), void *data) { int n, id, max, error = 0; struct idr_layer *p; struct idr_layer *pa[MAX_LEVEL]; struct idr_layer **paa = &pa[0]; n = idp->layers * IDR_BITS; p = idp->top; max = 1 << n; id = 0; while (id < max) { while (n > 0 && p) { n -= IDR_BITS; *paa++ = p; p = p->ary[(id >> n) & IDR_MASK]; } if (p) { error = fn(id, (void *)p, data); if (error) break; } id += 1 << n; while (n < fls(id)) { n += IDR_BITS; p = *--paa; } } return error; } EXPORT_SYMBOL(idr_for_each); /** * idr_remove_all - remove all ids from the given idr tree * @idp: idr handle * * idr_destroy() only frees up unused, cached idp_layers, but this * function will remove all id mappings and leave all idp_layers * unused. * * A typical clean-up sequence for objects stored in an idr tree, will * use idr_for_each() to free all objects, if necessay, then * idr_remove_all() to remove all ids, and idr_destroy() to free * up the cached idr_layers. 
*/ void idr_remove_all(struct idr *idp) { int n, id, max, error = 0; struct idr_layer *p; struct idr_layer *pa[MAX_LEVEL]; struct idr_layer **paa = &pa[0]; n = idp->layers * IDR_BITS; p = idp->top; max = 1 << n; id = 0; while (id < max && !error) { while (n > IDR_BITS && p) { n -= IDR_BITS; *paa++ = p; p = p->ary[(id >> n) & IDR_MASK]; } id += 1 << n; while (n < fls(id)) { if (p) { memset(p, 0, sizeof *p); free_layer(idp, p); } n += IDR_BITS; p = *--paa; } } idp->top = NULL; idp->layers = 0; } EXPORT_SYMBOL(idr_remove_all); #endif /* DRM_IDR_COMPAT_FN */ #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)) /** * idr_replace - replace pointer for given id * @idp: idr handle * @ptr: pointer you want associated with the id * @id: lookup key * * Replace the pointer registered with an id and return the old value. * A -ENOENT return indicates that @id was not found. * A -EINVAL return indicates that @id was not within valid constraints. * * The caller must serialize vs idr_find(), idr_get_new(), and idr_remove(). */ void *idr_replace(struct idr *idp, void *ptr, int id) { int n; struct idr_layer *p, *old_p; n = idp->layers * IDR_BITS; p = idp->top; id &= MAX_ID_MASK; if (id >= (1 << n)) return ERR_PTR(-EINVAL); n -= IDR_BITS; while ((n > 0) && p) { p = p->ary[(id >> n) & IDR_MASK]; n -= IDR_BITS; } n = id & IDR_MASK; if (unlikely(p == NULL || !test_bit(n, &p->bitmap))) return ERR_PTR(-ENOENT); old_p = p->ary[n]; p->ary[n] = ptr; return (void *)old_p; } EXPORT_SYMBOL(idr_replace); #endif #if defined(DRM_KMAP_ATOMIC_PROT_PFN) #define drm_kmap_get_fixmap_pte(vaddr) \ pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), vaddr), (vaddr)), (vaddr)) void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t protection) { enum fixed_addresses idx; unsigned long vaddr; static pte_t *km_pte; static int initialized = 0; if (unlikely(!initialized)) { km_pte = drm_kmap_get_fixmap_pte(__fix_to_virt(FIX_KMAP_BEGIN)); initialized = 1; } pagefault_disable(); idx = type + KM_TYPE_NR*smp_processor_id(); vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); set_pte(km_pte-idx, pfn_pte(pfn, protection)); return (void*) vaddr; } EXPORT_SYMBOL(kmap_atomic_prot_pfn); #endif