/*-
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *    Gareth Hughes <gareth@valinux.com>
 *
 */

/** @file drm_context.c
 * Implementation of the context management ioctls.
 */

#include "drmP.h"
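
/*
 * For orientation: userspace normally drives these handlers through the
 * libdrm wrappers rather than the raw ioctls.  A minimal sketch of the
 * round trip (libdrm API, not part of this file):
 *
 *	drm_context_t ctx;
 *
 *	if (drmCreateContext(fd, &ctx) == 0) {    (DRM_IOCTL_ADD_CTX -> drm_addctx)
 *		... render under ctx ...
 *		drmDestroyContext(fd, ctx);        (DRM_IOCTL_RM_CTX -> drm_rmctx)
 *	}
 */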

/* ================================================================
 * Context bitmap support
 */

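/*
 * Return a context handle to the bitmap.  The handle comes from
 * userspace, so it is range-checked before the bit is cleared.
 */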
void drm_ctxbitmap_free(drm_device_t *dev, int ctx_handle)
{
	if (ctx_handle < 0 || ctx_handle >= DRM_MAX_CTXBITMAP ||
	    dev->ctx_bitmap == NULL) {
		DRM_ERROR("Attempt to free invalid context handle: %d\n",
		    ctx_handle);
		return;
	}

	DRM_LOCK();
	clear_bit(ctx_handle, dev->ctx_bitmap);
	/* The SAREA array may be absent if its allocation failed. */
	if (dev->context_sareas != NULL)
		dev->context_sareas[ctx_handle] = NULL;
	DRM_UNLOCK();
}

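/*
 * Hand out the lowest free context handle, growing the per-context
 * SAREA pointer array whenever the new handle pushes past max_context.
 * Returns -1 if the bitmap is missing, full, or memory is exhausted.
 */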
int drm_ctxbitmap_next(drm_device_t *dev)
{
	int bit;

	if (dev->ctx_bitmap == NULL)
		return -1;

	DRM_LOCK();
	bit = find_first_zero_bit( dev->ctx_bitmap, DRM_MAX_CTXBITMAP );
	if (bit >= DRM_MAX_CTXBITMAP) {
		DRM_UNLOCK();
		return -1;
	}

	set_bit(bit, dev->ctx_bitmap);
	DRM_DEBUG("drm_ctxbitmap_next bit : %d\n", bit);
	if ((bit+1) > dev->max_context) {
		dev->max_context = (bit+1);
		if (dev->context_sareas != NULL) {
			drm_local_map_t **ctx_sareas;

			ctx_sareas = realloc(dev->context_sareas,
			    dev->max_context * sizeof(*dev->context_sareas),
			    M_DRM, M_NOWAIT);
			if (ctx_sareas == NULL) {
				clear_bit(bit, dev->ctx_bitmap);
				DRM_UNLOCK();
				return -1;
			}
			dev->context_sareas = ctx_sareas;
			dev->context_sareas[bit] = NULL;
		} else {
			/* max_context == 1 at this point */
			dev->context_sareas = malloc(dev->max_context * 
			    sizeof(*dev->context_sareas), M_DRM, M_NOWAIT);
			if (dev->context_sareas == NULL) {
				clear_bit(bit, dev->ctx_bitmap);
				DRM_UNLOCK();
				return -1;
			}
			dev->context_sareas[bit] = NULL;
		}
	}
	DRM_UNLOCK();
	return bit;
}

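/*
 * Allocate the context bitmap at device setup and pre-reserve the first
 * DRM_RESERVED_CONTEXTS handles (handle 0 is the kernel context).
 */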
int drm_ctxbitmap_init(drm_device_t *dev)
{
	int i;
	int temp;

	DRM_LOCK();
	dev->ctx_bitmap = malloc(PAGE_SIZE, M_DRM, M_NOWAIT | M_ZERO);
	if ( dev->ctx_bitmap == NULL ) {
		DRM_UNLOCK();
		return ENOMEM;
	}
	dev->context_sareas = NULL;
	dev->max_context = -1;
	DRM_UNLOCK();

	for ( i = 0 ; i < DRM_RESERVED_CONTEXTS ; i++ ) {
		temp = drm_ctxbitmap_next(dev);
		DRM_DEBUG( "drm_ctxbitmap_init : %d\n", temp );
	}

	return 0;
}

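/* Release the context bitmap and SAREA pointer array at teardown. */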
void drm_ctxbitmap_cleanup(drm_device_t *dev)
{
	DRM_LOCK();
	if (dev->context_sareas != NULL)
		free(dev->context_sareas, M_DRM);
	free(dev->ctx_bitmap, M_DRM);
	DRM_UNLOCK();
}

/* ================================================================
 * Per Context SAREA Support
 */

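/*
 * DRM_IOCTL_GET_SAREA_CTX handler: return the user handle of the map
 * previously bound to a context as its private SAREA.
 */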
int drm_getsareactx(drm_device_t *dev, void *data, struct drm_file *file_priv)
{
	drm_ctx_priv_map_t *request = data;
	drm_local_map_t *map;

	DRM_LOCK();
	if (dev->max_context < 0 ||
	    request->ctx_id >= (unsigned) dev->max_context) {
		DRM_UNLOCK();
		return EINVAL;
	}

	map = dev->context_sareas[request->ctx_id];
	if (map == NULL) {
		/* No SAREA has been bound to this context yet. */
		DRM_UNLOCK();
		return EINVAL;
	}
	DRM_UNLOCK();

	request->handle = map->handle;

	return 0;
}

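/*
 * DRM_IOCTL_SET_SAREA_CTX handler: look the map up by its user handle
 * in the device maplist and bind it to the given context.
 */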
int drm_setsareactx(drm_device_t *dev, void *data, struct drm_file *file_priv)
{
	drm_ctx_priv_map_t *request = data;
	drm_local_map_t *map = NULL;

	DRM_LOCK();
	TAILQ_FOREACH(map, &dev->maplist, link) {
		if (map->handle == request->handle) {
			if (dev->max_context < 0)
				goto bad;
			if (request->ctx_id >= (unsigned) dev->max_context)
				goto bad;
			dev->context_sareas[request->ctx_id] = map;
			DRM_UNLOCK();
			return 0;
		}
	}

bad:
	DRM_UNLOCK();
	return EINVAL;
}

/* ================================================================
 * The actual DRM context handling routines
 */

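/*
 * Begin a context switch.  Bit 0 of context_flag marks a switch in
 * progress; it stays set until drm_context_switch_complete() runs,
 * unless the target is already the current context.
 */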
int drm_context_switch(drm_device_t *dev, int old, int new)
{
	if ( test_and_set_bit( 0, &dev->context_flag ) ) {
		DRM_ERROR( "Reentering -- FIXME\n" );
		return EBUSY;
	}

	DRM_DEBUG( "Context switch from %d to %d\n", old, new );

	if ( new == dev->last_context ) {
		clear_bit( 0, &dev->context_flag );
		return 0;
	}

	return 0;
}

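/*
 * Complete a context switch begun by drm_context_switch(): record the
 * new context as current and clear the switch-in-progress flag.
 */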
int drm_context_switch_complete(drm_device_t *dev, int new)
{
	dev->last_context = new;  /* PRE/POST: This is the _only_ writer. */

	if ( !_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) ) {
		DRM_ERROR( "Lock isn't held after context switch\n" );
	}

	/* If a context switch is ever initiated when the kernel holds the
	 * lock, release that lock here.
	 */
	clear_bit( 0, &dev->context_flag );

	return 0;
}

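/*
 * DRM_IOCTL_RES_CTX handler: copy out handles for the reserved
 * contexts, and report how many are reserved.
 */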
int drm_resctx(drm_device_t *dev, void *data, struct drm_file *file_priv)
{
	drm_ctx_res_t *res = data;
	drm_ctx_t ctx;
	int i;

	if ( res->count >= DRM_RESERVED_CONTEXTS ) {
		bzero(&ctx, sizeof(ctx));
		for ( i = 0 ; i < DRM_RESERVED_CONTEXTS ; i++ ) {
			ctx.handle = i;
			if ( DRM_COPY_TO_USER( &res->contexts[i],
					   &ctx, sizeof(ctx) ) )
				return EFAULT;
		}
	}
	res->count = DRM_RESERVED_CONTEXTS;

	return 0;
}

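/*
 * DRM_IOCTL_ADD_CTX handler: allocate a fresh context handle, never
 * handing out the kernel's own context, and run the driver's context
 * constructor if it has one.
 */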
int drm_addctx(drm_device_t *dev, void *data, struct drm_file *file_priv)
{
	drm_ctx_t *ctx = data;

	ctx->handle = drm_ctxbitmap_next(dev);
	if ( ctx->handle == DRM_KERNEL_CONTEXT ) {
		/* Skip kernel's context and get a new one. */
		ctx->handle = drm_ctxbitmap_next(dev);
	}
	DRM_DEBUG( "%d\n", ctx->handle );
	if ( ctx->handle == -1 ) {
		DRM_DEBUG( "Not enough free contexts.\n" );
		/* Should this return -EBUSY instead? */
		return ENOMEM;
	}

	if (dev->driver.context_ctor && ctx->handle != DRM_KERNEL_CONTEXT) {
		DRM_LOCK();
		dev->driver.context_ctor(dev, ctx->handle);
		DRM_UNLOCK();
	}

	return 0;
}

int drm_modctx(drm_device_t *dev, void *data, struct drm_file *file_priv)
{
	/* This does nothing */
	return 0;
}

int drm_getctx(drm_device_t *dev, void *data, struct drm_file *file_priv)
{
	drm_ctx_t *ctx = data;

	/* This is 0, because we don't handle any context flags */
	ctx->flags = 0;

	return 0;
}

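/* DRM_IOCTL_SWITCH_CTX handler: begin a switch to the given context. */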
int drm_switchctx(drm_device_t *dev, void *data, struct drm_file *file_priv)
{
	drm_ctx_t *ctx = data;

	DRM_DEBUG( "%d\n", ctx->handle );
	return drm_context_switch(dev, dev->last_context, ctx->handle);
}

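/* DRM_IOCTL_NEW_CTX handler: complete a previously begun context switch. */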
int drm_newctx(drm_device_t *dev, void *data, struct drm_file *file_priv)
{
	drm_ctx_t *ctx = data;

	DRM_DEBUG( "%d\n", ctx->handle );
	drm_context_switch_complete(dev, ctx->handle);

	return 0;
}

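/*
 * DRM_IOCTL_RM_CTX handler: tear down a context, running the driver's
 * context destructor and returning the handle to the bitmap.  The
 * kernel context itself is never freed.
 */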
int drm_rmctx(drm_device_t *dev, void *data, struct drm_file *file_priv)
{
	drm_ctx_t *ctx = data;

	DRM_DEBUG( "%d\n", ctx->handle );
	if ( ctx->handle != DRM_KERNEL_CONTEXT ) {
		if (dev->driver.context_dtor) {
			DRM_LOCK();
			dev->driver.context_dtor(dev, ctx->handle);
			DRM_UNLOCK();
		}

		drm_ctxbitmap_free(dev, ctx->handle);
	}

	return 0;
}