/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/
/*
 * Simple memory MANager interface that keeps track of allocated regions on a
 * per "owner" basis. All regions associated with an "owner" can be released
 * with a simple call, typically when the "owner" exits. The owner is any
 * "unsigned long" identifier; typically it is a pointer to a file private
 * struct or a context identifier.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#ifndef DRM_SMAN_H
#define DRM_SMAN_H

#include "drmP.h"
#include "drm_hashtab.h"

/*
 * A class that is an abstraction of a simple memory allocator.
 * The sman implementation provides a default allocator based on the
 * drm_memrange.c implementation, but the user can replace it.
 * See the SiS implementation, which may use the SiS FB kernel module
 * for memory management.
 */

struct drm_sman_mm {
	/* private info. If allocated, needs to be destroyed by the destroy
	   function */
	void *private;

	/* Allocate a memory block with given size and alignment.
	   Return an opaque reference to the memory block */

	void *(*allocate) (void *private, unsigned long size,
			   unsigned alignment);

	/* Free a memory block. "ref" is the opaque reference that we got from
	   the "alloc" function */

	void (*free) (void *private, void *ref);

	/* Free all resources associated with this allocator */

	void (*destroy) (void *private);

	/* Return a memory offset from the opaque reference returned from the
	   "alloc" function */

	unsigned long (*offset) (void *private, void *ref);
};
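
/*
 * Illustrative sketch only (all "my_*" names are hypothetical, not part of
 * this API): a driver with its own allocator could wrap it in a
 * drm_sman_mm like this and register it with drm_sman_set_manager() below.
 *
 *	static void *my_allocate(void *private, unsigned long size,
 *				 unsigned alignment)
 *	{
 *		return my_fb_alloc(private, size, alignment);
 *	}
 *
 *	static void my_free(void *private, void *ref)
 *	{
 *		my_fb_free(private, ref);
 *	}
 *
 *	static unsigned long my_offset(void *private, void *ref)
 *	{
 *		return my_fb_offset(private, ref);
 *	}
 *
 *	static void my_destroy(void *private)
 *	{
 *	}
 *
 *	static struct drm_sman_mm my_mm = {
 *		.allocate = my_allocate,
 *		.free = my_free,
 *		.destroy = my_destroy,
 *		.offset = my_offset,
 *	};
 */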

struct drm_memblock_item {
	struct list_head owner_list;
	struct drm_hash_item user_hash;
	void *mm_info;
	struct drm_sman_mm *mm;
	struct drm_sman *sman;
};

struct drm_sman {
	struct drm_sman_mm *mm;
	int num_managers;
	struct drm_open_hash owner_hash_tab;
	struct drm_open_hash user_hash_tab;
	struct list_head owner_items;
};

/*
 * Take down a memory manager. This function should only be called after a
 * successful init and after a call to drm_sman_cleanup.
 */

extern void drm_sman_takedown(struct drm_sman * sman);

/*
 * Allocate structures for a manager.
 * num_managers is the number of memory pools to manage (VRAM, AGP, ...).
 * user_order is the log2 of the number of buckets in the user hash table.
 *	    Set this to approximately log2 of the maximum number of memory
 *	    regions that will be allocated for _all_ pools together.
 * owner_order is the log2 of the number of buckets in the owner hash table.
 *	    Set this to approximately log2 of the number of client file
 *	    connections that will be using the manager.
 *
 */

extern int drm_sman_init(struct drm_sman * sman, unsigned int num_managers,
			 unsigned int user_order, unsigned int owner_order);

/*
 * Initialize a drm_memrange.c allocator. Should be called only once for each
 * manager unless a customized allocator is used.
 */

extern int drm_sman_set_range(struct drm_sman * sman, unsigned int manager,
			      unsigned long start, unsigned long size);
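
/*
 * Typical bring-up, as a minimal sketch (error handling is driver specific,
 * and "fb_size" is a made-up pool size, not part of this API):
 *
 *	struct drm_sman sman;
 *
 *	if (drm_sman_init(&sman, 1, 12, 8))
 *		goto err;
 *	if (drm_sman_set_range(&sman, 0, 0, fb_size))
 *		goto err;
 *	...
 *	drm_sman_cleanup(&sman);
 *	drm_sman_takedown(&sman);
 */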

/*
 * Initialize a customized allocator for one of the managers.
 * (See the SiS module). The object pointed to by "allocator" is copied,
 * so it can be destroyed after this call.
 */

extern int drm_sman_set_manager(struct drm_sman * sman, unsigned int manager,
				struct drm_sman_mm * allocator);

/*
 * Allocate a memory block. Alignment is not implemented yet.
 */

extern struct drm_memblock_item *drm_sman_alloc(struct drm_sman * sman,
						unsigned int manager,
						unsigned long size,
						unsigned alignment,
						unsigned long owner);
/*
 * Free a memory block identified by its user hash key.
 */

extern int drm_sman_free_key(struct drm_sman * sman, unsigned int key);

/*
 * Returns 1 iff there are no stale memory blocks associated with this owner.
 * Typically called to determine whether we need to idle the hardware and call
 * drm_sman_owner_cleanup. If there are no stale memory blocks, this call also
 * removes all resources associated with the owner.
 */

extern int drm_sman_owner_clean(struct drm_sman * sman, unsigned long owner);

/*
 * Frees all stale memory blocks associated with this owner. Note that this
 * requires that the hardware is finished with all blocks, so the graphics engine
 * should be idled before this call is made. This function also frees
 * any resources associated with "owner" and should be called when owner
 * is not going to be referenced anymore.
 */

extern void drm_sman_owner_cleanup(struct drm_sman * sman, unsigned long owner);
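
/*
 * Typical per-owner teardown, as a sketch of the intended protocol
 * ("idle_hardware" is a placeholder for whatever the driver uses to idle
 * the engine, not part of this API):
 *
 *	if (!drm_sman_owner_clean(sman, owner)) {
 *		idle_hardware(dev);
 *		drm_sman_owner_cleanup(sman, owner);
 *	}
 */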

/*
 * Frees all stale memory blocks associated with the memory manager.
 * See idling above.
 */

extern void drm_sman_cleanup(struct drm_sman * sman);

#endif
/* drm_context.h -- IOCTLs for generic contexts -*- linux-c -*-
 * Created: Fri Nov 24 18:31:37 2000 by gareth@valinux.com
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *    Gareth Hughes <gareth@valinux.com>
 * $FreeBSD: src/sys/dev/drm/drm_context.h,v 1.3 2003/03/09 02:08:28 anholt Exp $
 */

#include "drmP.h"

#if __HAVE_CTX_BITMAP

/* ================================================================
 * Context bitmap support
 */

void DRM(ctxbitmap_free)( drm_device_t *dev, int ctx_handle )
{
	if ( ctx_handle < 0 ) goto failed;
	if ( !dev->ctx_bitmap ) goto failed;

	if ( ctx_handle < DRM_MAX_CTXBITMAP ) {
		DRM_LOCK;
		clear_bit( ctx_handle, dev->ctx_bitmap );
		dev->context_sareas[ctx_handle] = NULL;
		DRM_UNLOCK;
		return;
	}
failed:
	DRM_ERROR( "Attempt to free invalid context handle: %d\n",
		   ctx_handle );
	return;
}

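/*
 * Find and reserve the lowest free context handle, growing the per-context
 * SAREA array when needed. Returns the new handle, or -1 if the bitmap is
 * missing or full, or if the SAREA array cannot be (re)allocated.
 */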
int DRM(ctxbitmap_next)( drm_device_t *dev )
{
	int bit;

	if(!dev->ctx_bitmap) return -1;

	DRM_LOCK;
	bit = find_first_zero_bit( dev->ctx_bitmap, DRM_MAX_CTXBITMAP );
	if ( bit < DRM_MAX_CTXBITMAP ) {
		set_bit( bit, dev->ctx_bitmap );
	   	DRM_DEBUG( "drm_ctxbitmap_next bit : %d\n", bit );
		if((bit+1) > dev->max_context) {
			dev->max_context = (bit+1);
			if(dev->context_sareas) {
				drm_local_map_t **ctx_sareas;

				ctx_sareas = DRM(realloc)(dev->context_sareas,
						(dev->max_context - 1) * 
						sizeof(*dev->context_sareas),
						dev->max_context * 
						sizeof(*dev->context_sareas),
						DRM_MEM_MAPS);
				if(!ctx_sareas) {
					clear_bit(bit, dev->ctx_bitmap);
					DRM_UNLOCK;
					return -1;
				}
				dev->context_sareas = ctx_sareas;
				dev->context_sareas[bit] = NULL;
			} else {
				/* max_context == 1 at this point */
				dev->context_sareas = DRM(alloc)(
						dev->max_context * 
						sizeof(*dev->context_sareas),
						DRM_MEM_MAPS);
				if(!dev->context_sareas) {
					clear_bit(bit, dev->ctx_bitmap);
					DRM_UNLOCK;
					return -1;
				}
				dev->context_sareas[bit] = NULL;
			}
		}
		DRM_UNLOCK;
		return bit;
	}
	DRM_UNLOCK;
	return -1;
}

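/*
 * Allocate and zero the context bitmap, reset the per-context SAREA array,
 * and reserve the first DRM_RESERVED_CONTEXTS handles.
 */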
int DRM(ctxbitmap_init)( drm_device_t *dev )
{
	int i;
   	int temp;

	DRM_LOCK;
	dev->ctx_bitmap = (atomic_t *) DRM(alloc)( PAGE_SIZE,
							DRM_MEM_CTXBITMAP );
	if ( dev->ctx_bitmap == NULL ) {
		DRM_UNLOCK;
		return DRM_ERR(ENOMEM);
	}
	memset( (void *)dev->ctx_bitmap, 0, PAGE_SIZE );
	dev->context_sareas = NULL;
	dev->max_context = -1;
	DRM_UNLOCK;

	for ( i = 0 ; i < DRM_RESERVED_CONTEXTS ; i++ ) {
		temp = DRM(ctxbitmap_next)( dev );
	   	DRM_DEBUG( "drm_ctxbitmap_init : %d\n", temp );
	}

	return 0;
}

void DRM(ctxbitmap_cleanup)( drm_device_t *dev )
{
	DRM_LOCK;
	if( dev->context_sareas ) DRM(free)( dev->context_sareas,
					     sizeof(*dev->context_sareas) * 
					     dev->max_context,
					     DRM_MEM_MAPS );
	DRM(free)( (void *)dev->ctx_bitmap, PAGE_SIZE, DRM_MEM_CTXBITMAP );
	DRM_UNLOCK;
}

/* ================================================================
 * Per Context SAREA Support
 */

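/*
 * Return the user-space handle of the per-context SAREA map bound to the
 * requested context id.
 */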
int DRM(getsareactx)( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;
	drm_ctx_priv_map_t request;
	drm_local_map_t *map;

	DRM_COPY_FROM_USER_IOCTL( request, (drm_ctx_priv_map_t *)data, 
			   sizeof(request) );

	DRM_LOCK;
	if (dev->max_context < 0 || request.ctx_id >= (unsigned) dev->max_context) {
		DRM_UNLOCK;
		return DRM_ERR(EINVAL);
	}

	map = dev->context_sareas[request.ctx_id];
	if (map == NULL) {
		DRM_UNLOCK;
		return DRM_ERR(EINVAL);
	}
	DRM_UNLOCK;

	request.handle = map->handle;

	DRM_COPY_TO_USER_IOCTL( (drm_ctx_priv_map_t *)data, request, sizeof(request) );

	return 0;
}

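/*
 * Look up an existing map by its user-space handle and bind it as the
 * per-context SAREA for the requested context id.
 */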
int DRM(setsareactx)( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;
	drm_ctx_priv_map_t request;
	drm_local_map_t *map = NULL;
	drm_map_list_entry_t *list;

	DRM_COPY_FROM_USER_IOCTL( request, (drm_ctx_priv_map_t *)data,
			   sizeof(request) );

	DRM_LOCK;
	TAILQ_FOREACH(list, dev->maplist, link) {
		map=list->map;
		if(map->handle == request.handle) {
			if (dev->max_context < 0)
				goto bad;
			if (request.ctx_id >= (unsigned) dev->max_context)
				goto bad;
			dev->context_sareas[request.ctx_id] = map;
			DRM_UNLOCK;
			return 0;
		}
	}

bad:
	DRM_UNLOCK;
	return DRM_ERR(EINVAL);
}

/* ================================================================
 * The actual DRM context handling routines
 */

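/*
 * Start a switch from context "old" to context "new". Returns EBUSY if a
 * switch is already in progress; switching to the current context is a
 * no-op. Drivers without hardware context support (DRM_FLAG_NOCTX) complete
 * the switch immediately.
 */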
int DRM(context_switch)( drm_device_t *dev, int old, int new )
{
        if ( test_and_set_bit( 0, &dev->context_flag ) ) {
                DRM_ERROR( "Reentering -- FIXME\n" );
                return DRM_ERR(EBUSY);
        }

        DRM_DEBUG( "Context switch from %d to %d\n", old, new );

        if ( new == dev->last_context ) {
                clear_bit( 0, &dev->context_flag );
                return 0;
        }

        if ( DRM(flags) & DRM_FLAG_NOCTX ) {
                DRM(context_switch_complete)( dev, new );
        }

        return 0;
}

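/*
 * Record that the hardware has switched to the new context and wake up
 * anyone waiting on the context flag. Complains if the hardware lock is not
 * held at this point.
 */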
int DRM(context_switch_complete)( drm_device_t *dev, int new )
{
        dev->last_context = new;  /* PRE/POST: This is the _only_ writer. */
        dev->last_switch  = jiffies;

        if ( !_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) ) {
                DRM_ERROR( "Lock isn't held after context switch\n" );
        }

				/* If a context switch is ever initiated
                                   when the kernel holds the lock, release
                                   that lock here. */
        clear_bit( 0, &dev->context_flag );
        DRM_WAKEUP( (void *)&dev->context_wait );

        return 0;
}

int DRM(resctx)( DRM_IOCTL_ARGS )
{
	drm_ctx_res_t res;
	drm_ctx_t ctx;
	int i;

	DRM_COPY_FROM_USER_IOCTL( res, (drm_ctx_res_t *)data, sizeof(res) );

	if ( res.count >= DRM_RESERVED_CONTEXTS ) {
		memset( &ctx, 0, sizeof(ctx) );
		for ( i = 0 ; i < DRM_RESERVED_CONTEXTS ; i++ ) {
			ctx.handle = i;
			if ( DRM_COPY_TO_USER( &res.contexts[i],
					   &ctx, sizeof(ctx) ) )
				return DRM_ERR(EFAULT);
		}
	}
	res.count = DRM_RESERVED_CONTEXTS;

	DRM_COPY_TO_USER_IOCTL( (drm_ctx_res_t *)data, res, sizeof(res) );

	return 0;
}

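/*
 * Reserve a fresh context handle from the bitmap for userland, skipping the
 * reserved kernel context. Returns ENOMEM when no handle is available.
 */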
int DRM(addctx)( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;
	drm_ctx_t ctx;

	DRM_COPY_FROM_USER_IOCTL( ctx, (drm_ctx_t *)data, sizeof(ctx) );

	ctx.handle = DRM(ctxbitmap_next)( dev );
	if ( ctx.handle == DRM_KERNEL_CONTEXT ) {
				/* Skip kernel's context and get a new one. */
		ctx.handle = DRM(ctxbitmap_next)( dev );
	}
	DRM_DEBUG( "%d\n", ctx.handle );
	if ( ctx.handle == -1 ) {
		DRM_DEBUG( "Not enough free contexts.\n" );
				/* Should this return -EBUSY instead? */
		return DRM_ERR(ENOMEM);
	}

	DRM_COPY_TO_USER_IOCTL( (drm_ctx_t *)data, ctx, sizeof(ctx) );

	return 0;
}

int DRM(modctx)( DRM_IOCTL_ARGS )
{
	/* This does nothing */
	return 0;
}

int DRM(getctx)( DRM_IOCTL_ARGS )
{
	drm_ctx_t ctx;

	DRM_COPY_FROM_USER_IOCTL( ctx, (drm_ctx_t *)data, sizeof(ctx) );

	/* This is 0, because we don't handle any context flags */
	ctx.flags = 0;

	DRM_COPY_TO_USER_IOCTL( (drm_ctx_t *)data, ctx, sizeof(ctx) );

	return 0;
}

int DRM(switchctx)( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;
	drm_ctx_t ctx;

	DRM_COPY_FROM_USER_IOCTL( ctx, (drm_ctx_t *)data, sizeof(ctx) );

	DRM_DEBUG( "%d\n", ctx.handle );
	return DRM(context_switch)( dev, dev->last_context, ctx.handle );
}

int DRM(newctx)( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;
	drm_ctx_t ctx;

	DRM_COPY_FROM_USER_IOCTL( ctx, (drm_ctx_t *)data, sizeof(ctx) );

	DRM_DEBUG( "%d\n", ctx.handle );
	DRM(context_switch_complete)( dev, ctx.handle );

	return 0;
}

int DRM(rmctx)( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;
	drm_ctx_t ctx;

	DRM_COPY_FROM_USER_IOCTL( ctx, (drm_ctx_t *)data, sizeof(ctx) );

	DRM_DEBUG( "%d\n", ctx.handle );
	if ( ctx.handle != DRM_KERNEL_CONTEXT ) {
		DRM(ctxbitmap_free)( dev, ctx.handle );
	}

	return 0;
}


#else /* __HAVE_CTX_BITMAP */

/* ================================================================
 * Old-style context support
 */


int DRM(context_switch)(drm_device_t *dev, int old, int new)
{
	drm_queue_t *q;

#if 0
	atomic_inc(&dev->total_ctx);
#endif

	if (test_and_set_bit(0, &dev->context_flag)) {
		DRM_ERROR("Reentering -- FIXME\n");
		return DRM_ERR(EBUSY);
	}

	DRM_DEBUG("Context switch from %d to %d\n", old, new);

	if (new >= dev->queue_count) {
		clear_bit(0, &dev->context_flag);
		return DRM_ERR(EINVAL);
	}

	if (new == dev->last_context) {
		clear_bit(0, &dev->context_flag);
		return 0;
	}

	q = dev->queuelist[new];
	atomic_inc(&q->use_count);
	if (atomic_read(&q->use_count) == 1) {
		atomic_dec(&q->use_count);
		clear_bit(0, &dev->context_flag);
		return DRM_ERR(EINVAL);
	}

	if (DRM(flags) & DRM_FLAG_NOCTX) {
		DRM(context_switch_complete)(dev, new);
	}

	atomic_dec(&q->use_count);

	return 0;
}

int DRM(context_switch_complete)(drm_device_t *dev, int new)
{
	drm_device_dma_t *dma = dev->dma;

	dev->last_context = new;  /* PRE/POST: This is the _only_ writer. */
	dev->last_switch  = jiffies;

	if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
		DRM_ERROR("Lock isn't held after context switch\n");
	}

	if (!dma || !(dma->next_buffer && dma->next_buffer->while_locked)) {
		if (DRM(lock_free)(dev, &dev->lock.hw_lock->lock,
				  DRM_KERNEL_CONTEXT)) {
			DRM_ERROR("Cannot free lock\n");
		}
	}

	clear_bit(0, &dev->context_flag);
	DRM_WAKEUP_INT(&dev->context_wait);

	return 0;
}

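/*
 * Reset a freshly allocated queue: clear its counters and wait heads, take
 * the flags from the new context, and create its waitlist. Complains if the
 * queue still appears to be in use.
 */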
static int DRM(init_queue)(drm_device_t *dev, drm_queue_t *q, drm_ctx_t *ctx)
{
	DRM_DEBUG("\n");

	if (atomic_read(&q->use_count) != 1
	    || atomic_read(&q->finalization)
	    || atomic_read(&q->block_count)) {
		DRM_ERROR("New queue is already in use: u%ld f%ld b%ld\n",
			  (unsigned long)atomic_read(&q->use_count),
			  (unsigned long)atomic_read(&q->finalization),
			  (unsigned long)atomic_read(&q->block_count));
	}

	atomic_set(&q->finalization,  0);
	atomic_set(&q->block_count,   0);
	atomic_set(&q->block_read,    0);
	atomic_set(&q->block_write,   0);
	atomic_set(&q->total_queued,  0);
	atomic_set(&q->total_flushed, 0);
	atomic_set(&q->total_locks,   0);

	q->write_queue = 0;
	q->read_queue = 0;
	q->flush_queue = 0;

	q->flags = ctx->flags;

	DRM(waitlist_create)(&q->waitlist, dev->dma->buf_count);

	return 0;
}


/* drm_alloc_queue:
PRE: 1) dev->queuelist[0..dev->queue_count] is allocated and will not
	disappear (so all deallocation must be done after IOCTLs are off)
     2) dev->queue_count < dev->queue_slots
     3) dev->queuelist[i].use_count == 0 and
	dev->queuelist[i].finalization == 0 if i not in use
POST: 1) dev->queuelist[i].use_count == 1
      2) dev->queue_count < dev->queue_slots */

static int DRM(alloc_queue)(drm_device_t *dev)
{
	int	    i;
	drm_queue_t *queue;
	int	    oldslots;
	int	    newslots;
				/* Check for a free queue */
	for (i = 0; i < dev->queue_count; i++) {
		atomic_inc(&dev->queuelist[i]->use_count);
		if (atomic_read(&dev->queuelist[i]->use_count) == 1
		    && !atomic_read(&dev->queuelist[i]->finalization)) {
			DRM_DEBUG("%d (free)\n", i);
			return i;
		}
		atomic_dec(&dev->queuelist[i]->use_count);
	}
				/* Allocate a new queue */
	DRM_LOCK;

	queue = gamma_alloc(sizeof(*queue), DRM_MEM_QUEUES);
	if (!queue) {
		DRM_UNLOCK;
		return DRM_ERR(ENOMEM);
	}
	memset(queue, 0, sizeof(*queue));
	atomic_set(&queue->use_count, 1);

	++dev->queue_count;
	if (dev->queue_count >= dev->queue_slots) {
		oldslots = dev->queue_slots * sizeof(*dev->queuelist);
		if (!dev->queue_slots) dev->queue_slots = 1;
		dev->queue_slots *= 2;
		newslots = dev->queue_slots * sizeof(*dev->queuelist);

		dev->queuelist = DRM(realloc)(dev->queuelist,
					      oldslots,
					      newslots,
					      DRM_MEM_QUEUES);
		if (!dev->queuelist) {
			DRM_UNLOCK;
			DRM_DEBUG("out of memory\n");
			return DRM_ERR(ENOMEM);
		}
	}
	dev->queuelist[dev->queue_count-1] = queue;

	DRM_UNLOCK;
	DRM_DEBUG("%d (new)\n", dev->queue_count - 1);
	return dev->queue_count - 1;
}

int DRM(resctx)( DRM_IOCTL_ARGS )
{
	drm_ctx_res_t	res;
	drm_ctx_t	ctx;
	int		i;

	DRM_DEBUG("%d\n", DRM_RESERVED_CONTEXTS);
	
	DRM_COPY_FROM_USER_IOCTL( res, (drm_ctx_res_t *)data, sizeof(res) );

	if (res.count >= DRM_RESERVED_CONTEXTS) {
		memset(&ctx, 0, sizeof(ctx));
		for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
			ctx.handle = i;
			if (DRM_COPY_TO_USER(&res.contexts[i],
					 &ctx,
					 sizeof(ctx)))
				return DRM_ERR(EFAULT);
		}
	}
	res.count = DRM_RESERVED_CONTEXTS;

	DRM_COPY_TO_USER_IOCTL( (drm_ctx_res_t *)data, res, sizeof(res) );

	return 0;
}

int DRM(addctx)( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;
	drm_ctx_t	ctx;

	DRM_COPY_FROM_USER_IOCTL( ctx, (drm_ctx_t *)data, sizeof(ctx) );

	if ((ctx.handle = DRM(alloc_queue)(dev)) == DRM_KERNEL_CONTEXT) {
				/* Init kernel's context and get a new one. */
		DRM(init_queue)(dev, dev->queuelist[ctx.handle], &ctx);
		ctx.handle = DRM(alloc_queue)(dev);
	}
	DRM(init_queue)(dev, dev->queuelist[ctx.handle], &ctx);
	DRM_DEBUG("%d\n", ctx.handle);
	
	DRM_COPY_TO_USER_IOCTL( (drm_ctx_t *)data, ctx, sizeof(ctx) );

	return 0;
}

int DRM(modctx)( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;
	drm_ctx_t	ctx;
	drm_queue_t	*q;

	DRM_COPY_FROM_USER_IOCTL( ctx, (drm_ctx_t *)data, sizeof(ctx) );

	DRM_DEBUG("%d\n", ctx.handle);

	if (ctx.handle < 0 || ctx.handle >= dev->queue_count) 
		return DRM_ERR(EINVAL);
	q = dev->queuelist[ctx.handle];

	atomic_inc(&q->use_count);
	if (atomic_read(&q->use_count) == 1) {
				/* No longer in use */
		atomic_dec(&q->use_count);
		return DRM_ERR(EINVAL);
	}

	if (DRM_BUFCOUNT(&q->waitlist)) {
		atomic_dec(&q->use_count);
		return DRM_ERR(EBUSY);
	}

	q->flags = ctx.flags;

	atomic_dec(&q->use_count);
	return 0;
}

int DRM(getctx)( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;
	drm_ctx_t	ctx;
	drm_queue_t	*q;

	DRM_COPY_FROM_USER_IOCTL( ctx, (drm_ctx_t *)data, sizeof(ctx) );

	DRM_DEBUG("%d\n", ctx.handle);

	if (ctx.handle < 0 || ctx.handle >= dev->queue_count)
		return DRM_ERR(EINVAL);
	q = dev->queuelist[ctx.handle];

	atomic_inc(&q->use_count);
	if (atomic_read(&q->use_count) == 1) {
				/* No longer in use */
		atomic_dec(&q->use_count);
		return DRM_ERR(EINVAL);
	}

	ctx.flags = q->flags;
	atomic_dec(&q->use_count);

	DRM_COPY_TO_USER_IOCTL( (drm_ctx_t *)data, ctx, sizeof(ctx) );

	return 0;
}

int DRM(switchctx)( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;
	drm_ctx_t	ctx;

	DRM_COPY_FROM_USER_IOCTL( ctx, (drm_ctx_t *)data, sizeof(ctx) );

	DRM_DEBUG("%d\n", ctx.handle);
	return DRM(context_switch)(dev, dev->last_context, ctx.handle);
}

int DRM(newctx)( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;
	drm_ctx_t	ctx;

	DRM_COPY_FROM_USER_IOCTL( ctx, (drm_ctx_t *)data, sizeof(ctx) );

	DRM_DEBUG("%d\n", ctx.handle);
	DRM(context_switch_complete)(dev, ctx.handle);

	return 0;
}

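/*
 * Destroy a context queue: mark it as finalizing, drain its waitlist with
 * the interrupt handler held off, and wake any processes blocked on the
 * queue. The queue becomes reusable once both use_count and finalization
 * drop back to zero.
 */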
int DRM(rmctx)( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;
	drm_ctx_t	ctx;
	drm_queue_t	*q;
	drm_buf_t	*buf;

	DRM_COPY_FROM_USER_IOCTL( ctx, (drm_ctx_t *)data, sizeof(ctx) );

	DRM_DEBUG("%d\n", ctx.handle);

	if (ctx.handle < 0 || ctx.handle >= dev->queue_count) return DRM_ERR(EINVAL);
	q = dev->queuelist[ctx.handle];

	atomic_inc(&q->use_count);
	if (atomic_read(&q->use_count) == 1) {
				/* No longer in use */
		atomic_dec(&q->use_count);
		return DRM_ERR(EINVAL);
	}

	atomic_inc(&q->finalization); /* Mark queue in finalization state */
	atomic_sub(2, &q->use_count); /* Mark queue as unused (pending
					 finalization) */

	while (test_and_set_bit(0, &dev->interrupt_flag)) {
		static int never;
		int retcode;
		retcode = tsleep(&never, PZERO|PCATCH, "never", 1);
		if (retcode)
			return retcode;
	}
				/* Remove queued buffers */
	while ((buf = DRM(waitlist_get)(&q->waitlist))) {
		DRM(free_buffer)(dev, buf);
	}
	clear_bit(0, &dev->interrupt_flag);

				/* Wakeup blocked processes */
	wakeup( &q->block_read );
	wakeup( &q->block_write );
	DRM_WAKEUP_INT( &q->flush_queue );
				/* Finalization over.  Queue is made
				   available when both use_count and
				   finalization become 0, which won't
				   happen until all the waiting processes
				   stop waiting. */
	atomic_dec(&q->finalization);
	return 0;
}

#endif /* __HAVE_CTX_BITMAP */