path: root/linux-core/i810_drm.h
blob: 93775f68ab927823040e9b2ac4e622db3258f145 (plain)
#ifndef _I810_DRM_H_
#define _I810_DRM_H_

/* WARNING: These defines must be the same as what the Xserver uses.
 * If you change them, you must change the defines in the Xserver.
 */

#ifndef _I810_DEFINES_
#define _I810_DEFINES_

#define I810_DMA_BUF_ORDER		12
#define I810_DMA_BUF_SZ 		(1<<I810_DMA_BUF_ORDER)
#define I810_DMA_BUF_NR 		256
#define I810_NR_SAREA_CLIPRECTS 	8

/* Each region is a minimum of 64k, and there are at most 64 of them.
 */
#define I810_NR_TEX_REGIONS 64
#define I810_LOG_MIN_TEX_REGION_SIZE 16
#endif
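
/* A quick check of the sizes implied by the defines above: each DMA
 * buffer is 1 << I810_DMA_BUF_ORDER = 4096 bytes, and the smallest
 * texture region is 1 << I810_LOG_MIN_TEX_REGION_SIZE = 64 KiB.  The
 * sketch below is not part of the original header; it only illustrates
 * how a client could grow the per-region shift so that a texture heap
 * of arbitrary size still fits in at most I810_NR_TEX_REGIONS regions.
 */
#if 0 /* illustrative sketch only */
static int i810_tex_region_shift(unsigned long heap_size)
{
	int shift = I810_LOG_MIN_TEX_REGION_SIZE;	/* 64k minimum */

	while ((heap_size >> shift) > I810_NR_TEX_REGIONS)
		shift++;				/* double the region size */
	return shift;
}
#endif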

#define I810_UPLOAD_TEX0IMAGE  0x1 /* handled clientside */
#define I810_UPLOAD_TEX1IMAGE  0x2 /* handled clientside */
#define I810_UPLOAD_CTX        0x4
#define I810_UPLOAD_BUFFERS    0x8
#define I810_UPLOAD_TEX0       0x10
#define I810_UPLOAD_TEX1       0x20
#define I810_UPLOAD_CLIPRECTS  0x40


/* Indices into buf.Setup where various bits of state are mirrored per
 * context and per buffer.  These can be fired at the card as a unit,
 * or in a piecewise fashion as required.
 */

/* Destbuffer state 
 *    - backbuffer linear offset and pitch -- invariant in the current dri
 *    - zbuffer linear offset and pitch -- also invariant
 *    - drawing origin in back and depth buffers.
 *
 * Keep the depth/back buffer state here to accommodate private buffers
 * in the future.
 */
#define I810_DESTREG_DI0  0	/* CMD_OP_DESTBUFFER_INFO (2 dwords) */
#define I810_DESTREG_DI1  1
#define I810_DESTREG_DV0  2	/* GFX_OP_DESTBUFFER_VARS (2 dwords) */
#define I810_DESTREG_DV1  3
#define I810_DESTREG_DR0  4	/* GFX_OP_DRAWRECT_INFO (4 dwords) */
#define I810_DESTREG_DR1  5
#define I810_DESTREG_DR2  6
#define I810_DESTREG_DR3  7
#define I810_DESTREG_DR4  8
#define I810_DEST_SETUP_SIZE 10
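
/* A sketch (not from the original header) of how a client might mirror
 * the destination-buffer block into the shared-area BufferState array
 * (drm_i810_sarea_t, defined further down) using the indices above, and
 * then flag it for upload.  The build_* helpers are hypothetical
 * stand-ins for the actual CMD_OP_DESTBUFFER_INFO / GFX_OP_* dword
 * encodings, which live in the 3D client driver, not in this file.
 */
#if 0 /* illustrative sketch only */
static void i810_mirror_dest_state(drm_i810_sarea_t *sarea)
{
	unsigned int *dest = sarea->BufferState;

	dest[I810_DESTREG_DI0] = build_destbuffer_info_cmd();	/* hypothetical */
	dest[I810_DESTREG_DI1] = build_destbuffer_info_addr();	/* hypothetical */
	dest[I810_DESTREG_DV0] = build_destbuffer_vars_cmd();	/* hypothetical */
	dest[I810_DESTREG_DV1] = build_destbuffer_vars_arg();	/* hypothetical */
	/* DR0..DR4 carry GFX_OP_DRAWRECT_INFO in the same way. */

	sarea->dirty |= I810_UPLOAD_BUFFERS;	/* re-emitted on next dispatch */
}
#endif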

/* Context state
 */
#define I810_CTXREG_CF0   0	/* GFX_OP_COLOR_FACTOR */
#define I810_CTXREG_CF1   1	
#define I810_CTXREG_ST0   2     /* GFX_OP_STIPPLE */
#define I810_CTXREG_ST1   3
#define I810_CTXREG_VF    4	/* GFX_OP_VERTEX_FMT */
#define I810_CTXREG_MT    5	/* GFX_OP_MAP_TEXELS */
#define I810_CTXREG_MC0   6	/* GFX_OP_MAP_COLOR_STAGES - stage 0 */
#define I810_CTXREG_MC1   7     /* GFX_OP_MAP_COLOR_STAGES - stage 1 */
#define I810_CTXREG_MC2   8	/* GFX_OP_MAP_COLOR_STAGES - stage 2 */
#define I810_CTXREG_MA0   9	/* GFX_OP_MAP_ALPHA_STAGES - stage 0 */
#define I810_CTXREG_MA1   10	/* GFX_OP_MAP_ALPHA_STAGES - stage 1 */
#define I810_CTXREG_MA2   11	/* GFX_OP_MAP_ALPHA_STAGES - stage 2 */
#define I810_CTXREG_SDM   12	/* GFX_OP_SRC_DEST_MONO */
#define I810_CTXREG_FOG   13	/* GFX_OP_FOG_COLOR */
#define I810_CTXREG_B1    14	/* GFX_OP_BOOL_1 */
#define I810_CTXREG_B2    15	/* GFX_OP_BOOL_2 */
#define I810_CTXREG_LCS   16	/* GFX_OP_LINEWIDTH_CULL_SHADE_MODE */
#define I810_CTXREG_PV    17	/* GFX_OP_PV_RULE -- Invariant! */
#define I810_CTXREG_ZA    18	/* GFX_OP_ZBIAS_ALPHAFUNC */
#define I810_CTXREG_AA    19	/* GFX_OP_ANTIALIAS */
#define I810_CTX_SETUP_SIZE 20 

/* Texture state (per tex unit)
 */
#define I810_TEXREG_MI0  0	/* GFX_OP_MAP_INFO (4 dwords) */
#define I810_TEXREG_MI1  1	
#define I810_TEXREG_MI2  2	
#define I810_TEXREG_MI3  3	
#define I810_TEXREG_MF   4	/* GFX_OP_MAP_FILTER */
#define I810_TEXREG_MLC  5	/* GFX_OP_MAP_LOD_CTL */
#define I810_TEXREG_MLL  6	/* GFX_OP_MAP_LOD_LIMITS */
#define I810_TEXREG_MCS  7	/* GFX_OP_MAP_COORD_SETS ??? */
#define I810_TEX_SETUP_SIZE 8

/* Flags for clear ioctl
 */
#define I810_FRONT   0x1
#define I810_BACK    0x2
#define I810_DEPTH   0x4

typedef enum _drm_i810_init_func {
	I810_INIT_DMA = 0x01,
	I810_CLEANUP_DMA = 0x02,
	I810_INIT_DMA_1_4 = 0x03
} drm_i810_init_func_t;

/* This is the init structure after v1.2 */
typedef struct _drm_i810_init {
	drm_i810_init_func_t func;
#if CONFIG_XFREE86_VERSION < XFREE86_VERSION(4,1,0,0)
	int ring_map_idx;
	int buffer_map_idx;
#else
	unsigned int mmio_offset;
	unsigned int buffers_offset;
#endif
	int sarea_priv_offset;
	unsigned int ring_start;
	unsigned int ring_end;
	unsigned int ring_size;
	unsigned int front_offset;
	unsigned int back_offset;
	unsigned int depth_offset;
	unsigned int overlay_offset;
	unsigned int overlay_physical;
	unsigned int w;
	unsigned int h;
	unsigned int pitch;
	unsigned int pitch_bits; 
} drm_i810_init_t;

/* This is the init structure prior to v1.2 */
typedef struct _drm_i810_pre12_init {
        drm_i810_init_func_t func;
#if CONFIG_XFREE86_VERSION < XFREE86_VERSION(4,1,0,0)
	int ring_map_idx;
	int buffer_map_idx;
#else
        unsigned int mmio_offset;
	unsigned int buffers_offset;
#endif
	int sarea_priv_offset;
	unsigned int ring_start;
	unsigned int ring_end;
	unsigned int ring_size;
	unsigned int front_offset;
	unsigned int back_offset;
	unsigned int depth_offset;
	unsigned int w;
	unsigned int h;
	unsigned int pitch;
	unsigned int pitch_bits; 
} drm_i810_pre12_init_t;

/* Warning: If you change the SAREA structure you must change the Xserver
 * structure as well */

typedef struct _drm_i810_tex_region {
	unsigned char next, prev; /* indices to form a circular LRU  */
	unsigned char in_use;	/* owned by a client, or free? */
	int age;		/* tracked by clients to update local LRUs */
} drm_i810_tex_region_t;

typedef struct _drm_i810_sarea {
   	unsigned int ContextState[I810_CTX_SETUP_SIZE];
   	unsigned int BufferState[I810_DEST_SETUP_SIZE];
   	unsigned int TexState[2][I810_TEX_SETUP_SIZE];
   	unsigned int dirty;

	unsigned int nbox;
	drm_clip_rect_t boxes[I810_NR_SAREA_CLIPRECTS];

	/* Maintain an LRU of contiguous regions of texture space.  If
	 * you think you own a region of texture memory, and it has an
	 * age different to the one you set, then you are mistaken and
	 * it has been stolen by another client.  If global texAge
	 * hasn't changed, there is no need to walk the list.
	 *
	 * These regions can be used as a proxy for the fine-grained
	 * texture information of other clients - by maintaining them
	 * in the same lru which is used to age their own textures,
	 * clients have an approximate lru for the whole of global
	 * texture space, and can make informed decisions as to which
	 * areas to kick out.  There is no need to choose whether to
	 * kick out your own texture or someone else's - simply eject
	 * them all in LRU order.  (A sketch of this client-side check
	 * follows the structure definition below.)
	 */
   
	drm_i810_tex_region_t texList[I810_NR_TEX_REGIONS+1]; 
				/* Last elt is sentinel */
        int texAge;		/* last time texture was uploaded */
        int last_enqueue;	/* last time a buffer was enqueued */
	int last_dispatch;	/* age of the most recently dispatched buffer */
	int last_quiescent;     /*  */
	int ctxOwner;		/* last context to upload state */

	int vertex_prim;

	int pf_enabled;               /* is pageflipping allowed? */
	int pf_active;
	int pf_current_page;	    /* which buffer is being displayed? */
} drm_i810_sarea_t;
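
/* A client-side sketch (not part of the original header) of the LRU
 * check described in the comment above: if the global texAge has moved
 * on, walk the regions we believe we own and drop any whose age no
 * longer matches the stamp we wrote when we claimed them.  The
 * my_region_age[] array and my_last_tex_age are hypothetical per-client
 * bookkeeping.
 */
#if 0 /* illustrative sketch only */
static void i810_revalidate_tex_regions(drm_i810_sarea_t *sarea,
					int *my_region_age,
					int *my_last_tex_age)
{
	int i;

	if (sarea->texAge == *my_last_tex_age)
		return;				/* nothing changed globally */

	for (i = 0; i < I810_NR_TEX_REGIONS; i++) {
		if (my_region_age[i] &&
		    sarea->texList[i].age != my_region_age[i]) {
			/* Region was stolen by another client; forget it
			 * and treat the textures it backed as evicted.
			 */
			my_region_age[i] = 0;
		}
	}
	*my_last_tex_age = sarea->texAge;
}
#endif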

/* WARNING: If you change any of these defines, make sure to change the
 * defines in the Xserver file (xf86drmMga.h)
 */

/* i810 specific ioctls
 * The device specific ioctl range is 0x40 to 0x79.
 */
#define DRM_IOCTL_I810_INIT		DRM_IOW( 0x40, drm_i810_init_t)
#define DRM_IOCTL_I810_VERTEX		DRM_IOW( 0x41, drm_i810_vertex_t)
#define DRM_IOCTL_I810_CLEAR		DRM_IOW( 0x42, drm_i810_clear_t)
#define DRM_IOCTL_I810_FLUSH		DRM_IO(  0x43)
#define DRM_IOCTL_I810_GETAGE		DRM_IO(  0x44)
#define DRM_IOCTL_I810_GETBUF		DRM_IOWR(0x45, drm_i810_dma_t)
#define DRM_IOCTL_I810_SWAP		DRM_IO(  0x46)
#define DRM_IOCTL_I810_COPY		DRM_IOW( 0x47, drm_i810_copy_t)
#define DRM_IOCTL_I810_DOCOPY		DRM_IO(  0x48)
#define DRM_IOCTL_I810_OV0INFO		DRM_IOR( 0x49, drm_i810_overlay_t)
#define DRM_IOCTL_I810_FSTATUS		DRM_IO(  0x4a)
#define DRM_IOCTL_I810_OV0FLIP		DRM_IO(  0x4b)
#define DRM_IOCTL_I810_MC		DRM_IOW( 0x4c, drm_i810_mc_t)
#define DRM_IOCTL_I810_RSTATUS		DRM_IO(  0x4d)
#define DRM_IOCTL_I810_FLIP		DRM_IO(  0x4e)

typedef struct _drm_i810_clear {
	int clear_color;
	int clear_depth;
	int flags;
} drm_i810_clear_t;
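
/* A minimal user-space sketch (not part of the original header) of
 * driving the clear ioctl with the I810_FRONT/I810_BACK/I810_DEPTH
 * flags defined earlier.  It assumes <sys/ioctl.h>, an already opened
 * and initialised DRM file descriptor, and that the caller holds the
 * hardware lock.
 */
#if 0 /* illustrative sketch only */
static int i810_clear_buffers(int drm_fd, int color, int depth)
{
	drm_i810_clear_t clear;

	clear.clear_color = color;
	clear.clear_depth = depth;
	clear.flags = I810_FRONT | I810_BACK | I810_DEPTH;

	return ioctl(drm_fd, DRM_IOCTL_I810_CLEAR, &clear);	/* 0 on success */
}
#endif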

/* These may be placeholders if we have more cliprects than
 * I810_NR_SAREA_CLIPRECTS.  In that case, the client sets discard to
 * false, indicating that the buffer will be dispatched again with a
 * new set of cliprects.
 */
typedef struct _drm_i810_vertex {
   	int idx;		/* buffer index */
	int used;		/* nr bytes in use */
	int discard;		/* client is finished with the buffer? */
} drm_i810_vertex_t;
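
/* A sketch (not part of the original header) of the dispatch protocol
 * described above: when there are more cliprects than fit in the SAREA,
 * the same vertex buffer is submitted repeatedly with discard == 0, and
 * only the final submission sets discard so the kernel can recycle the
 * buffer.  upload_cliprects() is a hypothetical helper that copies the
 * next batch into sarea->boxes, sets sarea->nbox and marks
 * I810_UPLOAD_CLIPRECTS dirty; <sys/ioctl.h> and an open DRM fd are
 * assumed as in the previous sketch.
 */
#if 0 /* illustrative sketch only */
static int i810_dispatch_vertex(int drm_fd, drm_i810_sarea_t *sarea,
				int buf_idx, int used,
				const drm_clip_rect_t *rects, int nrects)
{
	drm_i810_vertex_t vertex;
	int i;

	for (i = 0; i < nrects; i += I810_NR_SAREA_CLIPRECTS) {
		int n = nrects - i;

		if (n > I810_NR_SAREA_CLIPRECTS)
			n = I810_NR_SAREA_CLIPRECTS;
		upload_cliprects(sarea, &rects[i], n);	/* hypothetical */

		vertex.idx = buf_idx;
		vertex.used = used;
		vertex.discard = (i + n >= nrects);	/* last batch frees the buffer */
		if (ioctl(drm_fd, DRM_IOCTL_I810_VERTEX, &vertex))
			return -1;
	}
	return 0;
}
#endif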

typedef struct _drm_i810_copy_t {
   	int idx;		/* buffer index */
	int used;		/* nr bytes in use */
	void *address;		/* Address to copy from */
} drm_i810_copy_t;

#define PR_TRIANGLES         (0x0<<18)
#define PR_TRISTRIP_0        (0x1<<18)
#define PR_TRISTRIP_1        (0x2<<18)
#define PR_TRIFAN            (0x3<<18)
#define PR_POLYGON           (0x4<<18)
#define PR_LINES             (0x5<<18)
#define PR_LINESTRIP         (0x6<<18)
#define PR_RECTS             (0x7<<18)
#define PR_MASK              (0x7<<18)


typedef struct drm_i810_dma {
	void *virtual;
	int request_idx;
	int request_size;
	int granted;
} drm_i810_dma_t;
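
/* A sketch (not part of the original header) of requesting a DMA buffer
 * with DRM_IOCTL_I810_GETBUF: the kernel fills in the buffer index, its
 * size and a user-space mapping, and `granted' reports whether a free
 * buffer was actually available.  <sys/ioctl.h> and an open DRM fd are
 * assumed.
 */
#if 0 /* illustrative sketch only */
static int i810_get_buffer(int drm_fd, drm_i810_dma_t *dma)
{
	dma->granted = 0;
	if (ioctl(drm_fd, DRM_IOCTL_I810_GETBUF, dma))
		return -1;		/* ioctl failed */
	if (!dma->granted)
		return -1;		/* no free buffer right now */
	/* dma->virtual now points at dma->request_size bytes of buffer
	 * dma->request_idx, ready to be filled with vertex data.
	 */
	return 0;
}
#endif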

typedef struct _drm_i810_overlay_t {
	unsigned int offset;    /* Address of the Overlay Regs */
	unsigned int physical;
} drm_i810_overlay_t;

typedef struct _drm_i810_mc {
	int idx;                /* buffer index */
	int used;               /* nr bytes in use */
	int num_blocks;         /* number of GFXBlocks */
	int *length;            /* List of lengths for GFXBlocks (FUTURE)*/
	unsigned int last_render; /* Last Render Request */
} drm_i810_mc_t;


#endif /* _I810_DRM_H_ */