path: root/shared/via_drm.h
blob: da80aa92b9dd85ff355cacdc1c745539c20509e9
/*
 * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved.
 * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#ifndef _VIA_DRM_H_
#define _VIA_DRM_H_

/* WARNING: These defines must be the same as what the Xserver uses.
 * If you change them, you must also change the corresponding defines in
 * the Xserver.
 */

#ifndef _VIA_DEFINES_
#define _VIA_DEFINES_

#define VIA_DMA_BUF_ORDER		12
#define VIA_DMA_BUF_SZ			(1 << VIA_DMA_BUF_ORDER)	/* 4kB per buffer */
#define VIA_DMA_BUF_NR			256
#define VIA_NR_SAREA_CLIPRECTS 		8
#define VIA_NR_XVMC_LOCKS               2
#define VIA_MAX_CACHELINE_SIZE          64
#define XVMCLOCKPTR(saPriv,lockNo)					\
        ((volatile int *)(((((unsigned long) (saPriv)->XvMCLockArea) +	\
                            (VIA_MAX_CACHELINE_SIZE - 1)) &             \
                           ~(VIA_MAX_CACHELINE_SIZE - 1)) +             \
                          VIA_MAX_CACHELINE_SIZE*(lockNo)))

/* Each texture region is a minimum of 64kB (1 << VIA_LOG_MIN_TEX_REGION_SIZE
 * bytes), and there are at most 64 of them.
 */
#define VIA_NR_TEX_REGIONS 64
#define VIA_LOG_MIN_TEX_REGION_SIZE 16
#endif

#define VIA_UPLOAD_TEX0IMAGE  0x1 /* handled client-side */
#define VIA_UPLOAD_TEX1IMAGE  0x2 /* handled client-side */
#define VIA_UPLOAD_CTX        0x4
#define VIA_UPLOAD_BUFFERS    0x8
#define VIA_UPLOAD_TEX0       0x10
#define VIA_UPLOAD_TEX1       0x20
#define VIA_UPLOAD_CLIPRECTS  0x40
#define VIA_UPLOAD_ALL        0xff
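
/*
 * Example (editor's sketch, not part of the original interface): the upload
 * flags above are OR'ed into the "dirty" word of the shared SAREA
 * (drm_via_sarea_t, defined below) to tell the 3D client which pieces of
 * state must be re-emitted to the hardware.  The helper names and the exact
 * re-emission policy are illustrative assumptions only.
 */
#if 0	/* illustrative only */
static void via_example_mark_state_dirty(drm_via_sarea_t *sarea)
{
        /* Request a full context + cliprect re-emit before the next draw. */
        sarea->dirty |= VIA_UPLOAD_CTX | VIA_UPLOAD_CLIPRECTS;
}

static int via_example_all_clean(const drm_via_sarea_t *sarea)
{
        /* Non-zero when no state needs to be re-emitted. */
        return (sarea->dirty & VIA_UPLOAD_ALL) == 0;
}
#endif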

/* VIA specific ioctls */
#define DRM_IOCTL_VIA_ALLOCMEM	DRM_IOWR(0x40, drm_via_mem_t)
#define DRM_IOCTL_VIA_FREEMEM	DRM_IOW(0x41, drm_via_mem_t)
#define DRM_IOCTL_VIA_AGP_INIT	DRM_IOWR(0x42, drm_via_agp_t)
#define DRM_IOCTL_VIA_FB_INIT	DRM_IOWR(0x43, drm_via_fb_t)
#define DRM_IOCTL_VIA_MAP_INIT	DRM_IOWR(0x44, drm_via_init_t)
#define DRM_IOCTL_VIA_DEC_FUTEX DRM_IOW(0x45, drm_via_futex_t)
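
/*
 * Example (editor's sketch): how a userspace client might call the ALLOCMEM
 * ioctl declared above.  Assumes "fd" is an open DRM device node; the helper
 * name, headers and error handling are illustrative assumptions, not part of
 * this interface.
 */
#if 0	/* illustrative only */
#include <string.h>
#include <errno.h>
#include <sys/ioctl.h>

static int via_example_alloc_video(int fd, unsigned int context,
                                   unsigned int size, drm_via_mem_t *out)
{
        drm_via_mem_t mem;

        memset(&mem, 0, sizeof(mem));
        mem.context = context;
        mem.type = VIDEO;               /* VIDEO or AGP, defined below */
        mem.size = size;

        if (ioctl(fd, DRM_IOCTL_VIA_ALLOCMEM, &mem) != 0)
                return -errno;

        *out = mem;     /* index and offset are filled in by the kernel */
        return 0;
}
#endif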

/* Indices into buf.Setup where various bits of state are mirrored per
 * context and per buffer.  These can be fired at the card as a unit,
 * or in a piecewise fashion as required.
 */
 
#define VIA_TEX_SETUP_SIZE 8

/* Flags for clear ioctl
 */
#define VIA_FRONT   0x1
#define VIA_BACK    0x2
#define VIA_DEPTH   0x4
#define VIA_STENCIL 0x8

/* Memory types for drm_via_mem_t.type (video RAM vs. AGP memory). */
#define VIDEO 0
#define AGP 1

typedef struct {
        unsigned int offset;
        unsigned int size;
} drm_via_agp_t;    

typedef struct {
        unsigned int offset;
        unsigned int size;
} drm_via_fb_t;    

typedef struct {
        unsigned int context;
        unsigned int type;
        unsigned int size;
        unsigned long index;
        unsigned long offset;
} drm_via_mem_t;    

typedef struct _drm_via_init {
        enum {
                VIA_INIT_MAP = 0x01,
                VIA_CLEANUP_MAP = 0x02
        } func;

        unsigned long sarea_priv_offset;
        unsigned long fb_offset;
        unsigned long mmio_offset;
        unsigned long agpAddr;
} drm_via_init_t;

typedef struct _drm_via_futex {
        enum {
                VIA_FUTEX_WAIT = 0x00,
                VIA_FUTEX_WAKE = 0x01
        } fut;
        unsigned int op;
        unsigned int ms;
        unsigned int lock;
        unsigned int val;
} drm_via_futex_t;
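
/*
 * Example (editor's sketch): a userspace wrapper around the decoder futex
 * ioctl declared above.  The interpretation of the fields (wait until the
 * lock's value moves away from "val", or until "ms" milliseconds have
 * passed) is an assumption based on how similar DRM decoder locks behave,
 * not something this header guarantees.
 */
#if 0	/* illustrative only */
#include <errno.h>
#include <sys/ioctl.h>

static int via_example_decoder_wait(int fd, unsigned int lock,
                                    unsigned int val, unsigned int ms)
{
        drm_via_futex_t fx = {0};

        fx.fut = VIA_FUTEX_WAIT;
        fx.lock = lock;   /* one of the VIA_NR_XVMC_LOCKS decoder locks */
        fx.val = val;
        fx.ms = ms;
        /* .op is left at zero; its role is not documented in this header. */

        return ioctl(fd, DRM_IOCTL_VIA_DEC_FUTEX, &fx) ? -errno : 0;
}
#endif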

/* Warning: If you change the SAREA structure, you must change the Xserver
 * structure as well.
 */

typedef struct _drm_via_tex_region {
        unsigned char next, prev;	/* indices to form a circular LRU  */
        unsigned char inUse;	/* owned by a client, or free? */
        int age;			/* tracked by clients to update local LRUs */
} drm_via_tex_region_t;
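
/*
 * Example (editor's sketch): the next/prev fields above are array indices
 * into drm_via_sarea_t::texList (defined below), forming a doubly linked,
 * circular LRU list.  The assumption that the extra element,
 * texList[VIA_NR_TEX_REGIONS], acts as the list head/sentinel follows the
 * convention of other DRM drivers of this era and is not stated by this
 * header itself.
 */
#if 0	/* illustrative only */
static void via_example_mark_mru(drm_via_tex_region_t *list, unsigned char idx)
{
        const unsigned char head = VIA_NR_TEX_REGIONS;  /* assumed sentinel */

        /* Unlink the region from its current position... */
        list[list[idx].prev].next = list[idx].next;
        list[list[idx].next].prev = list[idx].prev;

        /* ...and re-insert it right after the head (most recently used). */
        list[idx].prev = head;
        list[idx].next = list[head].next;
        list[list[head].next].prev = idx;
        list[head].next = idx;
}
#endif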

typedef struct _drm_via_sarea {
        unsigned int dirty;
        unsigned int nbox;
        drm_clip_rect_t boxes[VIA_NR_SAREA_CLIPRECTS];   
        drm_via_tex_region_t texList[VIA_NR_TEX_REGIONS + 1]; 
        int texAge;			/* last time texture was uploaded */
        int ctxOwner;		/* last context to upload state */
        int vertexPrim;

        /*
         * Below is for XvMC.
         */

        unsigned int XvMCSubPicOn;         /* Subpicture displaying flag */
        unsigned int XvMCDisplaying;       /* Surface displaying flag */
        unsigned int XvMCCtxNoGrabbed;     /* Last context to hold decoder */

        /*
         * We want each lock integer alone on, and aligned to, its own cache
         * line; hence this somewhat odd construct.  Use the XVMCLOCKPTR()
         * macro above to obtain a pointer to an individual lock (see the
         * usage sketch after this structure).
         */

        char XvMCLockArea[VIA_MAX_CACHELINE_SIZE * (VIA_NR_XVMC_LOCKS + 1)];

} drm_via_sarea_t;
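
/*
 * Example (editor's sketch): obtaining a cache-line-aligned pointer to one
 * of the XvMC decoder locks inside the SAREA.  XVMCLOCKPTR() rounds the
 * start of XvMCLockArea up to the next VIA_MAX_CACHELINE_SIZE boundary and
 * then indexes whole cache lines, so each lock integer sits alone on its
 * own line.  How the lock value itself is interpreted is outside the scope
 * of this header.
 */
#if 0	/* illustrative only */
static volatile int *via_example_xvmc_lock(drm_via_sarea_t *sarea, int lockNo)
{
        /* lockNo must be less than VIA_NR_XVMC_LOCKS. */
        return XVMCLOCKPTR(sarea, lockNo);
}
#endif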


typedef struct _drm_via_flush_agp {
        unsigned int offset;
        unsigned int size;
        unsigned int index;		
        int discard;	/* client is finished with the buffer? */
} drm_via_flush_agp_t;

typedef struct _drm_via_flush_sys {
        unsigned int offset;
        unsigned int size;
        unsigned long index;		
        int discard;	/* client is finished with the buffer? */
} drm_via_flush_sys_t;

#ifdef __KERNEL__

int via_fb_init( DRM_IOCTL_ARGS );		
int via_mem_alloc( DRM_IOCTL_ARGS );				
int via_mem_free( DRM_IOCTL_ARGS );		
int via_agp_init( DRM_IOCTL_ARGS );				
int via_map_init( DRM_IOCTL_ARGS );				
int via_decoder_futex( DRM_IOCTL_ARGS ); 

#endif
#endif /* _VIA_DRM_H_ */
ass="hl com"> * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas. * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. */ #include "drmP.h" #include "drm_core.h" static void __exit drm_cleanup(drm_device_t * dev); int drm_fb_loaded = 0; static int drm_version(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); /** Ioctl table */ drm_ioctl_desc_t drm_ioctls[] = { [DRM_IOCTL_NR(DRM_IOCTL_VERSION)] = {drm_version, 0, 0, 0}, [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = {drm_getunique, 0, 0, 0}, [DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)] = {drm_getmagic, 0, 0, 0}, [DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)] = {drm_irq_by_busid, 0, 1, 1}, [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP)] = {drm_getmap, 0, 0, 0}, [DRM_IOCTL_NR(DRM_IOCTL_GET_CLIENT)] = {drm_getclient, 0, 0, 0}, [DRM_IOCTL_NR(DRM_IOCTL_GET_STATS)] = {drm_getstats, 0, 0, 0}, [DRM_IOCTL_NR(DRM_IOCTL_SET_VERSION)] = {drm_setversion, 0, 1, 1}, [DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = {drm_setunique, 1, 1, 1}, [DRM_IOCTL_NR(DRM_IOCTL_BLOCK)] = {drm_noop, 1, 1, 1}, [DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = {drm_noop, 1, 1, 1}, [DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = {drm_authmagic, 1, 1, 1}, [DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = {drm_addmap_ioctl, 1, 1, 1}, [DRM_IOCTL_NR(DRM_IOCTL_RM_MAP)] = {drm_rmmap_ioctl, 1, 0, 0}, [DRM_IOCTL_NR(DRM_IOCTL_SET_SAREA_CTX)] = {drm_setsareactx, 1, 1, 1}, [DRM_IOCTL_NR(DRM_IOCTL_GET_SAREA_CTX)] = {drm_getsareactx, 1, 0, 0}, [DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = {drm_addctx, 1, 1, 1}, [DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = {drm_rmctx, 1, 1, 1}, [DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = {drm_modctx, 1, 1, 1}, [DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] = {drm_getctx, 1, 0, 0}, [DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = {drm_switchctx, 1, 1, 1}, [DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)] = {drm_newctx, 1, 1, 1}, [DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)] = {drm_resctx, 1, 0, 0}, [DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)] = {drm_adddraw, 1, 1, 1}, [DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)] = {drm_rmdraw, 1, 1, 1}, [DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = {drm_lock, 1, 0, 0}, [DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = {drm_unlock, 1, 0, 0}, [DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = {drm_noop, 1, 0, 0}, [DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)] = {drm_addbufs, 1, 1, 1}, [DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS)] = {drm_markbufs, 1, 1, 1}, [DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)] = {drm_infobufs, 1, 0, 0}, [DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS)] = {drm_mapbufs, 1, 0, 0}, [DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)] = 
{drm_freebufs, 1, 0, 0}, /* The DRM_IOCTL_DMA ioctl should be defined by the driver. */ [DRM_IOCTL_NR(DRM_IOCTL_DMA)] = {NULL, 1, 0, 0}, [DRM_IOCTL_NR(DRM_IOCTL_CONTROL)] = {drm_control, 1, 1, 1}, #if __OS_HAS_AGP [DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)] = {drm_agp_acquire_ioctl, 1, 1, 1}, [DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)] = {drm_agp_release_ioctl, 1, 1, 1}, [DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)] = {drm_agp_enable_ioctl, 1, 1, 1}, [DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)] = {drm_agp_info_ioctl, 1, 0, 0}, [DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)] = {drm_agp_alloc, 1, 1, 1}, [DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)] = {drm_agp_free, 1, 1, 1}, [DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)] = {drm_agp_bind, 1, 1, 1}, [DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)] = {drm_agp_unbind, 1, 1, 1}, #endif [DRM_IOCTL_NR(DRM_IOCTL_SG_ALLOC)] = {drm_sg_alloc, 1, 1, 1}, [DRM_IOCTL_NR(DRM_IOCTL_SG_FREE)] = {drm_sg_free, 1, 1, 1}, [DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK)] = {drm_wait_vblank, 0, 0, 0}, }; #define DRIVER_IOCTL_COUNT DRM_ARRAY_SIZE( drm_ioctls ) /** * Take down the DRM device. * * \param dev DRM device structure. * * Frees every resource in \p dev. * * \sa drm_device */ int drm_lastclose(drm_device_t * dev) { drm_magic_entry_t *pt, *next; drm_map_list_t *r_list; drm_vma_entry_t *vma, *vma_next; int i; DRM_DEBUG("\n"); if (dev->driver->lastclose) dev->driver->lastclose(dev); DRM_DEBUG("driver lastclose completed\n"); if (dev->unique) { drm_free(dev->unique, strlen(dev->unique) + 1, DRM_MEM_DRIVER); dev->unique=NULL; dev->unique_len=0; } if (dev->irq_enabled) drm_irq_uninstall(dev); down(&dev->struct_sem); del_timer(&dev->timer); if (dev->unique) { drm_free(dev->unique, strlen(dev->unique) + 1, DRM_MEM_DRIVER); dev->unique = NULL; dev->unique_len = 0; } /* Clear pid list */ for (i = 0; i < DRM_HASH_SIZE; i++) { for (pt = dev->magiclist[i].head; pt; pt = next) { next = pt->next; drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC); } dev->magiclist[i].head = dev->magiclist[i].tail = NULL; } /* Clear AGP information */ if (drm_core_has_AGP(dev) && dev->agp) { drm_agp_mem_t *entry; drm_agp_mem_t *nexte; /* Remove AGP resources, but leave dev->agp intact until drv_cleanup is called. 
*/ for (entry = dev->agp->memory; entry; entry = nexte) { nexte = entry->next; if (entry->bound) drm_unbind_agp(entry->memory); drm_free_agp(entry->memory, entry->pages); drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS); } dev->agp->memory = NULL; if (dev->agp->acquired) drm_agp_release(dev); dev->agp->acquired = 0; dev->agp->enabled = 0; } if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg) { drm_sg_cleanup(dev->sg); dev->sg = NULL; } /* Clear vma list (only built for debugging) */ if (dev->vmalist) { for (vma = dev->vmalist; vma; vma = vma_next) { vma_next = vma->next; drm_free(vma, sizeof(*vma), DRM_MEM_VMAS); } dev->vmalist = NULL; } if (dev->maplist) { while (!list_empty(&dev->maplist->head)) { struct list_head *list = dev->maplist->head.next; r_list = list_entry(list, drm_map_list_t, head); drm_rmmap_locked(dev, r_list->map); } } if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE) && dev->queuelist) { for (i = 0; i < dev->queue_count; i++) { if (dev->queuelist[i]) { drm_free(dev->queuelist[i], sizeof(*dev->queuelist[0]), DRM_MEM_QUEUES); dev->queuelist[i] = NULL; } } drm_free(dev->queuelist, dev->queue_slots * sizeof(*dev->queuelist), DRM_MEM_QUEUES); dev->queuelist = NULL; } dev->queue_count = 0; if (drm_core_check_feature(dev, DRIVER_HAVE_DMA)) drm_dma_takedown(dev); if (dev->lock.hw_lock) { dev->sigdata.lock = dev->lock.hw_lock = NULL; /* SHM removed */ dev->lock.filp = NULL; wake_up_interruptible(&dev->lock.lock_queue); } up(&dev->struct_sem); DRM_DEBUG("lastclose completed\n"); return 0; } void __exit drm_cleanup_pci(struct pci_dev *pdev) { drm_device_t *dev = pci_get_drvdata(pdev); pci_set_drvdata(pdev, NULL); pci_release_regions(pdev); if (dev) drm_cleanup(dev); } EXPORT_SYMBOL(drm_cleanup_pci); /** * Module initialization. Called via init_module at module load time, or via * linux/init/main.c (this is not currently supported). * * \return zero on success or a negative number on failure. * * Initializes an array of drm_device structures, and attempts to * initialize all available devices, using consecutive minors, registering the * stubs and initializing the AGP device. * * Expands the \c DRIVER_PREINIT and \c DRIVER_POST_INIT macros before and * after the initialization for driver customization. */ int drm_init(struct drm_driver *driver, struct pci_device_id *pciidlist) { struct pci_dev *pdev; struct pci_device_id *pid; int rc, i; DRM_DEBUG("\n"); for (i = 0; (pciidlist[i].vendor != 0) && !drm_fb_loaded; i++) { pid = &pciidlist[i]; pdev = NULL; /* pass back in pdev to account for multiple identical cards */ while ((pdev = pci_get_subsys(pid->vendor, pid->device, pid->subvendor, pid->subdevice, pdev))) { /* is there already a driver loaded, or (short circuit saves work) */ /* does something like VesaFB have control of the memory region? 
*/ if (pci_dev_driver(pdev) || pci_request_regions(pdev, "DRM scan")) { /* go into stealth mode */ drm_fb_loaded = 1; pci_dev_put(pdev); break; } /* no fbdev or vesadev, put things back and wait for normal probe */ pci_release_regions(pdev); } } if (!drm_fb_loaded) pci_register_driver(&driver->pci_driver); else { for (i = 0; pciidlist[i].vendor != 0; i++) { pid = &pciidlist[i]; pdev = NULL; /* pass back in pdev to account for multiple identical cards */ while ((pdev = pci_get_subsys(pid->vendor, pid->device, pid->subvendor, pid->subdevice, pdev))) { /* stealth mode requires a manual probe */ pci_dev_get(pdev); if ((rc = drm_get_dev(pdev, &pciidlist[i], driver))) { pci_dev_put(pdev); return rc; } } } DRM_INFO("Used old pci detect: framebuffer loaded\n"); } return 0; } EXPORT_SYMBOL(drm_init); /** * Called via cleanup_module() at module unload time. * * Cleans up all DRM device, calling drm_lastclose(). * * \sa drm_init */ static void __exit drm_cleanup(drm_device_t * dev) { DRM_DEBUG("\n"); if (!dev) { DRM_ERROR("cleanup called no dev\n"); return; } drm_lastclose(dev); if (dev->maplist) { drm_free(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS); dev->maplist = NULL; } if (!drm_fb_loaded) pci_disable_device(dev->pdev); drm_ctxbitmap_cleanup(dev); if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) && dev->agp && dev->agp->agp_mtrr >= 0) { int retval; retval = mtrr_del(dev->agp->agp_mtrr, dev->agp->agp_info.aper_base, dev->agp->agp_info.aper_size * 1024 * 1024); DRM_DEBUG("mtrr_del=%d\n", retval); } if (drm_core_has_AGP(dev) && dev->agp) { drm_free(dev->agp, sizeof(*dev->agp), DRM_MEM_AGPLISTS); dev->agp = NULL; } if (dev->driver->unload) dev->driver->unload(dev); drm_put_head(&dev->primary); if (drm_put_dev(dev)) DRM_ERROR("Cannot unload module\n"); } void __exit drm_exit(struct drm_driver *driver) { int i; drm_device_t *dev = NULL; drm_head_t *head; DRM_DEBUG("\n"); if (drm_fb_loaded) { for (i = 0; i < cards_limit; i++) { head = drm_heads[i]; if (!head) continue; if (!head->dev) continue; if (head->dev->driver != driver) continue; dev = head->dev; } if (dev) { /* release the pci driver */ if (dev->pdev) pci_dev_put(dev->pdev); drm_cleanup(dev); } } else pci_unregister_driver(&driver->pci_driver); DRM_INFO("Module unloaded\n"); } EXPORT_SYMBOL(drm_exit); /** File operations structure */ static struct file_operations drm_stub_fops = { .owner = THIS_MODULE, .open = drm_stub_open }; static int __init drm_core_init(void) { int ret = -ENOMEM; cards_limit = (cards_limit < DRM_MAX_MINOR + 1 ? cards_limit : DRM_MAX_MINOR + 1); drm_heads = drm_calloc(cards_limit, sizeof(*drm_heads), DRM_MEM_STUB); if (!drm_heads) goto err_p1; if (register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops)) goto err_p1; drm_class = drm_sysfs_create(THIS_MODULE, "drm"); if (IS_ERR(drm_class)) { printk(KERN_ERR "DRM: Error creating drm class.\n"); ret = PTR_ERR(drm_class); goto err_p2; } drm_proc_root = create_proc_entry("dri", S_IFDIR, NULL);