/* drm_drv.h -- Generic driver template -*- linux-c -*- * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com * * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas. * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: * Rickard E. (Rik) Faith * Gareth Hughes */ /* * To use this template, you must at least define the following (samples * given for the MGA driver): * * #define DRIVER_AUTHOR "VA Linux Systems, Inc." 
* * #define DRIVER_NAME "mga" * #define DRIVER_DESC "Matrox G200/G400" * #define DRIVER_DATE "20001127" * * #define DRIVER_MAJOR 2 * #define DRIVER_MINOR 0 * #define DRIVER_PATCHLEVEL 2 * * #define DRIVER_IOCTL_COUNT DRM_ARRAY_SIZE( mga_ioctls ) * * #define DRM(x) mga_##x */ #ifndef __MUST_HAVE_AGP #define __MUST_HAVE_AGP 0 #endif #ifndef __HAVE_CTX_BITMAP #define __HAVE_CTX_BITMAP 0 #endif #ifndef __HAVE_DMA_IRQ #define __HAVE_DMA_IRQ 0 #endif #ifndef __HAVE_DMA_QUEUE #define __HAVE_DMA_QUEUE 0 #endif #ifndef __HAVE_MULTIPLE_DMA_QUEUES #define __HAVE_MULTIPLE_DMA_QUEUES 0 #endif #ifndef __HAVE_DMA_SCHEDULE #define __HAVE_DMA_SCHEDULE 0 #endif #ifndef __HAVE_DMA_FLUSH #define __HAVE_DMA_FLUSH 0 #endif #ifndef __HAVE_DMA_READY #define __HAVE_DMA_READY 0 #endif #ifndef __HAVE_DMA_QUIESCENT #define __HAVE_DMA_QUIESCENT 0 #endif #ifndef __HAVE_RELEASE #define __HAVE_RELEASE 0 #endif #ifndef __HAVE_COUNTERS #define __HAVE_COUNTERS 0 #endif #ifndef __HAVE_SG #define __HAVE_SG 0 #endif #ifndef __HAVE_KERNEL_CTX_SWITCH #define __HAVE_KERNEL_CTX_SWITCH 0 #endif #ifndef __HAVE_DRIVER_FOPS_READ #define __HAVE_DRIVER_FOPS_READ 0 #endif #ifndef __HAVE_DRIVER_FOPS_POLL #define __HAVE_DRIVER_FOPS_POLL 0 #endif #ifndef DRIVER_PREINIT #define DRIVER_PREINIT() #endif #ifndef DRIVER_POSTINIT #define DRIVER_POSTINIT() #endif #ifndef DRIVER_PRERELEASE #define DRIVER_PRERELEASE() #endif #ifndef DRIVER_PRETAKEDOWN #define DRIVER_PRETAKEDOWN() #endif #ifndef DRIVER_POSTCLEANUP #define DRIVER_POSTCLEANUP() #endif #ifndef DRIVER_PRESETUP #define DRIVER_PRESETUP() #endif #ifndef DRIVER_POSTSETUP #define DRIVER_POSTSETUP() #endif #ifndef DRIVER_IOCTLS #define DRIVER_IOCTLS #endif #ifndef DRIVER_FOPS #define DRIVER_FOPS \ static struct file_operations DRM(fops) = { \ .owner = THIS_MODULE, \ .open = DRM(open), \ .flush = DRM(flush), \ .release = DRM(release), \ .ioctl = DRM(ioctl), \ .mmap = DRM(mmap), \ .fasync = DRM(fasync), \ .poll = DRM(poll), \ .read = DRM(read), \ } #endif #ifndef MODULE 
/* DRM(options) is called by the kernel to parse command-line options * passed via the boot-loader (e.g., LILO). It calls the insmod option * routine, drm_parse_drm. */ /* Use an additional macro to avoid preprocessor troubles */ #define DRM_OPTIONS_FUNC DRM(options) static int __init DRM(options)( char *str ) { DRM(parse_options)( str ); return 1; } __setup( DRIVER_NAME "=", DRM_OPTIONS_FUNC ); #undef DRM_OPTIONS_FUNC #endif /* * The default number of instances (minor numbers) to initialize. */ #ifndef DRIVER_NUM_CARDS #define DRIVER_NUM_CARDS 1 #endif static drm_device_t *DRM(device); static int *DRM(minor); static int DRM(numdevs) = 0; DRIVER_FOPS; static drm_ioctl_desc_t DRM(ioctls)[] = { [DRM_IOCTL_NR(DRM_IOCTL_VERSION)] = { DRM(version), 0, 0 }, [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = { DRM(getunique), 0, 0 }, [DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)] = { DRM(getmagic), 0, 0 }, [DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)] = { DRM(irq_busid), 0, 1 }, [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP)] = { DRM(getmap), 0, 0 }, [DRM_IOCTL_NR(DRM_IOCTL_GET_CLIENT)] = { DRM(getclient), 0, 0 }, [DRM_IOCTL_NR(DRM_IOCTL_GET_STATS)] = { DRM(getstats), 0, 0 }, [DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = { DRM(setunique), 1, 1 }, [DRM_IOCTL_NR(DRM_IOCTL_BLOCK)] = { DRM(noop), 1, 1 }, [DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = { DRM(noop), 1, 1 }, [DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = { DRM(authmagic), 1, 1 }, [DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = { DRM(addmap), 1, 1 }, [DRM_IOCTL_NR(DRM_IOCTL_RM_MAP)] = { DRM(rmmap), 1, 0 }, #if __HAVE_CTX_BITMAP [DRM_IOCTL_NR(DRM_IOCTL_SET_SAREA_CTX)] = { DRM(setsareactx), 1, 1 }, [DRM_IOCTL_NR(DRM_IOCTL_GET_SAREA_CTX)] = { DRM(getsareactx), 1, 0 }, #endif [DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = { DRM(addctx), 1, 1 }, [DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = { DRM(rmctx), 1, 1 }, [DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = { DRM(modctx), 1, 1 }, [DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] = { DRM(getctx), 1, 0 }, [DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = { DRM(switchctx), 1, 1 }, 
[DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)] = { DRM(newctx), 1, 1 }, [DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)] = { DRM(resctx), 1, 0 }, [DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)] = { DRM(adddraw), 1, 1 }, [DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)] = { DRM(rmdraw), 1, 1 }, [DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = { DRM(lock), 1, 0 }, [DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = { DRM(unlock), 1, 0 }, #if __HAVE_DMA_FLUSH /* Gamma only, really */ [DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = { DRM(finish), 1, 0 }, #else [DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = { DRM(noop), 1, 0 }, #endif #if __HAVE_DMA [DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)] = { DRM(addbufs), 1, 1 }, [DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS)] = { DRM(markbufs), 1, 1 }, [DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)] = { DRM(infobufs), 1, 0 }, [DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS)] = { DRM(mapbufs), 1, 0 }, [DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)] = { DRM(freebufs), 1, 0 }, /* The DRM_IOCTL_DMA ioctl should be defined by the driver. */ [DRM_IOCTL_NR(DRM_IOCTL_CONTROL)] = { DRM(control), 1, 1 }, #endif #if __REALLY_HAVE_AGP [DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)] = { DRM(agp_acquire), 1, 1 }, [DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)] = { DRM(agp_release), 1, 1 }, [DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)] = { DRM(agp_enable), 1, 1 }, [DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)] = { DRM(agp_info), 1, 0 }, [DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)] = { DRM(agp_alloc), 1, 1 }, [DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)] = { DRM(agp_free), 1, 1 }, [DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)] = { DRM(agp_bind), 1, 1 }, [DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)] = { DRM(agp_unbind), 1, 1 }, #endif #if __HAVE_SG [DRM_IOCTL_NR(DRM_IOCTL_SG_ALLOC)] = { DRM(sg_alloc), 1, 1 }, [DRM_IOCTL_NR(DRM_IOCTL_SG_FREE)] = { DRM(sg_free), 1, 1 }, #endif #if __HAVE_VBL_IRQ [DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK)] = { DRM(wait_vblank), 0, 0 }, #endif DRIVER_IOCTLS }; #define DRIVER_IOCTL_COUNT DRM_ARRAY_SIZE( DRM(ioctls) ) #ifdef MODULE static char *drm_opts = NULL; #endif MODULE_AUTHOR( DRIVER_AUTHOR ); MODULE_DESCRIPTION( DRIVER_DESC ); MODULE_PARM( drm_opts, "s" ); 
MODULE_LICENSE("GPL and additional rights"); static int DRM(setup)( drm_device_t *dev ) { int i; DRIVER_PRESETUP(); atomic_set( &dev->ioctl_count, 0 ); atomic_set( &dev->vma_count, 0 ); dev->buf_use = 0; atomic_set( &dev->buf_alloc, 0 ); #if __HAVE_DMA i = DRM(dma_setup)( dev ); if ( i < 0 ) return i; #endif dev->counters = 6 + __HAVE_COUNTERS; dev->types[0] = _DRM_STAT_LOCK; dev->types[1] = _DRM_STAT_OPENS; dev->types[2] = _DRM_STAT_CLOSES; dev->types[3] = _DRM_STAT_IOCTLS; dev->types[4] = _DRM_STAT_LOCKS; dev->types[5] = _DRM_STAT_UNLOCKS; #ifdef __HAVE_COUNTER6 dev->types[6] = __HAVE_COUNTER6; #endif #ifdef __HAVE_COUNTER7 dev->types[7] = __HAVE_COUNTER7; #endif #ifdef __HAVE_COUNTER8 dev->types[8] = __HAVE_COUNTER8; #endif #ifdef __HAVE_COUNTER9 dev->types[9] = __HAVE_COUNTER9; #endif #ifdef __HAVE_COUNTER10 dev->types[10] = __HAVE_COUNTER10; #endif #ifdef __HAVE_COUNTER11 dev->types[11] = __HAVE_COUNTER11; #endif #ifdef __HAVE_COUNTER12 dev->types[12] = __HAVE_COUNTER12; #endif #ifdef __HAVE_COUNTER13 dev->types[13] = __HAVE_COUNTER13; #endif #ifdef __HAVE_COUNTER14 dev->types[14] = __HAVE_COUNTER14; #endif #ifdef __HAVE_COUNTER15 dev->types[14] = __HAVE_COUNTER14; #endif for ( i = 0 ; i < DRM_ARRAY_SIZE(dev->counts) ; i++ ) atomic_set( &dev->counts[i], 0 ); for ( i = 0 ; i < DRM_HASH_SIZE ; i++ ) { dev->magiclist[i].head = NULL; dev->magiclist[i].tail = NULL; } dev->maplist = DRM(alloc)(sizeof(*dev->maplist), DRM_MEM_MAPS); if(dev->maplist == NULL) return -ENOMEM; memset(dev->maplist, 0, sizeof(*dev->maplist)); INIT_LIST_HEAD(&dev->maplist->head); dev->vmalist = NULL; dev->sigdata.lock = dev->lock.hw_lock = NULL; init_waitqueue_head( &dev->lock.lock_queue ); dev->queue_count = 0; dev->queue_reserved = 0; dev->queue_slots = 0; dev->queuelist = NULL; dev->irq = 0; dev->context_flag = 0; dev->interrupt_flag = 0; dev->dma_flag = 0; dev->last_context = 0; dev->last_switch = 0; dev->last_checked = 0; init_timer( &dev->timer ); init_waitqueue_head( 
&dev->context_wait ); dev->ctx_start = 0; dev->lck_start = 0; dev->buf_rp = dev->buf; dev->buf_wp = dev->buf; dev->buf_end = dev->buf + DRM_BSZ; dev->buf_async = NULL; init_waitqueue_head( &dev->buf_readers ); init_waitqueue_head( &dev->buf_writers ); DRM_DEBUG( "\n" ); /* The kernel's context could be created here, but is now created * in drm_dma_enqueue. This is more resource-efficient for * hardware that does not do DMA, but may mean that * drm_select_queue fails between the time the interrupt is * initialized and the time the queues are initialized. */ DRIVER_POSTSETUP(); return 0; } static int DRM(takedown)( drm_device_t *dev ) { drm_magic_entry_t *pt, *next; drm_map_t *map; drm_map_list_t *r_list; struct list_head *list, *list_next; drm_vma_entry_t *vma, *vma_next; int i; DRM_DEBUG( "\n" ); DRIVER_PRETAKEDOWN(); #if __HAVE_DMA_IRQ if ( dev->irq ) DRM(irq_uninstall)( dev ); #endif down( &dev->struct_sem ); del_timer( &dev->timer ); if ( dev->devname ) { DRM(free)( dev->devname, strlen( dev->devname ) + 1, DRM_MEM_DRIVER ); dev->devname = NULL; } if ( dev->unique ) { DRM(free)( dev->unique, strlen( dev->unique ) + 1, DRM_MEM_DRIVER ); dev->unique = NULL; dev->unique_len = 0; } /* Clear pid list */ for ( i = 0 ; i < DRM_HASH_SIZE ; i++ ) { for ( pt = dev->magiclist[i].head ; pt ; pt = next ) { next = pt->next; DRM(free)( pt, sizeof(*pt), DRM_MEM_MAGIC ); } dev->magiclist[i].head = dev->magiclist[i].tail = NULL; } #if __REALLY_HAVE_AGP /* Clear AGP information */ if ( dev->agp ) { drm_agp_mem_t *entry; drm_agp_mem_t *nexte; /* Remove AGP resources, but leave dev->agp intact until drv_cleanup is called. 
*/ for ( entry = dev->agp->memory ; entry ; entry = nexte ) { nexte = entry->next; if ( entry->bound ) DRM(unbind_agp)( entry->memory ); DRM(free_agp)( entry->memory, entry->pages ); DRM(free)( entry, sizeof(*entry), DRM_MEM_AGPLISTS ); } dev->agp->memory = NULL; if ( dev->agp->acquired ) DRM(agp_do_release)(); dev->agp->acquired = 0; dev->agp->enabled = 0; } #endif /* Clear vma list (only built for debugging) */ if ( dev->vmalist ) { for ( vma = dev->vmalist ; vma ; vma = vma_next ) { vma_next = vma->next; DRM(free)( vma, sizeof(*vma), DRM_MEM_VMAS ); } dev->vmalist = NULL; } if( dev->maplist ) { for(list = dev->maplist->head.next; list != &dev->maplist->head; list = list_next) { list_next = list->next; r_list = (drm_map_list_t *)list; map = r_list->map; DRM(free)(r_list, sizeof(*r_list), DRM_MEM_MAPS); if(!map) continue; switch ( map->type ) { case _DRM_REGISTERS: case _DRM_FRAME_BUFFER: #if __REALLY_HAVE_MTRR if ( map->mtrr >= 0 ) { int retcode; retcode = mtrr_del( map->mtrr, map->offset, map->size ); DRM_DEBUG( "mtrr_del=%d\n", retcode ); } #endif DRM(ioremapfree)( map->handle, map->size ); break; case _DRM_SHM: vfree(map->handle); break; case _DRM_AGP: /* Do nothing here, because this is all * handled in the AGP/GART driver. */ break; case _DRM_SCATTER_GATHER: /* Handle it, but do nothing, if HAVE_SG * isn't defined. 
*/ #if __HAVE_SG if(dev->sg) { DRM(sg_cleanup)(dev->sg); dev->sg = NULL; } #endif break; } DRM(free)(map, sizeof(*map), DRM_MEM_MAPS); } DRM(free)(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS); dev->maplist = NULL; } #if __HAVE_DMA_QUEUE || __HAVE_MULTIPLE_DMA_QUEUES if ( dev->queuelist ) { for ( i = 0 ; i < dev->queue_count ; i++ ) { #if __HAVE_DMA_WAITLIST DRM(waitlist_destroy)( &dev->queuelist[i]->waitlist ); #endif if ( dev->queuelist[i] ) { DRM(free)( dev->queuelist[i], sizeof(*dev->queuelist[0]), DRM_MEM_QUEUES ); dev->queuelist[i] = NULL; } } DRM(free)( dev->queuelist, dev->queue_slots * sizeof(*dev->queuelist), DRM_MEM_QUEUES ); dev->queuelist = NULL; } dev->queue_count = 0; #endif #if __HAVE_DMA DRM(dma_takedown)( dev ); #endif if ( dev->lock.hw_lock ) { dev->sigdata.lock = dev->lock.hw_lock = NULL; /* SHM removed */ dev->lock.filp = 0; wake_up_interruptible( &dev->lock.lock_queue ); } up( &dev->struct_sem ); return 0; } /* * Figure out how many instances to initialize. */ static int drm_count_cards(void) { int num = 0; #if defined(DRIVER_CARD_LIST) int i; drm_pci_list_t *l; u16 device, vendor; struct pci_dev *pdev = NULL; #endif DRM_DEBUG( "\n" ); #if defined(DRIVER_COUNT_CARDS) num = DRIVER_COUNT_CARDS(); #elif defined(DRIVER_CARD_LIST) for (i = 0, l = DRIVER_CARD_LIST; l[i].vendor != 0; i++) { pdev = NULL; vendor = l[i].vendor; device = l[i].device; if(device == 0xffff) device = PCI_ANY_ID; if(vendor == 0xffff) vendor = PCI_ANY_ID; while ((pdev = pci_find_device(vendor, device, pdev))) { num++; } } #else num = DRIVER_NUM_CARDS; #endif DRM_DEBUG("numdevs = %d\n", num); return num; } /* drm_init is called via init_module at module load time, or via * linux/init/main.c (this is not currently supported). */ static int __init drm_init( void ) { drm_device_t *dev; int i; #if __HAVE_CTX_BITMAP int retcode; #endif DRM_DEBUG( "\n" ); #ifdef MODULE DRM(parse_options)( drm_opts ); #endif DRM(numdevs) = drm_count_cards(); /* Force at least one instance. 
*/ if (DRM(numdevs) <= 0) DRM(numdevs) = 1; DRM(device) = kmalloc(sizeof(*DRM(device)) * DRM(numdevs), GFP_KERNEL); if (!DRM(device)) { return -ENOMEM; } DRM(minor) = kmalloc(sizeof(*DRM(minor)) * DRM(numdevs), GFP_KERNEL); if (!DRM(minor)) { kfree(DRM(device)); return -ENOMEM; } DRIVER_PREINIT(); DRM(mem_init)(); for (i = 0; i < DRM(numdevs); i++) { dev = &(DRM(device)[i]); memset( (void *)dev, 0, sizeof(*dev) ); dev->count_lock = SPIN_LOCK_UNLOCKED; sema_init( &dev->struct_sem, 1 ); if ((DRM(minor)[i] = DRM(stub_register)(DRIVER_NAME, &DRM(fops),dev)) < 0) return -EPERM; dev->device = MKDEV(DRM_MAJOR, DRM(minor)[i] ); dev->name = DRIVER_NAME; #if __REALLY_HAVE_AGP dev->agp = DRM(agp_init)(); #if __MUST_HAVE_AGP if ( dev->agp == NULL ) { DRM_ERROR( "Cannot initialize the agpgart module.\n" ); DRM(stub_unregister)(DRM(minor)[i]); DRM(takedown)( dev ); return -ENOMEM; } #endif #if __REALLY_HAVE_MTRR if (dev->agp) dev->agp->agp_mtrr = mtrr_add( dev->agp->agp_info.aper_base, dev->agp->agp_info.aper_size*1024*1024, MTRR_TYPE_WRCOMB, 1 ); #endif #endif #if __HAVE_CTX_BITMAP retcode = DRM(ctxbitmap_init)( dev ); if( retcode ) { DRM_ERROR( "Cannot allocate memory for context bitmap.\n" ); DRM(stub_unregister)(DRM(minor)[i]); DRM(takedown)( dev ); return retcode; } #endif DRM_INFO( "Initialized %s %d.%d.%d %s on minor %d\n", DRIVER_NAME, DRIVER_MAJOR, DRIVER_MINOR, DRIVER_PATCHLEVEL, DRIVER_DATE, DRM(minor)[i] ); } DRIVER_POSTINIT(); return 0; } /* drm_cleanup is called via cleanup_module at module unload time. 
*/ static void __exit drm_cleanup( void ) { drm_device_t *dev; int i; DRM_DEBUG( "\n" ); for (i = DRM(numdevs) - 1; i >= 0; i--) { dev = &(DRM(device)[i]); if ( DRM(stub_unregister)(DRM(minor)[i]) ) { DRM_ERROR( "Cannot unload module\n" ); } else { DRM_DEBUG("minor %d unregistered\n", DRM(minor)[i]); if (i == 0) { DRM_INFO( "Module unloaded\n" ); } } #if __HAVE_CTX_BITMAP DRM(ctxbitmap_cleanup)( dev ); #endif #if __REALLY_HAVE_AGP && __REALLY_HAVE_MTRR if ( dev->agp && dev->agp->agp_mtrr >= 0) { int retval; retval = mtrr_del( dev->agp->agp_mtrr, dev->agp->agp_info.aper_base, dev->agp->agp_info.aper_size*1024*1024 ); DRM_DEBUG( "mtrr_del=%d\n", retval ); } #endif DRM(takedown)( dev ); #if __REALLY_HAVE_AGP if ( dev->agp ) { DRM(agp_uninit)(); DRM(free)( dev->agp, sizeof(*dev->agp), DRM_MEM_AGPLISTS ); dev->agp = NULL; } #endif } DRIVER_POSTCLEANUP(); kfree(DRM(minor)); kfree(DRM(device)); DRM(numdevs) = 0; } module_init( drm_init ); module_exit( drm_cleanup ); int DRM(version)( struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg ) { drm_version_t version; int len; if ( copy_from_user( &version, (drm_version_t *)arg, sizeof(version) ) ) return -EFAULT; #define DRM_COPY( name, value ) \ len = strlen( value ); \ if ( len > name##_len ) len = name##_len; \ name##_len = strlen( value ); \ if ( len && name ) { \ if ( copy_to_user( name, value, len ) ) \ return -EFAULT; \ } version.version_major = DRIVER_MAJOR; version.version_minor = DRIVER_MINOR; version.version_patchlevel = DRIVER_PATCHLEVEL; DRM_COPY( version.name, DRIVER_NAME ); DRM_COPY( version.date, DRIVER_DATE ); DRM_COPY( version.desc, DRIVER_DESC ); if ( copy_to_user( (drm_version_t *)arg, &version, sizeof(version) ) ) return -EFAULT; return 0; } int DRM(open)( struct inode *inode, struct file *filp ) { drm_device_t *dev = NULL; int retcode = 0; int i; for (i = 0; i < DRM(numdevs); i++) { if (minor(inode->i_rdev) == DRM(minor)[i]) { dev = &(DRM(device)[i]); break; } } if (!dev) { return 
-ENODEV; } retcode = DRM(open_helper)( inode, filp, dev ); if ( !retcode ) { atomic_inc( &dev->counts[_DRM_STAT_OPENS] ); spin_lock( &dev->count_lock ); if ( !dev->open_count++ ) { spin_unlock( &dev->count_lock ); return DRM(setup)( dev ); } spin_unlock( &dev->count_lock ); } return retcode; } int DRM(release)( struct inode *inode, struct file *filp ) { drm_file_t *priv = filp->private_data; drm_device_t *dev; int retcode = 0; lock_kernel(); dev = priv->dev; DRM_DEBUG( "open_count = %d\n", dev->open_count ); DRIVER_PRERELEASE(); /* ======================================================== * Begin inline drm_release */ DRM_DEBUG( "pid = %d, device = 0x%lx, open_count = %d\n", current->pid, (long)dev->device, dev->open_count ); if ( priv->lock_count && dev->lock.hw_lock && _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) && dev->lock.filp == filp ) { DRM_DEBUG( "File %p released, freeing lock for context %d\n", filp, _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock) ); #if __HAVE_RELEASE DRIVER_RELEASE(); #endif DRM(lock_free)( dev, &dev->lock.hw_lock->lock, _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock) ); /* FIXME: may require heavy-handed reset of hardware at this point, possibly processed via a callback to the X server. 
*/ } #if __HAVE_RELEASE else if ( priv->lock_count && dev->lock.hw_lock ) { /* The lock is required to reclaim buffers */ DECLARE_WAITQUEUE( entry, current ); add_wait_queue( &dev->lock.lock_queue, &entry ); for (;;) { current->state = TASK_INTERRUPTIBLE; if ( !dev->lock.hw_lock ) { /* Device has been unregistered */ retcode = -EINTR; break; } if ( DRM(lock_take)( &dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT ) ) { dev->lock.filp = filp; dev->lock.lock_time = jiffies; atomic_inc( &dev->counts[_DRM_STAT_LOCKS] ); break; /* Got lock */ } /* Contention */ schedule(); if ( signal_pending( current ) ) { retcode = -ERESTARTSYS; break; } } current->state = TASK_RUNNING; remove_wait_queue( &dev->lock.lock_queue, &entry ); if( !retcode ) { DRIVER_RELEASE(); DRM(lock_free)( dev, &dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT ); } } #elif __HAVE_DMA DRM(reclaim_buffers)( filp ); #endif DRM(fasync)( -1, filp, 0 ); down( &dev->struct_sem ); if ( priv->remove_auth_on_close == 1 ) { drm_file_t *temp = dev->file_first; while ( temp ) { temp->authenticated = 0; temp = temp->next; } } if ( priv->prev ) { priv->prev->next = priv->next; } else { dev->file_first = priv->next; } if ( priv->next ) { priv->next->prev = priv->prev; } else { dev->file_last = priv->prev; } up( &dev->struct_sem ); DRM(free)( priv, sizeof(*priv), DRM_MEM_FILES ); /* ======================================================== * End inline drm_release */ atomic_inc( &dev->counts[_DRM_STAT_CLOSES] ); spin_lock( &dev->count_lock ); if ( !--dev->open_count ) { if ( atomic_read( &dev->ioctl_count ) || dev->blocked ) { DRM_ERROR( "Device busy: %d %d\n", atomic_read( &dev->ioctl_count ), dev->blocked ); spin_unlock( &dev->count_lock ); unlock_kernel(); return -EBUSY; } spin_unlock( &dev->count_lock ); unlock_kernel(); return DRM(takedown)( dev ); } spin_unlock( &dev->count_lock ); unlock_kernel(); return retcode; } /* DRM(ioctl) is called whenever a process performs an ioctl on /dev/drm. 
*/ int DRM(ioctl)( struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg ) { drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->dev; drm_ioctl_desc_t *ioctl; drm_ioctl_t *func; int nr = DRM_IOCTL_NR(cmd); int retcode = 0; atomic_inc( &dev->ioctl_count ); atomic_inc( &dev->counts[_DRM_STAT_IOCTLS] ); ++priv->ioctl_count; DRM_DEBUG( "pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n", current->pid, cmd, nr, (long)dev->device, priv->authenticated ); if ( nr >= DRIVER_IOCTL_COUNT ) { retcode = -EINVAL; } else { ioctl = &DRM(ioctls)[nr]; func = ioctl->func; if ( !func ) { DRM_DEBUG( "no function\n" ); retcode = -EINVAL; } else if ( ( ioctl->root_only && !capable( CAP_SYS_ADMIN ) )|| ( ioctl->auth_needed && !priv->authenticated ) ) { retcode = -EACCES; } else { retcode = func( inode, filp, cmd, arg ); } } atomic_dec( &dev->ioctl_count ); return retcode; } int DRM(lock)( struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg ) { drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->dev; DECLARE_WAITQUEUE( entry, current ); drm_lock_t lock; int ret = 0; #if __HAVE_MULTIPLE_DMA_QUEUES drm_queue_t *q; #endif ++priv->lock_count; if ( copy_from_user( &lock, (drm_lock_t *)arg, sizeof(lock) ) ) return -EFAULT; if ( lock.context == DRM_KERNEL_CONTEXT ) { DRM_ERROR( "Process %d using kernel context %d\n", current->pid, lock.context ); return -EINVAL; } DRM_DEBUG( "%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n", lock.context, current->pid, dev->lock.hw_lock->lock, lock.flags ); #if __HAVE_DMA_QUEUE if ( lock.context < 0 ) return -EINVAL; #elif __HAVE_MULTIPLE_DMA_QUEUES if ( lock.context < 0 || lock.context >= dev->queue_count ) return -EINVAL; q = dev->queuelist[lock.context]; #endif #if __HAVE_DMA_FLUSH ret = DRM(flush_block_and_flush)( dev, lock.context, lock.flags ); #endif if ( !ret ) { add_wait_queue( &dev->lock.lock_queue, &entry ); for (;;) { current->state = TASK_INTERRUPTIBLE; if ( 
!dev->lock.hw_lock ) { /* Device has been unregistered */ ret = -EINTR; break; } if ( DRM(lock_take)( &dev->lock.hw_lock->lock, lock.context ) ) { dev->lock.filp = filp; dev->lock.lock_time = jiffies; atomic_inc( &dev->counts[_DRM_STAT_LOCKS] ); break; /* Got lock */ } /* Contention */ schedule(); if ( signal_pending( current ) ) { ret = -ERESTARTSYS; break; } } current->state = TASK_RUNNING; remove_wait_queue( &dev->lock.lock_queue, &entry ); } #if __HAVE_DMA_FLUSH DRM(flush_unblock)( dev, lock.context, lock.flags ); /* cleanup phase */ #endif if ( !ret ) { sigemptyset( &dev->sigmask ); sigaddset( &dev->sigmask, SIGSTOP ); sigaddset( &dev->sigmask, SIGTSTP ); sigaddset( &dev->sigmask, SIGTTIN ); sigaddset( &dev->sigmask, SIGTTOU ); dev->sigdata.context = lock.context; dev->sigdata.lock = dev->lock.hw_lock; block_all_signals( DRM(notifier), &dev->sigdata, &dev->sigmask ); #if __HAVE_DMA_READY if ( lock.flags & _DRM_LOCK_READY ) { DRIVER_DMA_READY(); } #endif #if __HAVE_DMA_QUIESCENT if ( lock.flags & _DRM_LOCK_QUIESCENT ) { DRIVER_DMA_QUIESCENT(); } #endif #if __HAVE_KERNEL_CTX_SWITCH if ( dev->last_context != lock.context ) { DRM(context_switch)(dev, dev->last_context, lock.context); } #endif } DRM_DEBUG( "%d %s\n", lock.context, ret ? "interrupted" : "has lock" ); return ret; } int DRM(unlock)( struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg ) { drm_file_t *priv = filp->private_data; drm_device_t *dev = priv->dev; drm_lock_t lock; if ( copy_from_user( &lock, (drm_lock_t *)arg, sizeof(lock) ) ) return -EFAULT; if ( lock.context == DRM_KERNEL_CONTEXT ) { DRM_ERROR( "Process %d using kernel context %d\n", current->pid, lock.context ); return -EINVAL; } atomic_inc( &dev->counts[_DRM_STAT_UNLOCKS] ); #if __HAVE_KERNEL_CTX_SWITCH /* We no longer really hold it, but if we are the next * agent to request it then we should just be able to * take it immediately and not eat the ioctl. 
*/ dev->lock.filp = 0; { __volatile__ unsigned int *plock = &dev->lock.hw_lock->lock; unsigned int old, new, prev, ctx; ctx = lock.context; do { old = *plock; new = ctx; prev = cmpxchg(plock, old, new); } while (prev != old); } wake_up_interruptible(&dev->lock.lock_queue); #else DRM(lock_transfer)( dev, &dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT ); #if __HAVE_DMA_SCHEDULE DRM(dma_schedule)( dev, 1 ); #endif /* FIXME: Do we ever really need to check this??? */ if ( 1 /* !dev->context_flag */ ) { if ( DRM(lock_free)( dev, &dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT ) ) { DRM_ERROR( "\n" ); } } #endif /* !__HAVE_KERNEL_CTX_SWITCH */ unblock_all_signals(); return 0; } 0' href='#n770'>770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040
/* mach64_drv.h -- Private header for mach64 driver -*- linux-c -*-
 * Created: Fri Nov 24 22:07:58 2000 by gareth@valinux.com
 *
 * Copyright 2000 Gareth Hughes
 * Copyright 2002 Frank C. Earl
 * Copyright 2002-2003 Leif Delgass
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT OWNER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Gareth Hughes <gareth@valinux.com>
 *    Frank C. Earl <fearl@airmail.net>
 *    Leif Delgass <ldelgass@retinalburn.net>
 *    José Fonseca <j_r_fonseca@yahoo.co.uk>
 */

#ifndef __MACH64_DRV_H__
#define __MACH64_DRV_H__


/* FIXME: remove these when not needed */
/* Development driver options */
#define MACH64_EXTRA_CHECKING     0 /* Extra sanity checks for DMA/freelist management */
#define MACH64_VERBOSE		  0 /* Verbose debugging output */

/* Book-keeping node that follows one DMA buffer through the driver's
 * free, placeholder and pending lists.
 */
typedef struct drm_mach64_freelist {
	struct list_head list;	/* Links into free_list, placeholders, or pending list */
	drm_buf_t *buf;		/* The DMA buffer this entry tracks */
	int discard;		/* Set once we are finished (re)using the buffer */
	u32 ring_ofs;		/* dword offset in ring of last descriptor for this buffer */
} drm_mach64_freelist_t;

/* State for the bus-master descriptor table, managed as a ring buffer. */
typedef struct drm_mach64_descriptor_ring {
	dma_addr_t handle;	/* bus address of ring from pci_alloc_consistent() */
	void *start;		/* CPU (write) pointer to start of descriptor ring */
	u32 start_addr;		/* bus address of beginning of descriptor ring */
	int size;		/* size of ring in bytes */

	u32 head_addr;		/* bus address of descriptor ring head */
	u32 head;		/* dword offset of descriptor ring head */
	u32 tail;		/* dword offset of descriptor ring tail */
	u32 tail_mask;		/* mask used to wrap ring */
	int space;		/* number of free bytes in ring */
} drm_mach64_descriptor_ring_t;

/* Per-device private state for the mach64 driver: DMA mode and ring,
 * buffer lists, framebuffer layout, and the memory maps used by the
 * DRM core.
 */
typedef struct drm_mach64_private {
	drm_mach64_sarea_t *sarea_priv;

	int is_pci;
	drm_mach64_dma_mode_t driver_mode;       /* Async DMA, sync DMA, or MMIO */

	int usec_timeout;                        /* Timeout for the wait functions */

	drm_mach64_descriptor_ring_t ring;       /* DMA descriptor table (ring buffer) */
	int ring_running;                        /* Whether bus mastering is enabled */

	struct list_head free_list;              /* Free-list head */
	struct list_head placeholders;           /* Placeholder list for buffers held by clients */
	struct list_head pending;                /* Buffers pending completion */

	u32 frame_ofs[MACH64_MAX_QUEUED_FRAMES]; /* dword ring offsets of most recent frame swaps */

	unsigned int fb_bpp;
	unsigned int front_offset, front_pitch;
	unsigned int back_offset, back_pitch;

	unsigned int depth_bpp;
	unsigned int depth_offset, depth_pitch;

	u32 front_offset_pitch;
	u32 back_offset_pitch;
	u32 depth_offset_pitch;

	drm_local_map_t *sarea;
	drm_local_map_t *fb;
	drm_local_map_t *mmio;
	drm_local_map_t *ring_map;
	drm_local_map_t *dev_buffers; /* this is a pointer to a structure in dev */
	drm_local_map_t *agp_textures;
} drm_mach64_private_t;

				/* mach64_dma.c */
/* Ioctl entry points for DMA setup and control. */
extern int mach64_dma_init( DRM_IOCTL_ARGS );
extern int mach64_dma_idle( DRM_IOCTL_ARGS );
extern int mach64_dma_flush( DRM_IOCTL_ARGS );
extern int mach64_engine_reset( DRM_IOCTL_ARGS );
extern int mach64_dma_buffers( DRM_IOCTL_ARGS );

/* Freelist management for DMA buffers. */
extern int mach64_init_freelist( drm_device_t *dev );
extern void mach64_destroy_freelist( drm_device_t *dev );
extern drm_buf_t *mach64_freelist_get( drm_mach64_private_t *dev_priv );

/* Engine/ring synchronization, diagnostics and reset helpers. */
extern int mach64_do_wait_for_fifo( drm_mach64_private_t *dev_priv,
				    int entries );
extern int mach64_do_wait_for_idle( drm_mach64_private_t *dev_priv );
extern int mach64_wait_ring( drm_mach64_private_t *dev_priv, int n );
extern int mach64_do_dispatch_pseudo_dma( drm_mach64_private_t *dev_priv );
extern int mach64_do_release_used_buffers( drm_mach64_private_t *dev_priv );
extern void mach64_dump_engine_info( drm_mach64_private_t *dev_priv );
extern void mach64_dump_ring_info( drm_mach64_private_t *dev_priv );
extern int mach64_do_engine_reset( drm_mach64_private_t *dev_priv );

extern int mach64_do_dma_idle( drm_mach64_private_t *dev_priv );
extern int mach64_do_dma_flush( drm_mach64_private_t *dev_priv );
extern int mach64_do_cleanup_dma( drm_device_t *dev );

				/* mach64_state.c */
/* Ioctl entry points for rendering state operations. */
extern int mach64_dma_clear( DRM_IOCTL_ARGS );
extern int mach64_dma_swap( DRM_IOCTL_ARGS );
extern int mach64_dma_vertex( DRM_IOCTL_ARGS );
extern int mach64_dma_blit( DRM_IOCTL_ARGS );
extern int mach64_get_param( DRM_IOCTL_ARGS );
extern int mach64_driver_vblank_wait(drm_device_t *dev, unsigned int *sequence);

/* IRQ handler and install/uninstall hooks called by the DRM core. */
extern irqreturn_t mach64_driver_irq_handler( DRM_IRQ_ARGS );
extern void mach64_driver_irq_preinstall( drm_device_t *dev );
extern void mach64_driver_irq_postinstall( drm_device_t *dev );
extern void mach64_driver_irq_uninstall( drm_device_t *dev );

/* ================================================================
 * Registers
 */

#define MACH64_AGP_BASE				0x0148
#define MACH64_AGP_CNTL				0x014c
#define MACH64_ALPHA_TST_CNTL			0x0550


#define MACH64_DSP_CONFIG 			0x0420
#define MACH64_DSP_ON_OFF 			0x0424
#define MACH64_EXT_MEM_CNTL 			0x04ac
#define MACH64_GEN_TEST_CNTL 			0x04d0
#define MACH64_HW_DEBUG 			0x047c
#define MACH64_MEM_ADDR_CONFIG 			0x0434
#define MACH64_MEM_BUF_CNTL 			0x042c
#define MACH64_MEM_CNTL 			0x04b0


/* Bus-master (DMA) registers.
 * NOTE(review): BM_ADDR and BM_DATA share offset 0x0648 — presumably the
 * same register aliased under two names; confirm against the databook.
 */
#define MACH64_BM_ADDR				0x0648
#define MACH64_BM_COMMAND			0x0188
#define MACH64_BM_DATA				0x0648
#define MACH64_BM_FRAME_BUF_OFFSET		0x0180
#define MACH64_BM_GUI_TABLE			0x01b8
#define MACH64_BM_GUI_TABLE_CMD			0x064c
#	define MACH64_CIRCULAR_BUF_SIZE_16KB		(0 << 0)
#	define MACH64_CIRCULAR_BUF_SIZE_32KB		(1 << 0)
#	define MACH64_CIRCULAR_BUF_SIZE_64KB		(2 << 0)
#	define MACH64_CIRCULAR_BUF_SIZE_128KB		(3 << 0)
	/* NOTE(review): (1 << 31) left-shifts into the sign bit of int;
	 * (1U << 31) would avoid the implementation-defined value.
	 */
#	define MACH64_LAST_DESCRIPTOR			(1 << 31)
#define MACH64_BM_HOSTDATA			0x0644
#define MACH64_BM_STATUS			0x018c
#define MACH64_BM_SYSTEM_MEM_ADDR		0x0184
#define MACH64_BM_SYSTEM_TABLE			0x01bc
#define MACH64_BUS_CNTL				0x04a0
#	define MACH64_BUS_MSTR_RESET			(1 << 1)
#	define MACH64_BUS_APER_REG_DIS			(1 << 4)
#	define MACH64_BUS_FLUSH_BUF			(1 << 2)
#	define MACH64_BUS_MASTER_DIS			(1 << 6)
#	define MACH64_BUS_EXT_REG_EN			(1 << 27)

#define MACH64_CLR_CMP_CLR			0x0700
#define MACH64_CLR_CMP_CNTL			0x0708
#define MACH64_CLR_CMP_MASK			0x0704
#define MACH64_CONFIG_CHIP_ID 			0x04e0
#define MACH64_CONFIG_CNTL 			0x04dc
#define MACH64_CONFIG_STAT0 			0x04e4
#define MACH64_CONFIG_STAT1 			0x0494
#define MACH64_CONFIG_STAT2 			0x0498
#define MACH64_CONTEXT_LOAD_CNTL		0x072c
#define MACH64_CONTEXT_MASK			0x0720
#define MACH64_COMPOSITE_SHADOW_ID		0x0798
#define MACH64_CRC_SIG 				0x04e8
#define MACH64_CUSTOM_MACRO_CNTL 		0x04d4

/* Data-path color registers.
 * NOTE(review): DP_FOG_CLR and DP_FRGD_CLR both map to 0x06c4 —
 * verify against the mach64 register reference.
 */
#define MACH64_DP_BKGD_CLR			0x06c0
#define MACH64_DP_FOG_CLR			0x06c4
#define MACH64_DP_FGRD_BKGD_CLR			0x06e0
#define MACH64_DP_FRGD_CLR			0x06c4
#define MACH64_DP_FGRD_CLR_MIX			0x06dc
/* Mix (raster-op) codes: background mix in bits 0-15, foreground mix
 * in bits 16-31.
 */
#define MACH64_DP_MIX				0x06d4
#	define BKGD_MIX_NOT_D				(0 << 0)
#	define BKGD_MIX_ZERO				(1 << 0)
#	define BKGD_MIX_ONE				(2 << 0)
#	define MACH64_BKGD_MIX_D			(3 << 0)
#	define BKGD_MIX_NOT_S				(4 << 0)
#	define BKGD_MIX_D_XOR_S				(5 << 0)
#	define BKGD_MIX_NOT_D_XOR_S			(6 << 0)
#	define MACH64_BKGD_MIX_S			(7 << 0)
#	define BKGD_MIX_NOT_D_OR_NOT_S			(8 << 0)
#	define BKGD_MIX_D_OR_NOT_S			(9 << 0)
#	define BKGD_MIX_NOT_D_OR_S			(10 << 0)
#	define BKGD_MIX_D_OR_S				(11 << 0)
#	define BKGD_MIX_D_AND_S				(12 << 0)
#	define BKGD_MIX_NOT_D_AND_S			(13 << 0)
#	define BKGD_MIX_D_AND_NOT_S			(14 << 0)
#	define BKGD_MIX_NOT_D_AND_NOT_S			(15 << 0)
#	define BKGD_MIX_D_PLUS_S_DIV2			(23 << 0)
#	define FRGD_MIX_NOT_D				(0 << 16)
#	define FRGD_MIX_ZERO				(1 << 16)
#	define FRGD_MIX_ONE				(2 << 16)
#	define FRGD_MIX_D				(3 << 16)
#	define FRGD_MIX_NOT_S				(4 << 16)
#	define FRGD_MIX_D_XOR_S				(5 << 16)
#	define FRGD_MIX_NOT_D_XOR_S			(6 << 16)
#	define MACH64_FRGD_MIX_S			(7 << 16)
#	define FRGD_MIX_NOT_D_OR_NOT_S			(8 << 16)
#	define FRGD_MIX_D_OR_NOT_S			(9 << 16)
#	define FRGD_MIX_NOT_D_OR_S			(10 << 16)
#	define FRGD_MIX_D_OR_S				(11 << 16)
#	define FRGD_MIX_D_AND_S				(12 << 16)
#	define FRGD_MIX_NOT_D_AND_S			(13 << 16)
#	define FRGD_MIX_D_AND_NOT_S			(14 << 16)
#	define FRGD_MIX_NOT_D_AND_NOT_S			(15 << 16)
#	define FRGD_MIX_D_PLUS_S_DIV2			(23 << 16)

#define MACH64_DP_PIX_WIDTH			0x06d0
#	define MACH64_HOST_TRIPLE_ENABLE		(1 << 13)
#	define MACH64_BYTE_ORDER_MSB_TO_LSB		(0 << 24)
#	define MACH64_BYTE_ORDER_LSB_TO_MSB		(1 << 24)

/* Data-path source selects: background in bits 0-7, foreground in
 * bits 8-15, mono in bits 16+.
 */
#define MACH64_DP_SRC				0x06d8
#	define MACH64_BKGD_SRC_BKGD_CLR			(0 << 0)
#	define MACH64_BKGD_SRC_FRGD_CLR			(1 << 0)
#	define MACH64_BKGD_SRC_HOST			(2 << 0)
#	define MACH64_BKGD_SRC_BLIT			(3 << 0)
#	define MACH64_BKGD_SRC_PATTERN			(4 << 0)
#	define MACH64_BKGD_SRC_3D			(5 << 0)
#	define MACH64_FRGD_SRC_BKGD_CLR			(0 << 8)
#	define MACH64_FRGD_SRC_FRGD_CLR			(1 << 8)
#	define MACH64_FRGD_SRC_HOST			(2 << 8)
#	define MACH64_FRGD_SRC_BLIT			(3 << 8)
#	define MACH64_FRGD_SRC_PATTERN			(4 << 8)
#	define MACH64_FRGD_SRC_3D			(5 << 8)
#	define MACH64_MONO_SRC_ONE			(0 << 16)
#	define MACH64_MONO_SRC_PATTERN			(1 << 16)
#	define MACH64_MONO_SRC_HOST			(2 << 16)
#	define MACH64_MONO_SRC_BLIT			(3 << 16)

#define MACH64_DP_WRITE_MASK			0x06c8

/* Destination trajectory control */
#define MACH64_DST_CNTL				0x0530
#	define MACH64_DST_X_RIGHT_TO_LEFT		(0 << 0)
#	define MACH64_DST_X_LEFT_TO_RIGHT		(1 << 0)
#	define MACH64_DST_Y_BOTTOM_TO_TOP		(0 << 1)
#	define MACH64_DST_Y_TOP_TO_BOTTOM		(1 << 1)
#	define MACH64_DST_X_MAJOR			(0 << 2)
#	define MACH64_DST_Y_MAJOR			(1 << 2)
#	define MACH64_DST_X_TILE			(1 << 3)
#	define MACH64_DST_Y_TILE			(1 << 4)
#	define MACH64_DST_LAST_PEL			(1 << 5)
#	define MACH64_DST_POLYGON_ENABLE		(1 << 6)
#	define MACH64_DST_24_ROTATION_ENABLE		(1 << 7)

#define MACH64_DST_HEIGHT_WIDTH			0x0518
#define MACH64_DST_OFF_PITCH			0x0500
#define MACH64_DST_WIDTH_HEIGHT			0x06ec
#define MACH64_DST_X_Y				0x06e8
#define MACH64_DST_Y_X				0x050c

#define MACH64_FIFO_STAT			0x0710
#	define MACH64_FIFO_SLOT_MASK			0x0000ffff
	/* Bit 31: unsigned shift — (1 << 31) is UB for a signed int. */
#	define MACH64_FIFO_ERR				(1U << 31)
/* NOTE(review): MACH64_GEN_TEST_CNTL is also defined earlier in this
 * file with the same value (0x04d0); the duplicate is harmless but
 * redundant.
 */
#define MACH64_GEN_TEST_CNTL			0x04d0
#	define MACH64_GUI_ENGINE_ENABLE			(1 << 8)
/* Command FIFO registers */
#define MACH64_GUI_CMDFIFO_DEBUG		0x0170
#define MACH64_GUI_CMDFIFO_DATA			0x0174
#define MACH64_GUI_CNTL				0x0178
#       define MACH64_CMDFIFO_SIZE_MASK                 0x00000003ul
#       define MACH64_CMDFIFO_SIZE_192                  0x00000000ul
#       define MACH64_CMDFIFO_SIZE_128                  0x00000001ul
#       define MACH64_CMDFIFO_SIZE_64                   0x00000002ul
#define MACH64_GUI_STAT				0x0738
#	define MACH64_GUI_ACTIVE			(1 << 0)
#define MACH64_GUI_TRAJ_CNTL			0x0730

#define MACH64_HOST_CNTL			0x0640
#define MACH64_HOST_DATA0			0x0600

#define MACH64_ONE_OVER_AREA			0x029c
#define MACH64_ONE_OVER_AREA_UC			0x0300

#define MACH64_PAT_REG0				0x0680
#define MACH64_PAT_REG1				0x0684

/* Scissor registers */
#define MACH64_SC_LEFT                          0x06a0
#define MACH64_SC_RIGHT                         0x06a4
#define MACH64_SC_LEFT_RIGHT                    0x06a8
#define MACH64_SC_TOP                           0x06ac
#define MACH64_SC_BOTTOM                        0x06b0
#define MACH64_SC_TOP_BOTTOM                    0x06b4

#define MACH64_SCALE_3D_CNTL			0x05fc
#define MACH64_SCRATCH_REG0			0x0480
#define MACH64_SCRATCH_REG1			0x0484
#define MACH64_SECONDARY_TEX_OFF		0x0778
#define MACH64_SETUP_CNTL			0x0304
/* Source trajectory control; also selects the bus-master operation */
#define MACH64_SRC_CNTL				0x05b4
#	define MACH64_SRC_BM_ENABLE			(1 << 8)
#	define MACH64_SRC_BM_SYNC			(1 << 9)
#	define MACH64_SRC_BM_OP_FRAME_TO_SYSTEM		(0 << 10)
#	define MACH64_SRC_BM_OP_SYSTEM_TO_FRAME		(1 << 10)
#	define MACH64_SRC_BM_OP_REG_TO_SYSTEM		(2 << 10)
#	define MACH64_SRC_BM_OP_SYSTEM_TO_REG		(3 << 10)
#define MACH64_SRC_HEIGHT1			0x0594
#define MACH64_SRC_HEIGHT2			0x05ac
#define MACH64_SRC_HEIGHT1_WIDTH1		0x0598
#define MACH64_SRC_HEIGHT2_WIDTH2		0x05b0
#define MACH64_SRC_OFF_PITCH			0x0580
#define MACH64_SRC_WIDTH1			0x0590
#define MACH64_SRC_Y_X				0x058c

/* Texture registers */
#define MACH64_TEX_0_OFF			0x05c0
#define MACH64_TEX_CNTL				0x0774
#define MACH64_TEX_SIZE_PITCH			0x0770
#define MACH64_TIMER_CONFIG 			0x0428

/* 3D setup-engine vertex registers (three vertices) */
#define MACH64_VERTEX_1_ARGB			0x0254
#define MACH64_VERTEX_1_S			0x0240
#define MACH64_VERTEX_1_SECONDARY_S		0x0328
#define MACH64_VERTEX_1_SECONDARY_T		0x032c
#define MACH64_VERTEX_1_SECONDARY_W		0x0330
#define MACH64_VERTEX_1_SPEC_ARGB		0x024c
#define MACH64_VERTEX_1_T			0x0244
#define MACH64_VERTEX_1_W			0x0248
#define MACH64_VERTEX_1_X_Y			0x0258
#define MACH64_VERTEX_1_Z			0x0250
#define MACH64_VERTEX_2_ARGB			0x0274
#define MACH64_VERTEX_2_S			0x0260
#define MACH64_VERTEX_2_SECONDARY_S		0x0334
#define MACH64_VERTEX_2_SECONDARY_T		0x0338
#define MACH64_VERTEX_2_SECONDARY_W		0x033c
#define MACH64_VERTEX_2_SPEC_ARGB		0x026c
#define MACH64_VERTEX_2_T			0x0264
#define MACH64_VERTEX_2_W			0x0268
#define MACH64_VERTEX_2_X_Y			0x0278
#define MACH64_VERTEX_2_Z			0x0270
#define MACH64_VERTEX_3_ARGB			0x0294
#define MACH64_VERTEX_3_S			0x0280
#define MACH64_VERTEX_3_SECONDARY_S		0x02a0
#define MACH64_VERTEX_3_SECONDARY_T		0x02a4
#define MACH64_VERTEX_3_SECONDARY_W		0x02a8
#define MACH64_VERTEX_3_SPEC_ARGB		0x028c
#define MACH64_VERTEX_3_T			0x0284
#define MACH64_VERTEX_3_W			0x0288
#define MACH64_VERTEX_3_X_Y			0x0298
#define MACH64_VERTEX_3_Z			0x0290

#define MACH64_Z_CNTL				0x054c
#define MACH64_Z_OFF_PITCH			0x0548

/* CRTC registers */
#define MACH64_CRTC_VLINE_CRNT_VLINE		0x0410
#	define MACH64_CRTC_VLINE_MASK		        0x000007ff
#	define MACH64_CRTC_CRNT_VLINE_MASK		0x07ff0000
#define MACH64_CRTC_OFF_PITCH			0x0414
/* Interrupt control/status: *_INT_EN bits are enables, *_INT bits are
 * the corresponding status bits (collected in the ENS/ACKS masks below).
 */
#define MACH64_CRTC_INT_CNTL			0x0418
#	define MACH64_CRTC_VBLANK			(1 << 0)
#	define MACH64_CRTC_VBLANK_INT_EN		(1 << 1)
#	define MACH64_CRTC_VBLANK_INT			(1 << 2)
#	define MACH64_CRTC_VLINE_INT_EN			(1 << 3)
#	define MACH64_CRTC_VLINE_INT			(1 << 4)
#	define MACH64_CRTC_VLINE_SYNC			(1 << 5) /* 0=even, 1=odd */
#	define MACH64_CRTC_FRAME			(1 << 6) /* 0=even, 1=odd */
#	define MACH64_CRTC_SNAPSHOT_INT_EN		(1 << 7)
#	define MACH64_CRTC_SNAPSHOT_INT			(1 << 8)
#	define MACH64_CRTC_I2C_INT_EN			(1 << 9)
#	define MACH64_CRTC_I2C_INT			(1 << 10)
#	define MACH64_CRTC2_VBLANK			(1 << 11) /* LT Pro */
#	define MACH64_CRTC2_VBLANK_INT_EN		(1 << 12) /* LT Pro */
#	define MACH64_CRTC2_VBLANK_INT			(1 << 13) /* LT Pro */
#	define MACH64_CRTC2_VLINE_INT_EN		(1 << 14) /* LT Pro */
#	define MACH64_CRTC2_VLINE_INT			(1 << 15) /* LT Pro */
#	define MACH64_CRTC_CAPBUF0_INT_EN		(1 << 16)
#	define MACH64_CRTC_CAPBUF0_INT			(1 << 17)
#	define MACH64_CRTC_CAPBUF1_INT_EN		(1 << 18)
#	define MACH64_CRTC_CAPBUF1_INT			(1 << 19)
#	define MACH64_CRTC_OVERLAY_EOF_INT_EN		(1 << 20)
#	define MACH64_CRTC_OVERLAY_EOF_INT		(1 << 21)
#	define MACH64_CRTC_ONESHOT_CAP_INT_EN		(1 << 22)
#	define MACH64_CRTC_ONESHOT_CAP_INT		(1 << 23)
#	define MACH64_CRTC_BUSMASTER_EOL_INT_EN		(1 << 24)
#	define MACH64_CRTC_BUSMASTER_EOL_INT		(1 << 25)
#	define MACH64_CRTC_GP_INT_EN			(1 << 26)
#	define MACH64_CRTC_GP_INT			(1 << 27)
#	define MACH64_CRTC2_VLINE_SYNC			(1 << 28) /* LT Pro */  /* 0=even, 1=odd */
#	define MACH64_CRTC_SNAPSHOT2_INT_EN		(1 << 29) /* LT Pro */
#	define MACH64_CRTC_SNAPSHOT2_INT		(1 << 30) /* LT Pro */
	/* Bit 31: unsigned shift — (1 << 31) is UB for a signed int. */
#	define MACH64_CRTC_VBLANK2_INT			(1U << 31)
	/* All interrupt-enable bits */
#	define MACH64_CRTC_INT_ENS				\
		(						\
			MACH64_CRTC_VBLANK_INT_EN |		\
			MACH64_CRTC_VLINE_INT_EN |		\
			MACH64_CRTC_SNAPSHOT_INT_EN |		\
			MACH64_CRTC_I2C_INT_EN |		\
			MACH64_CRTC2_VBLANK_INT_EN |		\
			MACH64_CRTC2_VLINE_INT_EN |		\
			MACH64_CRTC_CAPBUF0_INT_EN |		\
			MACH64_CRTC_CAPBUF1_INT_EN |		\
			MACH64_CRTC_OVERLAY_EOF_INT_EN |	\
			MACH64_CRTC_ONESHOT_CAP_INT_EN |	\
			MACH64_CRTC_BUSMASTER_EOL_INT_EN |	\
			MACH64_CRTC_GP_INT_EN |			\
			MACH64_CRTC_SNAPSHOT2_INT_EN |		\
			0					\
		)
	/* All interrupt-status bits */
#	define MACH64_CRTC_INT_ACKS			\
		(					\
			MACH64_CRTC_VBLANK_INT |	\
			MACH64_CRTC_VLINE_INT |		\
			MACH64_CRTC_SNAPSHOT_INT |	\
			MACH64_CRTC_I2C_INT |		\
			MACH64_CRTC2_VBLANK_INT |	\
			MACH64_CRTC2_VLINE_INT |	\
			MACH64_CRTC_CAPBUF0_INT |	\
			MACH64_CRTC_CAPBUF1_INT |	\
			MACH64_CRTC_OVERLAY_EOF_INT |	\
			MACH64_CRTC_ONESHOT_CAP_INT |	\
			MACH64_CRTC_BUSMASTER_EOL_INT |	\
			MACH64_CRTC_GP_INT |		\
			MACH64_CRTC_SNAPSHOT2_INT |	\
			MACH64_CRTC_VBLANK2_INT |	\
			0				\
		)

/* Pixel datatype codes (NOTE(review): presumably the values written to
 * the DP_PIX_WIDTH datatype fields — confirm against register spec).
 */
#define MACH64_DATATYPE_CI8				2
#define MACH64_DATATYPE_ARGB1555			3
#define MACH64_DATATYPE_RGB565				4
#define MACH64_DATATYPE_ARGB8888			6
#define MACH64_DATATYPE_RGB332				7
#define MACH64_DATATYPE_Y8				8
#define MACH64_DATATYPE_RGB8				9
#define MACH64_DATATYPE_VYUY422				11
#define MACH64_DATATYPE_YVYU422				12
#define MACH64_DATATYPE_AYUV444				14
#define MACH64_DATATYPE_ARGB4444			15

/* MMIO register access helpers.  Both expect a local `dev_priv`
 * (drm_mach64_private_t *) to be in scope at the point of use.
 */
#define MACH64_READ(reg)	DRM_READ32(dev_priv->mmio, (reg) )
#define MACH64_WRITE(reg,val)	DRM_WRITE32(dev_priv->mmio, (reg), (val) )


/* MMIO register aperture ranges (byte offsets) */
#define DWMREG0		0x0400
#define DWMREG0_END	0x07ff
#define DWMREG1		0x0000
#define DWMREG1_END	0x03ff

/* Map an MMIO register byte offset to its DMA register (dword) index;
 * registers outside the DWMREG0 range are flagged with 0x0100.
 */
#define ISREG0(r)	(((r) >= DWMREG0) && ((r) <= DWMREG0_END))
#define DMAREG0(r)	(((r) - DWMREG0) >> 2)
#define DMAREG1(r)	((((r) - DWMREG1) >> 2 ) | 0x0100)
#define DMAREG(r)	(ISREG0(r) ? DMAREG0(r) : DMAREG1(r))

#define MMREG0		0x0000
#define MMREG0_END	0x00ff

/* Inverse mapping: DMA register index back to an MMIO byte offset */
#define ISMMREG0(r)	(((r) >= MMREG0) && ((r) <= MMREG0_END))
#define MMSELECT0(r)	(((r) << 2) + DWMREG0)
#define MMSELECT1(r)	(((((r) & 0xff) << 2) + DWMREG1))
#define MMSELECT(r)	(ISMMREG0(r) ? MMSELECT0(r) : MMSELECT1(r))

/* ================================================================
 * DMA constants
 */

/* DMA descriptor field indices:
 * The descriptor fields are loaded into the read-only 
 * BM_* system bus master registers during a bus-master operation
 */
#define MACH64_DMA_FRAME_BUF_OFFSET	0        /* BM_FRAME_BUF_OFFSET */
#define MACH64_DMA_SYS_MEM_ADDR		1        /* BM_SYSTEM_MEM_ADDR */
#define MACH64_DMA_COMMAND		2        /* BM_COMMAND */
#define MACH64_DMA_RESERVED		3        /* BM_STATUS */

/* BM_COMMAND descriptor field flags */
#define MACH64_DMA_HOLD_OFFSET		(1<<30)  /* Don't increment DMA_FRAME_BUF_OFFSET */
/* Unsigned shift: (1<<31) left-shifts into the sign bit of a 32-bit
 * signed int, which is undefined behavior.
 */
#define MACH64_DMA_EOL			(1U<<31) /* End of descriptor list flag */

#define MACH64_DMA_CHUNKSIZE	        0x1000   /* 4kB per DMA descriptor */
#define MACH64_APERTURE_OFFSET	        0x7ff800 /* frame-buffer offset for gui-masters */


/* ================================================================
 * Misc helper macros
 */

/* Atomically set the end-of-list flag (MACH64_DMA_EOL, bit 31 of the
 * little-endian dword) in a DMA descriptor word in place.
 *
 * addr: pointer to the descriptor's command dword (stored little-endian,
 *       see OUT_RING's cpu_to_le32).
 *
 * Per-architecture atomic read-modify-write sequences are used; the
 * generic fallback is NOT atomic.
 */
static __inline__ void mach64_set_dma_eol( volatile u32 * addr )
{
#if defined(__i386__)
	/* x86 is little-endian, so the LE bit 31 is the native bit 31. */
	int nr = 31;
	
	/* Taken from include/asm-i386/bitops.h linux header */
        __asm__ __volatile__( "lock;"
                "btsl %1,%0"
                :"=m" (*addr)
                :"Ir" (nr));
#elif defined(__powerpc__)
	u32 old;
	/* Byte-swap the mask so the bit lands in the LE descriptor layout. */
	u32 mask = cpu_to_le32( MACH64_DMA_EOL );

	/* Taken from the include/asm-ppc/bitops.h linux header */
	__asm__ __volatile__("\n\
1:	lwarx	%0,0,%3 \n\
	or	%0,%0,%2 \n\
	stwcx.	%0,0,%3 \n\
	bne-	1b"
	: "=&r" (old), "=m" (*addr)
	: "r" (mask), "r" (addr), "m" (*addr)
	: "cc");
#elif defined(__alpha__)
	u32 temp;
	/* Alpha is little-endian: no byte swap of the mask needed. */
	u32 mask = MACH64_DMA_EOL;

	/* Taken from the include/asm-alpha/bitops.h linux header */
	__asm__ __volatile__(
	"1:	ldl_l %0,%3\n"
	"	bis %0,%2,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*addr)
	:"Ir" (mask), "m" (*addr));
#else
	/* Generic fallback: plain read-modify-write, NOT atomic. */
	u32 mask = cpu_to_le32( MACH64_DMA_EOL );

	*addr |= mask;
#endif
}

/* Atomically clear the end-of-list flag (MACH64_DMA_EOL) in a DMA
 * descriptor word in place — the counterpart of mach64_set_dma_eol().
 *
 * addr: pointer to the descriptor's command dword (stored little-endian).
 *
 * Per-architecture atomic read-modify-write sequences are used; the
 * generic fallback is NOT atomic.
 */
static __inline__ void mach64_clear_dma_eol( volatile u32 * addr )
{
#if defined(__i386__)
	/* x86 is little-endian, so the LE bit 31 is the native bit 31. */
	int nr = 31;
	
	/* Taken from include/asm-i386/bitops.h linux header */
        __asm__ __volatile__( "lock;"
                "btrl %1,%0"
                :"=m" (*addr)
                :"Ir" (nr));
#elif defined(__powerpc__)
	u32 old;
	/* Byte-swap the mask so the bit lands in the LE descriptor layout;
	 * the andc instruction clears the masked bit. */
	u32 mask = cpu_to_le32( MACH64_DMA_EOL );

	/* Taken from the include/asm-ppc/bitops.h linux header */
	__asm__ __volatile__("\n\
1:	lwarx	%0,0,%3 \n\
	andc	%0,%0,%2 \n\
	stwcx.	%0,0,%3 \n\
	bne-	1b"
	: "=&r" (old), "=m" (*addr)
	: "r" (mask), "r" (addr), "m" (*addr)
	: "cc");
#elif defined(__alpha__)
	u32 temp;
	/* Alpha is little-endian: inverted mask, no byte swap needed. */
	u32 mask = ~MACH64_DMA_EOL;

	/* Taken from the include/asm-alpha/bitops.h linux header */
	__asm__ __volatile__(
	"1:	ldl_l %0,%3\n"
	"	and %0,%2,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*addr)
	:"Ir" (mask), "m" (*addr));
#else
	/* Generic fallback: plain read-modify-write, NOT atomic. */
	u32 mask = cpu_to_le32( ~MACH64_DMA_EOL );

	*addr &= mask;
#endif
}

/* Start the DMA descriptor ring: wait for engine idle (resetting the
 * engine if the wait fails), enable bus mastering and block-1 register
 * access unless in pure MMIO mode, and point BM_GUI_TABLE_CMD at the
 * ring head.  Marks the ring as running.
 */
static __inline__ void mach64_ring_start( drm_mach64_private_t *dev_priv )
{
	drm_mach64_descriptor_ring_t *ring = &dev_priv->ring;
	
	DRM_DEBUG( "%s: head_addr: 0x%08x head: %d tail: %d space: %d\n",
		   __FUNCTION__, 
		   ring->head_addr, ring->head, ring->tail, ring->space );

	if ( mach64_do_wait_for_idle( dev_priv ) < 0 ) {
		mach64_do_engine_reset( dev_priv );
	}

	if (dev_priv->driver_mode != MACH64_MODE_MMIO ) {
		/* enable bus mastering and block 1 registers */
		MACH64_WRITE( MACH64_BUS_CNTL, 
			      ( MACH64_READ(MACH64_BUS_CNTL) & 	~MACH64_BUS_MASTER_DIS ) 
			      | MACH64_BUS_EXT_REG_EN );
		mach64_do_wait_for_idle( dev_priv );
	}
	
	/* reset descriptor table ring head */
	MACH64_WRITE( MACH64_BM_GUI_TABLE_CMD, 
		      ring->head_addr | MACH64_CIRCULAR_BUF_SIZE_16KB );
	
	dev_priv->ring_running = 1;
}

/* Resume DMA from the current ring head.  Reprograms the descriptor
 * table pointer, then either dispatches via pseudo-DMA (MMIO mode) or
 * enables GUI bus mastering and kicks off the transfer.  In synchronous
 * DMA mode it also waits for completion (resetting the engine on
 * failure) and releases finished buffers.
 */
static __inline__ void mach64_ring_resume( drm_mach64_private_t *dev_priv, 
					   drm_mach64_descriptor_ring_t *ring )
{
	DRM_DEBUG( "%s: head_addr: 0x%08x head: %d tail: %d space: %d\n",
		   __FUNCTION__, 
		   ring->head_addr, ring->head, ring->tail, ring->space );

	/* reset descriptor table ring head */
	MACH64_WRITE( MACH64_BM_GUI_TABLE_CMD, 
		      ring->head_addr | MACH64_CIRCULAR_BUF_SIZE_16KB );

	if ( dev_priv->driver_mode == MACH64_MODE_MMIO ) {
		mach64_do_dispatch_pseudo_dma( dev_priv );
	} else {
		/* enable GUI bus mastering, and sync the bus master to the GUI */
		MACH64_WRITE( MACH64_SRC_CNTL, 
			      MACH64_SRC_BM_ENABLE | MACH64_SRC_BM_SYNC |
			      MACH64_SRC_BM_OP_SYSTEM_TO_REG );

		/* kick off the transfer */
		MACH64_WRITE( MACH64_DST_HEIGHT_WIDTH, 0 );
		if ( dev_priv->driver_mode == MACH64_MODE_DMA_SYNC ) {
			if ( (mach64_do_wait_for_idle( dev_priv )) < 0 ) {
				DRM_ERROR( "%s: idle failed, resetting engine\n", 
					   __FUNCTION__);
				mach64_dump_engine_info( dev_priv );
				mach64_do_engine_reset( dev_priv );
				return;
			}
			mach64_do_release_used_buffers( dev_priv );
		}
	}
}

/* Sample the hardware ring head and resume DMA if the engine has gone
 * idle while entries are still queued.  Starts the ring first if it is
 * not yet running.  Resets the engine if the head address read back
 * from hardware falls outside the ring.
 */
static __inline__ void mach64_ring_tick( drm_mach64_private_t *dev_priv, 
					 drm_mach64_descriptor_ring_t *ring )
{
	DRM_DEBUG( "%s: head_addr: 0x%08x head: %d tail: %d space: %d\n",
		   __FUNCTION__, 
		   ring->head_addr, ring->head, ring->tail, ring->space );

	if ( !dev_priv->ring_running ) {
		mach64_ring_start( dev_priv );
		
		if ( ring->head != ring->tail ) {
			mach64_ring_resume( dev_priv, ring );
		}
	} else {
		/* GUI_ACTIVE must be read before BM_GUI_TABLE to 
		 * correctly determine the ring head 
		 */
		int gui_active = MACH64_READ(MACH64_GUI_STAT) & MACH64_GUI_ACTIVE;
		
		ring->head_addr = MACH64_READ(MACH64_BM_GUI_TABLE) & 0xfffffff0;
		
		if ( gui_active ) {
			/* If not idle, BM_GUI_TABLE points one descriptor 
			 * past the current head 
			 */
			if ( ring->head_addr == ring->start_addr ) {
				ring->head_addr += ring->size;
			}
			/* step back one descriptor (4 dwords) */
			ring->head_addr -= 4 * sizeof(u32);
		}

		if( ring->head_addr < ring->start_addr || 
		    ring->head_addr >= ring->start_addr + ring->size ) {
			DRM_ERROR( "bad ring head address: 0x%08x\n", ring->head_addr );
			mach64_dump_ring_info( dev_priv );
			mach64_do_engine_reset( dev_priv );
			return;
		}
	
		/* head_addr is a byte address; convert to a dword index */
		ring->head = (ring->head_addr - ring->start_addr) / sizeof(u32);
		
		if ( !gui_active && ring->head != ring->tail ) {
			mach64_ring_resume( dev_priv, ring );
		}
	}
}

/* Stop ring DMA: clear SRC_CNTL to disable GUI bus mastering, then set
 * the bus-master disable bit while keeping block-1 register access
 * enabled.  Marks the ring as stopped.
 */
static __inline__ void mach64_ring_stop( drm_mach64_private_t *dev_priv )
{
	DRM_DEBUG( "%s: head_addr: 0x%08x head: %d tail: %d space: %d\n",
		   __FUNCTION__, 
		   dev_priv->ring.head_addr, dev_priv->ring.head, 
		   dev_priv->ring.tail, dev_priv->ring.space );

	/* clear SRC_CNTL to disable busmastering */
	mach64_do_wait_for_fifo( dev_priv, 1 );
	MACH64_WRITE( MACH64_SRC_CNTL, 0 );

	/* disable busmastering but keep the block 1 registers enabled */ 
	mach64_do_wait_for_idle( dev_priv );
	MACH64_WRITE( MACH64_BUS_CNTL, MACH64_READ( MACH64_BUS_CNTL ) 
		      | MACH64_BUS_MASTER_DIS | MACH64_BUS_EXT_REG_EN );
		
	dev_priv->ring_running = 0;
}

/* Refresh the cached ring head from hardware (via mach64_ring_tick)
 * and recompute the free space, in bytes, between head and tail.  A
 * non-positive head-tail distance wraps around by the ring size.
 */
static __inline__ void
mach64_update_ring_snapshot( drm_mach64_private_t *dev_priv )
{
	drm_mach64_descriptor_ring_t *ring = &dev_priv->ring;

	DRM_DEBUG( "%s\n", __FUNCTION__ );
	
	mach64_ring_tick( dev_priv, ring );

	/* head/tail are dword indices, so space is in bytes */
	ring->space = (ring->head - ring->tail) * sizeof(u32);
	if ( ring->space <= 0 ) {
		ring->space += ring->size;
	}
}

/* ================================================================
 * DMA descriptor ring macros
 */

/* Local variables required by BEGIN_RING/OUT_RING/ADVANCE_RING. */
#define RING_LOCALS									\
	int _ring_tail, _ring_write; unsigned int _ring_mask; volatile u32 *_ring

/* Current dword write offset into the ring (valid between BEGIN_RING
 * and ADVANCE_RING).
 */
#define RING_WRITE_OFS  _ring_write

/* Reserve space for n ring dwords, waiting for the ring to drain if
 * there is not enough room.  NOTE: returns from the *enclosing*
 * function (which must return int) if the wait fails, after resetting
 * the engine.
 */
#define BEGIN_RING( n ) 								\
do {											\
	if ( MACH64_VERBOSE ) {								\
		DRM_INFO( "BEGIN_RING( %d ) in %s\n",					\
			   (n), __FUNCTION__ );						\
	}										\
	if ( dev_priv->ring.space <= (n) * sizeof(u32) ) {				\
		int ret;								\
		if ((ret=mach64_wait_ring( dev_priv, (n) * sizeof(u32))) < 0 ) {	\
			DRM_ERROR( "wait_ring failed, resetting engine\n");		\
			mach64_dump_engine_info( dev_priv );				\
			mach64_do_engine_reset( dev_priv );				\
			return ret;							\
		}									\
	}										\
	dev_priv->ring.space -= (n) * sizeof(u32);					\
	_ring = (u32 *) dev_priv->ring.start;						\
	_ring_tail = _ring_write = dev_priv->ring.tail;					\
	_ring_mask = dev_priv->ring.tail_mask;						\
} while (0)

/* Emit one dword into the ring (converted to little-endian), wrapping
 * the write offset via the ring mask.
 */
#define OUT_RING( x )						\
do {								\
	if ( MACH64_VERBOSE ) {					\
		DRM_INFO( "   OUT_RING( 0x%08x ) at 0x%x\n",	\
			   (unsigned int)(x), _ring_write );	\
	}							\
	_ring[_ring_write++] = cpu_to_le32( x );		\
	_ring_write &= _ring_mask;				\
} while (0)

/* Publish the dwords queued since BEGIN_RING: clear the EOL flag on
 * the previously-last descriptor so the engine chains into the new
 * entries, advance the ring tail, and tick the ring.
 */
#define ADVANCE_RING() 							\
do {									\
	if ( MACH64_VERBOSE ) {						\
		DRM_INFO( "ADVANCE_RING() wr=0x%06x tail=0x%06x\n",	\
			  _ring_write, _ring_tail );			\
	}								\
	DRM_MEMORYBARRIER();						\
	mach64_clear_dma_eol( &_ring[(_ring_tail - 2) & _ring_mask] );	\
	DRM_MEMORYBARRIER();						\
	dev_priv->ring.tail = _ring_write;				\
	mach64_ring_tick( dev_priv, &(dev_priv)->ring );		\
} while (0)


/* ================================================================
 * DMA macros
 */

/* Local variables required by the DMA buffer macros below. */
#define DMALOCALS				\
	drm_mach64_freelist_t *_entry = NULL;	\
	drm_buf_t *_buf = NULL; 		\
	u32 *_buf_wptr; int _outcount

/* CPU pointer for writing into a DMA buffer: PCI buffers are addressed
 * directly; otherwise the buffer lives at an offset inside the mapped
 * dev_buffers region.
 */
#define GETBUFPTR( __buf )						\
((dev_priv->is_pci) ? 							\
	((u32 *)(__buf)->address) : 					\
	((u32 *)((char *)dev_priv->dev_buffers->handle + (__buf)->offset)))

/* Bus address of a DMA buffer, as seen by the card. */
#define GETBUFADDR( __buf ) ((u32)(__buf)->bus_address)

/* Ring (dword) offset of the last descriptor queued for this entry. */
#define GETRINGOFFSET() (_entry->ring_ofs)

/* Locate the freelist entry on the pending list that owns buf, walking
 * from newest to oldest.  On success *entry points at the match and 0
 * is returned; EFAULT is returned if buf is not on the list.
 */
static __inline__ int mach64_find_pending_buf_entry ( drm_mach64_private_t *dev_priv, 
						      drm_mach64_freelist_t **entry, 
						      drm_buf_t *buf )
{
	struct list_head *ptr;
#if MACH64_EXTRA_CHECKING
	if (list_empty(&dev_priv->pending)) {
		DRM_ERROR("Empty pending list in %s\n", __FUNCTION__);
		return DRM_ERR(EINVAL);
	}
#endif
	/* Traverse backwards; the list head acts as the end sentinel. */
	for ( ptr = dev_priv->pending.prev ; ; ptr = ptr->prev ) {
		*entry = list_entry(ptr, drm_mach64_freelist_t, list);
		if ( (*entry)->buf == buf )
			return 0;
		if ( ptr == &dev_priv->pending )
			return DRM_ERR(EFAULT);
	}
}

/* Point the DMA macros at an already-obtained buffer _p and reset the
 * output dword count.
 */
#define DMASETPTR( _p ) 			\
do {						\
	_buf = (_p);				\
	_outcount = 0;				\
	_buf_wptr = GETBUFPTR( _buf );		\
} while(0)

/* FIXME: use a private set of smaller buffers for state emits, clears, and swaps? */
/* Claim a free DMA buffer for the given filp and prepare it for
 * writing.  NOTE: returns EAGAIN/EFAULT from the *enclosing* function
 * if no buffer is available or a pending buffer is handed out.
 */
#define DMAGETPTR( filp, dev_priv, n )					\
do {									\
	if ( MACH64_VERBOSE ) {						\
		DRM_INFO( "DMAGETPTR( %d ) in %s\n",			\
			  n, __FUNCTION__ );				\
	}								\
	_buf = mach64_freelist_get( dev_priv );				\
	if (_buf == NULL) {						\
		DRM_ERROR("%s: couldn't get buffer in DMAGETPTR\n",	\
			   __FUNCTION__ );				\
		return DRM_ERR(EAGAIN);					\
	}								\
	if (_buf->pending) {						\
	        DRM_ERROR("%s: pending buf in DMAGETPTR\n",		\
			   __FUNCTION__ );				\
		return DRM_ERR(EFAULT);					\
	}								\
	_buf->filp = filp;						\
	_outcount = 0;							\
									\
        _buf_wptr = GETBUFPTR( _buf );					\
} while (0)

/* Emit one register write into the DMA buffer as a (DMA register
 * index, value) dword pair — 8 bytes per emitted write.
 */
#define DMAOUTREG( reg, val )					\
do {								\
	if ( MACH64_VERBOSE ) {					\
		DRM_INFO( "   DMAOUTREG( 0x%x = 0x%08x )\n",	\
			  reg, val );				\
	}							\
	_buf_wptr[_outcount++] = cpu_to_le32(DMAREG(reg));	\
	_buf_wptr[_outcount++] = cpu_to_le32((val));		\
	_buf->used += 8;					\
} while (0)

/* Queue a filled buffer to the descriptor ring.  A buffer that is
 * already pending is looked up on the pending list; otherwise a
 * placeholder freelist entry is claimed and moved to the pending list.
 * NOTE: returns an error from the *enclosing* function on failure.
 */
#define DMAADVANCE( dev_priv, _discard )						     \
do {											     \
	struct list_head *ptr;								     \
	RING_LOCALS;									     \
											     \
	if ( MACH64_VERBOSE ) {								     \
		DRM_INFO( "DMAADVANCE() in %s\n", __FUNCTION__ );			     \
	}										     \
											     \
	if (_buf->used <= 0) {								     \
		DRM_ERROR( "DMAADVANCE() in %s: sending empty buf %d\n",		     \
				   __FUNCTION__, _buf->idx );				     \
		return DRM_ERR(EFAULT);							     \
	}										     \
	if (_buf->pending) {								     \
                /* This is a reused buffer, so we need to find it in the pending list */     \
		int ret;								     \
		if ( (ret=mach64_find_pending_buf_entry(dev_priv, &_entry, _buf)) ) {	     \
			DRM_ERROR( "DMAADVANCE() in %s: couldn't find pending buf %d\n",     \
				   __FUNCTION__, _buf->idx );				     \
			return ret;							     \
		}									     \
		if (_entry->discard) {							     \
			DRM_ERROR( "DMAADVANCE() in %s: sending discarded pending buf %d\n", \
				   __FUNCTION__, _buf->idx );				     \
			return DRM_ERR(EFAULT);						     \
		}									     \
     	} else {									     \
		if (list_empty(&dev_priv->placeholders)) {				     \
			DRM_ERROR( "DMAADVANCE() in %s: empty placeholder list\n",	     \
			   	__FUNCTION__ );						     \
			return DRM_ERR(EFAULT);						     \
		}									     \
		ptr = dev_priv->placeholders.next;					     \
		list_del(ptr);								     \
		_entry = list_entry(ptr, drm_mach64_freelist_t, list);			     \
		_buf->pending = 1;							     \
		_entry->buf = _buf;							     \
		list_add_tail(ptr, &dev_priv->pending);					     \
	}										     \
	_entry->discard = (_discard);							     \
	ADD_BUF_TO_RING( dev_priv );							     \
} while (0)

/* Mark the current buffer's pending entry for discard (to be freed
 * once the hardware is done with it), looking the entry up first if it
 * is not already cached in _entry.  NOTE: returns an error from the
 * *enclosing* function if the lookup fails.
 */
#define DMADISCARDBUF()									\
do {											\
	if (_entry == NULL) {								\
		int ret;								\
		if ( (ret=mach64_find_pending_buf_entry(dev_priv, &_entry, _buf)) ) {	\
			DRM_ERROR( "%s: couldn't find pending buf %d\n",		\
				   __FUNCTION__, _buf->idx );				\
			return ret;							\
		}									\
	}										\
	_entry->discard = 1;								\
} while(0)

/* Split the buffer into MACH64_DMA_CHUNKSIZE-sized pieces and emit one
 * 4-dword gui-master descriptor per piece.  The final descriptor gets
 * the EOL flag and its ring offset is recorded in the freelist entry
 * so freelist_get can detect completion.
 */
#define ADD_BUF_TO_RING( dev_priv )							\
do {											\
	int bytes, pages, remainder;							\
	u32 address, page;								\
	int i;										\
											\
	bytes = _buf->used;								\
	address = GETBUFADDR( _buf );							\
											\
	pages = (bytes + MACH64_DMA_CHUNKSIZE - 1) / MACH64_DMA_CHUNKSIZE;		\
											\
	BEGIN_RING( pages * 4 );							\
											\
	for ( i = 0 ; i < pages-1 ; i++ ) {						\
		page = address + i * MACH64_DMA_CHUNKSIZE;				\
		OUT_RING( MACH64_APERTURE_OFFSET + MACH64_BM_ADDR );			\
		OUT_RING( page );							\
		OUT_RING( MACH64_DMA_CHUNKSIZE | MACH64_DMA_HOLD_OFFSET );		\
		OUT_RING( 0 );								\
	}										\
											\
	/* generate the final descriptor for any remaining commands in this buffer */	\
	page = address + i * MACH64_DMA_CHUNKSIZE;					\
	remainder = bytes - i * MACH64_DMA_CHUNKSIZE;					\
											\
	/* Save dword offset of last descriptor for this buffer.			\
	 * This is needed to check for completion of the buffer in freelist_get		\
	 */										\
	_entry->ring_ofs = RING_WRITE_OFS;						\
											\
	OUT_RING( MACH64_APERTURE_OFFSET + MACH64_BM_ADDR );				\
	OUT_RING( page );								\
	OUT_RING( remainder | MACH64_DMA_HOLD_OFFSET | MACH64_DMA_EOL );		\
	OUT_RING( 0 );									\
											\
	ADVANCE_RING();									\
} while(0)

/* Queue a filled hostdata-blit buffer to the ring.  Unlike DMAADVANCE,
 * this always claims a fresh placeholder entry and always marks it for
 * discard.  NOTE: returns an error from the *enclosing* function on
 * failure.
 */
#define DMAADVANCEHOSTDATA( dev_priv )							\
do {											\
	struct list_head *ptr;								\
	RING_LOCALS;									\
											\
	if ( MACH64_VERBOSE ) {								\
		DRM_INFO( "DMAADVANCEHOSTDATA() in %s\n", __FUNCTION__ );		\
	}										\
											\
	if (_buf->used <= 0) {								\
		DRM_ERROR( "DMAADVANCEHOSTDATA() in %s: sending empty buf %d\n",	\
				   __FUNCTION__, _buf->idx );				\
		return DRM_ERR(EFAULT);							\
	}										\
	if (list_empty(&dev_priv->placeholders)) {					\
		DRM_ERROR( "%s: empty placeholder list in DMAADVANCEHOSTDATA()\n",	\
			   __FUNCTION__ );						\
		return DRM_ERR(EFAULT);							\
	}										\
											\
        ptr = dev_priv->placeholders.next;						\
	list_del(ptr);									\
	_entry = list_entry(ptr, drm_mach64_freelist_t, list);				\
	_entry->buf = _buf;								\
	_entry->buf->pending = 1;							\
	list_add_tail(ptr, &dev_priv->pending);						\
	_entry->discard = 1;								\
	ADD_HOSTDATA_BUF_TO_RING( dev_priv );						\
} while (0)

/* Like ADD_BUF_TO_RING, but for hostdata blits: the first descriptor
 * sends the leading GUI commands (up to MACH64_HOSTDATA_BLIT_OFFSET)
 * to BM_ADDR, and the rest of the buffer is streamed in chunks to
 * BM_HOSTDATA.  The final descriptor gets the EOL flag and its ring
 * offset is recorded in the freelist entry.
 */
#define ADD_HOSTDATA_BUF_TO_RING( dev_priv )						 \
do {											 \
	int bytes, pages, remainder;							 \
	u32 address, page;								 \
	int i;										 \
											 \
	bytes = _buf->used - MACH64_HOSTDATA_BLIT_OFFSET;				 \
	pages = (bytes + MACH64_DMA_CHUNKSIZE - 1) / MACH64_DMA_CHUNKSIZE;		 \
	address = GETBUFADDR( _buf );							 \
											 \
	BEGIN_RING( 4 + pages * 4 );							 \
											 \
	OUT_RING( MACH64_APERTURE_OFFSET + MACH64_BM_ADDR );				 \
	OUT_RING( address );								 \
	OUT_RING( MACH64_HOSTDATA_BLIT_OFFSET | MACH64_DMA_HOLD_OFFSET );		 \
	OUT_RING( 0 );									 \
											 \
	address += MACH64_HOSTDATA_BLIT_OFFSET;						 \
											 \
	for ( i = 0 ; i < pages-1 ; i++ ) {						 \
		page = address + i * MACH64_DMA_CHUNKSIZE;				 \
		OUT_RING( MACH64_APERTURE_OFFSET + MACH64_BM_HOSTDATA );		 \
		OUT_RING( page );							 \
		OUT_RING( MACH64_DMA_CHUNKSIZE | MACH64_DMA_HOLD_OFFSET );		 \
		OUT_RING( 0 );								 \
	}										 \
											 \
	/* generate the final descriptor for any remaining commands in this buffer */	 \
	page = address + i * MACH64_DMA_CHUNKSIZE;					 \
	remainder = bytes - i * MACH64_DMA_CHUNKSIZE;					 \
											 \
	/* Save dword offset of last descriptor for this buffer.			 \
	 * This is needed to check for completion of the buffer in freelist_get		 \
	 */										 \
	_entry->ring_ofs = RING_WRITE_OFS;						 \
											 \
	OUT_RING( MACH64_APERTURE_OFFSET + MACH64_BM_HOSTDATA );			 \
	OUT_RING( page );								 \
	OUT_RING( remainder | MACH64_DMA_HOLD_OFFSET | MACH64_DMA_EOL );		 \
	OUT_RING( 0 );									 \
											 \
	ADVANCE_RING();									 \
} while(0)

#endif /* __MACH64_DRV_H__ */