/* drm_bufs.h -- Generic buffer template -*- linux-c -*-
 * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith
 *    Gareth Hughes
 */

#include "drmP.h"

#ifndef __HAVE_PCI_DMA
#define __HAVE_PCI_DMA		0
#endif

#ifndef __HAVE_SG
#define __HAVE_SG		0
#endif

#ifndef DRIVER_BUF_PRIV_T
#define DRIVER_BUF_PRIV_T	u32
#endif

#ifndef DRIVER_AGP_BUFFERS_MAP
#if __HAVE_AGP && __HAVE_DMA
#error "You must define DRIVER_AGP_BUFFERS_MAP()"
#else
#define DRIVER_AGP_BUFFERS_MAP( dev )	NULL
#endif
#endif

/*
 * Compute order.  Can be made faster.
 */
int DRM(order)( unsigned long size )
{
	int order;
	unsigned long tmp;

	for ( order = 0, tmp = size ; tmp >>= 1 ; ++order );

	if ( size & ~(1 << order) )
		++order;

	return order;
}
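/*
 * For illustration: DRM(order) returns the smallest n with (1 << n) >= size,
 * i.e. ceil(log2(size)).  Sample values, assuming PAGE_SHIFT == 12:
 *
 *	DRM(order)( 1 )    == 0
 *	DRM(order)( 4096 ) == 12	(== PAGE_SHIFT)
 *	DRM(order)( 4097 ) == 13	(rounds up to the next power of two)
 *
 * The addbufs ioctls below use this to bucket buffers by power-of-two size,
 * so a request for 4097-byte buffers is served from the 8192-byte bucket.
 */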
int DRM(addmap)( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;
	drm_map_t *map;
	drm_map_list_entry_t *list;

	if ( !(dev->flags & (FREAD|FWRITE)) )
		return DRM_ERR(EACCES);	/* Require read/write */

	map = (drm_map_t *)DRM(alloc)( sizeof(*map), DRM_MEM_MAPS );
	if ( !map )
		return DRM_ERR(ENOMEM);

	*map = *(drm_map_t *)data;

	/* Only allow shared memory to be removable since we only keep
	 * enough bookkeeping information about shared memory to allow
	 * for removal when processes fork.
	 */
	if ( (map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM ) {
		DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
		return DRM_ERR(EINVAL);
	}
	DRM_DEBUG( "offset = 0x%08lx, size = 0x%08lx, type = %d\n",
		   map->offset, map->size, map->type );
	if ( (map->offset & PAGE_MASK) || (map->size & PAGE_MASK) ) {
		DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
		return DRM_ERR(EINVAL);
	}
	map->mtrr = -1;
	map->handle = 0;

	switch ( map->type ) {
	case _DRM_REGISTERS:
	case _DRM_FRAME_BUFFER:
#if !defined(__sparc__) && !defined(__alpha__)
		if ( map->offset + map->size < map->offset ) {
			DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
			return DRM_ERR(EINVAL);
		}
#endif
#ifdef __alpha__
		map->offset += dev->hose->mem_space->start;
#endif
#if __REALLY_HAVE_MTRR
		if ( map->type == _DRM_FRAME_BUFFER ||
		     (map->flags & _DRM_WRITE_COMBINING) ) {
#ifdef __FreeBSD__
			int retcode = 0, act;
			struct mem_range_desc mrdesc;
			mrdesc.mr_base = map->offset;
			mrdesc.mr_len = map->size;
			mrdesc.mr_flags = MDF_WRITECOMBINE;
			act = MEMRANGE_SET_UPDATE;
			bcopy(DRIVER_NAME, &mrdesc.mr_owner,
			      strlen(DRIVER_NAME));
			retcode = mem_range_attr_set(&mrdesc, &act);
			map->mtrr = 1;
#elif defined __NetBSD__
			struct mtrr mtrrmap;
			int one = 1;
			mtrrmap.base = map->offset;
			mtrrmap.len = map->size;
			mtrrmap.type = MTRR_TYPE_WC;
			mtrrmap.flags = MTRR_PRIVATE | MTRR_VALID;
			mtrrmap.owner = p->p_pid;	/* USER? KERNEL? XXX */
			map->mtrr = mtrr_get( &mtrrmap, &one, p,
					      MTRR_GETSET_KERNEL );
#endif
		}
#endif /* __REALLY_HAVE_MTRR */
		map->handle = DRM(ioremap)( map->offset, map->size );
		break;

	case _DRM_SHM:
		map->handle = (void *)DRM(alloc)( map->size, DRM_MEM_SAREA );
		DRM_DEBUG( "%ld %d %p\n",
			   map->size, DRM(order)( map->size ), map->handle );
		if ( !map->handle ) {
			DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
			return DRM_ERR(ENOMEM);
		}
		map->offset = (unsigned long)map->handle;
		if ( map->flags & _DRM_CONTAINS_LOCK ) {
			dev->lock.hw_lock = map->handle; /* Pointer to lock */
		}
		break;

#if __REALLY_HAVE_AGP
	case _DRM_AGP:
#ifdef __alpha__
		map->offset += dev->hose->mem_space->start;
#endif
		map->offset += dev->agp->base;
		map->mtrr = dev->agp->agp_mtrr;	/* for getmap */
		break;
#endif

	case _DRM_SCATTER_GATHER:
		if ( !dev->sg ) {
			DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
			return DRM_ERR(EINVAL);
		}
		map->offset = map->offset + dev->sg->handle;
		break;

	default:
		DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
		return DRM_ERR(EINVAL);
	}

	list = DRM(alloc)( sizeof(*list), DRM_MEM_MAPS );
	if ( !list ) {
		DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
		return DRM_ERR(ENOMEM);
	}
	memset( list, 0, sizeof(*list) );
	list->map = map;

	DRM_LOCK;
	TAILQ_INSERT_TAIL(dev->maplist, list, link);
	DRM_UNLOCK;

	*(drm_map_t *)data = *map;

	if ( map->type != _DRM_SHM ) {
		((drm_map_t *)data)->handle = (void *)map->offset;
	}

	return 0;
}
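/*
 * Usage sketch for DRM(addmap)/DRM(rmmap) (hypothetical client code;
 * assumes libdrm's drmAddMap()/drmRmMap() wrappers, which issue the
 * ioctls these handlers service):
 *
 *	drm_handle_t handle;
 *	if (drmAddMap(fd, 0, 64 * 1024, DRM_SHM,
 *	              DRM_REMOVABLE | DRM_CONTAINS_LOCK, &handle) == 0) {
 *		...
 *		drmRmMap(fd, handle);	// legal: _DRM_SHM maps are removable
 *	}
 */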
/* Remove a map private from the list and deallocate resources if the
 * mapping isn't in use.
 */
int DRM(rmmap)( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;
	drm_map_list_entry_t *list;
	drm_map_t *map;
	drm_map_t request;
	int found_maps = 0;

	DRM_COPY_FROM_USER_IOCTL( request, (drm_map_t *)data,
				  sizeof(request) );

	DRM_LOCK;
	TAILQ_FOREACH(list, dev->maplist, link) {
		map = list->map;
		if ( map->handle == request.handle &&
		     map->flags & _DRM_REMOVABLE )
			break;
	}

	/* List has wrapped around to the head pointer, or it's empty and we
	 * didn't find anything.
	 */
	if ( list == NULL ) {
		DRM_UNLOCK;
		return DRM_ERR(EINVAL);
	}
	TAILQ_REMOVE(dev->maplist, list, link);
	DRM(free)(list, sizeof(*list), DRM_MEM_MAPS);

	if ( !found_maps ) {
		switch ( map->type ) {
		case _DRM_REGISTERS:
		case _DRM_FRAME_BUFFER:
#if __REALLY_HAVE_MTRR
			if ( map->mtrr >= 0 ) {
				int retcode;
#ifdef __FreeBSD__
				int act;
				struct mem_range_desc mrdesc;
				mrdesc.mr_base = map->offset;
				mrdesc.mr_len = map->size;
				mrdesc.mr_flags = MDF_WRITECOMBINE;
				act = MEMRANGE_SET_REMOVE;
				bcopy(DRIVER_NAME, &mrdesc.mr_owner,
				      strlen(DRIVER_NAME));
				retcode = mem_range_attr_set(&mrdesc, &act);
#elif defined __NetBSD__
				struct mtrr mtrrmap;
				int one = 1;
				mtrrmap.base = map->offset;
				mtrrmap.len = map->size;
				mtrrmap.type = 0;
				mtrrmap.flags = 0;
				mtrrmap.owner = p->p_pid;
				retcode = mtrr_set( &mtrrmap, &one, p,
						    MTRR_GETSET_KERNEL );
				DRM_DEBUG("mtrr_del = %d\n", retcode);
#endif
			}
#endif
			DRM(ioremapfree)(map->handle, map->size);
			break;
		case _DRM_SHM:
			DRM(free)( map->handle, map->size, DRM_MEM_SAREA );
			break;
		case _DRM_AGP:
		case _DRM_SCATTER_GATHER:
			break;
		}
		DRM(free)(map, sizeof(*map), DRM_MEM_MAPS);
	}
	DRM_UNLOCK;

	return 0;
}

#if __HAVE_DMA

static void DRM(cleanup_buf_error)(drm_buf_entry_t *entry)
{
	int i;

	if (entry->seg_count) {
		for (i = 0; i < entry->seg_count; i++) {
			DRM(free)((void *)entry->seglist[i],
				  entry->buf_size, DRM_MEM_DMA);
		}
		DRM(free)(entry->seglist,
			  entry->seg_count * sizeof(*entry->seglist),
			  DRM_MEM_SEGS);

		entry->seg_count = 0;
	}

	if (entry->buf_count) {
		for (i = 0; i < entry->buf_count; i++) {
			if (entry->buflist[i].dev_private) {
				DRM(free)(entry->buflist[i].dev_private,
					  entry->buflist[i].dev_priv_size,
					  DRM_MEM_BUFS);
			}
		}
		DRM(free)(entry->buflist,
			  entry->buf_count * sizeof(*entry->buflist),
			  DRM_MEM_BUFS);
#if __HAVE_DMA_FREELIST
		DRM(freelist_destroy)(&entry->freelist);
#endif

		entry->buf_count = 0;
	}
}
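/*
 * Worked example for the size/order arithmetic shared by the addbufs
 * variants below (values assume PAGE_SIZE == 4096, PAGE_SHIFT == 12):
 *
 *	request.size = 16384, request.flags has _DRM_PAGE_ALIGN set
 *	order      = DRM(order)(16384)       == 14
 *	size       = 1 << order              == 16384
 *	alignment  = round_page(size)        == 16384
 *	page_order = order - PAGE_SHIFT      == 2
 *	total      = PAGE_SIZE << page_order == 16384 (bytes per segment)
 *
 * Each segment therefore holds total/alignment == 1 buffer here; buffers
 * smaller than a page pack several to a segment.
 */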
#if __REALLY_HAVE_AGP
int DRM(addbufs_agp)( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_desc_t request;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;

	if ( !dma )
		return DRM_ERR(EINVAL);

	DRM_COPY_FROM_USER_IOCTL( request, (drm_buf_desc_t *)data,
				  sizeof(request) );

	count = request.count;
	order = DRM(order)( request.size );
	size = 1 << order;

	alignment = (request.flags & _DRM_PAGE_ALIGN)
		? round_page(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = dev->agp->base + request.agp_start;

	DRM_DEBUG( "count: %d\n", count );
	DRM_DEBUG( "order: %d\n", order );
	DRM_DEBUG( "size: %d\n", size );
	DRM_DEBUG( "agp_offset: 0x%lx\n", agp_offset );
	DRM_DEBUG( "alignment: %d\n", alignment );
	DRM_DEBUG( "page_order: %d\n", page_order );
	DRM_DEBUG( "total: %d\n", total );

	if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER )
		return DRM_ERR(EINVAL);
	if ( dev->queue_count )
		return DRM_ERR(EBUSY);	/* Not while in use */

	DRM_SPINLOCK( &dev->count_lock );
	if ( dev->buf_use ) {
		DRM_SPINUNLOCK( &dev->count_lock );
		return DRM_ERR(EBUSY);
	}
	atomic_inc( &dev->buf_alloc );
	DRM_SPINUNLOCK( &dev->count_lock );

	DRM_LOCK;
	entry = &dma->bufs[order];
	if ( entry->buf_count ) {
		DRM_UNLOCK;
		atomic_dec( &dev->buf_alloc );
		return DRM_ERR(ENOMEM); /* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		DRM_UNLOCK;
		atomic_dec( &dev->buf_alloc );
		return DRM_ERR(EINVAL);
	}

	entry->buflist = DRM(alloc)( count * sizeof(*entry->buflist),
				     DRM_MEM_BUFS );
	if ( !entry->buflist ) {
		DRM_UNLOCK;
		atomic_dec( &dev->buf_alloc );
		return DRM_ERR(ENOMEM);
	}
	memset( entry->buflist, 0, count * sizeof(*entry->buflist) );

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while ( entry->buf_count < count ) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		buf->dma_wait = 0;
		buf->pid = 0;

		buf->dev_priv_size = sizeof(DRIVER_BUF_PRIV_T);
		buf->dev_private = DRM(alloc)( sizeof(DRIVER_BUF_PRIV_T),
					       DRM_MEM_BUFS );
		if ( !buf->dev_private ) {
			/* Set count correctly so we free the proper amount,
			 * then bail out as in the other addbufs variants.
			 */
			entry->buf_count = count;
			DRM(cleanup_buf_error)(entry);
			DRM_UNLOCK;
			atomic_dec( &dev->buf_alloc );
			return DRM_ERR(ENOMEM);
		}
		memset( buf->dev_private, 0, buf->dev_priv_size );

#if __HAVE_DMA_HISTOGRAM
		buf->time_queued = 0;
		buf->time_dispatched = 0;
		buf->time_completed = 0;
		buf->time_freed = 0;
#endif

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG( "byte_count: %d\n", byte_count );

	temp_buflist = DRM(realloc)( dma->buflist,
				     dma->buf_count * sizeof(*dma->buflist),
				     (dma->buf_count + entry->buf_count)
				     * sizeof(*dma->buflist),
				     DRM_MEM_BUFS );
	if ( !temp_buflist ) {
		/* Free the entry because it isn't valid */
		DRM(cleanup_buf_error)(entry);
		DRM_UNLOCK;
		atomic_dec( &dev->buf_alloc );
		return DRM_ERR(ENOMEM);
	}
	dma->buflist = temp_buflist;

	for ( i = 0 ; i < entry->buf_count ; i++ ) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;

	DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
	DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );

#if __HAVE_DMA_FREELIST
	DRM(freelist_create)( &entry->freelist, entry->buf_count );
	for ( i = 0 ; i < entry->buf_count ; i++ ) {
		DRM(freelist_put)( dev, &entry->freelist, &entry->buflist[i] );
	}
#endif
	DRM_UNLOCK;

	request.count = entry->buf_count;
	request.size = size;

	DRM_COPY_TO_USER_IOCTL( (drm_buf_desc_t *)data, request,
				sizeof(request) );

	dma->flags = _DRM_DMA_USE_AGP;

	atomic_dec( &dev->buf_alloc );
	return 0;
}
#endif /* __REALLY_HAVE_AGP */
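/*
 * Note on the three addbufs variants (a summary of the code, not new
 * behaviour): they differ mainly in how buf->address is derived.  AGP
 * buffers live at dev->agp->base + agp_start + offset, PCI buffers are
 * kernel memory obtained with DRM(alloc), and scatter-gather buffers sit
 * at request.agp_start + offset inside the SG area (dev->sg->handle).
 */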
#if __HAVE_PCI_DMA
int DRM(addbufs_pci)( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_desc_t request;
	int count;
	int order;
	int size;
	int total;
	int page_order;
	drm_buf_entry_t *entry;
	unsigned long page;
	drm_buf_t *buf;
	int alignment;
	unsigned long offset;
	int i;
	int byte_count;
	int page_count;
	unsigned long *temp_pagelist;
	drm_buf_t **temp_buflist;

	if ( !dma )
		return DRM_ERR(EINVAL);

	DRM_COPY_FROM_USER_IOCTL( request, (drm_buf_desc_t *)data,
				  sizeof(request) );

	count = request.count;
	order = DRM(order)( request.size );
	size = 1 << order;

	DRM_DEBUG( "count=%d, size=%d (%d), order=%d, queue_count=%d\n",
		   request.count, request.size, size,
		   order, dev->queue_count );

	if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER )
		return DRM_ERR(EINVAL);
	if ( dev->queue_count )
		return DRM_ERR(EBUSY);	/* Not while in use */

	alignment = (request.flags & _DRM_PAGE_ALIGN)
		? round_page(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	DRM_SPINLOCK( &dev->count_lock );
	if ( dev->buf_use ) {
		DRM_SPINUNLOCK( &dev->count_lock );
		return DRM_ERR(EBUSY);
	}
	atomic_inc( &dev->buf_alloc );
	DRM_SPINUNLOCK( &dev->count_lock );

	DRM_LOCK;
	entry = &dma->bufs[order];
	if ( entry->buf_count ) {
		DRM_UNLOCK;
		atomic_dec( &dev->buf_alloc );
		return DRM_ERR(ENOMEM); /* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		DRM_UNLOCK;
		atomic_dec( &dev->buf_alloc );
		return DRM_ERR(EINVAL);
	}

	entry->buflist = DRM(alloc)( count * sizeof(*entry->buflist),
				     DRM_MEM_BUFS );
	if ( !entry->buflist ) {
		DRM_UNLOCK;
		atomic_dec( &dev->buf_alloc );
		return DRM_ERR(ENOMEM);
	}
	memset( entry->buflist, 0, count * sizeof(*entry->buflist) );

	entry->seglist = DRM(alloc)( count * sizeof(*entry->seglist),
				     DRM_MEM_SEGS );
	if ( !entry->seglist ) {
		DRM(free)( entry->buflist,
			   count * sizeof(*entry->buflist), DRM_MEM_BUFS );
		DRM_UNLOCK;
		atomic_dec( &dev->buf_alloc );
		return DRM_ERR(ENOMEM);
	}
	memset( entry->seglist, 0, count * sizeof(*entry->seglist) );

	temp_pagelist = DRM(realloc)( dma->pagelist,
				      dma->page_count * sizeof(*dma->pagelist),
				      (dma->page_count + (count << page_order))
				      * sizeof(*dma->pagelist),
				      DRM_MEM_PAGES );
	if ( !temp_pagelist ) {
		DRM(free)( entry->buflist,
			   count * sizeof(*entry->buflist), DRM_MEM_BUFS );
		DRM(free)( entry->seglist,
			   count * sizeof(*entry->seglist), DRM_MEM_SEGS );
		DRM_UNLOCK;
		atomic_dec( &dev->buf_alloc );
		return DRM_ERR(ENOMEM);
	}

	dma->pagelist = temp_pagelist;
	DRM_DEBUG( "pagelist: %d entries\n",
		   dma->page_count + (count << page_order) );

	entry->buf_size = size;
	entry->page_order = page_order;
	byte_count = 0;
	page_count = 0;

	while ( entry->buf_count < count ) {
		page = (unsigned long)DRM(alloc)( size, DRM_MEM_DMA );
		if ( !page )
			break;
		entry->seglist[entry->seg_count++] = page;
		for ( i = 0 ; i < (1 << page_order) ; i++ ) {
			DRM_DEBUG( "page %d @ 0x%08lx\n",
				   dma->page_count + page_count,
				   page + PAGE_SIZE * i );
			dma->pagelist[dma->page_count + page_count++] =
				page + PAGE_SIZE * i;
		}
		for ( offset = 0 ;
		      offset + size <= total && entry->buf_count < count ;
		      offset += alignment, ++entry->buf_count ) {
			buf = &entry->buflist[entry->buf_count];
			buf->idx = dma->buf_count + entry->buf_count;
			buf->total = alignment;
			buf->order = order;
			buf->used = 0;
			buf->offset = (dma->byte_count + byte_count + offset);
			buf->address = (void *)(page + offset);
			buf->next = NULL;
			buf->waiting = 0;
			buf->pending = 0;
			buf->dma_wait = 0;
			buf->pid = 0;
#if __HAVE_DMA_HISTOGRAM
			buf->time_queued = 0;
			buf->time_dispatched = 0;
			buf->time_completed = 0;
			buf->time_freed = 0;
#endif
			DRM_DEBUG( "buffer %d @ %p\n",
				   entry->buf_count, buf->address );
		}
		byte_count += PAGE_SIZE << page_order;
	}

	temp_buflist = DRM(realloc)( dma->buflist,
				     dma->buf_count * sizeof(*dma->buflist),
				     (dma->buf_count + entry->buf_count)
				     * sizeof(*dma->buflist),
				     DRM_MEM_BUFS );
	if ( !temp_buflist ) {
		/* Free the entry because it isn't valid */
		DRM(cleanup_buf_error)(entry);
		DRM_UNLOCK;
		atomic_dec( &dev->buf_alloc );
		return DRM_ERR(ENOMEM);
	}
	dma->buflist = temp_buflist;

	for ( i = 0 ; i < entry->buf_count ; i++ ) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += entry->seg_count << page_order;
	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

#if __HAVE_DMA_FREELIST
	DRM(freelist_create)( &entry->freelist, entry->buf_count );
	for ( i = 0 ; i < entry->buf_count ; i++ ) {
		DRM(freelist_put)( dev, &entry->freelist, &entry->buflist[i] );
	}
#endif
	DRM_UNLOCK;

	request.count = entry->buf_count;
	request.size = size;

	DRM_COPY_TO_USER_IOCTL( (drm_buf_desc_t *)data, request,
				sizeof(request) );

	atomic_dec( &dev->buf_alloc );
	return 0;
}
#endif /* __HAVE_PCI_DMA */
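/*
 * Worked example for the carving loop in DRM(addbufs_pci) above, assuming
 * PAGE_SIZE == 4096: for request.size == 8192, order == 13, size == 8192,
 * page_order == 1 and total == 8192.  Each DRM(alloc)'d segment spans two
 * pages; seglist records the segment once, pagelist records both 4096-byte
 * slices, and the inner loop carves exactly one 8192-byte buffer from it.
 */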
#if __REALLY_HAVE_SG
int DRM(addbufs_sg)( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_desc_t request;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;

	if ( !dma )
		return DRM_ERR(EINVAL);

	DRM_COPY_FROM_USER_IOCTL( request, (drm_buf_desc_t *)data,
				  sizeof(request) );

	count = request.count;
	order = DRM(order)( request.size );
	size = 1 << order;

	alignment = (request.flags & _DRM_PAGE_ALIGN)
		? round_page(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request.agp_start;

	DRM_DEBUG( "count: %d\n", count );
	DRM_DEBUG( "order: %d\n", order );
	DRM_DEBUG( "size: %d\n", size );
	DRM_DEBUG( "agp_offset: %ld\n", agp_offset );
	DRM_DEBUG( "alignment: %d\n", alignment );
	DRM_DEBUG( "page_order: %d\n", page_order );
	DRM_DEBUG( "total: %d\n", total );

	if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER )
		return DRM_ERR(EINVAL);
	if ( dev->queue_count )
		return DRM_ERR(EBUSY);	/* Not while in use */

	DRM_SPINLOCK( &dev->count_lock );
	if ( dev->buf_use ) {
		DRM_SPINUNLOCK( &dev->count_lock );
		return DRM_ERR(EBUSY);
	}
	atomic_inc( &dev->buf_alloc );
	DRM_SPINUNLOCK( &dev->count_lock );

	DRM_LOCK;
	entry = &dma->bufs[order];
	if ( entry->buf_count ) {
		DRM_UNLOCK;
		atomic_dec( &dev->buf_alloc );
		return DRM_ERR(ENOMEM); /* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		DRM_UNLOCK;
		atomic_dec( &dev->buf_alloc );
		return DRM_ERR(EINVAL);
	}

	entry->buflist = DRM(alloc)( count * sizeof(*entry->buflist),
				     DRM_MEM_BUFS );
	if ( !entry->buflist ) {
		DRM_UNLOCK;
		atomic_dec( &dev->buf_alloc );
		return DRM_ERR(ENOMEM);
	}
	memset( entry->buflist, 0, count * sizeof(*entry->buflist) );

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while ( entry->buf_count < count ) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset + dev->sg->handle);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		buf->dma_wait = 0;
		buf->pid = 0;

		buf->dev_priv_size = sizeof(DRIVER_BUF_PRIV_T);
		buf->dev_private = DRM(alloc)( sizeof(DRIVER_BUF_PRIV_T),
					       DRM_MEM_BUFS );
		if ( !buf->dev_private ) {
			/* Set count correctly so we free the proper amount.
			 */
			entry->buf_count = count;
			DRM(cleanup_buf_error)(entry);
			DRM_UNLOCK;
			atomic_dec( &dev->buf_alloc );
			return DRM_ERR(ENOMEM);
		}
		memset( buf->dev_private, 0, buf->dev_priv_size );

#if __HAVE_DMA_HISTOGRAM
		buf->time_queued = 0;
		buf->time_dispatched = 0;
		buf->time_completed = 0;
		buf->time_freed = 0;
#endif
		DRM_DEBUG( "buffer %d @ %p\n",
			   entry->buf_count, buf->address );

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG( "byte_count: %d\n", byte_count );

	temp_buflist = DRM(realloc)( dma->buflist,
				     dma->buf_count * sizeof(*dma->buflist),
				     (dma->buf_count + entry->buf_count)
				     * sizeof(*dma->buflist),
				     DRM_MEM_BUFS );
	if ( !temp_buflist ) {
		/* Free the entry because it isn't valid */
		DRM(cleanup_buf_error)(entry);
		DRM_UNLOCK;
		atomic_dec( &dev->buf_alloc );
		return DRM_ERR(ENOMEM);
	}
	dma->buflist = temp_buflist;

	for ( i = 0 ; i < entry->buf_count ; i++ ) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;

	DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
	DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );

#if __HAVE_DMA_FREELIST
	DRM(freelist_create)( &entry->freelist, entry->buf_count );
	for ( i = 0 ; i < entry->buf_count ; i++ ) {
		DRM(freelist_put)( dev, &entry->freelist, &entry->buflist[i] );
	}
#endif
	DRM_UNLOCK;

	request.count = entry->buf_count;
	request.size = size;

	DRM_COPY_TO_USER_IOCTL( (drm_buf_desc_t *)data, request,
				sizeof(request) );

	dma->flags = _DRM_DMA_USE_SG;

	atomic_dec( &dev->buf_alloc );
	return 0;
}
#endif /* __REALLY_HAVE_SG */

int DRM(addbufs)( DRM_IOCTL_ARGS )
{
	drm_buf_desc_t request;

	DRM_COPY_FROM_USER_IOCTL( request, (drm_buf_desc_t *)data,
				  sizeof(request) );

#if __REALLY_HAVE_AGP
	if ( request.flags & _DRM_AGP_BUFFER )
		return DRM(addbufs_agp)( kdev, cmd, data, flags, p );
	else
#endif
#if __REALLY_HAVE_SG
	if ( request.flags & _DRM_SG_BUFFER )
		return DRM(addbufs_sg)( kdev, cmd, data, flags, p );
	else
#endif
#if __HAVE_PCI_DMA
		return DRM(addbufs_pci)( kdev, cmd, data, flags, p );
#else
		return DRM_ERR(EINVAL);
#endif
}

int DRM(infobufs)( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_info_t request;
	int i;
	int count;

	if ( !dma )
		return DRM_ERR(EINVAL);

	DRM_SPINLOCK( &dev->count_lock );
	if ( atomic_read( &dev->buf_alloc ) ) {
		DRM_SPINUNLOCK( &dev->count_lock );
		return DRM_ERR(EBUSY);
	}
	++dev->buf_use;		/* Can't allocate more after this call */
	DRM_SPINUNLOCK( &dev->count_lock );

	DRM_COPY_FROM_USER_IOCTL( request, (drm_buf_info_t *)data,
				  sizeof(request) );

	for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
		if ( dma->bufs[i].buf_count )
			++count;
	}

	DRM_DEBUG( "count = %d\n", count );

	if ( request.count >= count ) {
		for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
			if ( dma->bufs[i].buf_count ) {
				drm_buf_desc_t *to = &request.list[count];
				drm_buf_entry_t *from = &dma->bufs[i];
				drm_freelist_t *list = &dma->bufs[i].freelist;
				if ( DRM_COPY_TO_USER( &to->count,
						       &from->buf_count,
						       sizeof(from->buf_count) ) ||
				     DRM_COPY_TO_USER( &to->size,
						       &from->buf_size,
						       sizeof(from->buf_size) ) ||
				     DRM_COPY_TO_USER( &to->low_mark,
						       &list->low_mark,
						       sizeof(list->low_mark) ) ||
				     DRM_COPY_TO_USER( &to->high_mark,
						       &list->high_mark,
						       sizeof(list->high_mark) ) )
					return DRM_ERR(EFAULT);

				DRM_DEBUG( "%d %d %d %d %d\n",
					   i,
					   dma->bufs[i].buf_count,
					   dma->bufs[i].buf_size,
					   dma->bufs[i].freelist.low_mark,
					   dma->bufs[i].freelist.high_mark );
				++count;
			}
		}
	}
	request.count = count;

	DRM_COPY_TO_USER_IOCTL( (drm_buf_info_t *)data, request,
				sizeof(request) );

	return 0;
}
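/*
 * Usage sketch for DRM(addbufs)/DRM(infobufs) (hypothetical client code;
 * assumes libdrm's drmAddBufs() and drmGetBufInfo() wrappers, which issue
 * the corresponding ioctls):
 *
 *	// Ask for 32 AGP buffers of 64KB each, starting at agp_offset 0.
 *	int granted = drmAddBufs(fd, 32, 64 * 1024, DRM_AGP_BUFFER, 0);
 *	// granted is the number actually allocated, or negative on error.
 */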
int DRM(markbufs)( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_desc_t request;
	int order;
	drm_buf_entry_t *entry;

	if ( !dma )
		return DRM_ERR(EINVAL);

	DRM_COPY_FROM_USER_IOCTL( request, (drm_buf_desc_t *)data,
				  sizeof(request) );

	DRM_DEBUG( "%d, %d, %d\n",
		   request.size, request.low_mark, request.high_mark );

	order = DRM(order)( request.size );
	if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER )
		return DRM_ERR(EINVAL);
	entry = &dma->bufs[order];

	if ( request.low_mark < 0 || request.low_mark > entry->buf_count )
		return DRM_ERR(EINVAL);
	if ( request.high_mark < 0 || request.high_mark > entry->buf_count )
		return DRM_ERR(EINVAL);

	entry->freelist.low_mark = request.low_mark;
	entry->freelist.high_mark = request.high_mark;

	return 0;
}

int DRM(freebufs)( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_free_t request;
	int i;
	int idx;
	drm_buf_t *buf;

	if ( !dma )
		return DRM_ERR(EINVAL);

	DRM_COPY_FROM_USER_IOCTL( request, (drm_buf_free_t *)data,
				  sizeof(request) );

	DRM_DEBUG( "%d\n", request.count );

	for ( i = 0 ; i < request.count ; i++ ) {
		if ( DRM_COPY_FROM_USER( &idx,
					 &request.list[i],
					 sizeof(idx) ) )
			return DRM_ERR(EFAULT);
		if ( idx < 0 || idx >= dma->buf_count ) {
			DRM_ERROR( "Index %d (of %d max)\n",
				   idx, dma->buf_count - 1 );
			return DRM_ERR(EINVAL);
		}
		buf = dma->buflist[idx];
		if ( buf->pid != DRM_CURRENTPID ) {
			DRM_ERROR( "Process %d freeing buffer owned by %d\n",
				   DRM_CURRENTPID, buf->pid );
			return DRM_ERR(EINVAL);
		}
		DRM(free_buffer)( dev, buf );
	}

	return 0;
}
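/*
 * The low/high marks set by DRM(markbufs) above drive the generic freelist
 * heuristics: roughly, a client starts blocking when fewer than low_mark
 * buffers of that size remain free, and is woken once high_mark are free
 * again.  Hypothetical libdrm call (drmMarkBufs() takes the marks as
 * fractions of buf_count):
 *
 *	drmMarkBufs(fd, 0.25, 0.75);
 */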
int DRM(mapbufs)( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;
	drm_device_dma_t *dma = dev->dma;
	int retcode = 0;
	const int zero = 0;
	vm_offset_t virtual, address;
#ifdef __FreeBSD__
#if __FreeBSD_version >= 500000
	struct vmspace *vms = p->td_proc->p_vmspace;
#else
	struct vmspace *vms = p->p_vmspace;
#endif
#endif /* __FreeBSD__ */
#ifdef __NetBSD__
	struct vnode *vn;
#endif /* __NetBSD__ */

	drm_buf_map_t request;
	int i;

	if ( !dma )
		return DRM_ERR(EINVAL);

	DRM_SPINLOCK( &dev->count_lock );
	if ( atomic_read( &dev->buf_alloc ) ) {
		DRM_SPINUNLOCK( &dev->count_lock );
		return DRM_ERR(EBUSY);
	}
	dev->buf_use++;		/* Can't allocate more after this call */
	DRM_SPINUNLOCK( &dev->count_lock );

	DRM_COPY_FROM_USER_IOCTL( request, (drm_buf_map_t *)data,
				  sizeof(request) );

#ifdef __NetBSD__
	if ( !vfinddev(kdev, VCHR, &vn) )
		return 0;	/* FIXME: Shouldn't this be EINVAL or something? */
#endif /* __NetBSD__ */

	if ( request.count >= dma->buf_count ) {
		if ( (__HAVE_AGP && (dma->flags & _DRM_DMA_USE_AGP)) ||
		     (__HAVE_SG && (dma->flags & _DRM_DMA_USE_SG)) ) {
			drm_map_t *map = DRIVER_AGP_BUFFERS_MAP( dev );

			if ( !map ) {
				retcode = EINVAL;
				goto done;
			}

#ifdef __FreeBSD__
			virtual = round_page((vm_offset_t)vms->vm_daddr + MAXDSIZ);
			retcode = vm_mmap(&vms->vm_map,
					  &virtual,
					  round_page(map->size),
					  PROT_READ|PROT_WRITE, VM_PROT_ALL,
					  MAP_SHARED,
					  SLIST_FIRST(&kdev->si_hlist),
					  (unsigned long)map->offset );
#elif defined(__NetBSD__)
			virtual = round_page((vaddr_t)vms->vm_daddr + MAXDSIZ);
			retcode = uvm_mmap(&vms->vm_map,
					   (vaddr_t *)&virtual,
					   round_page(map->size),
					   UVM_PROT_READ | UVM_PROT_WRITE,
					   UVM_PROT_ALL, MAP_SHARED,
					   &vn->v_uobj, map->offset,
					   p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
#endif /* __NetBSD__ */
		} else {
#ifdef __FreeBSD__
			virtual = round_page((vm_offset_t)vms->vm_daddr + MAXDSIZ);
			retcode = vm_mmap(&vms->vm_map,
					  &virtual,
					  round_page(dma->byte_count),
					  PROT_READ|PROT_WRITE, VM_PROT_ALL,
					  MAP_SHARED,
					  SLIST_FIRST(&kdev->si_hlist),
					  0);
#elif defined(__NetBSD__)
			virtual = round_page((vaddr_t)vms->vm_daddr + MAXDSIZ);
			retcode = uvm_mmap(&vms->vm_map,
					   (vaddr_t *)&virtual,
					   round_page(dma->byte_count),
					   UVM_PROT_READ | UVM_PROT_WRITE,
					   UVM_PROT_ALL, MAP_SHARED,
					   &vn->v_uobj, 0,
					   p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
#endif /* __NetBSD__ */
		}
		if (retcode)
			goto done;
		request.virtual = (void *)virtual;

		for ( i = 0 ; i < dma->buf_count ; i++ ) {
			if ( DRM_COPY_TO_USER( &request.list[i].idx,
					       &dma->buflist[i]->idx,
					       sizeof(request.list[0].idx) ) ) {
				retcode = EFAULT;
				goto done;
			}
			if ( DRM_COPY_TO_USER( &request.list[i].total,
					       &dma->buflist[i]->total,
					       sizeof(request.list[0].total) ) ) {
				retcode = EFAULT;
				goto done;
			}
			if ( DRM_COPY_TO_USER( &request.list[i].used,
					       &zero,
					       sizeof(zero) ) ) {
				retcode = EFAULT;
				goto done;
			}
			address = virtual + dma->buflist[i]->offset; /* *** */
			if ( DRM_COPY_TO_USER( &request.list[i].address,
					       &address,
					       sizeof(address) ) ) {
				retcode = EFAULT;
				goto done;
			}
		}
	}
done:
	request.count = dma->buf_count;
	DRM_DEBUG( "%d buffers, retcode = %d\n", request.count, retcode );

	DRM_COPY_TO_USER_IOCTL( (drm_buf_map_t *)data, request,
				sizeof(request) );

	return DRM_ERR(retcode);
}

#endif /* __HAVE_DMA */
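/*
 * Usage sketch for DRM(mapbufs) above (hypothetical; assumes libdrm's
 * drmMapBufs()/drmUnmapBufs() wrappers):
 *
 *	drmBufMapPtr bufs = drmMapBufs(fd);	// maps all DMA buffers
 *	if (bufs) {
 *		// bufs->list[i].address is now usable in this process
 *		drmUnmapBufs(bufs);
 *	}
 */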
/* mga_dma.c -- DMA support for mga g200/g400 -*- linux-c -*-
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *    Jeff Hartmann <jhartmann@valinux.com>
 *    Keith Whitwell <keithw@valinux.com>
 *
 * Rewritten by:
 *    Gareth Hughes <gareth@valinux.com>
 */

#define __NO_VERSION__
#include "mga.h"
#include "drmP.h"
#include "mga_drv.h"

#include <linux/interrupt.h>	/* For task queue support */
#include <linux/delay.h>

#define MGA_DEFAULT_USEC_TIMEOUT	10000
#define MGA_FREELIST_DEBUG		0


/* ================================================================
 * Engine control
 */

int mga_do_wait_for_idle( drm_mga_private_t *dev_priv )
{
	u32 status = 0;
	int i;
	DRM_DEBUG( "%s\n", __FUNCTION__ );

	for ( i = 0 ; i < dev_priv->usec_timeout ; i++ ) {
		status = MGA_READ( MGA_STATUS ) & MGA_ENGINE_IDLE_MASK;
		if ( status == MGA_ENDPRDMASTS ) {
			MGA_WRITE8( MGA_CRTC_INDEX, 0 );
			return 0;
		}
		udelay( 1 );
	}

#if MGA_DMA_DEBUG
	DRM_ERROR( "failed!\n" );
	DRM_INFO( " status=0x%08x\n", status );
#endif
	return -EBUSY;
}

int mga_do_dma_idle( drm_mga_private_t *dev_priv )
{
	u32 status = 0;
	int i;
	DRM_DEBUG( "%s\n", __FUNCTION__ );

	for ( i = 0 ; i < dev_priv->usec_timeout ; i++ ) {
		status = MGA_READ( MGA_STATUS ) & MGA_DMA_IDLE_MASK;
		if ( status == MGA_ENDPRDMASTS )
			return 0;
		udelay( 1 );
	}

#if MGA_DMA_DEBUG
	DRM_ERROR( "failed! status=0x%08x\n", status );
#endif
	return -EBUSY;
}

int mga_do_dma_reset( drm_mga_private_t *dev_priv )
{
	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_mga_primary_buffer_t *primary = &dev_priv->prim;

	DRM_DEBUG( "%s\n", __FUNCTION__ );

	/* The primary DMA stream should look like new right about now.
	 */
	primary->tail = 0;
	primary->space = primary->size;
	primary->last_flush = 0;

	sarea_priv->last_wrap = 0;

	/* FIXME: Reset counters, buffer ages etc...
	 */

	/* FIXME: What else do we need to reinitialize?  WARP stuff?
	 */

	return 0;
}

int mga_do_engine_reset( drm_mga_private_t *dev_priv )
{
	DRM_DEBUG( "%s\n", __FUNCTION__ );

	/* Okay, so we've completely screwed up and locked the engine.
	 * How about we clean up after ourselves?
	 */
	MGA_WRITE( MGA_RST, MGA_SOFTRESET );
	udelay( 15 );			/* Wait at least 10 usecs */
	MGA_WRITE( MGA_RST, 0 );

	/* Initialize the registers that get clobbered by the soft
	 * reset.  Many of the core register values survive a reset,
	 * but the drawing registers are basically all gone.
	 *
	 * 3D clients should probably die after calling this.  The X
	 * server should reset the engine state to known values.
	 */
#if 0
	MGA_WRITE( MGA_PRIMPTR,
		   virt_to_bus((void *)dev_priv->prim.status_page) |
		   MGA_PRIMPTREN0 |
		   MGA_PRIMPTREN1 );
#endif

	MGA_WRITE( MGA_ICLEAR, MGA_SOFTRAPICLR );
	MGA_WRITE( MGA_IEN, MGA_SOFTRAPIEN );

	/* The primary DMA stream should look like new right about now.
	 */
	mga_do_dma_reset( dev_priv );

	/* This bad boy will never fail.
	 */
	return 0;
}


/* ================================================================
 * Primary DMA stream
 */
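/*
 * The primary DMA stream is a ring: the chip's read pointer (the
 * MGA_PRIMADDRESS register) chases the driver's write pointer
 * (primary->tail).  The space computation in the flush code below
 * follows directly:
 *
 *	head <= tail:  space = size - tail	(free region runs to the
 *						 end; wrapping is handled
 *						 separately)
 *	head >  tail:  space = head - tail	(chip still reading behind us)
 */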
void mga_do_dma_flush( drm_mga_private_t *dev_priv )
{
	drm_mga_primary_buffer_t *primary = &dev_priv->prim;
	u32 head, tail;
	DMA_LOCALS;
	DRM_DEBUG( "%s:\n", __FUNCTION__ );

	if ( primary->tail == primary->last_flush ) {
		DRM_DEBUG( " bailing out...\n" );
		return;
	}

	tail = primary->tail + dev_priv->primary->offset;

	/* We need to pad the stream between flushes, as the card
	 * actually (partially?) reads the first of these commands.
	 * See page 4-16 in the G400 manual, middle of the page or so.
	 */
	BEGIN_DMA( 1 );

	DMA_BLOCK( MGA_DMAPAD, 0x00000000,
		   MGA_DMAPAD, 0x00000000,
		   MGA_DMAPAD, 0x00000000,
		   MGA_DMAPAD, 0x00000000 );

	ADVANCE_DMA();

	primary->last_flush = primary->tail;

	head = MGA_READ( MGA_PRIMADDRESS );

	if ( head <= tail ) {
		primary->space = primary->size - primary->tail;
	} else {
		primary->space = head - tail;
	}

	DRM_DEBUG( " head = 0x%06lx\n", head - dev_priv->primary->offset );
	DRM_DEBUG( " tail = 0x%06lx\n", tail - dev_priv->primary->offset );
	DRM_DEBUG( " space = 0x%06x\n", primary->space );

	mga_flush_write_combine();
	MGA_WRITE( MGA_PRIMEND, tail | MGA_PAGPXFER );

	DRM_DEBUG( "%s: done.\n", __FUNCTION__ );
}

void mga_do_dma_wrap_start( drm_mga_private_t *dev_priv )
{
	drm_mga_primary_buffer_t *primary = &dev_priv->prim;
	u32 head, tail;
	DMA_LOCALS;
	DRM_DEBUG( "%s:\n", __FUNCTION__ );

	BEGIN_DMA_WRAP();

	DMA_BLOCK( MGA_DMAPAD, 0x00000000,
		   MGA_DMAPAD, 0x00000000,
		   MGA_DMAPAD, 0x00000000,
		   MGA_DMAPAD, 0x00000000 );

	ADVANCE_DMA();

	tail = primary->tail + dev_priv->primary->offset;

	primary->tail = 0;
	primary->last_flush = 0;
	primary->last_wrap++;

	head = MGA_READ( MGA_PRIMADDRESS );

	if ( head == dev_priv->primary->offset ) {
		primary->space = primary->size;
	} else {
		primary->space = head - dev_priv->primary->offset;
	}

	DRM_DEBUG( " head = 0x%06lx\n", head - dev_priv->primary->offset );
	DRM_DEBUG( " tail = 0x%06x\n", primary->tail );
	DRM_DEBUG( " wrap = %d\n", primary->last_wrap );
	DRM_DEBUG( " space = 0x%06x\n", primary->space );

	mga_flush_write_combine();
	MGA_WRITE( MGA_PRIMEND, tail | MGA_PAGPXFER );

	set_bit( 0, &primary->wrapped );
	DRM_DEBUG( "%s: done.\n", __FUNCTION__ );
}

void mga_do_dma_wrap_end( drm_mga_private_t *dev_priv )
{
	drm_mga_primary_buffer_t *primary = &dev_priv->prim;
	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
	u32 head = dev_priv->primary->offset;
	DRM_DEBUG( "%s:\n", __FUNCTION__ );

	sarea_priv->last_wrap++;
	DRM_DEBUG( " wrap = %d\n", sarea_priv->last_wrap );

	mga_flush_write_combine();
	MGA_WRITE( MGA_PRIMADDRESS, head | MGA_DMA_GENERAL );

	clear_bit( 0, &primary->wrapped );
	DRM_DEBUG( "%s: done.\n", __FUNCTION__ );
}


/* ================================================================
 * Freelist management
 */

#define MGA_BUFFER_USED		~0
#define MGA_BUFFER_FREE		0

#if MGA_FREELIST_DEBUG
static void mga_freelist_print( drm_device_t *dev )
{
	drm_mga_private_t *dev_priv = dev->dev_private;
	drm_mga_freelist_t *entry;

	DRM_INFO( "\n" );
	DRM_INFO( "current dispatch: last=0x%x done=0x%x\n",
		  dev_priv->sarea_priv->last_dispatch,
		  (unsigned int)(MGA_READ( MGA_PRIMADDRESS ) -
				 dev_priv->primary->offset) );
	DRM_INFO( "current freelist:\n" );

	for ( entry = dev_priv->head->next ; entry ; entry = entry->next ) {
		DRM_INFO( " %p idx=%2d age=0x%x 0x%06lx\n",
			  entry, entry->buf->idx, entry->age.head,
			  entry->age.head - dev_priv->primary->offset );
	}
	DRM_INFO( "\n" );
}
#endif
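/*
 * How buffer ages work here (a summary under the assumption that SET_AGE
 * and TEST_AGE, defined elsewhere in the driver, store and compare a
 * primary-stream position plus a wrap count): when a buffer is dispatched,
 * its age records the primary-stream offset at which its commands end.  A
 * buffer may be recycled once the chip's read pointer (MGA_PRIMADDRESS),
 * together with the current wrap count, has passed that age, which is what
 * mga_freelist_get() below tests before reusing the tail entry.
 */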
static int mga_freelist_init( drm_device_t *dev )
{
	drm_device_dma_t *dma = dev->dma;
	drm_mga_private_t *dev_priv = dev->dev_private;
	drm_buf_t *buf;
	drm_mga_buf_priv_t *buf_priv;
	drm_mga_freelist_t *entry;
	int i;
	DRM_DEBUG( "%s: count=%d\n", __FUNCTION__, dma->buf_count );

	dev_priv->head = DRM(alloc)( sizeof(drm_mga_freelist_t),
				     DRM_MEM_DRIVER );
	if ( dev_priv->head == NULL )
		return -ENOMEM;

	memset( dev_priv->head, 0, sizeof(drm_mga_freelist_t) );
	SET_AGE( &dev_priv->head->age, MGA_BUFFER_USED, 0 );

	for ( i = 0 ; i < dma->buf_count ; i++ ) {
		buf = dma->buflist[i];
		buf_priv = buf->dev_private;

		entry = DRM(alloc)( sizeof(drm_mga_freelist_t),
				    DRM_MEM_DRIVER );
		if ( entry == NULL )
			return -ENOMEM;

		memset( entry, 0, sizeof(drm_mga_freelist_t) );

		entry->next = dev_priv->head->next;
		entry->prev = dev_priv->head;
		SET_AGE( &entry->age, MGA_BUFFER_FREE, 0 );
		entry->buf = buf;

		if ( dev_priv->head->next != NULL )
			dev_priv->head->next->prev = entry;
		if ( entry->next == NULL )
			dev_priv->tail = entry;

		buf_priv->list_entry = entry;
		buf_priv->discard = 0;
		buf_priv->dispatched = 0;

		dev_priv->head->next = entry;
	}

	return 0;
}

static void mga_freelist_cleanup( drm_device_t *dev )
{
	drm_mga_private_t *dev_priv = dev->dev_private;
	drm_mga_freelist_t *entry;
	drm_mga_freelist_t *next;
	DRM_DEBUG( "%s\n", __FUNCTION__ );

	entry = dev_priv->head;
	while ( entry ) {
		next = entry->next;
		DRM(free)( entry, sizeof(drm_mga_freelist_t),
			   DRM_MEM_DRIVER );
		entry = next;
	}

	dev_priv->head = dev_priv->tail = NULL;
}

#if 0
/* FIXME: Still needed?
 */
static void mga_freelist_reset( drm_device_t *dev )
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_t *buf;
	drm_mga_buf_priv_t *buf_priv;
	int i;

	for ( i = 0 ; i < dma->buf_count ; i++ ) {
		buf = dma->buflist[i];
		buf_priv = buf->dev_private;
		SET_AGE( &buf_priv->list_entry->age, MGA_BUFFER_FREE, 0 );
	}
}
#endif

static drm_buf_t *mga_freelist_get( drm_device_t *dev )
{
	drm_mga_private_t *dev_priv = dev->dev_private;
	drm_mga_freelist_t *next;
	drm_mga_freelist_t *prev;
	drm_mga_freelist_t *tail = dev_priv->tail;
	u32 head, wrap;
	DRM_DEBUG( "%s:\n", __FUNCTION__ );

	head = MGA_READ( MGA_PRIMADDRESS );
	wrap = dev_priv->sarea_priv->last_wrap;

	DRM_DEBUG( " tail=0x%06lx %d\n",
		   tail->age.head ?
		   tail->age.head - dev_priv->primary->offset : 0,
		   tail->age.wrap );
	DRM_DEBUG( " head=0x%06lx %d\n",
		   head - dev_priv->primary->offset, wrap );

	if ( TEST_AGE( &tail->age, head, wrap ) ) {
		prev = dev_priv->tail->prev;
		next = dev_priv->tail;
		prev->next = NULL;
		next->prev = next->next = NULL;
		dev_priv->tail = prev;
		SET_AGE( &next->age, MGA_BUFFER_USED, 0 );
		return next->buf;
	}

	DRM_DEBUG( "returning NULL!\n" );
	return NULL;
}

int mga_freelist_put( drm_device_t *dev, drm_buf_t *buf )
{
	drm_mga_private_t *dev_priv = dev->dev_private;
	drm_mga_buf_priv_t *buf_priv = buf->dev_private;
	drm_mga_freelist_t *head, *entry, *prev;

	DRM_DEBUG( "%s: age=0x%06lx wrap=%d\n",
		   __FUNCTION__,
		   buf_priv->list_entry->age.head -
		   dev_priv->primary->offset,
		   buf_priv->list_entry->age.wrap );

	entry = buf_priv->list_entry;
	head = dev_priv->head;

	if ( buf_priv->list_entry->age.head == MGA_BUFFER_USED ) {
		SET_AGE( &entry->age, MGA_BUFFER_FREE, 0 );
		prev = dev_priv->tail;
		prev->next = entry;
		entry->prev = prev;
		entry->next = NULL;
	} else {
		prev = head->next;
		head->next = entry;
		prev->prev = entry;
		entry->prev = head;
		entry->next = prev;
	}

	return 0;
}


/* ================================================================
 * DMA initialization, cleanup
 */
static int mga_do_init_dma( drm_device_t *dev, drm_mga_init_t *init )
{
	drm_mga_private_t *dev_priv;
	struct list_head *list;
	int ret;
	DRM_DEBUG( "%s\n", __FUNCTION__ );

	dev_priv = DRM(alloc)( sizeof(drm_mga_private_t), DRM_MEM_DRIVER );
	if ( !dev_priv )
		return -ENOMEM;

	dev->dev_private = (void *)dev_priv;
	memset( dev_priv, 0, sizeof(drm_mga_private_t) );

	dev_priv->chipset = init->chipset;

	dev_priv->usec_timeout = MGA_DEFAULT_USEC_TIMEOUT;

	if ( init->sgram ) {
		dev_priv->clear_cmd = MGA_DWGCTL_CLEAR | MGA_ATYPE_BLK;
	} else {
		dev_priv->clear_cmd = MGA_DWGCTL_CLEAR | MGA_ATYPE_RSTR;
	}
	dev_priv->maccess	= init->maccess;

	dev_priv->fb_cpp	= init->fb_cpp;
	dev_priv->front_offset	= init->front_offset;
	dev_priv->front_pitch	= init->front_pitch;
	dev_priv->back_offset	= init->back_offset;
	dev_priv->back_pitch	= init->back_pitch;

	dev_priv->depth_cpp	= init->depth_cpp;
	dev_priv->depth_offset	= init->depth_offset;
	dev_priv->depth_pitch	= init->depth_pitch;

	/* FIXME: Need to support AGP textures...
	 */
	dev_priv->texture_offset = init->texture_offset[0];
	dev_priv->texture_size = init->texture_size[0];

	list_for_each( list, &dev->maplist->head ) {
		drm_map_list_t *entry = (drm_map_list_t *)list;
		if ( entry->map &&
		     entry->map->type == _DRM_SHM &&
		     (entry->map->flags & _DRM_CONTAINS_LOCK) ) {
			dev_priv->sarea = entry->map;
			break;
		}
	}

	DRM_FIND_MAP( dev_priv->fb, init->fb_offset );
	DRM_FIND_MAP( dev_priv->mmio, init->mmio_offset );
	DRM_FIND_MAP( dev_priv->status, init->status_offset );

	DRM_FIND_MAP( dev_priv->warp, init->warp_offset );
	DRM_FIND_MAP( dev_priv->primary, init->primary_offset );
	DRM_FIND_MAP( dev_priv->buffers, init->buffers_offset );

	dev_priv->sarea_priv =
		(drm_mga_sarea_t *)((u8 *)dev_priv->sarea->handle +
				    init->sarea_priv_offset);

	DRM_IOREMAP( dev_priv->warp );
	DRM_IOREMAP( dev_priv->primary );
	DRM_IOREMAP( dev_priv->buffers );

	ret = mga_warp_install_microcode( dev );
	if ( ret < 0 ) {
		DRM_ERROR( "failed to install WARP ucode!\n" );
		mga_do_cleanup_dma( dev );
		return ret;
	}

	ret = mga_warp_init( dev );
	if ( ret < 0 ) {
		DRM_ERROR( "failed to init WARP engine!\n" );
		mga_do_cleanup_dma( dev );
		return ret;
	}

	dev_priv->prim.status = (u32 *)dev_priv->status->handle;

	mga_do_wait_for_idle( dev_priv );

	/* Init the primary DMA registers.
	 */
	MGA_WRITE( MGA_PRIMADDRESS,
		   dev_priv->primary->offset | MGA_DMA_GENERAL );
#if 0
	MGA_WRITE( MGA_PRIMPTR,
		   virt_to_bus((void *)dev_priv->prim.status) |
		   MGA_PRIMPTREN0 |	/* Soft trap, SECEND, SETUPEND */
		   MGA_PRIMPTREN1 );	/* DWGSYNC */
#endif

	dev_priv->prim.start = (u8 *)dev_priv->primary->handle;
	dev_priv->prim.end = ((u8 *)dev_priv->primary->handle +
			      dev_priv->primary->size);
	dev_priv->prim.size = dev_priv->primary->size;

	dev_priv->prim.tail = 0;
	dev_priv->prim.space = dev_priv->prim.size;
	dev_priv->prim.wrapped = 0;

	dev_priv->prim.last_flush = 0;
	dev_priv->prim.last_wrap = 0;

	dev_priv->prim.high_mark = 256 * DMA_BLOCK_SIZE;

	spin_lock_init( &dev_priv->prim.list_lock );

	dev_priv->prim.status[0] = dev_priv->primary->offset;
	dev_priv->prim.status[1] = 0;

	dev_priv->sarea_priv->last_wrap = 0;
	dev_priv->sarea_priv->last_frame.head = 0;
	dev_priv->sarea_priv->last_frame.wrap = 0;

	if ( mga_freelist_init( dev ) < 0 ) {
		DRM_ERROR( "could not initialize freelist\n" );
		mga_do_cleanup_dma( dev );
		return -ENOMEM;
	}

	return 0;
}

int mga_do_cleanup_dma( drm_device_t *dev )
{
	DRM_DEBUG( "%s\n", __FUNCTION__ );

	if ( dev->dev_private ) {
		drm_mga_private_t *dev_priv = dev->dev_private;

		DRM_IOREMAPFREE( dev_priv->warp );
		DRM_IOREMAPFREE( dev_priv->primary );
		DRM_IOREMAPFREE( dev_priv->buffers );

		if ( dev_priv->head != NULL ) {
			mga_freelist_cleanup( dev );
		}

		DRM(free)( dev->dev_private, sizeof(drm_mga_private_t),
			   DRM_MEM_DRIVER );
		dev->dev_private = NULL;
	}

	return 0;
}

int mga_dma_init( struct inode *inode, struct file *filp,
		  unsigned int cmd, unsigned long arg )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_mga_init_t init;

	if ( copy_from_user( &init, (drm_mga_init_t *)arg, sizeof(init) ) )
		return -EFAULT;

	switch ( init.func ) {
	case MGA_INIT_DMA:
		return mga_do_init_dma( dev, &init );
	case MGA_CLEANUP_DMA:
		return mga_do_cleanup_dma( dev );
	}

	return -EINVAL;
}
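/*
 * Usage sketch for mga_dma_init() above (hypothetical; in practice the X
 * server fills a drm_mga_init_t and issues the init ioctl after creating
 * the maps with drmAddMap):
 *
 *	drm_mga_init_t init;
 *	memset(&init, 0, sizeof(init));
 *	init.func = MGA_INIT_DMA;
 *	init.sgram = 0;			// SDRAM board
 *	init.chipset = ...;		// e.g. G400
 *	init.sarea_priv_offset = ...;	// offset into the SAREA
 *	init.primary_offset = ...;	// map handles from drmAddMap
 *	ioctl(fd, DRM_IOCTL_MGA_INIT, &init);
 */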
/* ================================================================
 * Primary DMA stream management
 */

int mga_dma_flush( struct inode *inode, struct file *filp,
		   unsigned int cmd, unsigned long arg )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;
	drm_lock_t lock;

	LOCK_TEST_WITH_RETURN( dev );

	if ( copy_from_user( &lock, (drm_lock_t *)arg, sizeof(lock) ) )
		return -EFAULT;

	DRM_DEBUG( "%s: %s%s%s\n",
		   __FUNCTION__,
		   (lock.flags & _DRM_LOCK_FLUSH) ? "flush, " : "",
		   (lock.flags & _DRM_LOCK_FLUSH_ALL) ? "flush all, " : "",
		   (lock.flags & _DRM_LOCK_QUIESCENT) ? "idle, " : "" );

	WRAP_WAIT_WITH_RETURN( dev_priv );

	if ( lock.flags & (_DRM_LOCK_FLUSH | _DRM_LOCK_FLUSH_ALL) ) {
		mga_do_dma_flush( dev_priv );
	}

	if ( lock.flags & _DRM_LOCK_QUIESCENT ) {
#if MGA_DMA_DEBUG
		int ret = mga_do_wait_for_idle( dev_priv );
		if ( ret < 0 )
			DRM_INFO( __FUNCTION__": -EBUSY\n" );
		return ret;
#else
		return mga_do_wait_for_idle( dev_priv );
#endif
	} else {
		return 0;
	}
}

int mga_dma_reset( struct inode *inode, struct file *filp,
		   unsigned int cmd, unsigned long arg )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;

	LOCK_TEST_WITH_RETURN( dev );

	return mga_do_dma_reset( dev_priv );
}


/* ================================================================
 * DMA buffer management
 */

static int mga_dma_get_buffers( drm_device_t *dev, drm_dma_t *d )
{
	drm_buf_t *buf;
	int i;

	for ( i = d->granted_count ; i < d->request_count ; i++ ) {
		buf = mga_freelist_get( dev );
		if ( !buf )
			return -EAGAIN;

		buf->pid = current->pid;

		if ( copy_to_user( &d->request_indices[i],
				   &buf->idx, sizeof(buf->idx) ) )
			return -EFAULT;
		if ( copy_to_user( &d->request_sizes[i],
				   &buf->total, sizeof(buf->total) ) )
			return -EFAULT;

		d->granted_count++;
	}
	return 0;
}

int mga_dma_buffers( struct inode *inode, struct file *filp,
		     unsigned int cmd, unsigned long arg )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;
	drm_dma_t d;
	int ret = 0;

	LOCK_TEST_WITH_RETURN( dev );

	if ( copy_from_user( &d, (drm_dma_t *)arg, sizeof(d) ) )
		return -EFAULT;

	/* Please don't send us buffers.
	 */
	if ( d.send_count != 0 ) {
		DRM_ERROR( "Process %d trying to send %d buffers via drmDMA\n",
			   current->pid, d.send_count );
		return -EINVAL;
	}

	/* We'll send you buffers.
	 */
	if ( d.request_count < 0 || d.request_count > dma->buf_count ) {
		DRM_ERROR( "Process %d trying to get %d buffers (of %d max)\n",
			   current->pid, d.request_count, dma->buf_count );
		return -EINVAL;
	}

	WRAP_TEST_WITH_RETURN( dev_priv );

	d.granted_count = 0;

	if ( d.request_count ) {
		ret = mga_dma_get_buffers( dev, &d );
	}

	if ( copy_to_user( (drm_dma_t *)arg, &d, sizeof(d) ) )
		return -EFAULT;

	return ret;
}
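/*
 * Usage sketch for mga_dma_buffers() above (hypothetical client code;
 * assumes libdrm's drmDMA() wrapper, which fills the drm_dma_t this
 * handler consumes):
 *
 *	int idx, size;
 *	drmDMAReq req;
 *	req.context       = ctx;
 *	req.send_count    = 0;		// this driver only hands out buffers
 *	req.request_count = 1;
 *	req.request_size  = 4096;
 *	req.request_list  = &idx;
 *	req.request_sizes = &size;
 *	req.flags         = DRM_DMA_WAIT;
 *	if (drmDMA(fd, &req) == 0) {
 *		// idx now names a buffer previously mapped via drmMapBufs()
 *	}
 */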