path: root/linux-core

Mode  Name  Size
-rw-r--r--  .gitignore  16
-rw-r--r--  Config.in  594
-rw-r--r--  Doxyfile  47630
-rw-r--r--  Kconfig  3353
-rw-r--r--  Makefile  9164
-rw-r--r--  Makefile.kernel  2757
-rw-r--r--  README.drm  1091
-rw-r--r--  ati_pcigart.c  6202
l---------  drm.h  20
-rw-r--r--  drmP.h  41250
-rw-r--r--  drm_agpsupport.c  17634
-rw-r--r--  drm_auth.c  5521
-rw-r--r--  drm_bo.c  66138
-rw-r--r--  drm_bo_lock.c  5519
-rw-r--r--  drm_bo_move.c  15790
-rw-r--r--  drm_bufs.c  43699
-rw-r--r--  drm_compat.c  17140
-rw-r--r--  drm_compat.h  9386
-rw-r--r--  drm_context.c  11870
-rw-r--r--  drm_core.h  1469
-rw-r--r--  drm_dma.c  4669
-rw-r--r--  drm_drawable.c  5288
-rw-r--r--  drm_drv.c  20918
-rw-r--r--  drm_fence.c  22333
-rw-r--r--  drm_fops.c  13806
-rw-r--r--  drm_hashtab.c  5323
-rw-r--r--  drm_hashtab.h  2563
l---------  drm_internal.h  29
-rw-r--r--  drm_ioc32.c  32308
-rw-r--r--  drm_ioctl.c  9024
-rw-r--r--  drm_irq.c  12426
-rw-r--r--  drm_lock.c  10793
-rw-r--r--  drm_memory.c  8068
-rw-r--r--  drm_memory.h  1936
-rw-r--r--  drm_memory_debug.c  10415
-rw-r--r--  drm_memory_debug.h  10169
-rw-r--r--  drm_mm.c  7405
-rw-r--r--  drm_object.c  8191
-rw-r--r--  drm_objects.h  24271
-rw-r--r--  drm_os_linux.h  4906
-rw-r--r--  drm_pci.c  4988
-rw-r--r--  drm_proc.c  17442
-rw-r--r--  drm_regman.c  5396
l---------  drm_sarea.h  26
-rw-r--r--  drm_scatter.c  5721
-rw-r--r--  drm_sman.c  9108
-rw-r--r--  drm_sman.h  5974
-rw-r--r--  drm_stub.c  8432
-rw-r--r--  drm_sysfs.c  5251
-rw-r--r--  drm_ttm.c  10853
-rw-r--r--  drm_vm.c  23859
-rw-r--r--  ffb_context.c  18711
-rw-r--r--  ffb_drv.c  8123
-rw-r--r--  ffb_drv.h  11082
-rw-r--r--  i810_dma.c  34119
-rw-r--r--  i810_drm.h  9440
-rw-r--r--  i810_drv.c  3246
-rw-r--r--  i810_drv.h  7725
-rw-r--r--  i915_buffer.c  8073
-rw-r--r--  i915_compat.c  5024
l---------  i915_dma.c  25
l---------  i915_drm.h  25
-rw-r--r--  i915_drv.c  18558
l---------  i915_drv.h  25
-rw-r--r--  i915_fence.c  4793
-rw-r--r--  i915_ioc32.c  9607
l---------  i915_irq.c  25
l---------  i915_mem.c  25
-rw-r--r--  imagine_drv.c  2523
l---------  linux  1
l---------  mach64_dma.c  27
l---------  mach64_drm.h  27
-rw-r--r--  mach64_drv.c  3177
l---------  mach64_drv.h  27
l---------  mach64_irq.c  27
l---------  mach64_state.c  29
l---------  mga_dma.c  24
l---------  mga_drm.h  24
-rw-r--r--  mga_drv.c  4693
l---------  mga_drv.h  24
-rw-r--r--  mga_ioc32.c  7625
l---------  mga_irq.c  24
l---------  mga_state.c  26
l---------  mga_ucode.h  26
l---------  mga_warp.c  25
-rw-r--r--  nouveau_buffer.c  8646
l---------  nouveau_dma.c  28
l---------  nouveau_dma.h  28
l---------  nouveau_drm.h  28
-rw-r--r--  nouveau_drv.c  3454
l---------  nouveau_drv.h  28
-rw-r--r--  nouveau_fence.c  4017
l---------  nouveau_fifo.c  29
-rw-r--r--  nouveau_ioc32.c  2302
l---------  nouveau_irq.c  28
l---------  nouveau_mem.c  28
l---------  nouveau_notifier.c  33
l---------  nouveau_object.c  31
l---------  nouveau_reg.h  28
-rw-r--r--  nouveau_sgdma.c  8866
l---------  nouveau_state.c  30
l---------  nouveau_swmthd.c  31
l---------  nouveau_swmthd.h  31
l---------  nv04_fb.c  24
l---------  nv04_fifo.c  26
l---------  nv04_graph.c  27
l---------  nv04_instmem.c  29
l---------  nv04_mc.c  24
l---------  nv04_timer.c  27
l---------  nv10_fb.c  24
l---------  nv10_fifo.c  26
l---------  nv10_graph.c  27
l---------  nv20_graph.c  27
l---------  nv40_fb.c  24
l---------  nv40_fifo.c  26
l---------  nv40_graph.c  27
l---------  nv40_mc.c  24
l---------  nv50_fifo.c  26
l---------  nv50_graph.c  27
l---------  nv50_instmem.c  29
l---------  nv50_mc.c  24
-rw-r--r--  nv_drv.c  2881
l---------  nv_drv.h  23
l---------  r128_cce.c  25
l---------  r128_drm.h  25
-rw-r--r--  r128_drv.c  3488
l---------  r128_drv.h  25
-rw-r--r--  r128_ioc32.c  7190
l---------  r128_irq.c  25
l---------  r128_state.c  27
l---------  r300_cmdbuf.c  28
l---------  r300_reg.h  25
l---------  radeon_cp.c  26
l---------  radeon_drm.h  27
-rw-r--r--  radeon_drv.c  4053
l---------  radeon_drv.h  27
-rw-r--r--  radeon_ioc32.c  13580
l---------  radeon_irq.c  27
l---------  radeon_mem.c  27
l---------  radeon_state.c  29
l---------  savage_bci.c  27
l---------  savage_drm.h  27
-rw-r--r--  savage_drv.c  2886
l---------  savage_drv.h  27
l---------  savage_state.c  29
l---------  sis_drm.h  24
-rw-r--r--  sis_drv.c  3494
l---------  sis_drv.h  24
-rw-r--r--  sis_mm.c  8417
-rw-r--r--  tdfx_drv.c  2797
l---------  tdfx_drv.h  25
l---------  via_3d_reg.h  27
-rw-r--r--  via_buffer.c  4512
l---------  via_dma.c  24
-rw-r--r--  via_dmablit.c  22420
-rw-r--r--  via_dmablit.h  5435
l---------  via_drm.h  24
l---------  via_drv.c  24
l---------  via_drv.h  24
-rw-r--r--  via_fence.c  6493
l---------  via_irq.c  24
l---------  via_map.c  24
-rw-r--r--  via_mm.c  5632
l---------  via_verifier.c  29
l---------  via_verifier.h  29
l---------  via_video.c  26
-rw-r--r--  xgi_cmdlist.c  9252
-rw-r--r--  xgi_cmdlist.h  2186
l---------  xgi_drm.h  24
-rw-r--r--  xgi_drv.c  11311
-rw-r--r--  xgi_drv.h  3775
-rw-r--r--  xgi_fb.c  3897
-rw-r--r--  xgi_fence.c  3476
-rw-r--r--  xgi_ioc32.c  4046
-rw-r--r--  xgi_misc.c  11070
-rw-r--r--  xgi_misc.h  1682
-rw-r--r--  xgi_pcie.c  3792
-rw-r--r--  xgi_regs.h  5944

class="hl opt">(dev, 1); dev->counters += 3; dev->types[6] = _DRM_STAT_IRQ; dev->types[7] = _DRM_STAT_PRIMARY; dev->types[8] = _DRM_STAT_SECONDARY; return 0; } /** * Bootstrap the driver for AGP DMA. * * \todo * Investigate whether there is any benifit to storing the WARP microcode in * AGP memory. If not, the microcode may as well always be put in PCI * memory. * * \todo * This routine needs to set dma_bs->agp_mode to the mode actually configured * in the hardware. Looking just at the Linux AGP driver code, I don't see * an easy way to determine this. * * \sa mga_do_dma_bootstrap, mga_do_pci_dma_bootstrap */ static int mga_do_agp_dma_bootstrap(struct drm_device *dev, drm_mga_dma_bootstrap_t * dma_bs) { drm_mga_private_t *const dev_priv = (drm_mga_private_t *)dev->dev_private; unsigned int warp_size = mga_warp_microcode_size(dev_priv); int err; unsigned offset; const unsigned secondary_size = dma_bs->secondary_bin_count * dma_bs->secondary_bin_size; const unsigned agp_size = (dma_bs->agp_size << 20); struct drm_buf_desc req; struct drm_agp_mode mode; struct drm_agp_info info; struct drm_agp_buffer agp_req; struct drm_agp_binding bind_req; /* Acquire AGP. */ err = drm_agp_acquire(dev); if (err) { DRM_ERROR("Unable to acquire AGP: %d\n", err); return err; } err = drm_agp_info(dev, &info); if (err) { DRM_ERROR("Unable to get AGP info: %d\n", err); return err; } mode.mode = (info.mode & ~0x07) | dma_bs->agp_mode; err = drm_agp_enable(dev, mode); if (err) { DRM_ERROR("Unable to enable AGP (mode = 0x%lx)\n", mode.mode); return err; } /* In addition to the usual AGP mode configuration, the G200 AGP cards * need to have the AGP mode "manually" set. */ if (dev_priv->chipset == MGA_CARD_TYPE_G200) { if (mode.mode & 0x02) { MGA_WRITE(MGA_AGP_PLL, MGA_AGP2XPLL_ENABLE); } else { MGA_WRITE(MGA_AGP_PLL, MGA_AGP2XPLL_DISABLE); } } /* Allocate and bind AGP memory. */ agp_req.size = agp_size; agp_req.type = 0; err = drm_agp_alloc(dev, &agp_req); if (err) { dev_priv->agp_size = 0; DRM_ERROR("Unable to allocate %uMB AGP memory\n", dma_bs->agp_size); return err; } dev_priv->agp_size = agp_size; dev_priv->agp_handle = agp_req.handle; bind_req.handle = agp_req.handle; bind_req.offset = 0; err = drm_agp_bind( dev, &bind_req ); if (err) { DRM_ERROR("Unable to bind AGP memory: %d\n", err); return err; } /* Make drm_addbufs happy by not trying to create a mapping for less * than a page. 
	 */
	if (warp_size < PAGE_SIZE)
		warp_size = PAGE_SIZE;

	offset = 0;
	err = drm_addmap(dev, offset, warp_size,
			 _DRM_AGP, _DRM_READ_ONLY, &dev_priv->warp);
	if (err) {
		DRM_ERROR("Unable to map WARP microcode: %d\n", err);
		return err;
	}

	offset += warp_size;
	err = drm_addmap(dev, offset, dma_bs->primary_size,
			 _DRM_AGP, _DRM_READ_ONLY, &dev_priv->primary);
	if (err) {
		DRM_ERROR("Unable to map primary DMA region: %d\n", err);
		return err;
	}

	offset += dma_bs->primary_size;
	err = drm_addmap(dev, offset, secondary_size,
			 _DRM_AGP, 0, &dev->agp_buffer_map);
	if (err) {
		DRM_ERROR("Unable to map secondary DMA region: %d\n", err);
		return err;
	}

	(void)memset(&req, 0, sizeof(req));
	req.count = dma_bs->secondary_bin_count;
	req.size = dma_bs->secondary_bin_size;
	req.flags = _DRM_AGP_BUFFER;
	req.agp_start = offset;

	err = drm_addbufs_agp(dev, &req);
	if (err) {
		DRM_ERROR("Unable to add secondary DMA buffers: %d\n", err);
		return err;
	}

#ifdef __linux__
	{
		struct drm_map_list *_entry;
		unsigned long agp_token = 0;

		list_for_each_entry(_entry, &dev->maplist, head) {
			if (_entry->map == dev->agp_buffer_map)
				agp_token = _entry->user_token;
		}
		if (!agp_token)
			return -EFAULT;

		dev->agp_buffer_token = agp_token;
	}
#endif

	offset += secondary_size;
	err = drm_addmap(dev, offset, agp_size - offset,
			 _DRM_AGP, 0, &dev_priv->agp_textures);
	if (err) {
		DRM_ERROR("Unable to map AGP texture region: %d\n", err);
		return err;
	}

	drm_core_ioremap(dev_priv->warp, dev);
	drm_core_ioremap(dev_priv->primary, dev);
	drm_core_ioremap(dev->agp_buffer_map, dev);

	if (!dev_priv->warp->handle || !dev_priv->primary->handle ||
	    !dev->agp_buffer_map->handle) {
		DRM_ERROR("failed to ioremap agp regions! (%p, %p, %p)\n",
			  dev_priv->warp->handle, dev_priv->primary->handle,
			  dev->agp_buffer_map->handle);
		return -ENOMEM;
	}

	dev_priv->dma_access = MGA_PAGPXFER;
	dev_priv->wagp_enable = MGA_WAGP_ENABLE;

	DRM_INFO("Initialized card for AGP DMA.\n");
	return 0;
}

/**
 * Bootstrap the driver for PCI DMA.
 *
 * \todo
 * The algorithm for decreasing the size of the primary DMA buffer could be
 * better.  The size should be rounded up to the nearest page size, then the
 * request size decreased by a single page each pass through the loop.
 *
 * \todo
 * Determine whether the maximum address passed to drm_pci_alloc is correct.
 * The same goes for drm_addbufs_pci.
 *
 * \sa mga_do_dma_bootstrap, mga_do_agp_dma_bootstrap
 */
static int mga_do_pci_dma_bootstrap(struct drm_device *dev,
				    drm_mga_dma_bootstrap_t *dma_bs)
{
	drm_mga_private_t *const dev_priv =
	    (drm_mga_private_t *)dev->dev_private;
	unsigned int warp_size = mga_warp_microcode_size(dev_priv);
	unsigned int primary_size;
	unsigned int bin_count;
	int err;
	struct drm_buf_desc req;

	if (dev->dma == NULL) {
		DRM_ERROR("dev->dma is NULL\n");
		return -EFAULT;
	}

	/* Make drm_addbufs happy by not trying to create a mapping for less
	 * than a page.
	 */
	if (warp_size < PAGE_SIZE)
		warp_size = PAGE_SIZE;

	/* The proper alignment is 0x100 for this mapping */
	err = drm_addmap(dev, 0, warp_size, _DRM_CONSISTENT,
			 _DRM_READ_ONLY, &dev_priv->warp);
	if (err != 0) {
		DRM_ERROR("Unable to create mapping for WARP microcode: %d\n",
			  err);
		return err;
	}

	/* Other than the bottom two bits being used to encode other
	 * information, there don't appear to be any restrictions on the
	 * alignment of the primary or secondary DMA buffers.
	 */
	for (primary_size = dma_bs->primary_size; primary_size != 0;
	     primary_size >>= 1) {
		/* The proper alignment for this mapping is 0x04 */
		err = drm_addmap(dev, 0, primary_size, _DRM_CONSISTENT,
				 _DRM_READ_ONLY, &dev_priv->primary);
		if (!err)
			break;
	}

	if (err != 0) {
		DRM_ERROR("Unable to allocate primary DMA region: %d\n", err);
		return -ENOMEM;
	}

	if (dev_priv->primary->size != dma_bs->primary_size) {
		DRM_INFO("Primary DMA buffer size reduced from %u to %u.\n",
			 dma_bs->primary_size,
			 (unsigned)dev_priv->primary->size);
		dma_bs->primary_size = dev_priv->primary->size;
	}

	for (bin_count = dma_bs->secondary_bin_count; bin_count > 0;
	     bin_count--) {
		(void)memset(&req, 0, sizeof(req));
		req.count = bin_count;
		req.size = dma_bs->secondary_bin_size;

		err = drm_addbufs_pci(dev, &req);
		if (!err) {
			break;
		}
	}

	if (bin_count == 0) {
		DRM_ERROR("Unable to add secondary DMA buffers: %d\n", err);
		return err;
	}

	if (bin_count != dma_bs->secondary_bin_count) {
		DRM_INFO("Secondary PCI DMA buffer bin count reduced from %u "
			 "to %u.\n", dma_bs->secondary_bin_count, bin_count);
		dma_bs->secondary_bin_count = bin_count;
	}

	dev_priv->dma_access = 0;
	dev_priv->wagp_enable = 0;

	dma_bs->agp_mode = 0;

	DRM_INFO("Initialized card for PCI DMA.\n");
	return 0;
}
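/*
 * Illustrative sketch only, not part of the driver: one way the page-granular
 * fallback described in the \todo above could look.  Instead of halving the
 * request on each failure, the size is rounded up to a whole page and then
 * reduced one page at a time until drm_addmap() succeeds.  The helper name
 * mga_pci_primary_addmap_sketch() is hypothetical; the mapping type and flags
 * mirror the loop above.
 */
#if 0
static int mga_pci_primary_addmap_sketch(struct drm_device *dev,
					 drm_mga_private_t *dev_priv,
					 drm_mga_dma_bootstrap_t *dma_bs)
{
	unsigned int size = PAGE_ALIGN(dma_bs->primary_size);
	int err = -ENOMEM;

	for (; size >= PAGE_SIZE; size -= PAGE_SIZE) {
		/* Same mapping type and flags as the shrink loop above. */
		err = drm_addmap(dev, 0, size, _DRM_CONSISTENT,
				 _DRM_READ_ONLY, &dev_priv->primary);
		if (!err)
			break;
	}

	return err;
}
#endif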
static int mga_do_dma_bootstrap(struct drm_device *dev,
				drm_mga_dma_bootstrap_t *dma_bs)
{
	const int is_agp = (dma_bs->agp_mode != 0) && drm_device_is_agp(dev);
	int err = 0;
	drm_mga_private_t *const dev_priv =
	    (drm_mga_private_t *)dev->dev_private;

	dev_priv->used_new_dma_init = 1;

	/* The first steps are the same for both PCI and AGP based DMA.  Map
	 * the card's MMIO registers and map a status page.
	 */
	err = drm_addmap(dev, dev_priv->mmio_base, dev_priv->mmio_size,
			 _DRM_REGISTERS, _DRM_READ_ONLY, &dev_priv->mmio);
	if (err) {
		DRM_ERROR("Unable to map MMIO region: %d\n", err);
		return err;
	}

	err = drm_addmap(dev, 0, SAREA_MAX, _DRM_SHM,
			 _DRM_READ_ONLY | _DRM_LOCKED | _DRM_KERNEL,
			 &dev_priv->status);
	if (err) {
		DRM_ERROR("Unable to map status region: %d\n", err);
		return err;
	}

	/* The DMA initialization procedure is slightly different for PCI and
	 * AGP cards.  AGP cards just allocate a large block of AGP memory and
	 * carve off portions of it for internal uses.  The remaining memory
	 * is returned to user-mode to be used for AGP textures.
	 */
	if (is_agp) {
		err = mga_do_agp_dma_bootstrap(dev, dma_bs);
	}

	/* If we attempted to initialize the card for AGP DMA but failed,
	 * clean up any mess that may have been created.
	 */
	if (err) {
		mga_do_cleanup_dma(dev, MINIMAL_CLEANUP);
	}

	/* Not only do we want to try to initialize PCI cards for PCI DMA,
	 * but we also try to initialize AGP cards that could not be
	 * initialized for AGP DMA.  This covers the case where we have an AGP
	 * card in a system with an unsupported AGP chipset.  In that case the
	 * card will be detected as AGP, but we won't be able to allocate any
	 * AGP memory, etc.
	 */
	if (!is_agp || err) {
		err = mga_do_pci_dma_bootstrap(dev, dma_bs);
	}

	return err;
}

int mga_dma_bootstrap(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	drm_mga_dma_bootstrap_t *bootstrap = data;
	int err;
	static const int modes[] = { 0, 1, 2, 2, 4, 4, 4, 4 };
	const drm_mga_private_t *const dev_priv =
	    (drm_mga_private_t *)dev->dev_private;

	err = mga_do_dma_bootstrap(dev, bootstrap);
	if (err) {
		mga_do_cleanup_dma(dev, FULL_CLEANUP);
		return err;
	}

	if (dev_priv->agp_textures != NULL) {
		bootstrap->texture_handle = dev_priv->agp_textures->offset;
		bootstrap->texture_size = dev_priv->agp_textures->size;
	} else {
		bootstrap->texture_handle = 0;
		bootstrap->texture_size = 0;
	}

	bootstrap->agp_mode = modes[bootstrap->agp_mode & 0x07];

	return 0;
}

static int mga_do_init_dma(struct drm_device *dev, drm_mga_init_t *init)
{
	drm_mga_private_t *dev_priv;
	int ret;
	DRM_DEBUG("\n");

	dev_priv = dev->dev_private;

	if (init->sgram) {
		dev_priv->clear_cmd = MGA_DWGCTL_CLEAR | MGA_ATYPE_BLK;
	} else {
		dev_priv->clear_cmd = MGA_DWGCTL_CLEAR | MGA_ATYPE_RSTR;
	}
	dev_priv->maccess = init->maccess;

	dev_priv->fb_cpp = init->fb_cpp;
	dev_priv->front_offset = init->front_offset;
	dev_priv->front_pitch = init->front_pitch;
	dev_priv->back_offset = init->back_offset;
	dev_priv->back_pitch = init->back_pitch;

	dev_priv->depth_cpp = init->depth_cpp;
	dev_priv->depth_offset = init->depth_offset;
	dev_priv->depth_pitch = init->depth_pitch;

	/* FIXME: Need to support AGP textures...
	 */
	dev_priv->texture_offset = init->texture_offset[0];
	dev_priv->texture_size = init->texture_size[0];

	dev_priv->sarea = drm_getsarea(dev);
	if (!dev_priv->sarea) {
		DRM_ERROR("failed to find sarea!\n");
		return -EINVAL;
	}

	if (!dev_priv->used_new_dma_init) {
		dev_priv->dma_access = MGA_PAGPXFER;
		dev_priv->wagp_enable = MGA_WAGP_ENABLE;

		dev_priv->status = drm_core_findmap(dev, init->status_offset);
		if (!dev_priv->status) {
			DRM_ERROR("failed to find status page!\n");
			return -EINVAL;
		}
		dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset);
		if (!dev_priv->mmio) {
			DRM_ERROR("failed to find mmio region!\n");
			return -EINVAL;
		}
		dev_priv->warp = drm_core_findmap(dev, init->warp_offset);
		if (!dev_priv->warp) {
			DRM_ERROR("failed to find warp microcode region!\n");
			return -EINVAL;
		}
		dev_priv->primary = drm_core_findmap(dev, init->primary_offset);
		if (!dev_priv->primary) {
			DRM_ERROR("failed to find primary dma region!\n");
			return -EINVAL;
		}
		dev->agp_buffer_token = init->buffers_offset;
		dev->agp_buffer_map =
		    drm_core_findmap(dev, init->buffers_offset);
		if (!dev->agp_buffer_map) {
			DRM_ERROR("failed to find dma buffer region!\n");
			return -EINVAL;
		}

		drm_core_ioremap(dev_priv->warp, dev);
		drm_core_ioremap(dev_priv->primary, dev);
		drm_core_ioremap(dev->agp_buffer_map, dev);
	}

	dev_priv->sarea_priv =
	    (drm_mga_sarea_t *)((u8 *)dev_priv->sarea->handle +
				init->sarea_priv_offset);

	if (!dev_priv->warp->handle || !dev_priv->primary->handle ||
	    ((dev_priv->dma_access != 0) &&
	     ((dev->agp_buffer_map == NULL) ||
	      (dev->agp_buffer_map->handle == NULL)))) {
		DRM_ERROR("failed to ioremap agp regions!\n");
		return -ENOMEM;
	}

	ret = mga_warp_install_microcode(dev_priv);
	if (ret != 0) {
		DRM_ERROR("failed to install WARP ucode: %d!\n", ret);
		return ret;
	}

	ret = mga_warp_init(dev_priv);
	if (ret != 0) {
		DRM_ERROR("failed to init WARP engine: %d!\n", ret);
		return ret;
	}

	dev_priv->prim.status = (u32 *)dev_priv->status->handle;

	mga_do_wait_for_idle(dev_priv);

	/* Init the primary DMA registers.
	 */
	MGA_WRITE(MGA_PRIMADDRESS, dev_priv->primary->offset | MGA_DMA_GENERAL);

	dev_priv->prim.start = (u8 *)dev_priv->primary->handle;
	dev_priv->prim.end = ((u8 *)dev_priv->primary->handle
			      + dev_priv->primary->size);
	dev_priv->prim.size = dev_priv->primary->size;

	dev_priv->prim.tail = 0;
	dev_priv->prim.space = dev_priv->prim.size;
	dev_priv->prim.wrapped = 0;

	dev_priv->prim.last_flush = 0;
	dev_priv->prim.last_wrap = 0;

	dev_priv->prim.high_mark = 256 * DMA_BLOCK_SIZE;

	dev_priv->prim.status[0] = dev_priv->primary->offset;
	dev_priv->prim.status[1] = 0;

	dev_priv->sarea_priv->last_wrap = 0;
	dev_priv->sarea_priv->last_frame.head = 0;
	dev_priv->sarea_priv->last_frame.wrap = 0;

	if (mga_freelist_init(dev, dev_priv) < 0) {
		DRM_ERROR("could not initialize freelist\n");
		return -ENOMEM;
	}

	return 0;
}

static int mga_do_cleanup_dma(struct drm_device *dev, int full_cleanup)
{
	int err = 0;
	DRM_DEBUG("\n");

	/* Make sure interrupts are disabled here because the uninstall ioctl
	 * may not have been called from userspace and after dev_private
	 * is freed, it's too late.
	 */
	if (dev->irq_enabled)
		drm_irq_uninstall(dev);

	if (dev->dev_private) {
		drm_mga_private_t *dev_priv = dev->dev_private;

		if ((dev_priv->warp != NULL)
		    && (dev_priv->warp->type != _DRM_CONSISTENT))
			drm_core_ioremapfree(dev_priv->warp, dev);

		if ((dev_priv->primary != NULL)
		    && (dev_priv->primary->type != _DRM_CONSISTENT))
			drm_core_ioremapfree(dev_priv->primary, dev);

		if (dev->agp_buffer_map != NULL)
			drm_core_ioremapfree(dev->agp_buffer_map, dev);

		if (dev_priv->used_new_dma_init) {
			if (dev_priv->agp_handle != 0) {
				struct drm_agp_binding unbind_req;
				struct drm_agp_buffer free_req;

				unbind_req.handle = dev_priv->agp_handle;
				drm_agp_unbind(dev, &unbind_req);

				free_req.handle = dev_priv->agp_handle;
				drm_agp_free(dev, &free_req);

				dev_priv->agp_textures = NULL;
				dev_priv->agp_size = 0;
				dev_priv->agp_handle = 0;
			}

			if ((dev->agp != NULL) && dev->agp->acquired) {
				err = drm_agp_release(dev);
			}
		}

		dev_priv->warp = NULL;
		dev_priv->primary = NULL;
		dev_priv->sarea = NULL;
		dev_priv->sarea_priv = NULL;
		dev->agp_buffer_map = NULL;

		if (full_cleanup) {
			dev_priv->mmio = NULL;
			dev_priv->status = NULL;
			dev_priv->used_new_dma_init = 0;
		}

		memset(&dev_priv->prim, 0, sizeof(dev_priv->prim));
		dev_priv->warp_pipe = 0;
		memset(dev_priv->warp_pipe_phys, 0,
		       sizeof(dev_priv->warp_pipe_phys));

		if (dev_priv->head != NULL) {
			mga_freelist_cleanup(dev);
		}
	}

	return err;
}

int mga_dma_init(struct drm_device *dev, void *data,
		 struct drm_file *file_priv)
{
	drm_mga_init_t *init = data;
	int err;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	switch (init->func) {
	case MGA_INIT_DMA:
		err = mga_do_init_dma(dev, init);
		if (err) {
			(void)mga_do_cleanup_dma(dev, FULL_CLEANUP);
		}
		return err;
	case MGA_CLEANUP_DMA:
		return mga_do_cleanup_dma(dev, FULL_CLEANUP);
	}

	return -EINVAL;
}

/* ================================================================
 * Primary DMA stream management
 */

int mga_dma_flush(struct drm_device *dev, void *data,
		  struct drm_file *file_priv)
{
	drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;
	struct drm_lock *lock = data;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	DRM_DEBUG("%s%s%s\n",
		  (lock->flags & _DRM_LOCK_FLUSH) ? "flush, " : "",
		  (lock->flags & _DRM_LOCK_FLUSH_ALL) ? "flush all, " : "",
		  (lock->flags & _DRM_LOCK_QUIESCENT) ?
"idle, " : ""); WRAP_WAIT_WITH_RETURN(dev_priv); if (lock->flags & (_DRM_LOCK_FLUSH | _DRM_LOCK_FLUSH_ALL)) { mga_do_dma_flush(dev_priv); } if (lock->flags & _DRM_LOCK_QUIESCENT) { #if MGA_DMA_DEBUG int ret = mga_do_wait_for_idle(dev_priv); if (ret < 0) DRM_INFO("-EBUSY\n"); return ret; #else return mga_do_wait_for_idle(dev_priv); #endif } else { return 0; } } int mga_dma_reset(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private; LOCK_TEST_WITH_RETURN(dev, file_priv); return mga_do_dma_reset(dev_priv); } /* ================================================================ * DMA buffer management */ static int mga_dma_get_buffers(struct drm_device * dev, struct drm_file *file_priv, struct drm_dma * d) { struct drm_buf *buf; int i; for (i = d->granted_count; i < d->request_count; i++) { buf = mga_freelist_get(dev); if (!buf) return -EAGAIN; buf->file_priv = file_priv; if (DRM_COPY_TO_USER(&d->request_indices[i], &buf->idx, sizeof(buf->idx))) return -EFAULT; if (DRM_COPY_TO_USER(&d->request_sizes[i], &buf->total, sizeof(buf->total))) return -EFAULT; d->granted_count++; } return 0; } int mga_dma_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_device_dma *dma = dev->dma; drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private; struct drm_dma *d = data; int ret = 0; LOCK_TEST_WITH_RETURN(dev, file_priv); /* Please don't send us buffers. */ if (d->send_count != 0) { DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n", DRM_CURRENTPID, d->send_count); return -EINVAL; } /* We'll send you buffers. */ if (d->request_count < 0 || d->request_count > dma->buf_count) { DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n", DRM_CURRENTPID, d->request_count, dma->buf_count); return -EINVAL; } WRAP_TEST_WITH_RETURN(dev_priv); d->granted_count = 0; if (d->request_count) { ret = mga_dma_get_buffers(dev, file_priv, d); } return ret; } /** * Called just before the module is unloaded. */ int mga_driver_unload(struct drm_device * dev) { drm_free(dev->dev_private, sizeof(drm_mga_private_t), DRM_MEM_DRIVER); dev->dev_private = NULL; return 0; } /** * Called when the last opener of the device is closed. */ void mga_driver_lastclose(struct drm_device * dev) { mga_do_cleanup_dma(dev, FULL_CLEANUP); } int mga_driver_dma_quiescent(struct drm_device * dev) { drm_mga_private_t *dev_priv = dev->dev_private; return mga_do_wait_for_idle(dev_priv); }