/*
 * Copyright (C) The Weather Channel, Inc.  2002.  All Rights Reserved.
 * Copyright 2005 Stephane Marchesin
 *
 * The Weather Channel (TM) funded Tungsten Graphics to develop the
 * initial release of the Radeon 8500 driver under the XFree86 license.
 * This notice must be preserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Whitwell
 */

#include "drmP.h"
#include "drm.h"
#include "drm_sarea.h"
#include "nouveau_drv.h"

static struct mem_block *split_block(struct mem_block *p, uint64_t start,
				     uint64_t size, struct drm_file *file_priv)
{
	/* Maybe cut off the start of an existing block */
	if (start > p->start) {
		struct mem_block *newblock =
			drm_alloc(sizeof(*newblock), DRM_MEM_BUFS);
		if (!newblock)
			goto out;
		newblock->start = start;
		newblock->size = p->size - (start - p->start);
		newblock->file_priv = NULL;
		newblock->next = p->next;
		newblock->prev = p;
		p->next->prev = newblock;
		p->next = newblock;
		p->size -= newblock->size;
		p = newblock;
	}

	/* Maybe cut off the end of an existing block */
	if (size < p->size) {
		struct mem_block *newblock =
			drm_alloc(sizeof(*newblock), DRM_MEM_BUFS);
		if (!newblock)
			goto out;
		newblock->start = start + size;
		newblock->size = p->size - size;
		newblock->file_priv = NULL;
		newblock->next = p->next;
		newblock->prev = p;
		p->next->prev = newblock;
		p->next = newblock;
		p->size = size;
	}

out:
	/* Our block is in the middle */
	p->file_priv = file_priv;
	return p;
}

struct mem_block *nouveau_mem_alloc_block(struct mem_block *heap,
					  uint64_t size, int align2,
					  struct drm_file *file_priv)
{
	struct mem_block *p;
	uint64_t mask = (1 << align2) - 1;

	if (!heap)
		return NULL;

	list_for_each(p, heap) {
		uint64_t start = (p->start + mask) & ~mask;
		if (p->file_priv == 0 &&
		    start + size <= p->start + p->size)
			return split_block(p, start, size, file_priv);
	}

	return NULL;
}

static struct mem_block *find_block(struct mem_block *heap, uint64_t start)
{
	struct mem_block *p;

	list_for_each(p, heap)
		if (p->start == start)
			return p;

	return NULL;
}

void nouveau_mem_free_block(struct mem_block *p)
{
	p->file_priv = NULL;

	/* Assumes a single contiguous range.  Needs a special file_priv in
	 * 'heap' to stop it being subsumed.
	 */
	if (p->next->file_priv == 0) {
		struct mem_block *q = p->next;
		p->size += q->size;
		p->next = q->next;
		p->next->prev = p;
		drm_free(q, sizeof(*q), DRM_MEM_BUFS);
	}

	if (p->prev->file_priv == 0) {
		struct mem_block *q = p->prev;
		q->size += p->size;
		q->next = p->next;
		q->next->prev = q;
		drm_free(p, sizeof(*q), DRM_MEM_BUFS);
	}
}

/* Initialize.  How to check for an uninitialized heap?
 */
int nouveau_mem_init_heap(struct mem_block **heap, uint64_t start,
			  uint64_t size)
{
	struct mem_block *blocks = drm_alloc(sizeof(*blocks), DRM_MEM_BUFS);

	if (!blocks)
		return -ENOMEM;

	*heap = drm_alloc(sizeof(**heap), DRM_MEM_BUFS);
	if (!*heap) {
		drm_free(blocks, sizeof(*blocks), DRM_MEM_BUFS);
		return -ENOMEM;
	}

	blocks->start = start;
	blocks->size = size;
	blocks->file_priv = NULL;
	blocks->next = blocks->prev = *heap;

	memset(*heap, 0, sizeof(**heap));
	(*heap)->file_priv = (struct drm_file *) -1;
	(*heap)->next = (*heap)->prev = blocks;
	return 0;
}

/*
 * Free all blocks associated with the releasing file_priv
 */
void nouveau_mem_release(struct drm_file *file_priv, struct mem_block *heap)
{
	struct mem_block *p;

	if (!heap || !heap->next)
		return;

	list_for_each(p, heap) {
		if (p->file_priv == file_priv)
			p->file_priv = NULL;
	}

	/* Assumes a single contiguous range.  Needs a special file_priv in
	 * 'heap' to stop it being subsumed.
	 */
	list_for_each(p, heap) {
		while ((p->file_priv == 0) &&
		       (p->next->file_priv == 0) &&
		       (p->next != heap)) {
			struct mem_block *q = p->next;
			p->size += q->size;
			p->next = q->next;
			p->next->prev = p;
			drm_free(q, sizeof(*q), DRM_MEM_DRIVER);
		}
	}
}

/*
 * Cleanup everything
 */
void nouveau_mem_takedown(struct mem_block **heap)
{
	struct mem_block *p;

	if (!*heap)
		return;

	for (p = (*heap)->next; p != *heap;) {
		struct mem_block *q = p;
		p = p->next;
		drm_free(q, sizeof(*q), DRM_MEM_DRIVER);
	}

	drm_free(*heap, sizeof(**heap), DRM_MEM_DRIVER);
	*heap = NULL;
}

void nouveau_mem_close(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	nouveau_mem_takedown(&dev_priv->agp_heap);
	nouveau_mem_takedown(&dev_priv->fb_heap);
	if (dev_priv->pci_heap)
		nouveau_mem_takedown(&dev_priv->pci_heap);
}

/*XXX won't work on BSD because of pci_read_config_dword */
static uint32_t nouveau_mem_fb_amount_igp(struct drm_device *dev)
{
#if defined(__linux__) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19))
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct pci_dev *bridge;
	uint32_t mem;

	bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0,1));
	if (!bridge) {
		DRM_ERROR("no bridge device\n");
		return 0;
	}

	if (dev_priv->flags & NV_NFORCE) {
		pci_read_config_dword(bridge, 0x7C, &mem);
		return (uint64_t)(((mem >> 6) & 31) + 1)*1024*1024;
	} else if (dev_priv->flags & NV_NFORCE2) {
		pci_read_config_dword(bridge, 0x84, &mem);
		return (uint64_t)(((mem >> 4) & 127) + 1)*1024*1024;
	}

	DRM_ERROR("impossible!\n");
#else
	DRM_ERROR("Linux kernel >= 2.6.19 required to check for igp memory amount\n");
#endif

	return 0;
}

/* returns the amount of FB ram in bytes */
uint64_t nouveau_mem_fb_amount(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	switch (dev_priv->card_type) {
	case NV_04:
	case NV_05:
		if (NV_READ(NV03_BOOT_0) & 0x00000100) {
			return (((NV_READ(NV03_BOOT_0) >> 12) & 0xf)*2+2)
				*1024*1024;
		} else
			switch (NV_READ(NV03_BOOT_0) &
				NV03_BOOT_0_RAM_AMOUNT) {
			case NV04_BOOT_0_RAM_AMOUNT_32MB:
				return 32*1024*1024;
			case NV04_BOOT_0_RAM_AMOUNT_16MB:
				return 16*1024*1024;
			case NV04_BOOT_0_RAM_AMOUNT_8MB:
				return 8*1024*1024;
			case NV04_BOOT_0_RAM_AMOUNT_4MB:
				return 4*1024*1024;
			}
		break;
	case NV_10:
	case NV_11:
	case NV_17:
	case NV_20:
	case NV_30:
	case NV_40:
	case NV_44:
	case NV_50:
	default:
		if (dev_priv->flags & (NV_NFORCE | NV_NFORCE2)) {
			return nouveau_mem_fb_amount_igp(dev);
		} else {
			uint64_t mem;

			mem = (NV_READ(NV04_FIFO_DATA) &
			       NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK) >>
			      NV10_FIFO_DATA_RAM_AMOUNT_MB_SHIFT;
			return mem*1024*1024;
		}
		break;
	}

	DRM_ERROR("Unable to detect video ram size. Please report your setup "
		  "to " DRIVER_EMAIL "\n");
	return 0;
}

static int nouveau_mem_init_agp(struct drm_device *dev, int ttm)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_agp_info info;
	struct drm_agp_mode mode;
	int ret;

	ret = drm_agp_acquire(dev);
	if (ret) {
		DRM_ERROR("Unable to acquire AGP: %d\n", ret);
		return ret;
	}

	ret = drm_agp_info(dev, &info);
	if (ret) {
		DRM_ERROR("Unable to get AGP info: %d\n", ret);
		return ret;
	}

	/* see agp.h for the AGPSTAT_* modes available */
	mode.mode = info.mode;
	ret = drm_agp_enable(dev, mode);
	if (ret) {
		DRM_ERROR("Unable to enable AGP: %d\n", ret);
		return ret;
	}

	if (!ttm) {
		struct drm_agp_buffer agp_req;
		struct drm_agp_binding bind_req;

		agp_req.size = info.aperture_size;
		agp_req.type = 0;
		ret = drm_agp_alloc(dev, &agp_req);
		if (ret) {
			DRM_ERROR("Unable to alloc AGP: %d\n", ret);
			return ret;
		}

		bind_req.handle = agp_req.handle;
		bind_req.offset = 0;
		ret = drm_agp_bind(dev, &bind_req);
		if (ret) {
			DRM_ERROR("Unable to bind AGP: %d\n", ret);
			return ret;
		}
	}

	dev_priv->gart_info.type = NOUVEAU_GART_AGP;
	dev_priv->gart_info.aper_base = info.aperture_base;
	dev_priv->gart_info.aper_size = info.aperture_size;
	return 0;
}

#define HACK_OLD_MM
int nouveau_mem_init_ttm(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t vram_size, bar1_size;
	int ret;

	dev_priv->agp_heap = dev_priv->pci_heap = dev_priv->fb_heap = NULL;
	dev_priv->fb_phys = drm_get_resource_start(dev, 1);
	dev_priv->gart_info.type = NOUVEAU_GART_NONE;

	drm_bo_driver_init(dev);

	/* non-mappable vram */
	dev_priv->fb_available_size = nouveau_mem_fb_amount(dev);
	dev_priv->fb_available_size -= dev_priv->ramin_rsvd_vram;
	vram_size = dev_priv->fb_available_size >> PAGE_SHIFT;
	bar1_size = drm_get_resource_len(dev, 1) >> PAGE_SHIFT;
	if (bar1_size < vram_size) {
		if ((ret = drm_bo_init_mm(dev, DRM_BO_MEM_PRIV0,
					  bar1_size, vram_size - bar1_size))) {
			DRM_ERROR("Failed PRIV0 mm init: %d\n", ret);
			return ret;
		}
		vram_size = bar1_size;
	}

	/* mappable vram */
#ifdef HACK_OLD_MM
	vram_size /= 4;
#endif
	if ((ret = drm_bo_init_mm(dev, DRM_BO_MEM_VRAM, 0, vram_size))) {
		DRM_ERROR("Failed VRAM mm init: %d\n", ret);
		return ret;
	}

	/* GART */
#ifndef __powerpc__
	if (drm_device_is_agp(dev) && dev->agp) {
		if ((ret = nouveau_mem_init_agp(dev, 1)))
			DRM_ERROR("Error initialising AGP: %d\n", ret);
	}
#endif

	if (dev_priv->gart_info.type == NOUVEAU_GART_NONE) {
		if ((ret = nouveau_sgdma_init(dev)))
			DRM_ERROR("Error initialising PCI SGDMA: %d\n", ret);
	}

	if ((ret = drm_bo_init_mm(dev, DRM_BO_MEM_TT, 0,
				  dev_priv->gart_info.aper_size >>
				  PAGE_SHIFT))) {
		DRM_ERROR("Failed TT mm init: %d\n", ret);
		return ret;
	}

#ifdef HACK_OLD_MM
	vram_size <<= PAGE_SHIFT;
	DRM_INFO("Old MM using %dKiB VRAM\n", (vram_size * 3) >> 10);
	if (nouveau_mem_init_heap(&dev_priv->fb_heap, vram_size,
				  vram_size * 3))
		return -ENOMEM;
#endif

	return 0;
}

int nouveau_mem_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t fb_size;
	int ret = 0;

	dev_priv->agp_heap = dev_priv->pci_heap = dev_priv->fb_heap = NULL;
	dev_priv->fb_phys = 0;
	dev_priv->gart_info.type = NOUVEAU_GART_NONE;

	/* setup a mtrr over the FB */
	dev_priv->fb_mtrr = drm_mtrr_add(drm_get_resource_start(dev, 1),
					 nouveau_mem_fb_amount(dev),
					 DRM_MTRR_WC);

	/* Init FB */
	dev_priv->fb_phys = drm_get_resource_start(dev, 1);
	fb_size = nouveau_mem_fb_amount(dev);
	/* On at least NV40, RAMIN is actually at the end of vram.
	 * We don't want to allocate this...
	 */
	if (dev_priv->card_type >= NV_40)
		fb_size -= dev_priv->ramin_rsvd_vram;
	dev_priv->fb_available_size = fb_size;
	DRM_DEBUG("Available VRAM: %dKiB\n", fb_size >> 10);

	if (fb_size > 256*1024*1024) {
		/* On cards with > 256Mb, you can't map everything.
		 * So we create a second FB heap for that type of memory */
		if (nouveau_mem_init_heap(&dev_priv->fb_heap,
					  0, 256*1024*1024))
			return -ENOMEM;
		if (nouveau_mem_init_heap(&dev_priv->fb_nomap_heap,
					  256*1024*1024,
					  fb_size - 256*1024*1024))
			return -ENOMEM;
	} else {
		if (nouveau_mem_init_heap(&dev_priv->fb_heap, 0, fb_size))
			return -ENOMEM;
		dev_priv->fb_nomap_heap = NULL;
	}

#ifndef __powerpc__
	/* Init AGP / NV50 PCIEGART */
	if (drm_device_is_agp(dev) && dev->agp) {
		if ((ret = nouveau_mem_init_agp(dev, 0)))
			DRM_ERROR("Error initialising AGP: %d\n", ret);
	}
#endif

	/*Note: this is *not* just NV50 code, but only used on NV50 for now */
	if (dev_priv->gart_info.type == NOUVEAU_GART_NONE &&
	    dev_priv->card_type >= NV_50) {
		ret = nouveau_sgdma_init(dev);
		if (!ret) {
			ret = nouveau_sgdma_nottm_hack_init(dev);
			if (ret)
				nouveau_sgdma_takedown(dev);
		}

		if (ret)
			DRM_ERROR("Error initialising SG DMA: %d\n", ret);
	}

	if (dev_priv->gart_info.type != NOUVEAU_GART_NONE) {
		if (nouveau_mem_init_heap(&dev_priv->agp_heap,
					  0, dev_priv->gart_info.aper_size)) {
			if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA) {
				nouveau_sgdma_nottm_hack_takedown(dev);
				nouveau_sgdma_takedown(dev);
			}
		}
	}

	/* NV04-NV40 PCIEGART */
	if (!dev_priv->agp_heap && dev_priv->card_type < NV_50) {
		struct drm_scatter_gather sgreq;

		DRM_DEBUG("Allocating sg memory for PCI DMA\n");
		sgreq.size = 16 << 20; /* 16MB of PCI scatter-gather zone */

		if (drm_sg_alloc(dev, &sgreq)) {
			DRM_ERROR("Unable to allocate %ldMB of scatter-gather"
				  " pages for PCI DMA!", sgreq.size >> 20);
		} else {
			if (nouveau_mem_init_heap(&dev_priv->pci_heap, 0,
						  dev->sg->pages *
						  PAGE_SIZE)) {
				DRM_ERROR("Unable to initialize pci_heap!");
			}
		}
	}

	return 0;
}

struct mem_block *nouveau_mem_alloc(struct drm_device *dev, int alignment,
				    uint64_t size, int flags,
				    struct drm_file *file_priv)
{
	struct mem_block *block;
	int type;
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	/*
	 * Make things easier on ourselves: all allocations are page-aligned.
	 * We need that to map allocated regions into the user space
	 */
	if (alignment < PAGE_SHIFT)
		alignment = PAGE_SHIFT;

	/*
	 * Warn about 0 sized allocations, but let it go through.
	 * It'll return 1 page
	 */
	if (size == 0)
		DRM_INFO("warning: 0 byte allocation\n");

	/*
	 * Keep alloc size a multiple of the page size to keep
	 * drm_addmap() happy
	 */
	if (size & (~PAGE_MASK))
		size = ((size/PAGE_SIZE) + 1) * PAGE_SIZE;

#define NOUVEAU_MEM_ALLOC_AGP {\
	type = NOUVEAU_MEM_AGP;\
	block = nouveau_mem_alloc_block(dev_priv->agp_heap, size,\
					alignment, file_priv); \
	if (block) goto alloc_ok;\
	}

#define NOUVEAU_MEM_ALLOC_PCI {\
	type = NOUVEAU_MEM_PCI;\
	block = nouveau_mem_alloc_block(dev_priv->pci_heap, size, \
					alignment, file_priv); \
	if (block) goto alloc_ok;\
	}

#define NOUVEAU_MEM_ALLOC_FB {\
	type = NOUVEAU_MEM_FB;\
	if (!(flags & NOUVEAU_MEM_MAPPED)) {\
		block = nouveau_mem_alloc_block(dev_priv->fb_nomap_heap,\
						size, alignment, \
						file_priv); \
		if (block) goto alloc_ok;\
	}\
	block = nouveau_mem_alloc_block(dev_priv->fb_heap, size,\
					alignment, file_priv);\
	if (block) goto alloc_ok;\
	}

	if (flags & NOUVEAU_MEM_FB) NOUVEAU_MEM_ALLOC_FB
	if (flags & NOUVEAU_MEM_AGP) NOUVEAU_MEM_ALLOC_AGP
	if (flags & NOUVEAU_MEM_PCI) NOUVEAU_MEM_ALLOC_PCI
	if (flags & NOUVEAU_MEM_FB_ACCEPTABLE) NOUVEAU_MEM_ALLOC_FB
	if (flags & NOUVEAU_MEM_AGP_ACCEPTABLE) NOUVEAU_MEM_ALLOC_AGP
	if (flags & NOUVEAU_MEM_PCI_ACCEPTABLE) NOUVEAU_MEM_ALLOC_PCI

	return NULL;

alloc_ok:
	block->flags = type;

	if (flags & NOUVEAU_MEM_MAPPED) {
		struct drm_map_list *entry;
		int ret = 0;
		block->flags |= NOUVEAU_MEM_MAPPED;

		if (type == NOUVEAU_MEM_AGP) {
			if (dev_priv->gart_info.type != NOUVEAU_GART_SGDMA)
				ret = drm_addmap(dev, block->start,
						 block->size, _DRM_AGP,
						 0, &block->map);
			else
				ret = drm_addmap(dev, block->start,
						 block->size,
						 _DRM_SCATTER_GATHER,
						 0, &block->map);
		} else if (type == NOUVEAU_MEM_FB)
			ret = drm_addmap(dev, block->start + dev_priv->fb_phys,
					 block->size, _DRM_FRAME_BUFFER,
					 0, &block->map);
		else if (type == NOUVEAU_MEM_PCI)
			ret = drm_addmap(dev, block->start, block->size,
					 _DRM_SCATTER_GATHER, 0, &block->map);

		if (ret) {
			nouveau_mem_free_block(block);
			return NULL;
		}

		entry = drm_find_matching_map(dev, block->map);
		if (!entry) {
			nouveau_mem_free_block(block);
			return NULL;
		}
		block->map_handle = entry->user_token;
	}

	DRM_DEBUG("allocated %lld bytes at 0x%llx type=0x%08x\n",
		  block->size, block->start, block->flags);
	return block;
}

void nouveau_mem_free(struct drm_device *dev, struct mem_block *block)
{
	DRM_DEBUG("freeing 0x%llx type=0x%08x\n", block->start, block->flags);

	if (block->flags & NOUVEAU_MEM_MAPPED)
		drm_rmmap(dev, block->map);

	nouveau_mem_free_block(block);
}

/*
 * Ioctls
 */

int nouveau_ioctl_mem_alloc(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct drm_nouveau_mem_alloc *alloc = data;
	struct mem_block *block;

	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;

	block = nouveau_mem_alloc(dev, alloc->alignment, alloc->size,
				  alloc->flags, file_priv);
	if (!block)
		return -ENOMEM;

	alloc->map_handle = block->map_handle;
	alloc->offset = block->start;
	alloc->flags = block->flags;

	return 0;
}

int nouveau_ioctl_mem_free(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_nouveau_mem_free *memfree = data;
	struct mem_block *block;

	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;

	block = NULL;
	if (memfree->flags & NOUVEAU_MEM_FB)
		block = find_block(dev_priv->fb_heap, memfree->offset);
	else if (memfree->flags & NOUVEAU_MEM_AGP)
		block = find_block(dev_priv->agp_heap, memfree->offset);
	else if (memfree->flags & NOUVEAU_MEM_PCI)
		block = find_block(dev_priv->pci_heap, memfree->offset);
	if (!block)
		return -EFAULT;
	if (block->file_priv != file_priv)
		return -EPERM;

	nouveau_mem_free(dev, block);
	return 0;
}
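
/*
 * A minimal user-space sketch of the first-fit scheme the heap above
 * implements: a circular doubly-linked list of blocks ordered by address,
 * where allocation splits a free block and freeing merges with free
 * neighbours (a sentinel node with a non-zero owner, like the driver's
 * file_priv == -1, stops merges from wrapping around the list).  All
 * names below (block_t, heap_init, heap_alloc, heap_free) are
 * hypothetical illustration, not driver API.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct block {
	uint64_t start, size;
	int owner;			/* 0 = free, -1 = sentinel, else an id */
	struct block *prev, *next;
} block_t;

static block_t *heap_init(uint64_t start, uint64_t size)
{
	block_t *head = calloc(1, sizeof(*head));
	block_t *all  = calloc(1, sizeof(*all));

	head->owner = -1;		/* sentinel, never merged away */
	all->start = start;
	all->size = size;
	head->next = head->prev = all;
	all->next = all->prev = head;
	return head;
}

static block_t *heap_alloc(block_t *heap, uint64_t size, int align2, int owner)
{
	uint64_t mask = (UINT64_C(1) << align2) - 1;
	block_t *p;

	for (p = heap->next; p != heap; p = p->next) {
		uint64_t start = (p->start + mask) & ~mask;

		if (p->owner != 0 || start + size > p->start + p->size)
			continue;

		if (start > p->start) {	/* split off the unaligned head */
			block_t *n = calloc(1, sizeof(*n));
			n->start = start;
			n->size = p->size - (start - p->start);
			n->next = p->next;
			n->prev = p;
			p->next->prev = n;
			p->next = n;
			p->size -= n->size;
			p = n;
		}
		if (size < p->size) {	/* split off the unused tail */
			block_t *n = calloc(1, sizeof(*n));
			n->start = start + size;
			n->size = p->size - size;
			n->next = p->next;
			n->prev = p;
			p->next->prev = n;
			p->next = n;
			p->size = size;
		}
		p->owner = owner;
		return p;
	}
	return NULL;
}

static void heap_free(block_t *p)
{
	p->owner = 0;
	if (p->next->owner == 0) {	/* absorb a free successor */
		block_t *q = p->next;
		p->size += q->size;
		p->next = q->next;
		p->next->prev = p;
		free(q);
	}
	if (p->prev->owner == 0) {	/* let a free predecessor absorb us */
		block_t *q = p->prev;
		q->size += p->size;
		q->next = p->next;
		q->next->prev = q;
		free(p);
	}
}

int main(void)
{
	block_t *heap = heap_init(0, 1 << 20);		 /* 1MiB heap */
	block_t *a = heap_alloc(heap, 0x3000, 12, 1);	 /* 4KiB-aligned */
	block_t *b = heap_alloc(heap, 0x1000, 12, 1);

	printf("a: 0x%llx+0x%llx\n", (unsigned long long)a->start,
	       (unsigned long long)a->size);
	printf("b: 0x%llx+0x%llx\n", (unsigned long long)b->start,
	       (unsigned long long)b->size);
	heap_free(a);
	heap_free(b);	/* merges back into a single 1MiB free block */
	return 0;
}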
/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
 */
/**************************************************************************
 * 
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 * 
 **************************************************************************/

#include "i915.h"
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"

static inline void i915_print_status_page(drm_device_t * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 *temp = dev_priv->hw_status_page;

	if (!temp) {
		DRM_DEBUG("no status page\n");
		return;
	}

	DRM_DEBUG("hw_status: Interrupt Status : %x\n", temp[0]);
	DRM_DEBUG("hw_status: LpRing Head ptr : %x\n", temp[1]);
	DRM_DEBUG("hw_status: IRing Head ptr : %x\n", temp[2]);
	DRM_DEBUG("hw_status: Reserved : %x\n", temp[3]);
	DRM_DEBUG("hw_status: Driver Counter : %d\n", temp[5]);

}

/* Really want an OS-independent resettable timer.  Would like to have
 * this loop run for (eg) 3 sec, but have the timer reset every time
 * the head pointer changes, so that EBUSY only happens if the ring
 * actually stalls for (eg) 3 seconds.
 */
int i915_wait_ring(drm_device_t * dev, int n, const char *caller)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
	u32 last_head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
	int i;

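	/*
	 * Poll the hardware head pointer; free space is the gap from tail
	 * to head in the circular ring, held 8 bytes short so the tail can
	 * never quite catch up with the head.
	 */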
	for (i = 0; i < 10000; i++) {
		ring->head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
		ring->space = ring->head - (ring->tail + 8);
		if (ring->space < 0)
			ring->space += ring->Size;
		if (ring->space >= n)
			return 0;

		dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;

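		/* The head advanced, so the ring is still draining; reset
		 * the poll count and only give up on a genuine stall. */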
		if (ring->head != last_head)
			i = 0;

		last_head = ring->head;
	}

	return DRM_ERR(EBUSY);
}

void i915_kernel_lost_context(drm_device_t * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_ring_buffer_t *ring = &(dev_priv->ring);

	ring->head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
	ring->tail = I915_READ(LP_RING + RING_TAIL) & TAIL_ADDR;
	ring->space = ring->head - (ring->tail + 8);
	if (ring->space < 0)
		ring->space += ring->Size;

	if (ring->head == ring->tail)
		dev_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
}

int i915_dma_cleanup(drm_device_t * dev)
{
	/* Make sure interrupts are disabled here because the uninstall ioctl
	 * may not have been called from userspace and after dev_private
	 * is freed, it's too late.
	 */
	if (dev->irq)
		DRM(irq_uninstall) (dev);

	if (dev->dev_private) {
		drm_i915_private_t *dev_priv =
		    (drm_i915_private_t *) dev->dev_private;

		if (dev_priv->ring.virtual_start) {
			drm_core_ioremapfree(&dev_priv->ring.map, dev);
		}

		if (dev_priv->hw_status_page) {
#ifdef __FreeBSD__
#if __FreeBSD_version > 500000
			contigfree(dev_priv->hw_status_page, PAGE_SIZE, M_DRM);
#endif
#else
			pci_free_consistent(dev->pdev, PAGE_SIZE,
					    dev_priv->hw_status_page,
					    dev_priv->dma_status_page);
#endif
			/* Need to rewrite the hardware status page address
			 * register before its backing memory is freed */
			I915_WRITE(0x02080, 0x1ffff000);
		}

		DRM(free) (dev->dev_private, sizeof(drm_i915_private_t),
			   DRM_MEM_DRIVER);

		dev->dev_private = NULL;
	}

	return 0;
}

static int i915_initialize(drm_device_t * dev,
			   drm_i915_private_t * dev_priv,
			   drm_i915_init_t * init)
{
	memset(dev_priv, 0, sizeof(drm_i915_private_t));

	DRM_GETSAREA();
	if (!dev_priv->sarea) {
		DRM_ERROR("can not find sarea!\n");
		dev->dev_private = (void *)dev_priv;
		i915_dma_cleanup(dev);
		return DRM_ERR(EINVAL);
	}

	dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset);
	if (!dev_priv->mmio_map) {
		dev->dev_private = (void *)dev_priv;
		i915_dma_cleanup(dev);
		DRM_ERROR("can not find mmio map!\n");
		return DRM_ERR(EINVAL);
	}

	dev_priv->sarea_priv = (drm_i915_sarea_t *)
	    ((u8 *) dev_priv->sarea->handle + init->sarea_priv_offset);

	dev_priv->ring.Start = init->ring_start;
	dev_priv->ring.End = init->ring_end;
	dev_priv->ring.Size = init->ring_size;
	dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;

	dev_priv->ring.map.offset = init->ring_start;
	dev_priv->ring.map.size = init->ring_size;
	dev_priv->ring.map.type = 0;
	dev_priv->ring.map.flags = 0;
	dev_priv->ring.map.mtrr = 0;

	drm_core_ioremap(&dev_priv->ring.map, dev);

	if (dev_priv->ring.map.handle == NULL) {
		dev->dev_private = (void *)dev_priv;
		i915_dma_cleanup(dev);
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return DRM_ERR(ENOMEM);
	}

	dev_priv->ring.virtual_start = dev_priv->ring.map.handle;

	dev_priv->back_offset = init->back_offset;
	dev_priv->front_offset = init->front_offset;
	dev_priv->current_page = 0;
	dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;

	/* We are using separate values as placeholders for mechanisms for
	 * private backbuffer/depthbuffer usage.
	 */
	dev_priv->use_mi_batchbuffer_start = 0;

	/* Allow hardware batchbuffers unless told otherwise.
	 */
	dev_priv->allow_batchbuffer = 1;

	/* Program Hardware Status Page */
#ifdef __FreeBSD__
	dev_priv->hw_status_page =
	    contigmalloc(PAGE_SIZE, DRM(M_DRM), M_NOWAIT, 0ul, 0, 0, 0);
	dev_priv->dma_status_page = vtophys(dev_priv->hw_status_page);
#else
	dev_priv->hw_status_page =
	    pci_alloc_consistent(dev->pdev, PAGE_SIZE,
				 &dev_priv->dma_status_page);
#endif

	if (!dev_priv->hw_status_page) {
		dev->dev_private = (void *)dev_priv;
		i915_dma_cleanup(dev);
		DRM_ERROR("Can not allocate hardware status page\n");
		return DRM_ERR(ENOMEM);
	}
	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
	DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);

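	/* 0x02080 is the hardware status page address register (HWS_PGA). */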
	I915_WRITE(0x02080, dev_priv->dma_status_page);
	DRM_DEBUG("Enabled hardware status page\n");

	dev->dev_private = (void *)dev_priv;

	return 0;
}

static int i915_resume(drm_device_t * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	DRM_DEBUG("%s\n", __FUNCTION__);

	if (!dev_priv->sarea) {
		DRM_ERROR("can not find sarea!\n");
		return DRM_ERR(EINVAL);
	}

	if (!dev_priv->mmio_map) {
		DRM_ERROR("can not find mmio map!\n");
		return DRM_ERR(EINVAL);
	}

	if (dev_priv->ring.map.handle == NULL) {
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return DRM_ERR(ENOMEM);
	}

	/* Program Hardware Status Page */
	if (!dev_priv->hw_status_page) {
		DRM_ERROR("Can not find hardware status page\n");
		return DRM_ERR(EINVAL);
	}
	DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);

	I915_WRITE(0x02080, dev_priv->dma_status_page);
	DRM_DEBUG("Enabled hardware status page\n");

	return 0;
}

int i915_dma_init(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_i915_private_t *dev_priv;
	drm_i915_init_t init;
	int retcode = 0;

	DRM_COPY_FROM_USER_IOCTL(init, (drm_i915_init_t __user *) data,
				 sizeof(init));

	switch (init.func) {
	case I915_INIT_DMA:
		dev_priv = DRM(alloc) (sizeof(drm_i915_private_t),
				       DRM_MEM_DRIVER);
		if (dev_priv == NULL)
			return DRM_ERR(ENOMEM);
		retcode = i915_initialize(dev, dev_priv, &init);
		break;
	case I915_CLEANUP_DMA:
		retcode = i915_dma_cleanup(dev);
		break;
	case I915_RESUME_DMA:
		retcode = i915_resume(dev);
		break;
	default:
		retcode = DRM_ERR(EINVAL);
		break;
	}

	return retcode;
}

/* Implement basically the same security restrictions as hardware does
 * for MI_BATCH_NON_SECURE.  These can be made stricter at any time.
 *
 * Most of the calculations below involve calculating the size of a
 * particular instruction.  It's important to get the size right as
 * that tells us where the next instruction to check is.  Any illegal
 * instruction detected will be given a size of zero, which is a
 * signal to abort the rest of the buffer.
 */
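/* For example, a hypothetical 2D-client command dword 0x54000006 has type
 * bits ((cmd >> 29) & 0x7) == 0x2, so do_validate_cmd() sizes it as
 * (cmd & 0xff) + 2 = 8 dwords and the scan resumes 8 dwords later; a
 * returned size of 0 marks the command illegal and aborts the buffer.
 */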
static int do_validate_cmd(int cmd)
{
	switch (((cmd >> 29) & 0x7)) {
	case 0x0:
		switch ((cmd >> 23) & 0x3f) {
		case 0x0:
			return 1;	/* MI_NOOP */
		case 0x4:
			return 1;	/* MI_FLUSH */
		default:
			return 0;	/* disallow everything else */
		}
		break;
	case 0x1:
		return 0;	/* reserved */
	case 0x2:
		return (cmd & 0xff) + 2;	/* 2d commands */
	case 0x3:
		if (((cmd >> 24) & 0x1f) <= 0x18)
			return 1;

		switch ((cmd >> 24) & 0x1f) {
		case 0x1c:
			return 1;
		case 0x1d:
			switch ((cmd >> 16) & 0xff) {
			case 0x3:
				return (cmd & 0x1f) + 2;
			case 0x4:
				return (cmd & 0xf) + 2;
			default:
				return (cmd & 0xffff) + 2;
			}
		case 0x1e:
			if (cmd & (1 << 23))
				return (cmd & 0xffff) + 1;
			else
				return 1;
		case 0x1f:
			if ((cmd & (1 << 23)) == 0)	/* inline vertices */
				return (cmd & 0x1ffff) + 2;
			else if (cmd & (1 << 17)) {	/* indirect random */
				if ((cmd & 0xffff) == 0)
					return 0;	/* unknown length, too hard */
				else
					return (((cmd & 0xffff) + 1) / 2) + 1;
			} else
				return 2;	/* indirect sequential */
		default:
			return 0;
		}
	default:
		return 0;
	}

	return 0;
}

static int validate_cmd(int cmd)
{
	int ret = do_validate_cmd(cmd);

/* 	printk("validate_cmd( %x ): %d\n", cmd, ret); */

	return ret;
}

static int i915_emit_cmds(drm_device_t * dev, int __user * buffer, int dwords)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;
	RING_LOCALS;

	for (i = 0; i < dwords;) {
		int cmd, sz;

		if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], sizeof(cmd)))
			return DRM_ERR(EINVAL);

/* 		printk("%d/%d ", i, dwords); */

		if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords)
			return DRM_ERR(EINVAL);

		BEGIN_LP_RING(sz);