/*
 * Copyright (C) The Weather Channel, Inc. 2002.  All Rights Reserved.
 * Copyright 2005 Stephane Marchesin
 *
 * The Weather Channel (TM) funded Tungsten Graphics to develop the
 * initial release of the Radeon 8500 driver under the XFree86 license.
 * This notice must be preserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Whitwell
 */

#include "drmP.h"
#include "drm.h"
#include "drm_sarea.h"
#include "nouveau_drv.h"

static struct mem_block *
split_block(struct mem_block *p, uint64_t start, uint64_t size,
	    struct drm_file *file_priv)
{
	/* Maybe cut off the start of an existing block */
	if (start > p->start) {
		struct mem_block *newblock =
			drm_alloc(sizeof(*newblock), DRM_MEM_BUFS);
		if (!newblock)
			goto out;
		newblock->start = start;
		newblock->size = p->size - (start - p->start);
		newblock->file_priv = NULL;
		newblock->next = p->next;
		newblock->prev = p;
		p->next->prev = newblock;
		p->next = newblock;
		p->size -= newblock->size;
		p = newblock;
	}

	/* Maybe cut off the end of an existing block */
	if (size < p->size) {
		struct mem_block *newblock =
			drm_alloc(sizeof(*newblock), DRM_MEM_BUFS);
		if (!newblock)
			goto out;
		newblock->start = start + size;
		newblock->size = p->size - size;
		newblock->file_priv = NULL;
		newblock->next = p->next;
		newblock->prev = p;
		p->next->prev = newblock;
		p->next = newblock;
		p->size = size;
	}

out:
	/* Our block is in the middle */
	p->file_priv = file_priv;
	return p;
}
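
/*
 * Find a free region in 'heap' and mark it as owned by 'file_priv'.
 * 'align2' is the log2 of the required byte alignment (hence the
 * (1 << align2) - 1 mask below), and 'tail' requests allocation from the
 * end of the heap instead of the start.
 */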
struct mem_block *
nouveau_mem_alloc_block(struct mem_block *heap, uint64_t size,
			int align2, struct drm_file *file_priv, int tail)
{
	struct mem_block *p;
	uint64_t mask = (1 << align2) - 1;

	if (!heap)
		return NULL;

	if (tail) {
		list_for_each_prev(p, heap) {
			uint64_t start = ((p->start + p->size) - size) & ~mask;

			if (p->file_priv == 0 && start >= p->start &&
			    start + size <= p->start + p->size)
				return split_block(p, start, size, file_priv);
		}
	} else {
		list_for_each(p, heap) {
			uint64_t start = (p->start + mask) & ~mask;

			if (p->file_priv == 0 &&
			    start + size <= p->start + p->size)
				return split_block(p, start, size, file_priv);
		}
	}

	return NULL;
}

static struct mem_block *find_block(struct mem_block *heap, uint64_t start)
{
	struct mem_block *p;

	list_for_each(p, heap)
		if (p->start == start)
			return p;

	return NULL;
}

void nouveau_mem_free_block(struct mem_block *p)
{
	p->file_priv = NULL;

	/* Assumes a single contiguous range.  Needs a special file_priv in
	 * 'heap' to stop it being subsumed.
	 */
	if (p->next->file_priv == 0) {
		struct mem_block *q = p->next;
		p->size += q->size;
		p->next = q->next;
		p->next->prev = p;
		drm_free(q, sizeof(*q), DRM_MEM_BUFS);
	}

	if (p->prev->file_priv == 0) {
		struct mem_block *q = p->prev;
		q->size += p->size;
		q->next = p->next;
		q->next->prev = q;
		drm_free(p, sizeof(*q), DRM_MEM_BUFS);
	}
}

/* Initialize.  How to check for an uninitialized heap? */
int nouveau_mem_init_heap(struct mem_block **heap, uint64_t start,
			  uint64_t size)
{
	struct mem_block *blocks = drm_alloc(sizeof(*blocks), DRM_MEM_BUFS);

	if (!blocks)
		return -ENOMEM;

	*heap = drm_alloc(sizeof(**heap), DRM_MEM_BUFS);
	if (!*heap) {
		drm_free(blocks, sizeof(*blocks), DRM_MEM_BUFS);
		return -ENOMEM;
	}

	blocks->start = start;
	blocks->size = size;
	blocks->file_priv = NULL;
	blocks->next = blocks->prev = *heap;

	memset(*heap, 0, sizeof(**heap));
	(*heap)->file_priv = (struct drm_file *) -1;
	(*heap)->next = (*heap)->prev = blocks;
	return 0;
}

/*
 * Free all blocks associated with the releasing file_priv
 */
void nouveau_mem_release(struct drm_file *file_priv, struct mem_block *heap)
{
	struct mem_block *p;

	if (!heap || !heap->next)
		return;

	list_for_each(p, heap) {
		if (p->file_priv == file_priv)
			p->file_priv = NULL;
	}

	/* Assumes a single contiguous range.  Needs a special file_priv in
	 * 'heap' to stop it being subsumed.
	 */
	list_for_each(p, heap) {
		while ((p->file_priv == 0) && (p->next->file_priv == 0) &&
		       (p->next != heap)) {
			struct mem_block *q = p->next;
			p->size += q->size;
			p->next = q->next;
			p->next->prev = p;
			drm_free(q, sizeof(*q), DRM_MEM_DRIVER);
		}
	}
}

/*
 * Cleanup everything
 */
void nouveau_mem_takedown(struct mem_block **heap)
{
	struct mem_block *p;

	if (!*heap)
		return;

	for (p = (*heap)->next; p != *heap;) {
		struct mem_block *q = p;
		p = p->next;
		drm_free(q, sizeof(*q), DRM_MEM_DRIVER);
	}

	drm_free(*heap, sizeof(**heap), DRM_MEM_DRIVER);
	*heap = NULL;
}

void nouveau_mem_close(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	nouveau_mem_takedown(&dev_priv->agp_heap);
	nouveau_mem_takedown(&dev_priv->fb_heap);
	if (dev_priv->pci_heap)
		nouveau_mem_takedown(&dev_priv->pci_heap);
}

/*XXX BSD needs compat functions for pci access
 * #define DRM_PCI_DEV	struct device
 * #define drm_pci_get_bsf	pci_get_bsf
 * and a small inline to do *val = pci_read_config(pdev->device, where, 4);
 * might work
 */
static int nforce_pci_fn_read_config_dword(int devfn, int where, uint32_t *val)
{
#ifdef __linux__
	DRM_PCI_DEV *pdev;

	if (!(pdev = drm_pci_get_bsf(0, 0, devfn))) {
		DRM_ERROR("nForce PCI device function 0x%02x not found\n",
			  devfn);
		return -ENODEV;
	}

	return drm_pci_read_config_dword(pdev, where, val);
#else
	DRM_ERROR("BSD compat for checking IGP memory amount needed\n");
	return 0;
#endif
}

static void nouveau_mem_check_nforce_dimms(struct drm_device *dev)
{
	uint32_t mem_ctrlr_pciid;

	nforce_pci_fn_read_config_dword(3, 0x00, &mem_ctrlr_pciid);
	mem_ctrlr_pciid >>= 16;

	if (mem_ctrlr_pciid == 0x01a9 || mem_ctrlr_pciid == 0x01ab ||
	    mem_ctrlr_pciid == 0x01ed) {
		uint32_t dimm[3];
		int i;

		for (i = 0; i < 3; i++) {
			nforce_pci_fn_read_config_dword(2, 0x40 + i * 4,
							&dimm[i]);
			dimm[i] = (dimm[i] >> 8) & 0x4f;
		}

		if (dimm[0] + dimm[1] != dimm[2])
			DRM_INFO("Your nForce DIMMs are not arranged in "
				 "optimal banks!\n");
	}
}
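
/*
 * nForce/nForce2 IGPs carve the framebuffer out of system RAM, so the
 * usable amount is read from the memory controller's PCI config space
 * rather than from the card itself.
 */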
static uint32_t
nouveau_mem_fb_amount_igp(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t mem = 0;

	if (dev_priv->flags & NV_NFORCE) {
		nforce_pci_fn_read_config_dword(1, 0x7C, &mem);
		return (uint64_t)(((mem >> 6) & 31) + 1)*1024*1024;
	}

	if (dev_priv->flags & NV_NFORCE2) {
		nforce_pci_fn_read_config_dword(1, 0x84, &mem);
		return (uint64_t)(((mem >> 4) & 127) + 1)*1024*1024;
	}

	DRM_ERROR("impossible!\n");
	return 0;
}

/* returns the amount of FB ram in bytes */
uint64_t nouveau_mem_fb_amount(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	switch (dev_priv->card_type) {
	case NV_04:
	case NV_05:
		if (NV_READ(NV03_BOOT_0) & 0x00000100) {
			return (((NV_READ(NV03_BOOT_0) >> 12) & 0xf) * 2 + 2)
			       * 1024 * 1024;
		} else
			switch (NV_READ(NV03_BOOT_0) &
				NV03_BOOT_0_RAM_AMOUNT) {
			case NV04_BOOT_0_RAM_AMOUNT_32MB:
				return 32*1024*1024;
			case NV04_BOOT_0_RAM_AMOUNT_16MB:
				return 16*1024*1024;
			case NV04_BOOT_0_RAM_AMOUNT_8MB:
				return 8*1024*1024;
			case NV04_BOOT_0_RAM_AMOUNT_4MB:
				return 4*1024*1024;
			}
		break;
	case NV_10:
	case NV_11:
	case NV_17:
	case NV_20:
	case NV_30:
	case NV_40:
	case NV_44:
	case NV_50:
	default:
		if (dev_priv->flags & (NV_NFORCE | NV_NFORCE2)) {
			return nouveau_mem_fb_amount_igp(dev);
		} else {
			uint64_t mem;

			mem = (NV_READ(NV10_PFB_CSTATUS) &
			       NV10_PFB_CSTATUS_RAM_AMOUNT_MB_MASK) >>
			      NV10_PFB_CSTATUS_RAM_AMOUNT_MB_SHIFT;
			return mem*1024*1024;
		}
		break;
	}

	DRM_ERROR("Unable to detect video ram size. Please report your setup "
		  "to " DRIVER_EMAIL "\n");
	return 0;
}

static void nouveau_mem_reset_agp(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t saved_pci_nv_1, saved_pci_nv_19, pmc_enable;

	saved_pci_nv_1 = NV_READ(NV04_PBUS_PCI_NV_1);
	saved_pci_nv_19 = NV_READ(NV04_PBUS_PCI_NV_19);

	/* clear busmaster bit */
	NV_WRITE(NV04_PBUS_PCI_NV_1, saved_pci_nv_1 & ~0x4);
	/* clear SBA and AGP bits */
	NV_WRITE(NV04_PBUS_PCI_NV_19, saved_pci_nv_19 & 0xfffff0ff);

	/* power cycle pgraph, if enabled */
	pmc_enable = NV_READ(NV03_PMC_ENABLE);
	if (pmc_enable & NV_PMC_ENABLE_PGRAPH) {
		NV_WRITE(NV03_PMC_ENABLE,
			 pmc_enable & ~NV_PMC_ENABLE_PGRAPH);
		NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) |
			 NV_PMC_ENABLE_PGRAPH);
	}

	/* and restore (gives effect of resetting AGP) */
	NV_WRITE(NV04_PBUS_PCI_NV_19, saved_pci_nv_19);
	NV_WRITE(NV04_PBUS_PCI_NV_1, saved_pci_nv_1);
}

static int nouveau_mem_init_agp(struct drm_device *dev, int ttm)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_agp_info info;
	struct drm_agp_mode mode;
	int ret;

	nouveau_mem_reset_agp(dev);

	ret = drm_agp_acquire(dev);
	if (ret) {
		DRM_ERROR("Unable to acquire AGP: %d\n", ret);
		return ret;
	}

	ret = drm_agp_info(dev, &info);
	if (ret) {
		DRM_ERROR("Unable to get AGP info: %d\n", ret);
		return ret;
	}

	/* see agp.h for the AGPSTAT_* modes available */
	mode.mode = info.mode;
	ret = drm_agp_enable(dev, mode);
	if (ret) {
		DRM_ERROR("Unable to enable AGP: %d\n", ret);
		return ret;
	}

	if (!ttm) {
		struct drm_agp_buffer agp_req;
		struct drm_agp_binding bind_req;

		agp_req.size = info.aperture_size;
		agp_req.type = 0;
		ret = drm_agp_alloc(dev, &agp_req);
		if (ret) {
			DRM_ERROR("Unable to alloc AGP: %d\n", ret);
			return ret;
		}

		bind_req.handle = agp_req.handle;
		bind_req.offset = 0;
		ret = drm_agp_bind(dev, &bind_req);
		if (ret) {
			DRM_ERROR("Unable to bind AGP: %d\n", ret);
			return ret;
		}
	}

	dev_priv->gart_info.type = NOUVEAU_GART_AGP;
	dev_priv->gart_info.aper_base = info.aperture_base;
	dev_priv->gart_info.aper_size = info.aperture_size;
	return 0;
}
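
/*
 * TTM-based setup.  Only the VRAM covered by BAR1 is CPU-mappable;
 * anything beyond it is handed to a separate non-mappable memory type
 * (DRM_BO_MEM_PRIV0) and the mappable DRM_BO_MEM_VRAM pool is clamped to
 * the BAR size.
 */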
#define HACK_OLD_MM
int
nouveau_mem_init_ttm(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t vram_size, bar1_size;
	int ret;

	dev_priv->agp_heap = dev_priv->pci_heap = dev_priv->fb_heap = NULL;
	dev_priv->fb_phys = drm_get_resource_start(dev, 1);
	dev_priv->gart_info.type = NOUVEAU_GART_NONE;

	drm_bo_driver_init(dev);

	/* non-mappable vram */
	dev_priv->fb_available_size = nouveau_mem_fb_amount(dev);
	dev_priv->fb_available_size -= dev_priv->ramin_rsvd_vram;
	vram_size = dev_priv->fb_available_size >> PAGE_SHIFT;
	bar1_size = drm_get_resource_len(dev, 1) >> PAGE_SHIFT;
	if (bar1_size < vram_size) {
		if ((ret = drm_bo_init_mm(dev, DRM_BO_MEM_PRIV0, bar1_size,
					  vram_size - bar1_size, 1))) {
			DRM_ERROR("Failed PRIV0 mm init: %d\n", ret);
			return ret;
		}
		vram_size = bar1_size;
	}

	/* mappable vram */
#ifdef HACK_OLD_MM
	vram_size /= 4;
#endif
	if ((ret = drm_bo_init_mm(dev, DRM_BO_MEM_VRAM, 0, vram_size, 1))) {
		DRM_ERROR("Failed VRAM mm init: %d\n", ret);
		return ret;
	}

	/* GART */
#if !defined(__powerpc__) && !defined(__ia64__)
	if (drm_device_is_agp(dev) && dev->agp) {
		if ((ret = nouveau_mem_init_agp(dev, 1)))
			DRM_ERROR("Error initialising AGP: %d\n", ret);
	}
#endif

	if (dev_priv->gart_info.type == NOUVEAU_GART_NONE) {
		if ((ret = nouveau_sgdma_init(dev)))
			DRM_ERROR("Error initialising PCI SGDMA: %d\n", ret);
	}

	if ((ret = drm_bo_init_mm(dev, DRM_BO_MEM_TT, 0,
				  dev_priv->gart_info.aper_size >>
				  PAGE_SHIFT, 1))) {
		DRM_ERROR("Failed TT mm init: %d\n", ret);
		return ret;
	}

#ifdef HACK_OLD_MM
	vram_size <<= PAGE_SHIFT;
	DRM_INFO("Old MM using %dKiB VRAM\n", (vram_size * 3) >> 10);
	if (nouveau_mem_init_heap(&dev_priv->fb_heap, vram_size,
				  vram_size * 3))
		return -ENOMEM;
#endif

	return 0;
}
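
/*
 * Legacy (non-TTM) initialisation: sets up an MTRR over the framebuffer,
 * then carves the detected VRAM, the AGP/PCIE GART aperture and (on G8x)
 * a shared VRAM page table into the simple heap allocator above.
 */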
int nouveau_mem_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t fb_size;
	int ret = 0;

	dev_priv->agp_heap = dev_priv->pci_heap = dev_priv->fb_heap = NULL;
	dev_priv->fb_phys = 0;
	dev_priv->gart_info.type = NOUVEAU_GART_NONE;

	if (dev_priv->flags & (NV_NFORCE | NV_NFORCE2))
		nouveau_mem_check_nforce_dimms(dev);

	/* setup a mtrr over the FB */
	dev_priv->fb_mtrr = drm_mtrr_add(drm_get_resource_start(dev, 1),
					 nouveau_mem_fb_amount(dev),
					 DRM_MTRR_WC);

	/* Init FB */
	dev_priv->fb_phys = drm_get_resource_start(dev, 1);
	fb_size = nouveau_mem_fb_amount(dev);
	/* On G80, limit VRAM to 512MiB temporarily due to limits in how
	 * we handle VRAM page tables.
	 */
	if (dev_priv->card_type >= NV_50 && fb_size > (512 * 1024 * 1024))
		fb_size = (512 * 1024 * 1024);
	fb_size -= dev_priv->ramin_rsvd_vram;
	dev_priv->fb_available_size = fb_size;
	DRM_DEBUG("Available VRAM: %dKiB\n", fb_size >> 10);

	if (fb_size > 256*1024*1024) {
		/* On cards with > 256Mb, you can't map everything.
		 * So we create a second FB heap for that type of memory */
		if (nouveau_mem_init_heap(&dev_priv->fb_heap,
					  0, 256*1024*1024))
			return -ENOMEM;
		if (nouveau_mem_init_heap(&dev_priv->fb_nomap_heap,
					  256*1024*1024,
					  fb_size - 256*1024*1024))
			return -ENOMEM;
	} else {
		if (nouveau_mem_init_heap(&dev_priv->fb_heap, 0, fb_size))
			return -ENOMEM;
		dev_priv->fb_nomap_heap = NULL;
	}

#if !defined(__powerpc__) && !defined(__ia64__)
	/* Init AGP / NV50 PCIEGART */
	if (drm_device_is_agp(dev) && dev->agp) {
		if ((ret = nouveau_mem_init_agp(dev, 0)))
			DRM_ERROR("Error initialising AGP: %d\n", ret);
	}
#endif

	/*Note: this is *not* just NV50 code, but only used on NV50 for now */
	if (dev_priv->gart_info.type == NOUVEAU_GART_NONE &&
	    dev_priv->card_type >= NV_50) {
		ret = nouveau_sgdma_init(dev);
		if (!ret) {
			ret = nouveau_sgdma_nottm_hack_init(dev);
			if (ret)
				nouveau_sgdma_takedown(dev);
		}

		if (ret)
			DRM_ERROR("Error initialising SG DMA: %d\n", ret);
	}

	if (dev_priv->gart_info.type != NOUVEAU_GART_NONE) {
		if (nouveau_mem_init_heap(&dev_priv->agp_heap,
					  0, dev_priv->gart_info.aper_size)) {
			if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA) {
				nouveau_sgdma_nottm_hack_takedown(dev);
				nouveau_sgdma_takedown(dev);
			}
		}
	}

	/* NV04-NV40 PCIEGART */
	if (!dev_priv->agp_heap && dev_priv->card_type < NV_50) {
		struct drm_scatter_gather sgreq;

		DRM_DEBUG("Allocating sg memory for PCI DMA\n");
		sgreq.size = 16 << 20; /* 16MB of PCI scatter-gather zone */

		if (drm_sg_alloc(dev, &sgreq)) {
			DRM_ERROR("Unable to allocate %ldMB of scatter-gather"
				  " pages for PCI DMA!", sgreq.size >> 20);
		} else {
			if (nouveau_mem_init_heap(&dev_priv->pci_heap, 0,
						  dev->sg->pages *
						  PAGE_SIZE)) {
				DRM_ERROR("Unable to initialize pci_heap!");
			}
		}
	}

	/* G8x: Allocate shared page table to map real VRAM pages into */
	if (dev_priv->card_type >= NV_50) {
		unsigned size = ((512 * 1024 * 1024) / 65536) * 8;

		ret = nouveau_gpuobj_new(dev, NULL, size, 0,
					 NVOBJ_FLAG_ZERO_ALLOC |
					 NVOBJ_FLAG_ALLOW_NO_REFS,
					 &dev_priv->vm_vram_pt);
		if (ret) {
			DRM_ERROR("Error creating VRAM page table: %d\n", ret);
			return ret;
		}
	}

	return 0;
}
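
/*
 * Central allocator.  Explicitly requested heap types (NOUVEAU_MEM_FB,
 * NOUVEAU_MEM_AGP, NOUVEAU_MEM_PCI) are tried first, then the
 * *_ACCEPTABLE fallbacks, in that order.  In-kernel allocations (those
 * without NOUVEAU_MEM_USER) are carved from the tail of the heap,
 * presumably to keep them out of the way of userspace allocations.
 */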
struct mem_block *nouveau_mem_alloc(struct drm_device *dev, int alignment,
				    uint64_t size, int flags,
				    struct drm_file *file_priv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct mem_block *block;
	int type, tail = !(flags & NOUVEAU_MEM_USER);

	/*
	 * Make things easier on ourselves: all allocations are page-aligned.
	 * We need that to map allocated regions into the user space
	 */
	if (alignment < PAGE_SIZE)
		alignment = PAGE_SIZE;

	/* Align allocation sizes to 64KiB blocks on G8x.  We use a 64KiB
	 * page size in the GPU VM.
	 */
	if (flags & NOUVEAU_MEM_FB && dev_priv->card_type >= NV_50) {
		size = (size + 65535) & ~65535;
		if (alignment < 65536)
			alignment = 65536;
	}

	/* Further down wants alignment in pages, not bytes */
	alignment >>= PAGE_SHIFT;

	/*
	 * Warn about 0 sized allocations, but let it go through.
	 * It'll return 1 page
	 */
	if (size == 0)
		DRM_INFO("warning : 0 byte allocation\n");

	/*
	 * Keep alloc size a multiple of the page size to keep
	 * drm_addmap() happy
	 */
	if (size & (~PAGE_MASK))
		size = ((size/PAGE_SIZE) + 1) * PAGE_SIZE;

#define NOUVEAU_MEM_ALLOC_AGP {\
	type = NOUVEAU_MEM_AGP;\
	block = nouveau_mem_alloc_block(dev_priv->agp_heap, size,\
					alignment, file_priv, tail); \
	if (block) goto alloc_ok;\
	}

#define NOUVEAU_MEM_ALLOC_PCI {\
	type = NOUVEAU_MEM_PCI;\
	block = nouveau_mem_alloc_block(dev_priv->pci_heap, size,\
					alignment, file_priv, tail); \
	if (block) goto alloc_ok;\
	}

#define NOUVEAU_MEM_ALLOC_FB {\
	type = NOUVEAU_MEM_FB;\
	if (!(flags & NOUVEAU_MEM_MAPPED)) {\
		block = nouveau_mem_alloc_block(dev_priv->fb_nomap_heap,\
						size, alignment, \
						file_priv, tail); \
		if (block) goto alloc_ok;\
	}\
	block = nouveau_mem_alloc_block(dev_priv->fb_heap, size,\
					alignment, file_priv, tail);\
	if (block) goto alloc_ok;\
	}

	if (flags & NOUVEAU_MEM_FB) NOUVEAU_MEM_ALLOC_FB
	if (flags & NOUVEAU_MEM_AGP) NOUVEAU_MEM_ALLOC_AGP
	if (flags & NOUVEAU_MEM_PCI) NOUVEAU_MEM_ALLOC_PCI
	if (flags & NOUVEAU_MEM_FB_ACCEPTABLE) NOUVEAU_MEM_ALLOC_FB
	if (flags & NOUVEAU_MEM_AGP_ACCEPTABLE) NOUVEAU_MEM_ALLOC_AGP
	if (flags & NOUVEAU_MEM_PCI_ACCEPTABLE) NOUVEAU_MEM_ALLOC_PCI

	return NULL;

alloc_ok:
	block->flags = type;
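
	/* The mapping below writes one pair of 32-bit PTE words per 64KiB
	 * page into vm_vram_pt: the even word takes the VRAM offset with
	 * bit 0 set (apparently a "present" flag), the odd word takes the
	 * tile mode bits.
	 */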
	/* On G8x, map memory into VM */
	if (block->flags & NOUVEAU_MEM_FB && dev_priv->card_type >= NV_50 &&
	    !(flags & NOUVEAU_MEM_NOVM)) {
		struct nouveau_gpuobj *pt = dev_priv->vm_vram_pt;
		unsigned offset = block->start;
		unsigned count = block->size / 65536;
		unsigned tile = 0;

		if (!pt) {
			DRM_ERROR("vm alloc without vm pt\n");
			nouveau_mem_free_block(block);
			return NULL;
		}

		/* The tiling stuff is *not* what NVIDIA does - but both the
		 * 2D and 3D engines seem happy with this simpler method.
		 * Should look into why NVIDIA do what they do at some point.
		 */
		if (flags & NOUVEAU_MEM_TILE) {
			if (flags & NOUVEAU_MEM_TILE_ZETA)
				tile = 0x00002800;
			else
				tile = 0x00007000;
		}

		while (count--) {
			unsigned pte = offset / 65536;

			INSTANCE_WR(pt, (pte * 2) + 0, offset | 1);
			INSTANCE_WR(pt, (pte * 2) + 1, 0x00000000 | tile);
			offset += 65536;
		}
	} else {
		block->flags |= NOUVEAU_MEM_NOVM;
	}

	if (flags & NOUVEAU_MEM_MAPPED) {
		struct drm_map_list *entry;
		int ret = 0;

		block->flags |= NOUVEAU_MEM_MAPPED;

		if (type == NOUVEAU_MEM_AGP) {
			if (dev_priv->gart_info.type != NOUVEAU_GART_SGDMA)
				ret = drm_addmap(dev, block->start,
						 block->size,
						 _DRM_AGP, 0, &block->map);
			else
				ret = drm_addmap(dev, block->start,
						 block->size,
						 _DRM_SCATTER_GATHER, 0,
						 &block->map);
		} else if (type == NOUVEAU_MEM_FB)
			ret = drm_addmap(dev, block->start + dev_priv->fb_phys,
					 block->size, _DRM_FRAME_BUFFER,
					 0, &block->map);
		else if (type == NOUVEAU_MEM_PCI)
			ret = drm_addmap(dev, block->start, block->size,
					 _DRM_SCATTER_GATHER, 0, &block->map);

		if (ret) {
			nouveau_mem_free_block(block);
			return NULL;
		}

		entry = drm_find_matching_map(dev, block->map);
		if (!entry) {
			nouveau_mem_free_block(block);
			return NULL;
		}
		block->map_handle = entry->user_token;
	}

	DRM_DEBUG("allocated %lld bytes at 0x%llx type=0x%08x\n",
		  block->size, block->start, block->flags);
	return block;
}

void nouveau_mem_free(struct drm_device *dev, struct mem_block *block)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	DRM_DEBUG("freeing 0x%llx type=0x%08x\n", block->start, block->flags);

	if (block->flags & NOUVEAU_MEM_MAPPED)
		drm_rmmap(dev, block->map);

	/* G8x: Remove pages from vm */
	if (block->flags & NOUVEAU_MEM_FB && dev_priv->card_type >= NV_50 &&
	    !(block->flags & NOUVEAU_MEM_NOVM)) {
		struct nouveau_gpuobj *pt = dev_priv->vm_vram_pt;
		unsigned offset = block->start;
		unsigned count = block->size / 65536;

		if (!pt) {
			DRM_ERROR("vm free without vm pt\n");
			goto out_free;
		}

		while (count--) {
			unsigned pte = offset / 65536;

			INSTANCE_WR(pt, (pte * 2) + 0, 0);
			INSTANCE_WR(pt, (pte * 2) + 1, 0);
			offset += 65536;
		}
	}

out_free:
	nouveau_mem_free_block(block);
}

/*
 * Ioctls
 */

int nouveau_ioctl_mem_alloc(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_nouveau_mem_alloc *alloc = data;
	struct mem_block *block;

	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;

	if (alloc->flags & NOUVEAU_MEM_INTERNAL)
		return -EINVAL;

	block = nouveau_mem_alloc(dev, alloc->alignment, alloc->size,
				  alloc->flags | NOUVEAU_MEM_USER, file_priv);
	if (!block)
		return -ENOMEM;

	alloc->map_handle = block->map_handle;
	alloc->offset = block->start;
	alloc->flags = block->flags;

	if (dev_priv->card_type >= NV_50 && alloc->flags & NOUVEAU_MEM_FB)
		alloc->offset += 512*1024*1024;

	return 0;
}

int nouveau_ioctl_mem_free(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_nouveau_mem_free *memfree = data;
	struct mem_block *block;

	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;

	if (dev_priv->card_type >= NV_50 && memfree->flags & NOUVEAU_MEM_FB)
		memfree->offset -= 512*1024*1024;

	block = NULL;
	if (dev_priv->fb_heap && memfree->flags & NOUVEAU_MEM_FB)
		block = find_block(dev_priv->fb_heap, memfree->offset);
	else if (dev_priv->agp_heap && memfree->flags & NOUVEAU_MEM_AGP)
		block = find_block(dev_priv->agp_heap, memfree->offset);
	else if (dev_priv->pci_heap && memfree->flags & NOUVEAU_MEM_PCI)
		block = find_block(dev_priv->pci_heap, memfree->offset);
	if (!block)
		return -EFAULT;
	if (block->file_priv != file_priv)
		return -EPERM;

	nouveau_mem_free(dev, block);
	return 0;
}
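
/*
 * Rewrite the G8x PTEs covering part of an existing FB allocation to
 * change its tile mode after the fact.  Note that FB offsets handed out
 * by nouveau_ioctl_mem_alloc() on G8x are biased by 512MiB, hence the
 * subtraction before the heap lookup.
 */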
int nouveau_ioctl_mem_tile(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_nouveau_mem_tile *memtile = data;
	struct mem_block *block = NULL;

	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;

	if (dev_priv->card_type < NV_50)
		return -EINVAL;

	if (memtile->flags & NOUVEAU_MEM_FB) {
		memtile->offset -= 512*1024*1024;
		block = find_block(dev_priv->fb_heap, memtile->offset);
	}

	if (!block)
		return -EINVAL;

	if (block->file_priv != file_priv)
		return -EPERM;

	{
		struct nouveau_gpuobj *pt = dev_priv->vm_vram_pt;
		unsigned offset = block->start + memtile->delta;
		unsigned count = memtile->size / 65536;
		unsigned tile = 0;

		if (memtile->flags & NOUVEAU_MEM_TILE) {
			if (memtile->flags & NOUVEAU_MEM_TILE_ZETA)
				tile = 0x00002800;
			else
				tile = 0x00007000;
		}

		while (count--) {
			unsigned pte = offset / 65536;

			INSTANCE_WR(pt, (pte * 2) + 0, offset | 1);
			INSTANCE_WR(pt, (pte * 2) + 1, 0x00000000 | tile);
			offset += 65536;
		}
	}

	return 0;
}
/**************************************************************************
 *
 * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include "drmP.h"

/*
 * Typically called by the IRQ handler.
 */

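/*
 * Signals the 'type' bits (plus each fence's native_type) on every fence
 * in the class ring whose sequence number is at or behind 'sequence',
 * removes fully signaled fences from the ring and wakes any waiters.
 */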
void drm_fence_handler(struct drm_device * dev, uint32_t fence_class,
		       uint32_t sequence, uint32_t type, uint32_t error)
{
	int wake = 0;
	uint32_t diff;
	uint32_t relevant;
	struct drm_fence_manager *fm = &dev->fm;
	struct drm_fence_class_manager *fc = &fm->fence_class[fence_class];
	struct drm_fence_driver *driver = dev->driver->fence_driver;
	struct list_head *head;
	struct drm_fence_object *fence, *next;
	int found = 0;
	int is_exe = (type & DRM_FENCE_TYPE_EXE);
	int ge_last_exe;


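	/*
	 * Sequence numbers wrap: a masked difference below
	 * driver->wrap_diff is read as "at or ahead of", anything larger
	 * as "behind".
	 */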
	diff = (sequence - fc->exe_flush_sequence) & driver->sequence_mask;

	if (fc->pending_exe_flush && is_exe && diff < driver->wrap_diff)
		fc->pending_exe_flush = 0;

	diff = (sequence - fc->last_exe_flush) & driver->sequence_mask;
	ge_last_exe = diff < driver->wrap_diff;

	if (is_exe && ge_last_exe) {
		fc->last_exe_flush = sequence;
	}

	if (list_empty(&fc->ring))
		return;

	list_for_each_entry(fence, &fc->ring, ring) {
		diff = (sequence - fence->sequence) & driver->sequence_mask;
		if (diff > driver->wrap_diff) {
			found = 1;
			break;
		}
	}

	fc->pending_flush &= ~type;
	head = (found) ? &fence->ring : &fc->ring;

	list_for_each_entry_safe_reverse(fence, next, head, ring) {
		if (&fence->ring == &fc->ring)
			break;

		if (error) {
			fence->error = error;
			fence->signaled = fence->type;
			fence->submitted_flush = fence->type;
			fence->flush_mask = fence->type;
			list_del_init(&fence->ring);
			wake = 1;
			break;
		}

		type |= fence->native_type;
		relevant = type & fence->type;

		if ((fence->signaled | relevant) != fence->signaled) {
			fence->signaled |= relevant;
			fence->flush_mask |= relevant;
			fence->submitted_flush |= relevant;
			DRM_DEBUG("Fence 0x%08lx signaled 0x%08x\n",
				  fence->base.hash.key, fence->signaled);
			wake = 1;
		}

		relevant = fence->flush_mask &
			~(fence->submitted_flush | fence->signaled);

		fc->pending_flush |= relevant;
		fence->submitted_flush |= relevant;

		if (!(fence->type & ~fence->signaled)) {
			DRM_DEBUG("Fence completely signaled 0x%08lx\n",
				  fence->base.hash.key);
			list_del_init(&fence->ring);
		}

	}

	/*
	 * Reinstate lost flush flags: the "fc->pending_flush &= ~type"
	 * above may have cleared flush bits that fences still on the ring
	 * had submitted but not yet seen signaled, so walk back through
	 * the ring and re-add them.
	 */

	if ((fc->pending_flush & type) != type) {
		head = head->prev;
		list_for_each_entry(fence, head, ring) {
			if (&fence->ring == &fc->ring)
				break;
			diff = (fc->last_exe_flush - fence->sequence) &
				driver->sequence_mask;
			if (diff > driver->wrap_diff)
				break;

			relevant = fence->submitted_flush & ~fence->signaled;
			fc->pending_flush |= relevant;
		}
	}

	if (wake) {
		DRM_WAKEUP(&fc->fence_queue);
	}
}

EXPORT_SYMBOL(drm_fence_handler);

static void drm_fence_unring(struct drm_device * dev, struct list_head *ring)
{
	struct drm_fence_manager *fm = &dev->fm;
	unsigned long flags;

	write_lock_irqsave(&fm->lock, flags);
	list_del_init(ring);
	write_unlock_irqrestore(&fm->lock, flags);
}

void drm_fence_usage_deref_locked(struct drm_fence_object ** fence)
{
	struct drm_fence_object *tmp_fence = *fence;
	struct drm_device *dev = tmp_fence->dev;
	struct drm_fence_manager *fm = &dev->fm;

	DRM_ASSERT_LOCKED(&dev->struct_mutex);
	*fence = NULL;
	if (atomic_dec_and_test(&tmp_fence->usage)) {
		drm_fence_unring(dev, &tmp_fence->ring);
		DRM_DEBUG("Destroyed a fence object 0x%08lx\n",
			  tmp_fence->base.hash.key);
		atomic_dec(&fm->count);
		BUG_ON(!list_empty(&tmp_fence->base.list));
		drm_ctl_free(tmp_fence, sizeof(*tmp_fence), DRM_MEM_FENCE);
	}
}
EXPORT_SYMBOL(drm_fence_usage_deref_locked);

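/*
 * Unlocked variant: only takes struct_mutex when the refcount actually
 * drops to zero, then re-checks the count under the mutex, presumably in
 * case a concurrent locked lookup re-referenced the object in between.
 */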
void drm_fence_usage_deref_unlocked(struct drm_fence_object ** fence)
{
	struct drm_fence_object *tmp_fence = *fence;
	struct drm_device *dev = tmp_fence->dev;
	struct drm_fence_manager *fm = &dev->fm;

	*fence = NULL;
	if (atomic_dec_and_test(&tmp_fence->usage)) {
		mutex_lock(&dev->struct_mutex);
		if (atomic_read(&tmp_fence->usage) == 0) {
			drm_fence_unring(dev, &tmp_fence->ring);
			atomic_dec(&fm->count);
			BUG_ON(!list_empty(&tmp_fence->base.list));
			drm_ctl_free(tmp_fence, sizeof(*tmp_fence), DRM_MEM_FENCE);
		}
		mutex_unlock(&dev->struct_mutex);
	}
}
EXPORT_SYMBOL(drm_fence_usage_deref_unlocked);

struct drm_fence_object
*drm_fence_reference_locked(struct drm_fence_object *src)
{
	DRM_ASSERT_LOCKED(&src->dev->struct_mutex);

	atomic_inc(&src->usage);
	return src;
}

void drm_fence_reference_unlocked(struct drm_fence_object **dst,
				  struct drm_fence_object *src)
{
	mutex_lock(&src->dev->struct_mutex);
	*dst = src;
	atomic_inc(&src->usage);
	mutex_unlock(&src->dev->struct_mutex);
}
EXPORT_SYMBOL(drm_fence_reference_unlocked);

static void drm_fence_object_destroy(struct drm_file *priv, struct drm_user_object * base)
{
	struct drm_fence_object *fence =
	    drm_user_object_entry(base, struct drm_fence_object, base);

	drm_fence_usage_deref_locked(&fence);
}

int drm_fence_object_signaled(struct drm_fence_object * fence,
			      uint32_t mask, int poke_flush)
{
	unsigned long flags;
	int signaled;
	struct drm_device *dev = fence->dev;
	struct drm_fence_manager *fm = &dev->fm;
	struct drm_fence_driver *driver = dev->driver->fence_driver;

	if (poke_flush)
		driver->poke_flush(dev, fence->fence_class);
	read_lock_irqsave(&fm->lock, flags);
	signaled =
	    (fence->type & mask & fence->signaled) == (fence->type & mask);
	read_unlock_irqrestore(&fm->lock, flags);

	return signaled;
}
EXPORT_SYMBOL(drm_fence_object_signaled);

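/*
 * Remember that an EXE flush is needed up to 'sequence'.  The stored
 * exe_flush_sequence is only ever advanced, using the same masked
 * subtraction as elsewhere to cope with sequence wrap-around.
 */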
static void drm_fence_flush_exe(struct drm_fence_class_manager * fc,
				struct drm_fence_driver * driver, uint32_t sequence)
{
	uint32_t diff;

	if (!fc->pending_exe_flush) {
		fc->exe_flush_sequence = sequence;
		fc->pending_exe_flush = 1;
	} else {
		diff =
		    (sequence - fc->exe_flush_sequence) & driver->sequence_mask;
		if (diff < driver->wrap_diff) {
			fc->exe_flush_sequence = sequence;
		}
	}
}

int drm_fence_object_flush(struct drm_fence_object * fence,
			   uint32_t type)
{
	struct drm_device *dev = fence->dev;
	struct drm_fence_manager *fm = &dev->fm;
	struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class];
	struct drm_fence_driver *driver = dev->driver->fence_driver;
	unsigned long flags;

	if (type & ~fence->type) {
		DRM_ERROR("Flush trying to extend fence type, "
			  "0x%x, 0x%x\n", type, fence->type);
		return -EINVAL;
	}

	write_lock_irqsave(&fm->lock, flags);
	fence->flush_mask |= type;
	if ((fence->submitted_flush & fence->signaled)
	    == fence->submitted_flush) {
		if ((fence->type & DRM_FENCE_TYPE_EXE) &&
		    !(fence->submitted_flush & DRM_FENCE_TYPE_EXE)) {
			drm_fence_flush_exe(fc, driver, fence->sequence);
			fence->submitted_flush |= DRM_FENCE_TYPE_EXE;
		} else {
			fc->pending_flush |= (fence->flush_mask &
					      ~fence->submitted_flush);
			fence->submitted_flush = fence->flush_mask;
		}
	}
	write_unlock_irqrestore(&fm->lock, flags);
	driver->poke_flush(dev, fence->fence_class);
	return 0;
}

/*
 * Make sure old fence objects are signaled before their fence sequences are
 * wrapped around and reused.
 */

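/*
 * The oldest fence on the ring is flushed once it comes within
 * driver->flush_diff sequence numbers of being overtaken by the wrapping
 * counter.
 */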
void drm_fence_flush_old(struct drm_device * dev, uint32_t fence_class, uint32_t sequence)
{
	struct drm_fence_manager *fm = &dev->fm;
	struct drm_fence_class_manager *fc = &fm->fence_class[fence_class];
	struct drm_fence_driver *driver = dev->driver->fence_driver;
	uint32_t old_sequence;
	unsigned long flags;
	struct drm_fence_object *fence;
	uint32_t diff;

	write_lock_irqsave(&fm->lock, flags);
	old_sequence = (sequence - driver->flush_diff) & driver->sequence_mask;
	diff = (old_sequence - fc->last_exe_flush) & driver->sequence_mask;

	if ((diff < driver->wrap_diff) && !fc->pending_exe_flush) {
		fc->pending_exe_flush = 1;
		fc->exe_flush_sequence = sequence - (driver->flush_diff / 2);
	}
	write_unlock_irqrestore(&fm->lock, flags);

	mutex_lock(&dev->struct_mutex);
	read_lock_irqsave(&fm->lock, flags);

	if (list_empty(&fc->ring)) {
		read_unlock_irqrestore(&fm->lock, flags);
		mutex_unlock(&dev->struct_mutex);
		return;
	}
	fence = drm_fence_reference_locked(list_entry(fc->ring.next, struct drm_fence_object, ring));
	mutex_unlock(&dev->struct_mutex);
	diff = (old_sequence - fence->sequence) & driver->sequence_mask;
	read_unlock_irqrestore(&fm->lock, flags);
	if (diff < driver->wrap_diff) {
		drm_fence_object_flush(fence, fence->type);
	}
	drm_fence_usage_deref_unlocked(&fence);
}

EXPORT_SYMBOL(drm_fence_flush_old);

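/*
 * Block on the fence class wait queue until the masked types signal,
 * giving up after roughly three seconds.  With 'ignore_signals' set, an
 * interrupted wait is restarted instead of being converted to -EAGAIN.
 */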
static int drm_fence_lazy_wait(struct drm_fence_object *fence,
			       int ignore_signals,
			       uint32_t mask)
{
	struct drm_device *dev = fence->dev;
	struct drm_fence_manager *fm = &dev->fm;
	struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class];
	int signaled;
	unsigned long _end = jiffies + 3*DRM_HZ;
	int ret = 0;

	do {
		DRM_WAIT_ON(ret, fc->fence_queue, 3 * DRM_HZ,
			    (signaled = drm_fence_object_signaled(fence, mask, 1)));
		if (signaled)
			return 0;
		if (time_after_eq(jiffies, _end))
			break;
	} while (ret == -EINTR && ignore_signals);
	if (drm_fence_object_signaled(fence, mask, 0))
		return 0;
	if (time_after_eq(jiffies, _end))
		ret = -EBUSY;
	if (ret) {
		if (ret == -EBUSY) {
			DRM_ERROR("Fence timeout. "
				  "GPU lockup or fence driver was "
				  "taken down. %d 0x%08x 0x%02x 0x%02x 0x%02x\n",
				  fence->fence_class,
				  fence->sequence,
				  fence->type,
				  mask,
				  fence->signaled);
			DRM_ERROR("Pending exe flush %d 0x%08x\n",
				  fc->pending_exe_flush,
				  fc->exe_flush_sequence);
		}
		return ((ret == -EINTR) ? -EAGAIN : ret);
	}
	return 0;
}

int drm_fence_object_wait(struct drm_fence_object * fence,
			  int lazy, int ignore_signals, uint32_t mask)
{
	struct drm_device *dev = fence->dev;
	struct drm_fence_driver *driver = dev->driver->fence_driver;
	int ret = 0;
	unsigned long _end;
	int signaled;

	if (mask & ~fence->type) {
		DRM_ERROR("Wait trying to extend fence type"
			  " 0x%08x 0x%08x\n", mask, fence->type);
		BUG();
		return -EINVAL;
	}

	if (drm_fence_object_signaled(fence, mask, 0))
		return 0;

	_end = jiffies + 3 * DRM_HZ;

	drm_fence_object_flush(fence, mask);

	if (lazy && driver->lazy_capable) {

		ret = drm_fence_lazy_wait(fence, ignore_signals, mask);
		if (ret)
			return ret;

	} else {

		if (driver->has_irq(dev, fence->fence_class,
				    DRM_FENCE_TYPE_EXE)) {
			ret = drm_fence_lazy_wait(fence, ignore_signals,
						  DRM_FENCE_TYPE_EXE);
			if (ret)
				return ret;
		}

		if (driver->has_irq(dev, fence->fence_class,
				    mask & ~DRM_FENCE_TYPE_EXE)) {
			ret = drm_fence_lazy_wait(fence, ignore_signals,
						  mask);
			if (ret)
				return ret;
		}
	}
	if (drm_fence_object_signaled(fence, mask, 0))
		return 0;

	/*
	 * Avoid kernel-space busy-waits.
	 */
#if 1
	if (!ignore_signals)
		return -EAGAIN;
#endif
	do {
		schedule();
		signaled = drm_fence_object_signaled(fence, mask, 1);
	} while (!signaled && !time_after_eq(jiffies, _end));

	if (!signaled)
		return -EBUSY;

	return 0;
}
EXPORT_SYMBOL(drm_fence_object_wait);


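/*
 * (Re)emit a fence: the driver's emit hook supplies a fresh sequence
 * number and native type, and the fence is re-queued at the tail of its
 * class ring with all signal and flush state cleared.
 */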
int drm_fence_object_emit(struct drm_fence_object * fence,
			  uint32_t fence_flags, uint32_t fence_class, uint32_t type)
{
	struct drm_device *dev = fence->dev;
	struct drm_fence_manager *fm = &dev->fm;
	struct drm_fence_driver *driver = dev->driver->fence_driver;
	struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class];
	unsigned long flags;
	uint32_t sequence;
	uint32_t native_type;
	int ret;

	drm_fence_unring(dev, &fence->ring);
	ret = driver->emit(dev, fence_class, fence_flags, &sequence, &native_type);
	if (ret)
		return ret;

	write_lock_irqsave(&fm->lock, flags);
	fence->fence_class = fence_class;
	fence->type = type;
	fence->flush_mask = 0x00;
	fence->submitted_flush = 0x00;
	fence->signaled = 0x00;
	fence->sequence = sequence;
	fence->native_type = native_type;
	if (list_empty(&fc->ring))
		fc->last_exe_flush = sequence - 1;
	list_add_tail(&fence->ring, &fc->ring);
	write_unlock_irqrestore(&fm->lock, flags);
	return 0;
}
EXPORT_SYMBOL(drm_fence_object_emit);

static int drm_fence_object_init(struct drm_device * dev, uint32_t fence_class,
				 uint32_t type,
				 uint32_t fence_flags,
				 struct drm_fence_object * fence)
{
	int ret = 0;
	unsigned long flags;
	struct drm_fence_manager *fm = &dev->fm;

	mutex_lock(&dev->struct_mutex);
	atomic_set(&fence->usage, 1);
	mutex_unlock(&dev->struct_mutex);

	write_lock_irqsave(&fm->lock, flags);
	INIT_LIST_HEAD(&fence->ring);

	/* 
	 *  Avoid hitting BUG() for kernel-only fence objects.
	 */

	INIT_LIST_HEAD(&fence->base.list);
	fence->fence_class = fence_class;
	fence->type = type;
	fence->flush_mask = 0;
	fence->submitted_flush = 0;
	fence->signaled = 0;
	fence->sequence = 0;
	fence->dev = dev;
	write_unlock_irqrestore(&fm->lock, flags);
	if (fence_flags & DRM_FENCE_FLAG_EMIT) {
		ret = drm_fence_object_emit(fence, fence_flags,
					    fence->fence_class, type);
	}
	return ret;
}

int drm_fence_add_user_object(struct drm_file * priv, struct drm_fence_object * fence,
			      int shareable)
{
	struct drm_device *dev = priv->head->dev;
	int ret;

	mutex_lock(&dev->struct_mutex);
	ret = drm_add_user_object(priv, &fence->base, shareable);
	if (ret)
		goto out;
	atomic_inc(&fence->usage);
	fence->base.type = drm_fence_type;
	fence->base.remove = &drm_fence_object_destroy;
	DRM_DEBUG("Fence 0x%08lx created\n", fence->base.hash.key);
out:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
EXPORT_SYMBOL(drm_fence_add_user_object);

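/*
 * Typical driver-side usage, as a sketch (not taken verbatim from any
 * particular driver):
 *
 *	struct drm_fence_object *fence;
 *	int ret;
 *
 *	ret = drm_fence_object_create(dev, fence_class, type,
 *				      DRM_FENCE_FLAG_EMIT, &fence);
 *	if (ret)
 *		return ret;
 *	ret = drm_fence_object_wait(fence, 1, 0, fence->type);
 *	drm_fence_usage_deref_unlocked(&fence);
 */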
int drm_fence_object_create(struct drm_device * dev, uint32_t fence_class, uint32_t type,
			    unsigned flags, struct drm_fence_object ** c_fence)
{
	struct drm_fence_object *fence;
	int ret;
	struct drm_fence_manager *fm = &dev->fm;

	fence = drm_ctl_calloc(1, sizeof(*fence), DRM_MEM_FENCE);
	if (!fence)
		return -ENOMEM;
	ret = drm_fence_object_init(dev, fence_class, type, flags, fence);
	if (ret) {
		drm_fence_usage_deref_unlocked(&fence);
		return ret;
	}
	*c_fence = fence;
	atomic_inc(&fm->count);

	return 0;
}

EXPORT_SYMBOL(drm_fence_object_create);

void drm_fence_manager_init(struct drm_device * dev)
{
	struct drm_fence_manager *fm = &dev->fm;
	struct drm_fence_class_manager *fence_class;
	struct drm_fence_driver *fed = dev->driver->fence_driver;
	int i;
	unsigned long flags;

	rwlock_init(&fm->lock);
	write_lock_irqsave(&fm->lock, flags);
	fm->initialized = 0;
	if (!fed)
		goto out_unlock;

	fm->initialized = 1;
	fm->num_classes = fed->num_classes;
	BUG_ON(fm->num_classes > _DRM_FENCE_CLASSES);

	for (i = 0; i < fm->num_classes; ++i) {
		fence_class = &fm->fence_class[i];

		INIT_LIST_HEAD(&fence_class->ring);
		fence_class->pending_flush = 0;
		DRM_INIT_WAITQUEUE(&fence_class->fence_queue);
	}

	atomic_set(&fm->count, 0);
 out_unlock:
	write_unlock_irqrestore(&fm->lock, flags);
}

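/*
 * Snapshot the fence state into a drm_fence_arg under the manager read
 * lock, typically for returning fence status to userspace.
 */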
void drm_fence_fill_arg(struct drm_fence_object *fence, struct drm_fence_arg *arg)
{
	struct drm_device *dev = fence->dev;
	struct drm_fence_manager *fm = &dev->fm;
	unsigned long irq_flags;

	read_lock_irqsave(&fm->lock, irq_flags);
	arg->handle = fence->base.hash.key;
	arg->fence_class = fence->fence_class;
	arg->type = fence->type;
	arg->signaled = fence->signaled;
	arg->error = fence->error;
	arg->sequence = fence->sequence;
	read_unlock_irqrestore(&fm->lock, irq_flags);
}
EXPORT_SYMBOL(drm_fence_fill_arg);


void drm_fence_manager_takedown(struct drm_device * dev)
{
}

struct drm_fence_object *drm_lookup_fence_object(struct drm_file * priv, uint32_t handle)
{
	struct drm_device *dev = priv->head->dev;
	struct drm_user_object *uo;
	struct drm_fence_object *fence;

	mutex_lock(&dev->struct_mutex);
	uo = drm_lookup_user_object(priv, handle);
	if (!uo || (uo->type != drm_fence_type)) {
		mutex_unlock(&dev->struct_mutex);
		return NULL;
	}
	fence = drm_fence_reference_locked(drm_user_object_entry(uo, struct drm_fence_object, base));
	mutex_unlock(&dev->struct_mutex);
	return fence;
}