/**
 * \file ati_pcigart.c
 * ATI PCI GART support
 *
 * \author Gareth Hughes
 */

/*
 * Created: Wed Dec 13 21:52:19 2000 by gareth@valinux.com
 *
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"

# define ATI_PCIGART_PAGE_SIZE		4096	/**< PCI GART page size */
# define ATI_PCIGART_PAGE_MASK		(~(ATI_PCIGART_PAGE_SIZE-1))

#define ATI_PCIE_WRITE	0x4
#define ATI_PCIE_READ	0x8

static __inline__ void gart_insert_page_into_table(struct drm_ati_pcigart_info *gart_info,
						   dma_addr_t addr,
						   volatile u32 *pci_gart)
{
	u32 page_base;

	page_base = (u32)addr & ATI_PCIGART_PAGE_MASK;
	switch (gart_info->gart_reg_if) {
	case DRM_ATI_GART_IGP:
		page_base |= (upper_32_bits(addr) & 0xff) << 4;
		page_base |= 0xc;
		break;
	case DRM_ATI_GART_PCIE:
		page_base >>= 8;
		page_base |= (upper_32_bits(addr) & 0xff) << 24;
		page_base |= ATI_PCIE_READ | ATI_PCIE_WRITE;
		break;
	default:
	case DRM_ATI_GART_PCI:
		break;
	}

	*pci_gart = cpu_to_le32(page_base);
}

static __inline__ dma_addr_t gart_get_page_from_table(struct drm_ati_pcigart_info *gart_info,
						      volatile u32 *pci_gart)
{
	dma_addr_t retval;

	switch (gart_info->gart_reg_if) {
	case DRM_ATI_GART_IGP:
		retval = (*pci_gart & ATI_PCIGART_PAGE_MASK);
		retval += (((*pci_gart & 0xf0) >> 4) << 16) << 16;
		break;
	case DRM_ATI_GART_PCIE:
		retval = (*pci_gart & ~0xc);
		retval <<= 8;
		break;
	case DRM_ATI_GART_PCI:
		retval = *pci_gart;
		break;
	}

	return retval;
}
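/*
 * Illustrative sketch (not part of the original file): how the PCIE
 * encoding above round-trips a page-aligned bus address on a
 * little-endian CPU. The address value is made up for the example.
 */
#if 0
static void example_pcie_gart_entry(void)
{
	struct drm_ati_pcigart_info info = { .gart_reg_if = DRM_ATI_GART_PCIE };
	u32 entry;
	dma_addr_t addr = 0x12345000;

	/* stores (0x12345000 >> 8) | the read/write bits 0xc = 0x0012345c */
	gart_insert_page_into_table(&info, addr, &entry);

	/* masking off 0xc and shifting back recovers the address */
	BUG_ON(gart_get_page_from_table(&info, &entry) != addr);
}
#endif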
int drm_ati_alloc_pcigart_table(struct drm_device *dev,
				struct drm_ati_pcigart_info *gart_info)
{
	gart_info->table_handle = drm_pci_alloc(dev, gart_info->table_size,
						PAGE_SIZE, gart_info->table_mask);
	if (gart_info->table_handle == NULL)
		return -ENOMEM;

#ifdef CONFIG_X86
	/* IGPs only exist on x86 in any case */
	if (gart_info->gart_reg_if == DRM_ATI_GART_IGP)
		set_memory_uc((unsigned long)gart_info->table_handle->vaddr,
			      gart_info->table_size >> PAGE_SHIFT);
#endif

	memset(gart_info->table_handle->vaddr, 0, gart_info->table_size);
	return 0;
}
EXPORT_SYMBOL(drm_ati_alloc_pcigart_table);

static void drm_ati_free_pcigart_table(struct drm_device *dev,
				       struct drm_ati_pcigart_info *gart_info)
{
#ifdef CONFIG_X86
	/* IGPs only exist on x86 in any case */
	if (gart_info->gart_reg_if == DRM_ATI_GART_IGP)
		set_memory_wb((unsigned long)gart_info->table_handle->vaddr,
			      gart_info->table_size >> PAGE_SHIFT);
#endif
	drm_pci_free(dev, gart_info->table_handle);
	gart_info->table_handle = NULL;
}

int drm_ati_pcigart_cleanup(struct drm_device *dev,
			    struct drm_ati_pcigart_info *gart_info)
{
	struct drm_sg_mem *entry = dev->sg;
	unsigned long pages;
	int i;
	int max_pages;

	/* we need to support large memory configurations */
	if (!entry) {
		return 0;
	}

	if (gart_info->bus_addr) {
		max_pages = (gart_info->table_size / sizeof(u32));
		pages = (entry->pages <= max_pages) ? entry->pages : max_pages;

		for (i = 0; i < pages; i++) {
			if (!entry->busaddr[i])
				break;
			pci_unmap_page(dev->pdev, entry->busaddr[i],
				       PAGE_SIZE, PCI_DMA_TODEVICE);
		}

		if (gart_info->gart_table_location == DRM_ATI_GART_MAIN)
			gart_info->bus_addr = 0;
	}

	if (gart_info->gart_table_location == DRM_ATI_GART_MAIN &&
	    gart_info->table_handle) {
		drm_ati_free_pcigart_table(dev, gart_info);
	}

	return 1;
}
EXPORT_SYMBOL(drm_ati_pcigart_cleanup);

int drm_ati_pcigart_init(struct drm_device *dev,
			 struct drm_ati_pcigart_info *gart_info)
{
	struct drm_sg_mem *entry = dev->sg;
	void *address = NULL;
	unsigned long pages;
	u32 *pci_gart;
	dma_addr_t bus_address = 0;
	int i, j, ret = 0;
	int max_pages;
	dma_addr_t entry_addr;

	if (gart_info->gart_table_location == DRM_ATI_GART_MAIN &&
	    gart_info->table_handle == NULL) {
		DRM_DEBUG("PCI: no table in VRAM: using normal RAM\n");

		ret = drm_ati_alloc_pcigart_table(dev, gart_info);
		if (ret) {
			DRM_ERROR("cannot allocate PCI GART page!\n");
			goto done;
		}
	}

	if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) {
		address = gart_info->table_handle->vaddr;
		bus_address = gart_info->table_handle->busaddr;
	} else {
		address = gart_info->addr;
		bus_address = gart_info->bus_addr;
	}

	if (!entry) {
		DRM_ERROR("no scatter/gather memory!\n");
		goto done;
	}

	pci_gart = (u32 *) address;

	max_pages = (gart_info->table_size / sizeof(u32));
	pages = (entry->pages <= max_pages) ? entry->pages : max_pages;

	for (i = 0; i < pages; i++) {
		/* we need to support large memory configurations */
		entry->busaddr[i] = pci_map_page(dev->pdev, entry->pagelist[i],
						 0, PAGE_SIZE, PCI_DMA_TODEVICE);
		if (entry->busaddr[i] == 0) {
			DRM_ERROR("unable to map PCIGART pages!\n");
			drm_ati_pcigart_cleanup(dev, gart_info);
			address = NULL;
			bus_address = 0;
			goto done;
		}

		entry_addr = entry->busaddr[i];
		for (j = 0; j < (PAGE_SIZE / ATI_PCIGART_PAGE_SIZE); j++) {
			gart_insert_page_into_table(gart_info, entry_addr, pci_gart);
			pci_gart++;
			entry_addr += ATI_PCIGART_PAGE_SIZE;
		}
	}

	ret = 1;
	mb();

done:
	gart_info->addr = address;
	gart_info->bus_addr = bus_address;
	return ret;
}
EXPORT_SYMBOL(drm_ati_pcigart_init);
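/*
 * Illustrative driver-side usage (an assumption, not from this file): a
 * radeon-style driver fills in drm_ati_pcigart_info and then calls
 * drm_ati_pcigart_init()/drm_ati_pcigart_cleanup(). The field values and
 * helper name here are hypothetical.
 */
#if 0
static int example_setup_pcigart(struct drm_device *dev,
				 struct drm_ati_pcigart_info *gart_info)
{
	gart_info->table_size = 32768;			/* 8192 entries * 4 bytes */
	gart_info->table_mask = 0xffffffffUL;		/* 32-bit DMA mask for the table */
	gart_info->gart_table_location = DRM_ATI_GART_MAIN;
	gart_info->gart_reg_if = DRM_ATI_GART_PCI;

	if (drm_ati_pcigart_init(dev, gart_info) != 1)	/* returns 1 on success */
		return -ENOMEM;
	return 0;
}
#endif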
static int ati_pcigart_needs_unbind_cache_adjust(struct drm_ttm_backend *backend)
{
	return ((backend->flags & DRM_BE_FLAG_BOUND_CACHED) ? 0 : 1);
}

static int ati_pcigart_populate(struct drm_ttm_backend *backend,
				unsigned long num_pages,
				struct page **pages,
				struct page *dummy_read_page)
{
	struct ati_pcigart_ttm_backend *atipci_be =
		container_of(backend, struct ati_pcigart_ttm_backend, backend);

	atipci_be->pages = pages;
	atipci_be->num_pages = num_pages;
	atipci_be->populated = 1;
	return 0;
}

static int ati_pcigart_bind_ttm(struct drm_ttm_backend *backend,
				struct drm_bo_mem_reg *bo_mem)
{
	struct ati_pcigart_ttm_backend *atipci_be =
		container_of(backend, struct ati_pcigart_ttm_backend, backend);
	off_t j;
	int i;
	struct drm_ati_pcigart_info *info = atipci_be->gart_info;
	u32 *pci_gart;
	dma_addr_t offset = bo_mem->mm_node->start;
	dma_addr_t page_base;

	pci_gart = info->addr;

	j = offset;
	while (j < (offset + atipci_be->num_pages)) {
		if (gart_get_page_from_table(info, pci_gart + j))
			return -EBUSY;
		j++;
	}

	for (i = 0, j = offset; i < atipci_be->num_pages; i++, j++) {
		struct page *cur_page = atipci_be->pages[i];
		/* write value */
		page_base = page_to_phys(cur_page);
		gart_insert_page_into_table(info, page_base, pci_gart + j);
	}

	mb();
	atipci_be->gart_flush_fn(atipci_be->dev);

	atipci_be->bound = 1;
	atipci_be->offset = offset;
	/* need to traverse table and add entries */
	DRM_DEBUG("\n");
	return 0;
}

static int ati_pcigart_unbind_ttm(struct drm_ttm_backend *backend)
{
	struct ati_pcigart_ttm_backend *atipci_be =
		container_of(backend, struct ati_pcigart_ttm_backend, backend);
	struct drm_ati_pcigart_info *info = atipci_be->gart_info;
	unsigned long offset = atipci_be->offset;
	int i;
	off_t j;
	u32 *pci_gart = info->addr;

	if (atipci_be->bound != 1)
		return -EINVAL;

	for (i = 0, j = offset; i < atipci_be->num_pages; i++, j++) {
		*(pci_gart + j) = 0;
	}

	atipci_be->gart_flush_fn(atipci_be->dev);
	atipci_be->bound = 0;
	atipci_be->offset = 0;
	return 0;
}

static void ati_pcigart_clear_ttm(struct drm_ttm_backend *backend)
{
	struct ati_pcigart_ttm_backend *atipci_be =
		container_of(backend, struct ati_pcigart_ttm_backend, backend);

	DRM_DEBUG("\n");
	if (atipci_be->pages) {
		backend->func->unbind(backend);
		atipci_be->pages = NULL;
	}
	atipci_be->num_pages = 0;
}

static void ati_pcigart_destroy_ttm(struct drm_ttm_backend *backend)
{
	struct ati_pcigart_ttm_backend *atipci_be;

	if (backend) {
		DRM_DEBUG("\n");
		atipci_be = container_of(backend, struct ati_pcigart_ttm_backend, backend);
		if (atipci_be) {
			if (atipci_be->pages) {
				backend->func->clear(backend);
			}
			drm_ctl_free(atipci_be, sizeof(*atipci_be), DRM_MEM_TTM);
		}
	}
}

static struct drm_ttm_backend_func ati_pcigart_ttm_backend = {
	.needs_ub_cache_adjust = ati_pcigart_needs_unbind_cache_adjust,
	.populate = ati_pcigart_populate,
	.clear = ati_pcigart_clear_ttm,
	.bind = ati_pcigart_bind_ttm,
	.unbind = ati_pcigart_unbind_ttm,
	.destroy = ati_pcigart_destroy_ttm,
};

struct drm_ttm_backend *ati_pcigart_init_ttm(struct drm_device *dev,
					     struct drm_ati_pcigart_info *info,
					     void (*gart_flush_fn)(struct drm_device *dev))
{
	struct ati_pcigart_ttm_backend *atipci_be;

	atipci_be = drm_ctl_calloc(1, sizeof(*atipci_be), DRM_MEM_TTM);
	if (!atipci_be)
		return NULL;

	atipci_be->populated = 0;
	atipci_be->backend.func = &ati_pcigart_ttm_backend;
	/* atipci_be->backend.mem_type = DRM_BO_MEM_TT; */
	atipci_be->gart_info = info;
	atipci_be->gart_flush_fn = gart_flush_fn;
	atipci_be->dev = dev;

	return &atipci_be->backend;
}
EXPORT_SYMBOL(ati_pcigart_init_ttm);
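/*
 * Illustrative lifecycle (an assumption, not from this file): a driver
 * creates the backend with its own flush callback, and the TTM layer then
 * drives populate/bind/unbind/destroy through the function table above.
 * example_gart_flush() is hypothetical.
 */
#if 0
static void example_gart_flush(struct drm_device *dev)
{
	/* driver-specific: make the chip re-read the GART table */
}

static struct drm_ttm_backend *example_create_backend(struct drm_device *dev,
						      struct drm_ati_pcigart_info *info)
{
	return ati_pcigart_init_ttm(dev, info, example_gart_flush);
}
#endif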
/**************************************************************************
 *
 * This kernel module is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 **************************************************************************/
/*
 * This code provides access to unexported mm kernel features. It is necessary
 * to use the new DRM memory manager code with kernels that don't support it
 * directly.
 *
 * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *          Linux kernel mm subsystem authors.
 *          (Most code taken from there).
 */

#include "drmP.h"

#if defined(CONFIG_X86) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))

/*
 * The AGP module's versions of these have bad performance on the indicated
 * kernel versions, so they are reimplemented here with the TLB flush left
 * to the caller.
 */

int drm_map_page_into_agp(struct page *page)
{
        int i;
        i = change_page_attr(page, 1, PAGE_KERNEL_NOCACHE);
        /* Caller's responsibility to call global_flush_tlb() for
         * performance reasons */
        return i;
}

int drm_unmap_page_from_agp(struct page *page)
{
        int i;
        i = change_page_attr(page, 1, PAGE_KERNEL);
        /* Caller's responsibility to call global_flush_tlb() for
         * performance reasons */
        return i;
}
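/*
 * Illustrative usage (an assumption, not from this file): the TLB flush is
 * left to the caller so that a batch of pages costs a single
 * global_flush_tlb(). The helper name is made up.
 */
#if 0
static void example_map_agp_pages(struct page **pages, int num_pages)
{
	int i;

	for (i = 0; i < num_pages; ++i)
		drm_map_page_into_agp(pages[i]);

	global_flush_tlb();	/* one flush for the whole batch */
}
#endif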
#endif


#if  (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))

/*
 * The protection map was exported in 2.6.19
 */

pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
#ifdef MODULE
	static pgprot_t drm_protection_map[16] = {
		__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
		__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
	};

	return drm_protection_map[vm_flags & 0x0F];
#else
	extern pgprot_t protection_map[];
	return protection_map[vm_flags & 0x0F];
#endif
}
#endif
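/*
 * Illustrative only (an assumption, not from this file): the low four
 * vm_flags bits (VM_READ, VM_WRITE, VM_EXEC, VM_SHARED) index into the
 * private (__P*) or shared (__S*) half of the map above.
 */
#if 0
static pgprot_t example_shared_rw_prot(void)
{
	/* VM_SHARED selects the __S half; read+write lands on __S011 */
	return vm_get_page_prot(VM_READ | VM_WRITE | VM_SHARED);
}
#endif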


#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))

/*
 * vm code for kernels below 2.6.15, in which version a major vm write
 * occurred. This implements a simple, straightforward version similar to
 * what's going to be in kernel 2.6.19+.
 * Kernels below 2.6.15 use nopage, whereas 2.6.19 and upwards use nopfn.
 */

static struct {
	spinlock_t lock;
	struct page *dummy_page;
	atomic_t present;
} drm_np_retry =
{SPIN_LOCK_UNLOCKED, NOPAGE_OOM, ATOMIC_INIT(0)};


static struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
				    struct fault_data *data);


struct page * get_nopage_retry(void)
{
	if (atomic_read(&drm_np_retry.present) == 0) {
		struct page *page = alloc_page(GFP_KERNEL);
		if (!page)
			return NOPAGE_OOM;
		spin_lock(&drm_np_retry.lock);
		drm_np_retry.dummy_page = page;
		atomic_set(&drm_np_retry.present,1);
		spin_unlock(&drm_np_retry.lock);
	}
	get_page(drm_np_retry.dummy_page);
	return drm_np_retry.dummy_page;
}

void free_nopage_retry(void)
{
	if (atomic_read(&drm_np_retry.present) == 1) {
		spin_lock(&drm_np_retry.lock);
		__free_page(drm_np_retry.dummy_page);
		drm_np_retry.dummy_page = NULL;
		atomic_set(&drm_np_retry.present, 0);
		spin_unlock(&drm_np_retry.lock);
	}
}
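/*
 * Illustrative usage (an assumption, not from this file): on kernels that
 * lack NOPAGE_REFAULT, the compat headers can stand it in with this
 * refcounted dummy page so the faulting thread maps the dummy page and
 * faults again; module teardown then calls free_nopage_retry().
 */
#if 0
static struct page *example_nopage_refault(void)
{
	return get_nopage_retry();	/* stands in for NOPAGE_REFAULT */
}
#endif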

struct page *drm_bo_vm_nopage(struct vm_area_struct *vma,
			       unsigned long address,
			       int *type)
{
	struct fault_data data;

	if (type)
		*type = VM_FAULT_MINOR;

	data.address = address;
	data.vma = vma;
	drm_bo_vm_fault(vma, &data);
	switch (data.type) {
	case VM_FAULT_OOM:
		return NOPAGE_OOM;
	case VM_FAULT_SIGBUS:
		return NOPAGE_SIGBUS;
	default:
		break;
	}

	return NOPAGE_REFAULT;
}

#endif

#if !defined(DRM_FULL_MM_COMPAT) && \
  ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) || \
   (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)))

static int drm_pte_is_clear(struct vm_area_struct *vma,
			    unsigned long addr)
{
	struct mm_struct *mm = vma->vm_mm;
	int ret = 1;
	pte_t *pte;
	pmd_t *pmd;
	pud_t *pud;
	pgd_t *pgd;

	spin_lock(&mm->page_table_lock);
	pgd = pgd_offset(mm, addr);
	if (pgd_none(*pgd))
		goto unlock;
	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		goto unlock;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		goto unlock;
	pte = pte_offset_map(pmd, addr);
	if (!pte)
		goto unlock;
	ret = pte_none(*pte);
	pte_unmap(pte);
 unlock:
	spin_unlock(&mm->page_table_lock);
	return ret;
}

static int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
		  unsigned long pfn)
{
	int ret;
	if (!drm_pte_is_clear(vma, addr))
		return -EBUSY;

	ret = io_remap_pfn_range(vma, addr, pfn, PAGE_SIZE, vma->vm_page_prot);
	return ret;
}


static struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
				    struct fault_data *data)
{
	unsigned long address = data->address;
	struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
	unsigned long page_offset;
	struct page *page = NULL;
	struct drm_ttm *ttm;
	struct drm_device *dev;
	unsigned long pfn;
	int err;
	unsigned long bus_base;
	unsigned long bus_offset;
	unsigned long bus_size;

	dev = bo->dev;
	while(drm_bo_read_lock(&dev->bm.bm_lock));

	mutex_lock(&bo->mutex);

	err = drm_bo_wait(bo, 0, 1, 0);
	if (err) {
		data->type = (err == -EAGAIN) ?
			VM_FAULT_MINOR : VM_FAULT_SIGBUS;
		goto out_unlock;
	}


	/*
	 * If buffer happens to be in a non-mappable location,
	 * move it to a mappable.
	 */

	if (!(bo->mem.flags & DRM_BO_FLAG_MAPPABLE)) {
		unsigned long _end = jiffies + 3*DRM_HZ;
		uint32_t new_mask = bo->mem.proposed_flags |
			DRM_BO_FLAG_MAPPABLE |
			DRM_BO_FLAG_FORCE_MAPPABLE;

		do {
			err = drm_bo_move_buffer(bo, new_mask, 0, 0);
		} while((err == -EAGAIN) && !time_after_eq(jiffies, _end));

		if (err) {
			DRM_ERROR("Timeout moving buffer to mappable location.\n");
			data->type = VM_FAULT_SIGBUS;
			goto out_unlock;
		}
	}

	if (address > vma->vm_end) {
		data->type = VM_FAULT_SIGBUS;
		goto out_unlock;
	}

	dev = bo->dev;
	err = drm_bo_pci_offset(dev, &bo->mem, &bus_base, &bus_offset,
				&bus_size);

	if (err) {
		data->type = VM_FAULT_SIGBUS;
		goto out_unlock;
	}

	page_offset = (address - vma->vm_start) >> PAGE_SHIFT;

	if (bus_size) {
		struct drm_mem_type_manager *man = &dev->bm.man[bo->mem.mem_type];

		pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) + page_offset;
		vma->vm_page_prot = drm_io_prot(man->drm_bus_maptype, vma);
	} else {
		ttm = bo->ttm;

		drm_ttm_fixup_caching(ttm);
		page = drm_ttm_get_page(ttm, page_offset);
		if (!page) {
			data->type = VM_FAULT_OOM;
			goto out_unlock;
		}
		pfn = page_to_pfn(page);
		vma->vm_page_prot = (bo->mem.flags & DRM_BO_FLAG_CACHED) ?
			vm_get_page_prot(vma->vm_flags) :
			drm_io_prot(_DRM_TTM, vma);
	}

	err = vm_insert_pfn(vma, address, pfn);

	if (!err || err == -EBUSY)
		data->type = VM_FAULT_MINOR;
	else
		data->type = VM_FAULT_OOM;
out_unlock:
	mutex_unlock(&bo->mutex);
	drm_bo_read_unlock(&dev->bm.bm_lock);
	return NULL;
}

#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)) && \
  !defined(DRM_FULL_MM_COMPAT)

/**
 * nopfn handler for 2.6.19+ kernels without full mm compat; the pfn
 * itself is inserted by drm_bo_vm_fault().
 */

unsigned long drm_bo_vm_nopfn(struct vm_area_struct * vma,
			   unsigned long address)
{
	struct fault_data data;
	data.address = address;

	(void) drm_bo_vm_fault(vma, &data);
	if (data.type == VM_FAULT_OOM)
		return NOPFN_OOM;
	else if (data.type == VM_FAULT_SIGBUS)
		return NOPFN_SIGBUS;

	/*
	 * pfn already set.
	 */

	return 0;
}
#endif


#ifdef DRM_ODD_MM_COMPAT

/*
 * VM compatibility code for 2.6.15-2.6.18. This code implements a complicated
 * workaround for a single BUG statement in do_no_page in these versions. The
 * tricky thing is that we need to take the mmap_sem in exclusive mode for _all_
 * vmas mapping the ttm, before dev->struct_mutex is taken. The way we do this
 * is to first take the dev->struct_mutex, and then trylock all mmap_sems. If
 * this fails for a single mmap_sem, we have to release all sems and the
 * dev->struct_mutex, release the cpu and retry. We also need to keep track of
 * all vmas mapping the ttm. phew. (An illustrative caller sketch follows
 * drm_bo_unlock_kmm() below.)
 */

typedef struct p_mm_entry {
	struct list_head head;
	struct mm_struct *mm;
	atomic_t refcount;
        int locked;
} p_mm_entry_t;

typedef struct vma_entry {
	struct list_head head;
	struct vm_area_struct *vma;
} vma_entry_t;


struct page *drm_bo_vm_nopage(struct vm_area_struct *vma,
			       unsigned long address,
			       int *type)
{
	struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
	unsigned long page_offset;
	struct page *page;
	struct drm_ttm *ttm;
	struct drm_device *dev;

	mutex_lock(&bo->mutex);

	if (type)
		*type = VM_FAULT_MINOR;

	if (address > vma->vm_end) {
		page = NOPAGE_SIGBUS;
		goto out_unlock;
	}

	dev = bo->dev;

	if (drm_mem_reg_is_pci(dev, &bo->mem)) {
		DRM_ERROR("Invalid compat nopage.\n");
		page = NOPAGE_SIGBUS;
		goto out_unlock;
	}

	ttm = bo->ttm;
	drm_ttm_fixup_caching(ttm);
	page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
	page = drm_ttm_get_page(ttm, page_offset);
	if (!page) {
		page = NOPAGE_OOM;
		goto out_unlock;
	}

	get_page(page);
out_unlock:
	mutex_unlock(&bo->mutex);
	return page;
}




int drm_bo_map_bound(struct vm_area_struct *vma)
{
	struct drm_buffer_object *bo = (struct drm_buffer_object *)vma->vm_private_data;
	int ret = 0;
	unsigned long bus_base;
	unsigned long bus_offset;
	unsigned long bus_size;

	ret = drm_bo_pci_offset(bo->dev, &bo->mem, &bus_base,
				&bus_offset, &bus_size);
	BUG_ON(ret);

	if (bus_size) {
		struct drm_mem_type_manager *man = &bo->dev->bm.man[bo->mem.mem_type];
		unsigned long pfn = (bus_base + bus_offset) >> PAGE_SHIFT;
		pgprot_t pgprot = drm_io_prot(man->drm_bus_maptype, vma);
		ret = io_remap_pfn_range(vma, vma->vm_start, pfn,
					 vma->vm_end - vma->vm_start,
					 pgprot);
	}

	return ret;
}


int drm_bo_add_vma(struct drm_buffer_object * bo, struct vm_area_struct *vma)
{
	p_mm_entry_t *entry, *n_entry;
	vma_entry_t *v_entry;
	struct mm_struct *mm = vma->vm_mm;

	v_entry = drm_ctl_alloc(sizeof(*v_entry), DRM_MEM_BUFOBJ);
	if (!v_entry) {
		DRM_ERROR("Allocation of vma pointer entry failed\n");
		return -ENOMEM;
	}
	v_entry->vma = vma;

	list_add_tail(&v_entry->head, &bo->vma_list);

	list_for_each_entry(entry, &bo->p_mm_list, head) {
		if (mm == entry->mm) {
			atomic_inc(&entry->refcount);
			return 0;
		} else if ((unsigned long)mm < (unsigned long)entry->mm)
			break;	/* insert before this entry to keep the list sorted */
	}

	n_entry = drm_ctl_alloc(sizeof(*n_entry), DRM_MEM_BUFOBJ);
	if (!n_entry) {
		DRM_ERROR("Allocation of process mm pointer entry failed\n");
		return -ENOMEM;
	}
	INIT_LIST_HEAD(&n_entry->head);
	n_entry->mm = mm;
	n_entry->locked = 0;
	atomic_set(&n_entry->refcount, 0);
	list_add_tail(&n_entry->head, &entry->head);

	return 0;
}

void drm_bo_delete_vma(struct drm_buffer_object * bo, struct vm_area_struct *vma)
{
	p_mm_entry_t *entry, *n;
	vma_entry_t *v_entry, *v_n;
	int found = 0;
	struct mm_struct *mm = vma->vm_mm;

	list_for_each_entry_safe(v_entry, v_n, &bo->vma_list, head) {
		if (v_entry->vma == vma) {
			found = 1;
			list_del(&v_entry->head);
			drm_ctl_free(v_entry, sizeof(*v_entry), DRM_MEM_BUFOBJ);
			break;
		}
	}
	BUG_ON(!found);

	list_for_each_entry_safe(entry, n, &bo->p_mm_list, head) {
		if (mm == entry->mm) {
			if (atomic_add_negative(-1, &entry->refcount)) {
				list_del(&entry->head);
				BUG_ON(entry->locked);
				drm_ctl_free(entry, sizeof(*entry), DRM_MEM_BUFOBJ);
			}
			return;
		}
	}
	BUG_ON(1);
}



int drm_bo_lock_kmm(struct drm_buffer_object * bo)
{
	p_mm_entry_t *entry;
	int lock_ok = 1;

	list_for_each_entry(entry, &bo->p_mm_list, head) {
		BUG_ON(entry->locked);
		if (!down_write_trylock(&entry->mm->mmap_sem)) {
			lock_ok = 0;
			break;
		}
		entry->locked = 1;
	}

	if (lock_ok)
		return 0;

	list_for_each_entry(entry, &bo->p_mm_list, head) {
		if (!entry->locked)
			break;
		up_write(&entry->mm->mmap_sem);
		entry->locked = 0;
	}

	/*
	 * Possible deadlock. Try again. Our callers should handle this
	 * and restart.
	 */

	return -EAGAIN;
}

void drm_bo_unlock_kmm(struct drm_buffer_object * bo)
{
	p_mm_entry_t *entry;

	list_for_each_entry(entry, &bo->p_mm_list, head) {
		BUG_ON(!entry->locked);
		up_write(&entry->mm->mmap_sem);
		entry->locked = 0;
	}
}
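/*
 * Illustrative caller pattern (an assumption, not from this file): how the
 * -EAGAIN contract of drm_bo_lock_kmm() described above is meant to be
 * handled. The helper name is made up.
 */
#if 0
static void example_lock_all_mms(struct drm_device *dev,
				 struct drm_buffer_object *bo)
{
	int ret;

	do {
		mutex_lock(&dev->struct_mutex);
		ret = drm_bo_lock_kmm(bo);
		if (ret == -EAGAIN) {
			/* a trylock failed: back off completely and retry */
			mutex_unlock(&dev->struct_mutex);
			schedule();
		}
	} while (ret == -EAGAIN);

	/* ... unmap/remap the buffer with every mmap_sem held ... */

	drm_bo_unlock_kmm(bo);
	mutex_unlock(&dev->struct_mutex);
}
#endif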

int drm_bo_remap_bound(struct drm_buffer_object *bo)
{
	vma_entry_t *v_entry;
	int ret = 0;

	if (drm_mem_reg_is_pci(bo->dev, &bo->mem)) {
		list_for_each_entry(v_entry, &bo->vma_list, head) {
			ret = drm_bo_map_bound(v_entry->vma);
			if (ret)
				break;
		}
	}

	return ret;
}

void drm_bo_finish_unmap(struct drm_buffer_object *bo)
{
	vma_entry_t *v_entry;

	list_for_each_entry(v_entry, &bo->vma_list, head) {
		v_entry->vma->vm_flags &= ~VM_PFNMAP;
	}
}

#endif

#ifdef DRM_IDR_COMPAT_FN
/* only called when idp->lock is held */
static void __free_layer(struct idr *idp, struct idr_layer *p)
{
	p->ary[0] = idp->id_free;
	idp->id_free = p;
	idp->id_free_cnt++;
}

static void free_layer(struct idr *idp, struct idr_layer *p)
{
	unsigned long flags;

	/*
	 * Depends on the return element being zeroed.
	 */
	spin_lock_irqsave(&idp->lock, flags);
	__free_layer(idp, p);
	spin_unlock_irqrestore(&idp->lock, flags);
}