path: root/linux
Mode        Name             Size (bytes)
-rw-r--r--  Makefile.kernel          1580
-rw-r--r--  Makefile.linux           5805
-rw-r--r--  README.drm               1482
-rw-r--r--  agpsupport.c            10744
-rw-r--r--  auth.c                   4538
-rw-r--r--  bufs.c                  14336
-rw-r--r--  context.c                8870
-rw-r--r--  ctxbitmap.c              2672
-rw-r--r--  dma.c                   13914
-rw-r--r--  drawable.c               1958
-rw-r--r--  drm.h                   13522
-rw-r--r--  drmP.h                  27072
-rw-r--r--  fops.c                   7101
-rw-r--r--  gamma_dma.c             20764
-rw-r--r--  gamma_drv.c             14636
-rw-r--r--  gamma_drv.h              2667
-rw-r--r--  i810_bufs.c             11917
-rw-r--r--  i810_context.c           6143
-rw-r--r--  i810_dma.c              27244
-rw-r--r--  i810_drm.h               3050
-rw-r--r--  i810_drv.c              18828
-rw-r--r--  i810_drv.h               7593
-rw-r--r--  init.c                   3372
-rw-r--r--  ioctl.c                  3266
-rw-r--r--  lists.c                  6700
-rw-r--r--  lock.c                   6128
-rw-r--r--  memory.c                12012
-rw-r--r--  mga_bufs.c              17282
-rw-r--r--  mga_context.c            6237
-rw-r--r--  mga_dma.c               32951
-rw-r--r--  mga_drm.h                8077
-rw-r--r--  mga_drv.c               19108
-rw-r--r--  mga_drv.h               13665
-rw-r--r--  mga_state.c             27588
-rw-r--r--  picker.c                  398
-rw-r--r--  proc.c                  17085
-rw-r--r--  r128_bufs.c              8502
-rw-r--r--  r128_context.c           6214
-rw-r--r--  r128_dma.c              25453
-rw-r--r--  r128_drm.h               3683
-rw-r--r--  r128_drv.c              21575
-rw-r--r--  r128_drv.h               8272
-rw-r--r--  tdfx_context.c           6299
-rw-r--r--  tdfx_drv.c              20237
-rw-r--r--  tdfx_drv.h               3163
-rw-r--r--  vm.c                     9136
lass="hl ppc"> for (pos = list_entry((head)->prev, typeof(*pos), member), \ n = list_entry(pos->member.prev, typeof(*pos), member); \ &pos->member != (head); \ pos = n, n = list_entry(n->member.prev, typeof(*n), member)) #endif #include <linux/mm.h> #include <asm/page.h> #if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)) && \ (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))) #define DRM_ODD_MM_COMPAT #endif #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)) #define DRM_FULL_MM_COMPAT #endif /* * Flush relevant caches and clear a VMA structure so that page references * will cause a page fault. Don't flush tlbs. */ extern void drm_clear_vma(struct vm_area_struct *vma, unsigned long addr, unsigned long end); /* * Return the PTE protection map entries for the VMA flags given by * flags. This is a functional interface to the kernel's protection map. */ extern pgprot_t vm_get_page_prot(unsigned long vm_flags); #ifndef GFP_DMA32 #define GFP_DMA32 0 #endif #if defined(CONFIG_X86) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) /* * These are too slow in earlier kernels. */ extern int drm_unmap_page_from_agp(struct page *page); extern int drm_map_page_into_agp(struct page *page); #define map_page_into_agp drm_map_page_into_agp #define unmap_page_from_agp drm_unmap_page_from_agp #endif #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) extern struct page *get_nopage_retry(void); extern void free_nopage_retry(void); #define NOPAGE_REFAULT get_nopage_retry() #endif #ifndef DRM_FULL_MM_COMPAT /* * For now, just return a dummy page that we've allocated out of * static space. The page will be put by do_nopage() since we've already * filled out the pte. */ struct fault_data { struct vm_area_struct *vma; unsigned long address; pgoff_t pgoff; unsigned int flags; int type; }; #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)) extern struct page *drm_bo_vm_nopage(struct vm_area_struct *vma, unsigned long address, int *type); #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)) && \ !defined(DRM_FULL_MM_COMPAT) extern unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma, unsigned long address); #endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)) */ #endif /* ndef DRM_FULL_MM_COMPAT */ #ifdef DRM_ODD_MM_COMPAT struct drm_buffer_object; /* * Add a vma to the ttm vma list, and the * process mm pointer to the ttm mm list. Needs the ttm mutex. */ extern int drm_bo_add_vma(struct drm_buffer_object * bo, struct vm_area_struct *vma); /* * Delete a vma and the corresponding mm pointer from the * ttm lists. Needs the ttm mutex. */ extern void drm_bo_delete_vma(struct drm_buffer_object * bo, struct vm_area_struct *vma); /* * Attempts to lock all relevant mmap_sems for a ttm, while * not releasing the ttm mutex. May return -EAGAIN to avoid * deadlocks. In that case the caller shall release the ttm mutex, * schedule() and try again. */ extern int drm_bo_lock_kmm(struct drm_buffer_object * bo); /* * Unlock all relevant mmap_sems for a ttm. */ extern void drm_bo_unlock_kmm(struct drm_buffer_object * bo);