/**
 * \file radeon_ioc32.c
 *
 * 32-bit ioctl compatibility routines for the Radeon DRM.
 *
 * \author Paul Mackerras
 *
 * Copyright (C) Paul Mackerras 2005
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/compat.h>

#include "drmP.h"
#include "drm.h"
#include "radeon_drm.h"
#include "radeon_drv.h"

typedef struct drm_radeon_init32 {
	int func;
	u32 sarea_priv_offset;
	int is_pci;
	int cp_mode;
	int gart_size;
	int ring_size;
	int usec_timeout;

	unsigned int fb_bpp;
	unsigned int front_offset, front_pitch;
	unsigned int back_offset, back_pitch;
	unsigned int depth_bpp;
	unsigned int depth_offset, depth_pitch;

	u32 fb_offset;
	u32 mmio_offset;
	u32 ring_offset;
	u32 ring_rptr_offset;
	u32 buffers_offset;
	u32 gart_textures_offset;
} drm_radeon_init32_t;

static int compat_radeon_cp_init(struct file *file, unsigned int cmd,
				 unsigned long arg)
{
	drm_radeon_init32_t init32;
	drm_radeon_init_t __user *init;

	if (copy_from_user(&init32, (void __user *)arg, sizeof(init32)))
		return -EFAULT;

	init = compat_alloc_user_space(sizeof(*init));
	if (!access_ok(VERIFY_WRITE, init, sizeof(*init))
	    || __put_user(init32.func, &init->func)
	    || __put_user(init32.sarea_priv_offset, &init->sarea_priv_offset)
	    || __put_user(init32.is_pci, &init->is_pci)
	    || __put_user(init32.cp_mode, &init->cp_mode)
	    || __put_user(init32.gart_size, &init->gart_size)
	    || __put_user(init32.ring_size, &init->ring_size)
	    || __put_user(init32.usec_timeout, &init->usec_timeout)
	    || __put_user(init32.fb_bpp, &init->fb_bpp)
	    || __put_user(init32.front_offset, &init->front_offset)
	    || __put_user(init32.front_pitch, &init->front_pitch)
	    || __put_user(init32.back_offset, &init->back_offset)
	    || __put_user(init32.back_pitch, &init->back_pitch)
	    || __put_user(init32.depth_bpp, &init->depth_bpp)
	    || __put_user(init32.depth_offset, &init->depth_offset)
	    || __put_user(init32.depth_pitch, &init->depth_pitch)
	    || __put_user(init32.fb_offset, &init->fb_offset)
	    || __put_user(init32.mmio_offset, &init->mmio_offset)
	    || __put_user(init32.ring_offset, &init->ring_offset)
	    || __put_user(init32.ring_rptr_offset, &init->ring_rptr_offset)
	    || __put_user(init32.buffers_offset, &init->buffers_offset)
	    || __put_user(init32.gart_textures_offset,
			  &init->gart_textures_offset))
		return -EFAULT;

	return drm_ioctl(file->f_dentry->d_inode, file,
			 DRM_IOCTL_RADEON_CP_INIT, (unsigned long)init);
}

typedef struct drm_radeon_clear32 {
	unsigned int flags;
	unsigned int clear_color;
	unsigned int clear_depth;
	unsigned int color_mask;
	unsigned int depth_mask;	/* misnamed field: should be stencil */
	u32 depth_boxes;
} drm_radeon_clear32_t;

static int compat_radeon_cp_clear(struct file *file, unsigned int cmd,
				  unsigned long arg)
{
	drm_radeon_clear32_t clr32;
	drm_radeon_clear_t __user *clr;

	if (copy_from_user(&clr32, (void __user *)arg, sizeof(clr32)))
		return -EFAULT;

	clr = compat_alloc_user_space(sizeof(*clr));
	if (!access_ok(VERIFY_WRITE, clr, sizeof(*clr))
	    || __put_user(clr32.flags, &clr->flags)
	    || __put_user(clr32.clear_color, &clr->clear_color)
	    || __put_user(clr32.clear_depth, &clr->clear_depth)
	    || __put_user(clr32.color_mask, &clr->color_mask)
	    || __put_user(clr32.depth_mask, &clr->depth_mask)
	    || __put_user((void __user *)(unsigned long)clr32.depth_boxes,
			  &clr->depth_boxes))
		return -EFAULT;

	return drm_ioctl(file->f_dentry->d_inode, file,
			 DRM_IOCTL_RADEON_CLEAR, (unsigned long)clr);
}

typedef struct drm_radeon_stipple32 {
	u32 mask;
} drm_radeon_stipple32_t;

static int compat_radeon_cp_stipple(struct file *file, unsigned int cmd,
				    unsigned long arg)
{
	drm_radeon_stipple32_t __user *argp = (void __user *)arg;
	drm_radeon_stipple_t __user *request;
	u32 mask;

	if (get_user(mask, &argp->mask))
		return -EFAULT;

	request = compat_alloc_user_space(sizeof(*request));
	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
	    || __put_user((unsigned int __user *)(unsigned long)mask,
			  &request->mask))
		return -EFAULT;

	return drm_ioctl(file->f_dentry->d_inode, file,
			 DRM_IOCTL_RADEON_STIPPLE, (unsigned long)request);
}

typedef struct drm_radeon_tex_image32 {
	unsigned int x, y;	/* Blit coordinates */
	unsigned int width, height;
	u32 data;
} drm_radeon_tex_image32_t;

typedef struct drm_radeon_texture32 {
	unsigned int offset;
	int pitch;
	int format;
	int width;		/* Texture image coordinates */
	int height;
	u32 image;
} drm_radeon_texture32_t;

static int compat_radeon_cp_texture(struct file *file, unsigned int cmd,
				    unsigned long arg)
{
	drm_radeon_texture32_t req32;
	drm_radeon_texture_t __user *request;
	drm_radeon_tex_image32_t img32;
	drm_radeon_tex_image_t __user *image;

	if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
		return -EFAULT;
	if (req32.image == 0)
		return -EINVAL;
	if (copy_from_user(&img32, (void __user *)(unsigned long)req32.image,
			   sizeof(img32)))
		return -EFAULT;

	request = compat_alloc_user_space(sizeof(*request) + sizeof(*image));
	if (!access_ok(VERIFY_WRITE, request,
		       sizeof(*request) + sizeof(*image)))
		return -EFAULT;
	image = (drm_radeon_tex_image_t __user *)(request + 1);

	if (__put_user(req32.offset, &request->offset)
	    || __put_user(req32.pitch, &request->pitch)
	    || __put_user(req32.format, &request->format)
	    || __put_user(req32.width, &request->width)
	    || __put_user(req32.height, &request->height)
	    || __put_user(image, &request->image)
	    || __put_user(img32.x, &image->x)
	    || __put_user(img32.y, &image->y)
	    || __put_user(img32.width, &image->width)
	    || __put_user(img32.height, &image->height)
	    || __put_user((const void __user *)(unsigned long)img32.data,
			  &image->data))
		return -EFAULT;

	return drm_ioctl(file->f_dentry->d_inode, file,
			 DRM_IOCTL_RADEON_TEXTURE, (unsigned long)request);
}

typedef struct drm_radeon_vertex2_32 {
	int idx;		/* Index of vertex buffer */
	int discard;		/* Client finished with buffer? */
	int nr_states;
	u32 state;
	int nr_prims;
	u32 prim;
} drm_radeon_vertex2_32_t;

static int compat_radeon_cp_vertex2(struct file *file, unsigned int cmd,
				    unsigned long arg)
{
	drm_radeon_vertex2_32_t req32;
	drm_radeon_vertex2_t __user *request;

	if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
		return -EFAULT;

	request = compat_alloc_user_space(sizeof(*request));
	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
	    || __put_user(req32.idx, &request->idx)
	    || __put_user(req32.discard, &request->discard)
	    || __put_user(req32.nr_states, &request->nr_states)
	    || __put_user((void __user *)(unsigned long)req32.state,
			  &request->state)
	    || __put_user(req32.nr_prims, &request->nr_prims)
	    || __put_user((void __user *)(unsigned long)req32.prim,
			  &request->prim))
		return -EFAULT;

	return drm_ioctl(file->f_dentry->d_inode, file,
			 DRM_IOCTL_RADEON_VERTEX2, (unsigned long)request);
}

typedef struct drm_radeon_cmd_buffer32 {
	int bufsz;
	u32 buf;
	int nbox;
	u32 boxes;
} drm_radeon_cmd_buffer32_t;

static int compat_radeon_cp_cmdbuf(struct file *file, unsigned int cmd,
				   unsigned long arg)
{
	drm_radeon_cmd_buffer32_t req32;
	drm_radeon_cmd_buffer_t __user *request;

	if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
		return -EFAULT;

	request = compat_alloc_user_space(sizeof(*request));
	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
	    || __put_user(req32.bufsz, &request->bufsz)
	    || __put_user((void __user *)(unsigned long)req32.buf,
			  &request->buf)
	    || __put_user(req32.nbox, &request->nbox)
	    || __put_user((void __user *)(unsigned long)req32.boxes,
			  &request->boxes))
		return -EFAULT;

	return drm_ioctl(file->f_dentry->d_inode, file,
			 DRM_IOCTL_RADEON_CMDBUF, (unsigned long)request);
}

typedef struct drm_radeon_getparam32 {
	int param;
	u32 value;
} drm_radeon_getparam32_t;

static int compat_radeon_cp_getparam(struct file *file, unsigned int cmd,
				     unsigned long arg)
{
	drm_radeon_getparam32_t req32;
	drm_radeon_getparam_t __user *request;

	if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
		return -EFAULT;

	request = compat_alloc_user_space(sizeof(*request));
	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
	    || __put_user(req32.param, &request->param)
	    || __put_user((void __user *)(unsigned long)req32.value,
			  &request->value))
		return -EFAULT;

	return drm_ioctl(file->f_dentry->d_inode, file,
			 DRM_IOCTL_RADEON_GETPARAM, (unsigned long)request);
}

typedef struct drm_radeon_mem_alloc32 {
	int region;
	int alignment;
	int size;
	u32 region_offset;	/* offset from start of fb or GART */
} drm_radeon_mem_alloc32_t;

static int compat_radeon_mem_alloc(struct file *file, unsigned int cmd,
				   unsigned long arg)
{
	drm_radeon_mem_alloc32_t req32;
	drm_radeon_mem_alloc_t __user *request;

	if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
		return -EFAULT;

	request = compat_alloc_user_space(sizeof(*request));
	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
	    || __put_user(req32.region, &request->region)
	    || __put_user(req32.alignment, &request->alignment)
	    || __put_user(req32.size, &request->size)
	    || __put_user((int __user *)(unsigned long)req32.region_offset,
			  &request->region_offset))
		return -EFAULT;

	return drm_ioctl(file->f_dentry->d_inode, file,
			 DRM_IOCTL_RADEON_ALLOC, (unsigned long)request);
}

typedef struct drm_radeon_irq_emit32 {
	u32 irq_seq;
} drm_radeon_irq_emit32_t;

static int compat_radeon_irq_emit(struct file *file, unsigned int cmd,
				  unsigned long arg)
{
	drm_radeon_irq_emit32_t req32;
	drm_radeon_irq_emit_t __user *request;

	if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
		return -EFAULT;

	request = compat_alloc_user_space(sizeof(*request));
	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
	    || __put_user((int __user *)(unsigned long)req32.irq_seq,
			  &request->irq_seq))
		return -EFAULT;

	return drm_ioctl(file->f_dentry->d_inode, file,
			 DRM_IOCTL_RADEON_IRQ_EMIT, (unsigned long)request);
}

/* The two 64-bit arches where alignof(u64) == 4 in 32-bit code */
#if defined(CONFIG_X86_64) || defined(CONFIG_IA64)
typedef struct drm_radeon_setparam32 {
	int param;
	u64 value;
} __attribute__((packed)) drm_radeon_setparam32_t;

static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
				     unsigned long arg)
{
	drm_radeon_setparam32_t req32;
	drm_radeon_setparam_t __user *request;

	if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
		return -EFAULT;

	request = compat_alloc_user_space(sizeof(*request));
	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
	    || __put_user(req32.param, &request->param)
	    || __put_user((void __user *)(unsigned long)req32.value,
			  &request->value))
		return -EFAULT;

	return drm_ioctl(file->f_dentry->d_inode, file,
			 DRM_IOCTL_RADEON_SETPARAM, (unsigned long)request);
}
#else
#define compat_radeon_cp_setparam NULL
#endif /* X86_64 || IA64 */

drm_ioctl_compat_t *radeon_compat_ioctls[] = {
	[DRM_RADEON_CP_INIT] = compat_radeon_cp_init,
	[DRM_RADEON_CLEAR] = compat_radeon_cp_clear,
	[DRM_RADEON_STIPPLE] = compat_radeon_cp_stipple,
	[DRM_RADEON_TEXTURE] = compat_radeon_cp_texture,
	[DRM_RADEON_VERTEX2] = compat_radeon_cp_vertex2,
	[DRM_RADEON_CMDBUF] = compat_radeon_cp_cmdbuf,
	[DRM_RADEON_GETPARAM] = compat_radeon_cp_getparam,
	[DRM_RADEON_SETPARAM] = compat_radeon_cp_setparam,
	[DRM_RADEON_ALLOC] = compat_radeon_mem_alloc,
	[DRM_RADEON_IRQ_EMIT] = compat_radeon_irq_emit,
};

/**
 * Called whenever a 32-bit process running under a 64-bit kernel
 * performs an ioctl on /dev/dri/card<n>.
 *
 * \param filp file pointer.
 * \param cmd command.
 * \param arg user argument.
 * \return zero on success or negative number on failure.
 */
long radeon_compat_ioctl(struct file *filp, unsigned int cmd,
			 unsigned long arg)
{
	unsigned int nr = DRM_IOCTL_NR(cmd);
	drm_ioctl_compat_t *fn = NULL;
	int ret;

	if (nr < DRM_COMMAND_BASE)
		return drm_compat_ioctl(filp, cmd, arg);

	if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(radeon_compat_ioctls))
		fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];

	lock_kernel();		/* XXX for now */
	if (fn != NULL)
		ret = (*fn)(filp, cmd, arg);
	else
		ret = drm_ioctl(filp->f_dentry->d_inode, filp, cmd, arg);
	unlock_kernel();

	return ret;
}
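
/*
 * Usage sketch (illustrative, not copied from radeon_drv.c): the handler
 * above becomes reachable once the driver wires it into the file operations
 * it registers with the DRM core.  The field layout shown is an assumption
 * for this era of the tree:
 *
 *	static struct file_operations radeon_fops = {
 *		.owner		= THIS_MODULE,
 *		.ioctl		= drm_ioctl,
 *	#if defined(CONFIG_COMPAT)
 *		.compat_ioctl	= radeon_compat_ioctl,
 *	#endif
 *	};
 */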
/**
 * \file drm_compat.h
 * Backward compatibility definitions for the Direct Rendering Manager
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef _DRM_COMPAT_H_
#define _DRM_COMPAT_H_

#ifndef minor
#define minor(x) MINOR((x))
#endif

#ifndef MODULE_LICENSE
#define MODULE_LICENSE(x)
#endif

#ifndef preempt_disable
#define preempt_disable()
#define preempt_enable()
#endif

#ifndef pte_offset_map
#define pte_offset_map pte_offset
#define pte_unmap(pte)
#endif

#ifndef module_param
#define module_param(name, type, perm)
#endif

/* older kernels had different irq args */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
#undef DRM_IRQ_ARGS
#define DRM_IRQ_ARGS		int irq, void *arg, struct pt_regs *regs
#endif

#ifndef list_for_each_safe
#define list_for_each_safe(pos, n, head)				\
	for (pos = (head)->next, n = pos->next; pos != (head);		\
		pos = n, n = pos->next)
#endif

#ifndef list_for_each_entry
#define list_for_each_entry(pos, head, member)				\
       for (pos = list_entry((head)->next, typeof(*pos), member),	\
                    prefetch(pos->member.next);				\
            &pos->member != (head);					\
            pos = list_entry(pos->member.next, typeof(*pos), member),	\
                    prefetch(pos->member.next))
#endif

#ifndef list_for_each_entry_safe
#define list_for_each_entry_safe(pos, n, head, member)                  \
        for (pos = list_entry((head)->next, typeof(*pos), member),      \
                n = list_entry(pos->member.next, typeof(*pos), member); \
             &pos->member != (head);                                    \
             pos = n, n = list_entry(n->member.next, typeof(*n), member))
#endif
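
/*
 * Usage sketch for the _safe variant above: the lookahead pointer ("n")
 * lets entries be unlinked and freed mid-walk.  The item type and list head
 * below are hypothetical:
 *
 *	struct item { struct list_head link; };
 *	struct item *pos, *tmp;
 *
 *	list_for_each_entry_safe(pos, tmp, &item_list, link) {
 *		list_del(&pos->link);
 *		kfree(pos);
 *	}
 */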

#ifndef __user
#define __user
#endif

#if !defined(__put_page)
#define __put_page(p)           atomic_dec(&(p)->count)
#endif

#if !defined(__GFP_COMP)
#define __GFP_COMP 0
#endif

#if !defined(IRQF_SHARED)
#define IRQF_SHARED SA_SHIRQ
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10)
static inline int remap_pfn_range(struct vm_area_struct *vma,
				  unsigned long from, unsigned long pfn,
				  unsigned long size, pgprot_t pgprot)
{
	return remap_page_range(vma, from, pfn << PAGE_SHIFT, size, pgprot);
}
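
/*
 * For reference, the canonical call from a driver's mmap() method, which
 * works both with the shim above and with the real remap_pfn_range():
 *
 *	remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
 *			vma->vm_end - vma->vm_start, vma->vm_page_prot);
 */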

/* Fallback kcalloc for pre-2.6.10 kernels; unlike the upstream version it
 * does not check nmemb * size for multiplication overflow. */
static __inline__ void *kcalloc(size_t nmemb, size_t size, int flags)
{
	void *addr;

	addr = kmalloc(size * nmemb, flags);
	if (addr != NULL)
		memset((void *)addr, 0, size * nmemb);

	return addr;
}
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)
#define mutex_lock down
#define mutex_unlock up

#define mutex semaphore

#define mutex_init(a) sema_init((a), 1)

#endif
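
/*
 * With the defines above, pre-2.6.16 callers transparently fall back to
 * semaphores: "struct mutex m;" declares a struct semaphore, mutex_init(&m)
 * becomes sema_init(&m, 1), and mutex_lock()/mutex_unlock() become
 * down()/up().
 */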

#ifndef DEFINE_SPINLOCK
#define DEFINE_SPINLOCK(x) spinlock_t x = SPIN_LOCK_UNLOCKED
#endif

/* old architectures */
#ifdef __AMD64__
#define __x86_64__
#endif

/* sysfs __ATTR macro */
#ifndef __ATTR
#define __ATTR(_name,_mode,_show,_store) { \
        .attr = {.name = __stringify(_name), .mode = _mode, .owner = THIS_MODULE },     \
        .show   = _show,                                        \
        .store  = _store,                                       \
}
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
#define vmalloc_user(_size) ({void * tmp = vmalloc(_size);   \
      if (tmp) memset(tmp, 0, (_size));			      \
      (tmp);})
#endif

#ifndef list_for_each_entry_safe_reverse
#define list_for_each_entry_safe_reverse(pos, n, head, member)          \
        for (pos = list_entry((head)->prev, typeof(*pos), member),      \
                n = list_entry(pos->member.prev, typeof(*pos), member); \
             &pos->member != (head);                                    \
             pos = n, n = list_entry(n->member.prev, typeof(*n), member))
#endif

#include <linux/mm.h>
#include <asm/page.h>

#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)) && \
     (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)))
#define DRM_ODD_MM_COMPAT
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21))
#define DRM_FULL_MM_COMPAT
#endif


/*
 * Flush relevant caches and clear a VMA structure so that page references
 * will cause a page fault. Don't flush tlbs.
 */

extern void drm_clear_vma(struct vm_area_struct *vma,
			  unsigned long addr, unsigned long end);

/*
 * Return the PTE protection map entry for the VMA flags given by
 * vm_flags. This is a functional interface to the kernel's protection map.
 */

extern pgprot_t vm_get_page_prot(unsigned long vm_flags);

#ifndef GFP_DMA32
#define GFP_DMA32 GFP_KERNEL
#endif
#ifndef __GFP_DMA32
#define __GFP_DMA32 GFP_KERNEL
#endif

#if defined(CONFIG_X86) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))

/*
 * These are too slow in earlier kernels.
 */

extern int drm_unmap_page_from_agp(struct page *page);
extern int drm_map_page_into_agp(struct page *page);

#define map_page_into_agp drm_map_page_into_agp
#define unmap_page_from_agp drm_unmap_page_from_agp
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
extern struct page *get_nopage_retry(void);
extern void free_nopage_retry(void);

#define NOPAGE_REFAULT get_nopage_retry()
#endif


#ifndef DRM_FULL_MM_COMPAT

/*
 * For now, just return a dummy page that we've allocated out of
 * static space. The page will be put by do_nopage() since we've already
 * filled out the pte.
 */

struct fault_data {
	struct vm_area_struct *vma;
	unsigned long address;
	pgoff_t pgoff;
	unsigned int flags;

	int type;
};

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
extern struct page *drm_bo_vm_nopage(struct vm_area_struct *vma,
				     unsigned long address,
				     int *type);
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)) && \
  !defined(DRM_FULL_MM_COMPAT)
extern unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma,
				     unsigned long address);
#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)) */
#endif /* ndef DRM_FULL_MM_COMPAT */

#ifdef DRM_ODD_MM_COMPAT

struct drm_buffer_object;


/*
 * Add a vma to the ttm vma list, and the
 * process mm pointer to the ttm mm list. Needs the ttm mutex.
 */

extern int drm_bo_add_vma(struct drm_buffer_object * bo,
			   struct vm_area_struct *vma);
/*
 * Delete a vma and the corresponding mm pointer from the
 * ttm lists. Needs the ttm mutex.
 */
extern void drm_bo_delete_vma(struct drm_buffer_object * bo,
			      struct vm_area_struct *vma);

/*
 * Attempts to lock all relevant mmap_sems for a ttm, while
 * not releasing the ttm mutex. May return -EAGAIN to avoid
 * deadlocks. In that case the caller shall release the ttm mutex,
 * schedule() and try again.
 */

extern int drm_bo_lock_kmm(struct drm_buffer_object * bo);
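
/*
 * Caller-side retry sketch for the -EAGAIN case described above; the name
 * of the ttm mutex ("bo->mutex") is an assumption here:
 *
 *	while (drm_bo_lock_kmm(bo) == -EAGAIN) {
 *		mutex_unlock(&bo->mutex);
 *		schedule();
 *		mutex_lock(&bo->mutex);
 *	}
 */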

/*
 * Unlock all relevant mmap_sems for a ttm.
 */
extern void drm_bo_unlock_kmm(struct drm_buffer_object * bo);

/*
 * If the ttm was bound to the aperture, this function shall be called
 * with all relevant mmap sems held. It deletes the flag VM_PFNMAP from all
 * vmas mapping this ttm. This is needed just after unmapping the ptes of
 * the vma, otherwise the do_nopage() function will bug :(. The function
 * releases the mmap_sems for this ttm.
 */

extern void drm_bo_finish_unmap(struct drm_buffer_object *bo);

/*
 * Remap all vmas of this ttm using io_remap_pfn_range. We cannot
 * fault these pfns in, because the first one will set the vma VM_PFNMAP
 * flag, which will make the next fault bug in do_nopage(). The function
 * releases the mmap_sems for this ttm.
 */

extern int drm_bo_remap_bound(struct drm_buffer_object *bo);


/*
 * Remap a vma for a bound ttm. Call with the ttm mutex held and
 * the relevant mmap_sem locked.
 */
extern int drm_bo_map_bound(struct vm_area_struct *vma);

#endif

/* fixme: remove once these functions are upstreamed - they went in for 2.6.23 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23))
#define DRM_IDR_COMPAT_FN
#endif
#ifdef DRM_IDR_COMPAT_FN
int idr_for_each(struct idr *idp,
		 int (*fn)(int id, void *p, void *data), void *data);
void idr_remove_all(struct idr *idp);
#endif
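
/*
 * Usage sketch for the compat IDR helpers above; the idr instance and the
 * callback are hypothetical:
 *
 *	static int free_one(int id, void *p, void *data)
 *	{
 *		kfree(p);
 *		return 0;
 *	}
 *
 *	idr_for_each(&file_priv->object_idr, free_one, NULL);
 *	idr_remove_all(&file_priv->object_idr);
 */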


#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18))
void *idr_replace(struct idr *idp, void *ptr, int id);
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
typedef _Bool                   bool;
#endif


#if (defined(CONFIG_X86) && defined(CONFIG_X86_32) && defined(CONFIG_HIGHMEM))
#define DRM_KMAP_ATOMIC_PROT_PFN
extern void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type,
				  pgprot_t protection);
#endif

#if !defined(flush_agp_mappings)
#define flush_agp_mappings() do {} while(0)
#endif

#ifndef DMA_BIT_MASK
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : (1ULL<<(n)) - 1)
#endif
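
/*
 * Example: DMA_BIT_MASK(32) == 0x00000000ffffffffULL.  The n == 64 case is
 * special-cased because shifting 1ULL by 64 is undefined behaviour in C.
 */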

#endif