/* i915_dma.c -- DMA support for the I915 -*- linux-c -*- */
/**************************************************************************
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 **************************************************************************/

#include "i915.h"
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"

static inline void i915_print_status_page(drm_device_t * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 *temp = dev_priv->hw_status_page;

	if (!temp) {
		DRM_DEBUG("no status page\n");
		return;
	}

	DRM_DEBUG("hw_status: Interrupt Status : %x\n", temp[0]);
	DRM_DEBUG("hw_status: LpRing Head ptr : %x\n", temp[1]);
	DRM_DEBUG("hw_status: IRing Head ptr : %x\n", temp[2]);
	DRM_DEBUG("hw_status: Reserved : %x\n", temp[3]);
	DRM_DEBUG("hw_status: Driver Counter : %d\n", temp[5]);
}

/* Really want an OS-independent resettable timer.  Would like to have
 * this loop run for (eg) 3 sec, but have the timer reset every time
 * the head pointer changes, so that EBUSY only happens if the ring
 * actually stalls for (eg) 3 seconds.
 */
int i915_wait_ring(drm_device_t * dev, int n, const char *caller)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
	u32 last_head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
	int i;

	for (i = 0; i < 10000; i++) {
		ring->head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
		ring->space = ring->head - (ring->tail + 8);
		if (ring->space < 0)
			ring->space += ring->Size;
		if (ring->space >= n)
			return 0;

		dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;

		if (ring->head != last_head)
			i = 0;

		last_head = ring->head;
	}

	return DRM_ERR(EBUSY);
}

void i915_kernel_lost_context(drm_device_t * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_ring_buffer_t *ring = &(dev_priv->ring);

	ring->head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
	ring->tail = I915_READ(LP_RING + RING_TAIL) & TAIL_ADDR;
	ring->space = ring->head - (ring->tail + 8);
	if (ring->space < 0)
		ring->space += ring->Size;

	if (ring->head == ring->tail)
		dev_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
}

int i915_dma_cleanup(drm_device_t * dev)
{
	/* Make sure interrupts are disabled here because the uninstall ioctl
	 * may not have been called from userspace and after dev_private
	 * is freed, it's too late.
	 */
	if (dev->irq)
		DRM(irq_uninstall) (dev);

	if (dev->dev_private) {
		drm_i915_private_t *dev_priv =
		    (drm_i915_private_t *) dev->dev_private;

		if (dev_priv->ring.virtual_start) {
			drm_core_ioremapfree(&dev_priv->ring.map, dev);
		}

		if (dev_priv->hw_status_page) {
#ifdef __FreeBSD__
#if __FreeBSD_version > 500000
			contigfree(dev_priv->hw_status_page, PAGE_SIZE, M_DRM);
#endif
#else
			pci_free_consistent(dev->pdev, PAGE_SIZE,
					    dev_priv->hw_status_page,
					    dev_priv->dma_status_page);
#endif
			/* Need to rewrite hardware status page */
			I915_WRITE(0x02080, 0x1ffff000);
		}

		DRM(free) (dev->dev_private, sizeof(drm_i915_private_t),
			   DRM_MEM_DRIVER);

		dev->dev_private = NULL;
	}
	return 0;
}

static int i915_initialize(drm_device_t * dev,
			   drm_i915_private_t * dev_priv,
			   drm_i915_init_t * init)
{
	memset(dev_priv, 0, sizeof(drm_i915_private_t));

	DRM_GETSAREA();
	if (!dev_priv->sarea) {
		DRM_ERROR("can not find sarea!\n");
		dev->dev_private = (void *)dev_priv;
		i915_dma_cleanup(dev);
		return DRM_ERR(EINVAL);
	}

	dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset);
	if (!dev_priv->mmio_map) {
		dev->dev_private = (void *)dev_priv;
		i915_dma_cleanup(dev);
		DRM_ERROR("can not find mmio map!\n");
		return DRM_ERR(EINVAL);
	}

	dev_priv->sarea_priv = (drm_i915_sarea_t *)
	    ((u8 *) dev_priv->sarea->handle + init->sarea_priv_offset);

	dev_priv->ring.Start = init->ring_start;
	dev_priv->ring.End = init->ring_end;
	dev_priv->ring.Size = init->ring_size;
	dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;

	dev_priv->ring.map.offset = init->ring_start;
	dev_priv->ring.map.size = init->ring_size;
	dev_priv->ring.map.type = 0;
	dev_priv->ring.map.flags = 0;
	dev_priv->ring.map.mtrr = 0;

	drm_core_ioremap(&dev_priv->ring.map, dev);

	if (dev_priv->ring.map.handle == NULL) {
		dev->dev_private = (void *)dev_priv;
		i915_dma_cleanup(dev);
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return DRM_ERR(ENOMEM);
	}

	dev_priv->ring.virtual_start = dev_priv->ring.map.handle;

	dev_priv->back_offset = init->back_offset;
	dev_priv->front_offset = init->front_offset;
	dev_priv->current_page = 0;
	dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;

	/* We are using separate values as placeholders for mechanisms for
	 * private backbuffer/depthbuffer usage.
	 */
	dev_priv->use_mi_batchbuffer_start = 0;

	/* Allow hardware batchbuffers unless told otherwise.
	 */
	dev_priv->allow_batchbuffer = 1;

	/* Program Hardware Status Page */
#ifdef __FreeBSD__
	dev_priv->hw_status_page =
	    contigmalloc(PAGE_SIZE, DRM(M_DRM), M_NOWAIT, 0ul, 0, 0, 0);
	dev_priv->dma_status_page = vtophys(dev_priv->hw_status_page);
#else
	dev_priv->hw_status_page =
	    pci_alloc_consistent(dev->pdev, PAGE_SIZE,
				 &dev_priv->dma_status_page);
#endif
	if (!dev_priv->hw_status_page) {
		dev->dev_private = (void *)dev_priv;
		i915_dma_cleanup(dev);
		DRM_ERROR("Can not allocate hardware status page\n");
		return DRM_ERR(ENOMEM);
	}
	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
	DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);

	I915_WRITE(0x02080, dev_priv->dma_status_page);
	DRM_DEBUG("Enabled hardware status page\n");

	dev->dev_private = (void *)dev_priv;

	return 0;
}

static int i915_dma_resume(drm_device_t * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	DRM_DEBUG("%s\n", __FUNCTION__);

	if (!dev_priv->sarea) {
		DRM_ERROR("can not find sarea!\n");
		return DRM_ERR(EINVAL);
	}

	if (!dev_priv->mmio_map) {
		DRM_ERROR("can not find mmio map!\n");
		return DRM_ERR(EINVAL);
	}

	if (dev_priv->ring.map.handle == NULL) {
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return DRM_ERR(ENOMEM);
	}

	/* Program Hardware Status Page */
	if (!dev_priv->hw_status_page) {
		DRM_ERROR("Can not find hardware status page\n");
		return DRM_ERR(EINVAL);
	}
	DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);

	I915_WRITE(0x02080, dev_priv->dma_status_page);
	DRM_DEBUG("Enabled hardware status page\n");

	return 0;
}

int i915_dma_init(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_i915_private_t *dev_priv;
	drm_i915_init_t init;
	int retcode = 0;

	DRM_COPY_FROM_USER_IOCTL(init, (drm_i915_init_t __user *) data,
				 sizeof(init));

	switch (init.func) {
	case I915_INIT_DMA:
		dev_priv = DRM(alloc) (sizeof(drm_i915_private_t),
				       DRM_MEM_DRIVER);
		if (dev_priv == NULL)
			return DRM_ERR(ENOMEM);
		retcode = i915_initialize(dev, dev_priv, &init);
		break;
	case I915_CLEANUP_DMA:
		retcode = i915_dma_cleanup(dev);
		break;
	case I915_RESUME_DMA:
		retcode = i915_dma_resume(dev);
		break;
	default:
		retcode = -EINVAL;
		break;
	}

	return retcode;
}

/* Implement basically the same security restrictions as hardware does
 * for MI_BATCH_NON_SECURE.  These can be made stricter at any time.
 *
 * Most of the calculations below involve calculating the size of a
 * particular instruction.  It's important to get the size right as
 * that tells us where the next instruction to check is.  Any illegal
 * instruction detected will be given a size of zero, which is a
 * signal to abort the rest of the buffer.
 */
static int do_validate_cmd(int cmd)
{
	switch (((cmd >> 29) & 0x7)) {
	case 0x0:
		switch ((cmd >> 23) & 0x3f) {
		case 0x0:
			return 1;	/* MI_NOOP */
		case 0x4:
			return 1;	/* MI_FLUSH */
		default:
			return 0;	/* disallow everything else */
		}
		break;
	case 0x1:
		return 0;	/* reserved */
	case 0x2:
		return (cmd & 0xff) + 2;	/* 2d commands */
	case 0x3:
		if (((cmd >> 24) & 0x1f) <= 0x18)
			return 1;

		switch ((cmd >> 24) & 0x1f) {
		case 0x1c:
			return 1;
		case 0x1d:
			switch ((cmd >> 16) & 0xff) {
			case 0x3:
				return (cmd & 0x1f) + 2;
			case 0x4:
				return (cmd & 0xf) + 2;
			default:
				return (cmd & 0xffff) + 2;
			}
		case 0x1e:
			if (cmd & (1 << 23))
				return (cmd & 0xffff) + 1;
			else
				return 1;
		case 0x1f:
			if ((cmd & (1 << 23)) == 0)	/* inline vertices */
				return (cmd & 0x1ffff) + 2;
			else if (cmd & (1 << 17))	/* indirect random */
				if ((cmd & 0xffff) == 0)
					return 0;	/* unknown length, too hard */
				else
					return (((cmd & 0xffff) + 1) / 2) + 1;
			else
				return 2;	/* indirect sequential */
		default:
			return 0;
		}
	default:
		return 0;
	}

	return 0;
}

static int validate_cmd(int cmd)
{
	int ret = do_validate_cmd(cmd);

/*	printk("validate_cmd( %x ): %d\n", cmd, ret); */

	return ret;
}

static int i915_emit_cmds(drm_device_t * dev, int __user * buffer, int dwords)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;
	RING_LOCALS;

	for (i = 0; i < dwords;) {
		int cmd, sz;

		if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], sizeof(cmd)))
			return DRM_ERR(EINVAL);

/*		printk("%d/%d ", i, dwords); */

		if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords)
			return DRM_ERR(EINVAL);

		BEGIN_LP_RING(sz);
		OUT_RING(cmd);

		while (++i, --sz) {
			if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i],
							 sizeof(cmd))) {
				return DRM_ERR(EINVAL);
			}
			OUT_RING(cmd);
		}
		ADVANCE_LP_RING();
	}

	return 0;
}

static int i915_emit_box(drm_device_t * dev,
			 drm_clip_rect_t __user * boxes,
			 int i, int DR1, int DR4)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_clip_rect_t box;
	RING_LOCALS;

	if (DRM_COPY_FROM_USER_UNCHECKED(&box, &boxes[i], sizeof(box))) {
		return EFAULT;
	}

	if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) {
		DRM_ERROR("Bad box %d,%d..%d,%d\n",
			  box.x1, box.y1, box.x2, box.y2);
		return DRM_ERR(EINVAL);
	}

	BEGIN_LP_RING(6);
	OUT_RING(GFX_OP_DRAWRECT_INFO);
	OUT_RING(DR1);
	OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
	OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
	OUT_RING(DR4);
	OUT_RING(0);
	ADVANCE_LP_RING();

	return 0;
}

static int i915_dispatch_cmdbuffer(drm_device_t * dev,
				   drm_i915_cmdbuffer_t * cmd)
{
	int nbox = cmd->num_cliprects;
	int i = 0, count, ret;

	if (cmd->sz & 0x3) {
		DRM_ERROR("alignment");
		return DRM_ERR(EINVAL);
	}

	i915_kernel_lost_context(dev);

	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			ret = i915_emit_box(dev, cmd->cliprects, i,
					    cmd->DR1, cmd->DR4);
			if (ret)
				return ret;
		}

		ret = i915_emit_cmds(dev, (int __user *)cmd->buf, cmd->sz / 4);
		if (ret)
			return ret;
	}

	return 0;
}

static int i915_dispatch_batchbuffer(drm_device_t * dev,
				     drm_i915_batchbuffer_t * batch)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_clip_rect_t __user *boxes = batch->cliprects;
	int nbox = batch->num_cliprects;
	int i = 0, count;
	RING_LOCALS;

	if ((batch->start | batch->used) & 0x7) {
		DRM_ERROR("alignment");
		return DRM_ERR(EINVAL);
	}

	i915_kernel_lost_context(dev);

	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			int ret = i915_emit_box(dev, boxes, i,
						batch->DR1, batch->DR4);
			if (ret)
				return ret;
		}

		if (dev_priv->use_mi_batchbuffer_start) {
			BEGIN_LP_RING(2);
			OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
			OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			ADVANCE_LP_RING();
		} else {
			BEGIN_LP_RING(4);
			OUT_RING(MI_BATCH_BUFFER);
			OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			OUT_RING(batch->start + batch->used - 4);
			OUT_RING(0);
			ADVANCE_LP_RING();
		}
	}

	dev_priv->sarea_priv->last_enqueue = dev_priv->counter++;

	BEGIN_LP_RING(4);
	OUT_RING(CMD_STORE_DWORD_IDX);
	OUT_RING(20);
	OUT_RING(dev_priv->counter);
	OUT_RING(0);
	ADVANCE_LP_RING();

	return 0;
}

static int i915_dispatch_flip(drm_device_t * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;

	DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n",
		  __FUNCTION__,
		  dev_priv->current_page,
		  dev_priv->sarea_priv->pf_current_page);

	i915_kernel_lost_context(dev);

	BEGIN_LP_RING(2);
	OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE);
	OUT_RING(0);
	ADVANCE_LP_RING();

	BEGIN_LP_RING(6);
	OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
	OUT_RING(0);
	if (dev_priv->current_page == 0) {
		OUT_RING(dev_priv->back_offset);
		dev_priv->current_page = 1;
	} else {
		OUT_RING(dev_priv->front_offset);
		dev_priv->current_page = 0;
	}
	OUT_RING(0);
	ADVANCE_LP_RING();

	BEGIN_LP_RING(2);
	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
	OUT_RING(0);
	ADVANCE_LP_RING();

	dev_priv->sarea_priv->last_enqueue = dev_priv->counter++;

	BEGIN_LP_RING(4);
	OUT_RING(CMD_STORE_DWORD_IDX);
	OUT_RING(20);
	OUT_RING(dev_priv->counter);
	OUT_RING(0);
	ADVANCE_LP_RING();

	dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
	return 0;
}

static int i915_quiescent(drm_device_t * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	i915_kernel_lost_context(dev);
	return i915_wait_ring(dev, dev_priv->ring.Size - 8, __FUNCTION__);
}

int i915_flush_ioctl(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;

	if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
		DRM_ERROR("i915_flush_ioctl called without lock held\n");
		return DRM_ERR(EINVAL);
	}

	return i915_quiescent(dev);
}

int i915_batchbuffer(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 *hw_status = dev_priv->hw_status_page;
	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
	    dev_priv->sarea_priv;
	drm_i915_batchbuffer_t batch;
	int ret;

	if (!dev_priv->allow_batchbuffer) {
		DRM_ERROR("Batchbuffer ioctl disabled\n");
		return DRM_ERR(EINVAL);
	}

	DRM_COPY_FROM_USER_IOCTL(batch, (drm_i915_batchbuffer_t __user *) data,
				 sizeof(batch));

	DRM_DEBUG("i915 batchbuffer, start %x used %d cliprects %d\n",
		  batch.start, batch.used, batch.num_cliprects);

	if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
		DRM_ERROR("i915_batchbuffer called without lock held\n");
		return DRM_ERR(EINVAL);
	}

	if (batch.num_cliprects &&
	    DRM_VERIFYAREA_READ(batch.cliprects,
				batch.num_cliprects * sizeof(drm_clip_rect_t)))
		return DRM_ERR(EFAULT);

	ret = i915_dispatch_batchbuffer(dev, &batch);

	sarea_priv->last_dispatch = (int)hw_status[5];
	return ret;
}

int i915_cmdbuffer(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 *hw_status = dev_priv->hw_status_page;
	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
	    dev_priv->sarea_priv;
	drm_i915_cmdbuffer_t cmdbuf;
	int ret;

	DRM_COPY_FROM_USER_IOCTL(cmdbuf, (drm_i915_cmdbuffer_t __user *) data,
				 sizeof(cmdbuf));

	DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
		  cmdbuf.buf, cmdbuf.sz, cmdbuf.num_cliprects);

	if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
		DRM_ERROR("i915_cmdbuffer called without lock held\n");
		return DRM_ERR(EINVAL);
	}

	if (cmdbuf.num_cliprects &&
	    DRM_VERIFYAREA_READ(cmdbuf.cliprects,
				cmdbuf.num_cliprects *
				sizeof(drm_clip_rect_t))) {
		DRM_ERROR("Fault accessing cliprects\n");
		return DRM_ERR(EFAULT);
	}

	ret = i915_dispatch_cmdbuffer(dev, &cmdbuf);
	if (ret) {
		DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
		return ret;
	}

	sarea_priv->last_dispatch = (int)hw_status[5];
	return 0;
}

int i915_do_cleanup_pageflip(drm_device_t * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	DRM_DEBUG("%s\n", __FUNCTION__);
	if (dev_priv->current_page != 0)
		i915_dispatch_flip(dev);
	return 0;
}

int i915_flip_bufs(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;

	DRM_DEBUG("%s\n", __FUNCTION__);

	if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
		DRM_ERROR("i915_flip_buf called without lock held\n");
		return DRM_ERR(EINVAL);
	}

	return i915_dispatch_flip(dev);
}

int i915_getparam(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_getparam_t param;
	int value;

	if (!dev_priv) {
		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
		return DRM_ERR(EINVAL);
	}

	DRM_COPY_FROM_USER_IOCTL(param, (drm_i915_getparam_t __user *) data,
				 sizeof(param));

	switch (param.param) {
	case I915_PARAM_IRQ_ACTIVE:
		value = dev->irq ? 1 : 0;
		break;
	case I915_PARAM_ALLOW_BATCHBUFFER:
		value = dev_priv->allow_batchbuffer ? 1 : 0;
		break;
	default:
		DRM_ERROR("Unknown parameter %d\n", param.param);
		return DRM_ERR(EINVAL);
	}

	if (DRM_COPY_TO_USER(param.value, &value, sizeof(int))) {
		DRM_ERROR("DRM_COPY_TO_USER failed\n");
		return DRM_ERR(EFAULT);
	}

	return 0;
}

int i915_setparam(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_setparam_t param;

	if (!dev_priv) {
		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
		return DRM_ERR(EINVAL);
	}

	DRM_COPY_FROM_USER_IOCTL(param, (drm_i915_setparam_t __user *) data,
				 sizeof(param));

	switch (param.param) {
	case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
		dev_priv->use_mi_batchbuffer_start = param.value;
		break;
	case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
		dev_priv->tex_lru_log_granularity = param.value;
		break;
	case I915_SETPARAM_ALLOW_BATCHBUFFER:
		dev_priv->allow_batchbuffer = param.value;
		break;
	default:
		DRM_ERROR("unknown parameter %d\n", param.param);
		return DRM_ERR(EINVAL);
	}

	return 0;
}

static void i915_driver_pretakedown(drm_device_t *dev)
{
	if (dev->dev_private) {
		drm_i915_private_t *dev_priv = dev->dev_private;
		i915_mem_takedown(&(dev_priv->agp_heap));
	}
	i915_dma_cleanup(dev);
}

static void i915_driver_prerelease(drm_device_t *dev, DRMFILE filp)
{
	if (dev->dev_private) {
		drm_i915_private_t *dev_priv = dev->dev_private;
		i915_mem_release(dev, filp, dev_priv->agp_heap);
	}
}

void i915_driver_register_fns(drm_device_t *dev)
{
	dev->driver_features =
	    DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR |
	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED;

	dev->fn_tbl.pretakedown = i915_driver_pretakedown;
	dev->fn_tbl.prerelease = i915_driver_prerelease;
	dev->fn_tbl.irq_preinstall = i915_driver_irq_preinstall;
	dev->fn_tbl.irq_postinstall = i915_driver_irq_postinstall;
	dev->fn_tbl.irq_uninstall = i915_driver_irq_uninstall;
	dev->fn_tbl.irq_handler = i915_driver_irq_handler;

	dev->counters += 4;
	dev->types[6] = _DRM_STAT_IRQ;
	dev->types[7] = _DRM_STAT_PRIMARY;
	dev->types[8] = _DRM_STAT_SECONDARY;
	dev->types[9] = _DRM_STAT_DMA;
}

/**
* \file xf86drm.h
* OS-independent header for DRM user-level library interface.
*
* \author Rickard E. (Rik) Faith <faith@valinux.com>
*/
/*
* Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
*/
/* $XFree86: xc/programs/Xserver/hw/xfree86/os-support/xf86drm.h,v 1.26 2003/08/16 19:26:37 dawes Exp $ */
#ifndef _XF86DRM_H_
#define _XF86DRM_H_
#include <drm.h>
/* Defaults, if nothing set in xf86config */
#define DRM_DEV_UID 0
#define DRM_DEV_GID 0
/* Default /dev/dri directory permissions 0755 */
#define DRM_DEV_DIRMODE \
(S_IRUSR|S_IWUSR|S_IXUSR|S_IRGRP|S_IXGRP|S_IROTH|S_IXOTH)
#define DRM_DEV_MODE (S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP)
#define DRM_DIR_NAME "/dev/dri"
#define DRM_DEV_NAME "%s/card%d"
#define DRM_PROC_NAME "/proc/dri/" /* For backward Linux compatibility */
#define DRM_ERR_NO_DEVICE (-1001)
#define DRM_ERR_NO_ACCESS (-1002)
#define DRM_ERR_NOT_ROOT (-1003)
#define DRM_ERR_INVALID (-1004)
#define DRM_ERR_NO_FD (-1005)
#define DRM_AGP_NO_HANDLE 0
typedef unsigned int drmSize, *drmSizePtr; /**< For mapped regions */
typedef void *drmAddress, **drmAddressPtr; /**< For mapped regions */
/**
* Driver version information.
*
* \sa drmGetVersion() and drmSetVersion().
*/
typedef struct _drmVersion {
int version_major; /**< Major version */
int version_minor; /**< Minor version */
int version_patchlevel; /**< Patch level */
int name_len; /**< Length of name buffer */
char *name; /**< Name of driver */
int date_len; /**< Length of date buffer */
char *date; /**< User-space buffer to hold date */
int desc_len; /**< Length of desc buffer */
char *desc; /**< User-space buffer to hold desc */
} drmVersion, *drmVersionPtr;
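/**
 * Example: querying and printing the driver version.
 *
 * A minimal sketch, assuming \c fd is a file descriptor already obtained
 * from drmOpen(); error handling is reduced to a NULL check.
 *
 * \code
 * #include <stdio.h>
 * #include "xf86drm.h"
 *
 * static void print_driver_version(int fd)
 * {
 *     drmVersionPtr ver = drmGetVersion(fd);  // kernel driver version
 *     if (!ver)
 *         return;
 *     printf("%s %d.%d.%d (%s)\n", ver->name, ver->version_major,
 *            ver->version_minor, ver->version_patchlevel, ver->date);
 *     drmFreeVersion(ver);                    // caller must free the result
 * }
 * \endcode
 */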
typedef struct _drmStats {
unsigned long count; /**< Number of data */
struct {
unsigned long value; /**< Value from kernel */
const char *long_format; /**< Suggested format for long_name */
const char *long_name; /**< Long name for value */
const char *rate_format; /**< Suggested format for rate_name */
const char *rate_name; /**< Short name for value per second */
int isvalue; /**< True if value (vs. counter) */
const char *mult_names; /**< Multiplier names (e.g., "KGM") */
int mult; /**< Multiplier value (e.g., 1024) */
int verbose; /**< Suggest only in verbose output */
} data[15];
} drmStatsT;
/* All of these enums *MUST* match with the
kernel implementation -- so do *NOT*
change them! (The drmlib implementation
will just copy the flags instead of
translating them.) */
typedef enum {
DRM_FRAME_BUFFER = 0, /**< WC, no caching, no core dump */
DRM_REGISTERS = 1, /**< no caching, no core dump */
DRM_SHM = 2, /**< shared, cached */
DRM_AGP = 3, /**< AGP/GART */
DRM_SCATTER_GATHER = 4, /**< PCI scatter/gather */
DRM_CONSISTENT = 5 /**< PCI consistent */
} drmMapType;
typedef enum {
DRM_RESTRICTED = 0x0001, /**< Cannot be mapped to client-virtual */
DRM_READ_ONLY = 0x0002, /**< Read-only in client-virtual */
DRM_LOCKED = 0x0004, /**< Physical pages locked */
DRM_KERNEL = 0x0008, /**< Kernel requires access */
DRM_WRITE_COMBINING = 0x0010, /**< Use write-combining, if available */
DRM_CONTAINS_LOCK = 0x0020, /**< SHM page that contains lock */
DRM_REMOVABLE = 0x0040 /**< Removable mapping */
} drmMapFlags;
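/**
 * Example: creating and mapping a shared-memory region.
 *
 * A sketch of the root/X-server path only: it creates an SHM map flagged as
 * containing the hardware lock and maps it into this process.  The zero
 * offset and the minimal error handling are illustrative, not prescriptive.
 *
 * \code
 * static drmAddress map_shared_region(int fd, drmSize size)
 * {
 *     drm_handle_t handle;
 *     drmAddress addr;
 *
 *     // DRM_CONTAINS_LOCK marks the SHM page that holds the hardware lock
 *     if (drmAddMap(fd, 0, size, DRM_SHM, DRM_CONTAINS_LOCK, &handle) < 0)
 *         return NULL;
 *     if (drmMap(fd, handle, size, &addr) < 0)
 *         return NULL;
 *     return addr;
 * }
 * \endcode
 */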
/**
* \warning These values *MUST* match drm.h
*/
typedef enum {
/** \name Flags for DMA buffer dispatch */
/*@{*/
DRM_DMA_BLOCK = 0x01, /**<
* Block until buffer dispatched.
*
* \note the buffer may not yet have been
* processed by the hardware -- getting a
* hardware lock with the hardware quiescent
* will ensure that the buffer has been
* processed.
*/
DRM_DMA_WHILE_LOCKED = 0x02, /**< Dispatch while lock held */
DRM_DMA_PRIORITY = 0x04, /**< High priority dispatch */
/*@}*/
/** \name Flags for DMA buffer request */
/*@{*/
DRM_DMA_WAIT = 0x10, /**< Wait for free buffers */
DRM_DMA_SMALLER_OK = 0x20, /**< Smaller-than-requested buffers OK */
DRM_DMA_LARGER_OK = 0x40 /**< Larger-than-requested buffers OK */
/*@}*/
} drmDMAFlags;
typedef enum {
DRM_PAGE_ALIGN = 0x01,
DRM_AGP_BUFFER = 0x02,
DRM_SG_BUFFER = 0x04,
DRM_FB_BUFFER = 0x08
} drmBufDescFlags;
typedef enum {
DRM_LOCK_READY = 0x01, /**< Wait until hardware is ready for DMA */
DRM_LOCK_QUIESCENT = 0x02, /**< Wait until hardware quiescent */
DRM_LOCK_FLUSH = 0x04, /**< Flush this context's DMA queue first */
DRM_LOCK_FLUSH_ALL = 0x08, /**< Flush all DMA queues first */
/* These *HALT* flags aren't supported yet
-- they will be used to support the
full-screen DGA-like mode. */
DRM_HALT_ALL_QUEUES = 0x10, /**< Halt all current and future queues */
DRM_HALT_CUR_QUEUES = 0x20 /**< Halt all current queues */
} drmLockFlags;
typedef enum {
DRM_CONTEXT_PRESERVED = 0x01, /**< This context is preserved and
never swapped. */
DRM_CONTEXT_2DONLY = 0x02 /**< This context is for 2D rendering only. */
} drm_context_tFlags, *drm_context_tFlagsPtr;
typedef struct _drmBufDesc {
int count; /**< Number of buffers of this size */
int size; /**< Size in bytes */
int low_mark; /**< Low water mark */
int high_mark; /**< High water mark */
} drmBufDesc, *drmBufDescPtr;
typedef struct _drmBufInfo {
int count; /**< Number of buffers described in list */
drmBufDescPtr list; /**< List of buffer descriptions */
} drmBufInfo, *drmBufInfoPtr;
typedef struct _drmBuf {
int idx; /**< Index into the master buffer list */
int total; /**< Buffer size */
int used; /**< Amount of buffer in use (for DMA) */
drmAddress address; /**< Address */
} drmBuf, *drmBufPtr;
/**
* Buffer mapping information.
*
* Used by drmMapBufs() and drmUnmapBufs() to store information about the
* mapped buffers.
*/
typedef struct _drmBufMap {
int count; /**< Number of buffers mapped */
drmBufPtr list; /**< Buffers */
} drmBufMap, *drmBufMapPtr;
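/**
 * Example: mapping the DMA buffer pool.
 *
 * A minimal sketch of walking the buffers returned by drmMapBufs(); \c fd is
 * assumed to be an authenticated descriptor and the pool is assumed to have
 * been created already (e.g. by the X server via drmAddBufs()).
 *
 * \code
 * static int total_pool_bytes(int fd)
 * {
 *     drmBufMapPtr bufs = drmMapBufs(fd);
 *     int i, total = 0;
 *
 *     if (!bufs)
 *         return -1;
 *     for (i = 0; i < bufs->count; i++)
 *         total += bufs->list[i].total;   // size of each mapped buffer
 *     drmUnmapBufs(bufs);
 *     return total;
 * }
 * \endcode
 */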
typedef struct _drmLock {
volatile unsigned int lock;
char padding[60];
/* This is big enough for most current (and future?) architectures:
DEC Alpha: 32 bytes
Intel Merced: ?
Intel P5/PPro/PII/PIII: 32 bytes
Intel StrongARM: 32 bytes
Intel i386/i486: 16 bytes
MIPS: 32 bytes (?)
Motorola 68k: 16 bytes
Motorola PowerPC: 32 bytes
Sun SPARC: 32 bytes
*/
} drmLock, *drmLockPtr;
/**
* Indices here refer to the offset into
* list in drmBufInfo
*/
typedef struct _drmDMAReq {
drm_context_t context; /**< Context handle */
int send_count; /**< Number of buffers to send */
int *send_list; /**< List of handles to buffers */
int *send_sizes; /**< Lengths of data to send, in bytes */
drmDMAFlags flags; /**< Flags */
int request_count; /**< Number of buffers requested */
int request_size; /**< Desired size of buffers requested */
int *request_list; /**< Buffer information */
int *request_sizes; /**< Minimum acceptable sizes */
int granted_count; /**< Number of buffers granted at this size */
} drmDMAReq, *drmDMAReqPtr;
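/**
 * Example: requesting a single DMA buffer.
 *
 * A sketch only: the context handle and minimum size are caller-supplied,
 * and the single-element index/size arrays stand in for whatever
 * bookkeeping a real client keeps.
 *
 * \code
 * #include <string.h>
 *
 * static int request_one_buffer(int fd, drm_context_t ctx, int size)
 * {
 *     drmDMAReq dma;
 *     int idx, granted_size;
 *
 *     memset(&dma, 0, sizeof(dma));
 *     dma.context       = ctx;
 *     dma.send_count    = 0;             // nothing to dispatch, request only
 *     dma.flags         = DRM_DMA_WAIT;  // block until a buffer is free
 *     dma.request_count = 1;
 *     dma.request_size  = size;
 *     dma.request_list  = &idx;          // filled with the granted buffer index
 *     dma.request_sizes = &granted_size; // filled with the granted buffer size
 *
 *     if (drmDMA(fd, &dma) != 0 || dma.granted_count < 1)
 *         return -1;
 *     return idx;                        // index into the drmBufMap list
 * }
 * \endcode
 */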
typedef struct _drmRegion {
drm_handle_t handle;
unsigned int offset;
drmSize size;
drmAddress map;
} drmRegion, *drmRegionPtr;
typedef struct _drmTextureRegion {
unsigned char next;
unsigned char prev;
unsigned char in_use;
unsigned char padding; /**< Explicitly pad this out */
unsigned int age;
} drmTextureRegion, *drmTextureRegionPtr;
typedef enum {
DRM_VBLANK_ABSOLUTE = 0x0, /**< Wait for specific vblank sequence number */
DRM_VBLANK_RELATIVE = 0x1, /**< Wait for given number of vblanks */
DRM_VBLANK_SIGNAL = 0x40000000 /* Send signal instead of blocking */
} drmVBlankSeqType;
typedef struct _drmVBlankReq {
drmVBlankSeqType type;
unsigned int sequence;
unsigned long signal;
} drmVBlankReq, *drmVBlankReqPtr;
typedef struct _drmVBlankReply {
drmVBlankSeqType type;
unsigned int sequence;
long tval_sec;
long tval_usec;
} drmVBlankReply, *drmVBlankReplyPtr;
typedef union _drmVBlank {
drmVBlankReq request;
drmVBlankReply reply;
} drmVBlank, *drmVBlankPtr;
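/**
 * Example: waiting for the next vertical blank.
 *
 * A sketch only; it assumes the driver has an IRQ handler installed and
 * ignores the interrupted-wait retries a production client would need.
 *
 * \code
 * static int wait_next_vblank(int fd)
 * {
 *     drmVBlank vbl;
 *
 *     vbl.request.type     = DRM_VBLANK_RELATIVE;  // N vblanks from now
 *     vbl.request.sequence = 1;
 *     vbl.request.signal   = 0;
 *
 *     if (drmWaitVBlank(fd, &vbl) != 0)
 *         return -1;
 *     return (int)vbl.reply.sequence;              // current vblank count
 * }
 * \endcode
 */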
typedef struct _drmSetVersion {
int drm_di_major;
int drm_di_minor;
int drm_dd_major;
int drm_dd_minor;
} drmSetVersion, *drmSetVersionPtr;
#define __drm_dummy_lock(lock) (*(__volatile__ unsigned int *)lock)
#define DRM_LOCK_HELD 0x80000000U /**< Hardware lock is held */
#define DRM_LOCK_CONT 0x40000000U /**< Hardware lock is contended */
#if defined(__GNUC__) && (__GNUC__ >= 2)
# if defined(__i386) || defined(__AMD64__) || defined(__x86_64__) || defined(__amd64__)
/* Reflect changes here to drmP.h */
#define DRM_CAS(lock,old,new,__ret) \
do { \
int __dummy; /* Can't mark eax as clobbered */ \
__asm__ __volatile__( \
"lock ; cmpxchg %4,%1\n\t" \
"setnz %0" \
: "=d" (__ret), \
"=m" (__drm_dummy_lock(lock)), \
"=a" (__dummy) \
: "2" (old), \
"r" (new)); \
} while (0)
#elif defined(__alpha__)
#define DRM_CAS(lock, old, new, ret) \
do { \
int old32; \
int cur32; \
__asm__ __volatile__( \
" mb\n" \
" zap %4, 0xF0, %0\n" \
" ldl_l %1, %2\n" \
" zap %1, 0xF0, %1\n" \
" cmpeq %0, %1, %1\n" \
" beq %1, 1f\n" \
" bis %5, %5, %1\n" \
" stl_c %1, %2\n" \
"1: xor %1, 1, %1\n" \
" stl %1, %3" \
: "=r" (old32), \
"=&r" (cur32), \
"=m" (__drm_dummy_lock(lock)),\
"=m" (ret) \
: "r" (old), \
"r" (new)); \
} while(0)
#elif defined(__sparc__)
#define DRM_CAS(lock,old,new,__ret) \
do { register unsigned int __old __asm("o0"); \
register unsigned int __new __asm("o1"); \
register volatile unsigned int *__lock __asm("o2"); \
__old = old; \
__new = new; \
__lock = (volatile unsigned int *)lock; \
__asm__ __volatile__( \
/*"cas [%2], %3, %0"*/ \
".word 0xd3e29008\n\t" \
/*"membar #StoreStore | #StoreLoad"*/ \
".word 0x8143e00a" \
: "=&r" (__new) \
: "0" (__new), \
"r" (__lock), \
"r" (__old) \
: "memory"); \
__ret = (__new != __old); \
} while(0)
#elif defined(__ia64__)
#ifdef __INTEL_COMPILER
/* this currently generates bad code (missing stop bits)... */
#include <ia64intrin.h>
#define DRM_CAS(lock,old,new,__ret) \
do { \
unsigned long __result, __old = (old) & 0xffffffff; \
__mf(); \
__result = _InterlockedCompareExchange_acq(&__drm_dummy_lock(lock), (new), __old);\
__ret = (__result) != (__old); \
/* __ret = (__sync_val_compare_and_swap(&__drm_dummy_lock(lock), \
(old), (new)) \
!= (old)); */\
} while (0)
#else
#define DRM_CAS(lock,old,new,__ret) \
do { \
unsigned int __result, __old = (old); \
__asm__ __volatile__( \
"mf\n" \
"mov ar.ccv=%2\n" \
";;\n" \
"cmpxchg4.acq %0=%1,%3,ar.ccv" \
: "=r" (__result), "=m" (__drm_dummy_lock(lock)) \
: "r" ((unsigned long)__old), "r" (new) \
: "memory"); \
__ret = (__result) != (__old); \
} while (0)
#endif
#elif defined(__powerpc__)
#define DRM_CAS(lock,old,new,__ret) \
do { \
__asm__ __volatile__( \
"sync;" \
"0: lwarx %0,0,%1;" \
" xor. %0,%3,%0;" \
" bne 1f;" \
" stwcx. %2,0,%1;" \
" bne- 0b;" \
"1: " \
"sync;" \
: "=&r"(__ret) \
: "r"(lock), "r"(new), "r"(old) \
: "cr0", "memory"); \
} while (0)
#endif /* architecture */
#endif /* __GNUC__ >= 2 */
#ifndef DRM_CAS
#define DRM_CAS(lock,old,new,ret) do { ret=1; } while (0) /* FAST LOCK FAILS */
#endif
#if defined(__alpha__) || defined(__powerpc__)
#define DRM_CAS_RESULT(_result) int _result
#else
#define DRM_CAS_RESULT(_result) char _result
#endif
#define DRM_LIGHT_LOCK(fd,lock,context) \
do { \
DRM_CAS_RESULT(__ret); \
DRM_CAS(lock,context,DRM_LOCK_HELD|context,__ret); \
if (__ret) drmGetLock(fd,context,0); \
} while(0)
/* This one counts fast locks -- for
benchmarking only. */
#define DRM_LIGHT_LOCK_COUNT(fd,lock,context,count) \
do { \
DRM_CAS_RESULT(__ret); \
DRM_CAS(lock,context,DRM_LOCK_HELD|context,__ret); \
if (__ret) drmGetLock(fd,context,0); \
else ++count; \
} while(0)
#define DRM_LOCK(fd,lock,context,flags) \
do { \
if (flags) drmGetLock(fd,context,flags); \
else DRM_LIGHT_LOCK(fd,lock,context); \
} while(0)
#define DRM_UNLOCK(fd,lock,context) \
do { \
DRM_CAS_RESULT(__ret); \
DRM_CAS(lock,DRM_LOCK_HELD|context,context,__ret); \
if (__ret) drmUnlock(fd,context); \
} while(0)
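/**
 * Example: taking and dropping the hardware lock.
 *
 * A minimal sketch, assuming \c lock points at the drmLock embedded in the
 * shared SAREA mapping and \c ctx is a context handle obtained from
 * drmCreateContext().  With no flags, DRM_LOCK tries the inline
 * compare-and-swap fast path and only falls back to the drmGetLock() ioctl
 * on contention.
 *
 * \code
 * static void with_hw_lock(int fd, drmLockPtr lock, drm_context_t ctx)
 * {
 *     DRM_LOCK(fd, lock, ctx, 0);   // 0: no DRM_LOCK_* flags
 *     // ... touch the hardware or DMA queues here ...
 *     DRM_UNLOCK(fd, lock, ctx);
 * }
 * \endcode
 */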
/* Simple spin locks */
#define DRM_SPINLOCK(spin,val) \
do { \
DRM_CAS_RESULT(__ret); \
do { \
DRM_CAS(spin,0,val,__ret); \
if (__ret) while ((spin)->lock); \
} while (__ret); \
} while(0)
#define DRM_SPINLOCK_TAKE(spin,val) \
do { \
DRM_CAS_RESULT(__ret); \
int cur; \
do { \
cur = (*spin).lock; \
DRM_CAS(spin,cur,val,__ret); \
} while (__ret); \
} while(0)
#define DRM_SPINLOCK_COUNT(spin,val,count,__ret) \
do { \
int __i; \
__ret = 1; \
for (__i = 0; __ret && __i < count; __i++) { \
DRM_CAS(spin,0,val,__ret); \
if (__ret) for (;__i < count && (spin)->lock; __i++); \
} \
} while(0)
#define DRM_SPINUNLOCK(spin,val) \
do { \
DRM_CAS_RESULT(__ret); \
if ((*spin).lock == val) { /* else server stole lock */ \
do { \
DRM_CAS(spin,val,0,__ret); \
} while (__ret); \
} \
} while(0)
/* General user-level programmer's API: unprivileged */
extern int drmAvailable(void);
extern int drmOpen(const char *name, const char *busid);
extern int drmClose(int fd);
extern drmVersionPtr drmGetVersion(int fd);
extern drmVersionPtr drmGetLibVersion(int fd);
extern void drmFreeVersion(drmVersionPtr);
extern int drmGetMagic(int fd, drm_magic_t * magic);
extern char *drmGetBusid(int fd);
extern int drmGetInterruptFromBusID(int fd, int busnum, int devnum,
int funcnum);
extern int drmGetMap(int fd, int idx, drm_handle_t *offset,
drmSize *size, drmMapType *type,
drmMapFlags *flags, drm_handle_t *handle,
int *mtrr);
extern int drmGetClient(int fd, int idx, int *auth, int *pid,
int *uid, unsigned long *magic,
unsigned long *iocs);
extern int drmGetStats(int fd, drmStatsT *stats);
extern int drmSetInterfaceVersion(int fd, drmSetVersion *version);
extern int drmCommandNone(int fd, unsigned long drmCommandIndex);
extern int drmCommandRead(int fd, unsigned long drmCommandIndex,
void *data, unsigned long size);
extern int drmCommandWrite(int fd, unsigned long drmCommandIndex,
void *data, unsigned long size);
extern int drmCommandWriteRead(int fd, unsigned long drmCommandIndex,
void *data, unsigned long size);
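/**
 * Example: issuing a driver-private command.
 *
 * A sketch only: \c my_getparam_t is a hypothetical payload layout (loosely
 * modelled on a getparam-style ioctl) and \c cmd_index stands for whatever
 * command number the driver's own header defines.
 *
 * \code
 * typedef struct {
 *     int param;       // hypothetical: which parameter to query
 *     int *value;      // hypothetical: where the kernel writes the answer
 * } my_getparam_t;
 *
 * static int query_param(int fd, unsigned long cmd_index, int param, int *value)
 * {
 *     my_getparam_t req;
 *
 *     req.param = param;
 *     req.value = value;
 *     return drmCommandWriteRead(fd, cmd_index, &req, sizeof(req));
 * }
 * \endcode
 */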
/* General user-level programmer's API: X server (root) only */
extern void drmFreeBusid(const char *busid);
extern int drmSetBusid(int fd, const char *busid);
extern int drmAuthMagic(int fd, drm_magic_t magic);
extern int drmAddMap(int fd,
drm_handle_t offset,
drmSize size,
drmMapType type,
drmMapFlags flags,
drm_handle_t * handle);
extern int drmRmMap(int fd, drm_handle_t handle);
extern int drmAddContextPrivateMapping(int fd, drm_context_t ctx_id,
drm_handle_t handle);
extern int drmAddBufs(int fd, int count, int size,
drmBufDescFlags flags,
int agp_offset);
extern int drmMarkBufs(int fd, double low, double high);
extern int drmCreateContext(int fd, drm_context_t * handle);
extern int drmSetContextFlags(int fd, drm_context_t context,
drm_context_tFlags flags);
extern int drmGetContextFlags(int fd, drm_context_t context,
drm_context_tFlagsPtr flags);
extern int drmAddContextTag(int fd, drm_context_t context, void *tag);
extern int drmDelContextTag(int fd, drm_context_t context);
extern void *drmGetContextTag(int fd, drm_context_t context);
extern drm_context_t * drmGetReservedContextList(int fd, int *count);
extern void drmFreeReservedContextList(drm_context_t *);
extern int drmSwitchToContext(int fd, drm_context_t context);
extern int drmDestroyContext(int fd, drm_context_t handle);
extern int drmCreateDrawable(int fd, drm_drawable_t * handle);
extern int drmDestroyDrawable(int fd, drm_drawable_t handle);
extern int drmCtlInstHandler(int fd, int irq);
extern int drmCtlUninstHandler(int fd);
/* General user-level programmer's API: authenticated client and/or X */
extern int drmMap(int fd,
drm_handle_t handle,
drmSize size,
drmAddressPtr address);
extern int drmUnmap(drmAddress address, drmSize size);
extern drmBufInfoPtr drmGetBufInfo(int fd);
extern drmBufMapPtr drmMapBufs(int fd);
extern int drmUnmapBufs(drmBufMapPtr bufs);
extern int drmDMA(int fd, drmDMAReqPtr request);
extern int drmFreeBufs(int fd, int count, int *list);
extern int drmGetLock(int fd,
drm_context_t context,
drmLockFlags flags);
extern int drmUnlock(int fd, drm_context_t context);
extern int drmFinish(int fd, int context, drmLockFlags flags);
extern int drmGetContextPrivateMapping(int fd, drm_context_t ctx_id,
drm_handle_t * handle);
/* AGP/GART support: X server (root) only */
extern int drmAgpAcquire(int fd);
extern int drmAgpRelease(int fd);
extern int drmAgpEnable(int fd, unsigned long mode);
extern int drmAgpAlloc(int fd, unsigned long size,
unsigned long type, unsigned long *address,
drm_handle_t *handle);
extern int drmAgpFree(int fd, drm_handle_t handle);
extern int drmAgpBind(int fd, drm_handle_t handle,
unsigned long offset);
extern int drmAgpUnbind(int fd, drm_handle_t handle);
/* AGP/GART info: authenticated client and/or X */
extern int drmAgpVersionMajor(int fd);
extern int drmAgpVersionMinor(int fd);
extern unsigned long drmAgpGetMode(int fd);
extern unsigned long drmAgpBase(int fd); /* Physical location */
extern unsigned long drmAgpSize(int fd); /* Bytes */
extern unsigned long drmAgpMemoryUsed(int fd);
extern unsigned long drmAgpMemoryAvail(int fd);
extern unsigned int drmAgpVendorId(int fd);
extern unsigned int drmAgpDeviceId(int fd);
/* PCI scatter/gather support: X server (root) only */
extern int drmScatterGatherAlloc(int fd, unsigned long size,
drm_handle_t *handle);
extern int drmScatterGatherFree(int fd, drm_handle_t handle);
extern int drmWaitVBlank(int fd, drmVBlankPtr vbl);
/* Support routines */
extern int drmError(int err, const char *label);
extern void *drmMalloc(int size);
extern void drmFree(void *pt);
/* Hash table routines */
extern void *drmHashCreate(void);
extern int drmHashDestroy(void *t);
extern int drmHashLookup(void *t, unsigned long key, void **value);
extern int drmHashInsert(void *t, unsigned long key, void *value);
extern int drmHashDelete(void *t, unsigned long key);
extern int drmHashFirst(void *t, unsigned long *key, void **value);
extern int drmHashNext(void *t, unsigned long *key, void **value);
/* PRNG routines */
extern void *drmRandomCreate(unsigned long seed);
extern int drmRandomDestroy(void *state);
extern unsigned long drmRandom(void *state);
extern double drmRandomDouble(void *state);
/* Skip list routines */
extern void *drmSLCreate(void);
extern int drmSLDestroy(void *l);
extern int drmSLLookup(void *l, unsigned long key, void **value);
extern int drmSLInsert(void *l, unsigned long key, void *value);
extern int drmSLDelete(void *l, unsigned long key);
extern int drmSLNext(void *l, unsigned long *key, void **value);
extern int drmSLFirst(void *l, unsigned long *key, void **value);
extern void drmSLDump(void *l);
extern int drmSLLookupNeighbors(void *l, unsigned long key,
unsigned long *prev_key, void **prev_value,
unsigned long *next_key, void **next_value);
#endif