path: root/bsd-core/drm_dma.c
blob: c2586fa064c52169f95991989309b0ada4bd62aa
/*-
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *    Gareth Hughes <gareth@valinux.com>
 *
 */

/** @file drm_dma.c
 * Support code for DMA buffer management.
 *
 * The implementation used to be significantly more complicated, but the
 * complexity has been moved into the drivers as different buffer management
 * schemes evolved.
 */

#include "drmP.h"

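/*
 * Allocate and zero the per-device DMA bookkeeping structure and
 * initialize the dma_lock spinlock.  Returns ENOMEM if the allocation
 * fails, 0 otherwise.
 */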
int drm_dma_setup(struct drm_device *dev)
{

	dev->dma = malloc(sizeof(*dev->dma), M_DRM, M_NOWAIT | M_ZERO);
	if (dev->dma == NULL)
		return ENOMEM;

	DRM_SPININIT(&dev->dma_lock, "drmdma");

	return 0;
}
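
/*
 * Illustrative sketch (not part of the original file): drm_dma_setup() is
 * assumed to be called once from a driver's load/attach path and paired
 * with drm_dma_takedown() on unload.  The hypothetical example_driver_load()
 * below only shows the error-handling convention (a plain errno value).
 */
#if 0
static int
example_driver_load(struct drm_device *dev)
{
	int retcode;

	/* Set up the DMA bookkeeping before any buffers are created. */
	retcode = drm_dma_setup(dev);
	if (retcode != 0)
		return retcode;		/* ENOMEM from the allocation above */

	/* ... driver-specific initialization would follow ... */
	return 0;
}
#endif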

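/*
 * Free everything the DMA buffer code allocated: the per-order segment
 * and buffer lists, each buffer's private data, the global buffer and
 * page lists, and finally the bookkeeping structure and its spinlock.
 */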
void drm_dma_takedown(struct drm_device *dev)
{
	drm_device_dma_t  *dma = dev->dma;
	int		  i, j;

	if (dma == NULL)
		return;

	/* Clear dma buffers */
	for (i = 0; i <= DRM_MAX_ORDER; i++) {
		if (dma->bufs[i].seg_count) {
			DRM_DEBUG("order %d: buf_count = %d,"
				  " seg_count = %d\n",
				  i,
				  dma->bufs[i].buf_count,
				  dma->bufs[i].seg_count);
			for (j = 0; j < dma->bufs[i].seg_count; j++) {
				drm_pci_free(dev, dma->bufs[i].seglist[j]);
			}
			free(dma->bufs[i].seglist, M_DRM);
		}

		if (dma->bufs[i].buf_count) {
			for (j = 0; j < dma->bufs[i].buf_count; j++) {
				free(dma->bufs[i].buflist[j].dev_private,
				    M_DRM);
			}
			free(dma->bufs[i].buflist, M_DRM);
		}
	}

	free(dma->buflist, M_DRM);
	free(dma->pagelist, M_DRM);
	free(dev->dma, M_DRM);
	dev->dma = NULL;
	DRM_SPINUNINIT(&dev->dma_lock);
}


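/*
 * Return a buffer to the idle state: clear its pending flag, drop its
 * owner and reset its used byte count.
 */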
void drm_free_buffer(struct drm_device *dev, drm_buf_t *buf)
{
	if (!buf) return;

	buf->pending  = 0;
	buf->file_priv = NULL;
	buf->used     = 0;
}

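/*
 * Reclaim every buffer owned by the given file handle: idle buffers
 * (DRM_LIST_NONE) are freed immediately, buffers still waiting
 * (DRM_LIST_WAIT) are flagged DRM_LIST_RECLAIM so they can be recovered
 * later, and buffers already handed to the hardware are left untouched.
 */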
void drm_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv)
{
	drm_device_dma_t *dma = dev->dma;
	int		 i;

	if (!dma) return;
	for (i = 0; i < dma->buf_count; i++) {
		if (dma->buflist[i]->file_priv == file_priv) {
			switch (dma->buflist[i]->list) {
			case DRM_LIST_NONE:
				drm_free_buffer(dev, dma->buflist[i]);
				break;
			case DRM_LIST_WAIT:
				dma->buflist[i]->list = DRM_LIST_RECLAIM;
				break;
			default:
				/* Buffer already on hardware. */
				break;
			}
		}
	}
}

/* Call into the driver-specific DMA handler */
int drm_dma(struct drm_device *dev, void *data, struct drm_file *file_priv)
{

	if (dev->driver.dma_ioctl) {
		/* shared code returns -errno */
		return -dev->driver.dma_ioctl(dev, data, file_priv);
	} else {
		DRM_DEBUG("DMA ioctl on driver with no dma handler\n");
		return EINVAL;
	}
}
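
/*
 * Illustrative sketch (not part of the original file): a driver that
 * supports the DMA ioctl is assumed to install a dma_ioctl handler in its
 * driver description, following the shared-code convention of returning
 * -errno, which drm_dma() above flips back to a positive errno for the
 * caller.  The "exampledrv" names below are hypothetical.
 */
#if 0
static int
exampledrv_dma_ioctl(struct drm_device *dev, void *data,
    struct drm_file *file_priv)
{
	/* Validate the request and queue buffers to the hardware here. */
	return 0;		/* or -EINVAL, -EBUSY, ... on failure */
}

static void
exampledrv_configure(struct drm_device *dev)
{
	dev->driver.dma_ioctl = exampledrv_dma_ioctl;
}
#endif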

/*
 * Carve the range [start, start + size) out of the free block 'p' and
 * mark the resulting block as owned by 'filp'.
 */
static struct mem_block *split_block(struct mem_block *p, int start, int size,
				     DRMFILE filp)
{
	/* Maybe cut off the start of an existing block */
	if (start > p->start) {
		struct mem_block *newblock =
		    DRM(alloc)(sizeof(*newblock), DRM_MEM_BUFLISTS);
		if (!newblock)
			goto out;
		newblock->start = start;
		newblock->size = p->size - (start - p->start);
		newblock->filp = NULL;
		newblock->next = p->next;
		newblock->prev = p;
		p->next->prev = newblock;
		p->next = newblock;
		p->size -= newblock->size;
		p = newblock;
	}

	/* Maybe cut off the end of an existing block */
	if (size < p->size) {
		struct mem_block *newblock =
		    DRM(alloc)(sizeof(*newblock), DRM_MEM_BUFLISTS);
		if (!newblock)
			goto out;
		newblock->start = start + size;
		newblock->size = p->size - size;
		newblock->filp = NULL;
		newblock->next = p->next;
		newblock->prev = p;
		p->next->prev = newblock;
		p->next = newblock;
		p->size = size;
	}

      out:
	/* Our block is in the middle */
	p->filp = filp;
	return p;
}

/*
 * First-fit scan of the circular block list; 'align2' is the log2 of the
 * required alignment.
 */
static struct mem_block *alloc_block(struct mem_block *heap, int size,
				     int align2, DRMFILE filp)
{
	struct mem_block *p;
	int mask = (1 << align2) - 1;

	for (p = heap->next; p != heap; p = p->next) {
		int start = (p->start + mask) & ~mask;
		if (p->filp == NULL && start + size <= p->start + p->size)
			return split_block(p, start, size, filp);
	}

	return NULL;
}

static struct mem_block *find_block(struct mem_block *heap, int start)
{
	struct mem_block *p;

	for (p = heap->next; p != heap; p = p->next)
		if (p->start == start)
			return p;

	return NULL;
}

static void free_block(struct mem_block *p)
{
	p->filp = NULL;

	/* Assumes a single contiguous range.  Needs a special filp in
	 * 'heap' to stop it being subsumed.
	 */
	if (p->next->filp == NULL) {
		struct mem_block *q = p->next;
		p->size += q->size;
		p->next = q->next;
		p->next->prev = p;
		DRM(free)(q, sizeof(*q), DRM_MEM_BUFLISTS);
	}

	if (p->prev->filp == NULL) {
		struct mem_block *q = p->prev;
		q->size += p->size;
		q->next = p->next;
		q->next->prev = q;
		DRM(free)(p, sizeof(*q), DRM_MEM_BUFLISTS);
	}
}

/* Initialize.  How to check for an uninitialized heap?
 */
static int init_heap(struct mem_block **heap, int start, int size)
{
	struct mem_block *blocks =
	    DRM(alloc)(sizeof(*blocks), DRM_MEM_BUFLISTS);

	if (!blocks)
		return -ENOMEM;

	*heap = DRM(alloc)(sizeof(**heap), DRM_MEM_BUFLISTS);
	if (!*heap) {
		DRM(free)(blocks, sizeof(*blocks), DRM_MEM_BUFLISTS);
		return -ENOMEM;
	}

	blocks->start = start;
	blocks->size = size;
	blocks->filp = NULL;
	blocks->next = blocks->prev = *heap;

	memset(*heap, 0, sizeof(**heap));
	(*heap)->filp = (DRMFILE) - 1;
	(*heap)->next = (*heap)->prev = blocks;
	return 0;
}

/* Free all blocks associated with the releasing file.
 */
void i915_mem_release(drm_device_t * dev, DRMFILE filp, struct mem_block *heap)
{
	struct mem_block *p;

	if (!heap || !heap->next)
		return;

	for (p = heap->next; p != heap; p = p->next) {
		if (p->filp == filp) {
			p->filp = NULL;
			mark_block(dev, p, 0);
		}
	}

	/* Assumes a single contiguous range.  Needs a special filp in
	 * 'heap' to stop it being subsumed.
	 */
	for (p = heap->next; p != heap; p = p->next) {
		while (p->filp == NULL && p->next->filp == NULL) {
			struct mem_block *q = p->next;
			p->size += q->size;
			p->next = q->next;
			p->next->prev = p;
			DRM(free)(q, sizeof(*q), DRM_MEM_BUFLISTS);
		}
	}
}

/* Shutdown.
 */
void i915_mem_takedown(struct mem_block **heap)
{
	struct mem_block *p;

	if (!*heap)
		return;

	for (p = (*heap)->next; p != *heap;) {
		struct mem_block *q = p;
		p = p->next;
		DRM(free)(q, sizeof(*q), DRM_MEM_BUFLISTS);
	}

	DRM(free)(*heap, sizeof(**heap), DRM_MEM_BUFLISTS);
	*heap = NULL;
}

static struct mem_block **get_heap(drm_i915_private_t * dev_priv, int region)
{
	switch (region) {
	case I915_MEM_REGION_AGP:
		return &dev_priv->agp_heap;
	default:
		return NULL;
	}
}

/* IOCTL HANDLERS */

int i915_mem_alloc(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_mem_alloc_t alloc;
	struct mem_block *block, **heap;

	if (!dev_priv) {
		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
		return DRM_ERR(EINVAL);
	}

	DRM_COPY_FROM_USER_IOCTL(alloc, (drm_i915_mem_alloc_t __user *) data,
				 sizeof(alloc));

	heap = get_heap(dev_priv, alloc.region);
	if (!heap || !*heap)
		return DRM_ERR(EFAULT);

	/* Make things easier on ourselves: all allocations at least
	 * 4k aligned.
	 */
	if (alloc.alignment < 12)
		alloc.alignment = 12;

	block = alloc_block(*heap, alloc.size, alloc.alignment, filp);

	if (!block)
		return DRM_ERR(ENOMEM);

	mark_block(dev, block, 1);

	if (DRM_COPY_TO_USER(alloc.region_offset, &block->start, sizeof(int))) {
		DRM_ERROR("copy_to_user\n");
		return DRM_ERR(EFAULT);
	}

	return 0;
}

int i915_mem_free(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_mem_free_t memfree;
	struct mem_block *block, **heap;

	if (!dev_priv) {
		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
		return DRM_ERR(EINVAL);
	}

	DRM_COPY_FROM_USER_IOCTL(memfree, (drm_i915_mem_free_t __user *) data,
				 sizeof(memfree));

	heap = get_heap(dev_priv, memfree.region);
	if (!heap || !*heap)
		return DRM_ERR(EFAULT);

	block = find_block(*heap, memfree.region_offset);
	if (!block)
		return DRM_ERR(EFAULT);

	if (block->filp != filp)
		return DRM_ERR(EPERM);

	mark_block(dev, block, 0);
	free_block(block);
	return 0;
}

int i915_mem_init_heap(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_mem_init_heap_t initheap;
	struct mem_block **heap;

	if (!dev_priv) {
		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
		return DRM_ERR(EINVAL);
	}

	DRM_COPY_FROM_USER_IOCTL(initheap,
				 (drm_i915_mem_init_heap_t __user *) data,
				 sizeof(initheap));

	heap = get_heap(dev_priv, initheap.region);
	if (!heap)
		return DRM_ERR(EFAULT);

	if (*heap) {
		DRM_ERROR("heap already initialized?");
		return DRM_ERR(EFAULT);
	}

	return init_heap(heap, initheap.start, initheap.size);
}