summaryrefslogtreecommitdiff
path: root/linux-core/drm_mm.c
blob: cf0d92fae0c4b07aff4051e7628452daef4da342 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is just an
 * unordered stack of free regions. This could easily be improved if an RB-tree
 * is used instead. At least if we expect heavy fragmentation.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include "drmP.h"
#include <linux/slab.h>

/*
 * Report how much free space sits at the very end of the managed range.
 * Returns the size of the last node if it is free, otherwise 0.
 */
unsigned long drm_mm_tail_space(struct drm_mm *mm)
{
	struct drm_mm_node *last;

	last = list_entry(mm->ml_entry.prev, struct drm_mm_node, ml_entry);

	return last->free ? last->size : 0;
}

/*
 * Shrink the managed range by removing @size bytes from its tail.
 * Fails with -ENOMEM unless the last node is free and strictly larger
 * than @size (the tail node itself must survive the shrink).
 */
int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size)
{
	struct drm_mm_node *last;

	last = list_entry(mm->ml_entry.prev, struct drm_mm_node, ml_entry);

	if (!last->free || last->size <= size)
		return -ENOMEM;

	last->size -= size;
	return 0;
}


static int drm_mm_create_tail_node(struct drm_mm *mm,
			    unsigned long start,
			    unsigned long size)
{
	struct drm_mm_node *child;

	child = (struct drm_mm_node *)
		drm_ctl_alloc(sizeof(*child), DRM_MEM_MM);
	if (!child)
		return -ENOMEM;

	child->free = 1;
	child->size = size;
	child->start = start;
	child->mm = mm;

	list_add_tail(&child->ml_entry, &mm->ml_entry);
	list_add_tail(&child->fl_entry, &mm->fl_entry);

	return 0;
}


/*
 * Grow the managed range by @size bytes at its tail: extend the last
 * node in place when it is free, otherwise append a fresh free node
 * directly after it.
 */
int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size)
{
	struct drm_mm_node *last;

	last = list_entry(mm->ml_entry.prev, struct drm_mm_node, ml_entry);

	if (last->free) {
		last->size += size;
		return 0;
	}

	return drm_mm_create_tail_node(mm, last->start + last->size, size);
}

/*
 * Carve @size bytes off the front of the free node @parent and hand
 * them back as a new allocated (non-free) node inserted just before
 * @parent in the memory-ordered list. @parent keeps the remainder.
 * Returns NULL on allocation failure; @parent is then left untouched.
 */
static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
					    unsigned long size)
{
	struct drm_mm_node *child;

	child = (struct drm_mm_node *)
		drm_ctl_alloc(sizeof(*child), DRM_MEM_MM);
	if (!child)
		return NULL;

	/* The new node is allocated, so it never goes on the free stack;
	 * keep fl_entry a valid empty list so later list_del_init() on it
	 * is safe.  (The original initialized it twice — once was enough.) */
	INIT_LIST_HEAD(&child->fl_entry);

	child->free = 0;
	child->size = size;
	child->start = parent->start;
	child->mm = parent->mm;

	list_add_tail(&child->ml_entry, &parent->ml_entry);

	parent->size -= size;
	parent->start += size;
	return child;
}

/*
 * Allocate @size bytes from the free node @parent, honouring @alignment
 * (a byte alignment for the returned start offset; 0 means unaligned).
 * Any slack split off the front to satisfy alignment is merged back
 * into the free pool before returning.
 * Returns the allocated node, or NULL on allocation failure.
 */
struct drm_mm_node *drm_mm_get_block(struct drm_mm_node * parent,
				unsigned long size, unsigned alignment)
{

	struct drm_mm_node *align_splitoff = NULL;
	struct drm_mm_node *child;
	unsigned tmp = 0;

	if (alignment)
		tmp = parent->start % alignment;

	if (tmp) {
		align_splitoff = drm_mm_split_at_start(parent, alignment - tmp);
		if (!align_splitoff)
			return NULL;
	}

	if (parent->size == size) {
		/* Exact fit: consume the whole node.  Fall through (rather
		 * than returning early) so the alignment slack below is
		 * still returned to the free pool — the early return here
		 * previously leaked align_splitoff. */
		list_del_init(&parent->fl_entry);
		parent->free = 0;
		child = parent;
	} else {
		child = drm_mm_split_at_start(parent, size);
	}

	if (align_splitoff)
		drm_mm_put_block(align_splitoff);

	return child;
}

/*
 * Put a block. Merge with the previous and / or next block if they are free.
 * Otherwise add to the free stack.
 */

void drm_mm_put_block(struct drm_mm_node * cur)
{

	struct drm_mm *mm = cur->mm;
	struct list_head *cur_head = &cur->ml_entry;
	struct list_head *root_head = &mm->ml_entry;
	struct drm_mm_node *prev_node = NULL;
	struct drm_mm_node *next_node;

	/* Set once cur's space has been absorbed by a neighbour. */
	int merged = 0;

	/* Try to merge into the previous node (skip if cur is first). */
	if (cur_head->prev != root_head) {
		prev_node = list_entry(cur_head->prev, struct drm_mm_node, ml_entry);
		if (prev_node->free) {
			/* prev is contiguous with cur, so growing its size
			 * covers cur's range; prev->start is unchanged. */
			prev_node->size += cur->size;
			merged = 1;
		}
	}
	/* Try to merge with the next node (skip if cur is last). */
	if (cur_head->next != root_head) {
		next_node = list_entry(cur_head->next, struct drm_mm_node, ml_entry);
		if (next_node->free) {
			if (merged) {
				/* Both neighbours free: fold next into prev
				 * and release next's node entirely. */
				prev_node->size += next_node->size;
				list_del(&next_node->ml_entry);
				list_del(&next_node->fl_entry);
				drm_ctl_free(next_node, sizeof(*next_node),
					     DRM_MEM_MM);
			} else {
				/* Only next is free: extend it backwards to
				 * start where cur started. */
				next_node->size += cur->size;
				next_node->start = cur->start;
				merged = 1;
			}
		}
	}
	if (!merged) {
		/* No free neighbour: cur itself becomes a free node on
		 * top of the free stack. */
		cur->free = 1;
		list_add(&cur->fl_entry, &mm->fl_entry);
	} else {
		/* cur's space now lives in a neighbour; drop the node.
		 * (cur was allocated, so it is not on the free list.) */
		list_del(&cur->ml_entry);
		drm_ctl_free(cur, sizeof(*cur), DRM_MEM_MM);
	}
}
EXPORT_SYMBOL(drm_mm_put_block);

/*
 * Find a free node that can hold @size bytes at @alignment.
 * With best_match == 0 the first fitting node is returned; otherwise the
 * whole free stack is scanned and the smallest fitting node wins
 * (reduces fragmentation at the cost of a full walk).
 * Returns NULL when nothing fits.
 */
struct drm_mm_node *drm_mm_search_free(const struct drm_mm * mm,
				  unsigned long size,
				  unsigned alignment, int best_match)
{
	struct list_head *list;
	const struct list_head *free_stack = &mm->fl_entry;
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long best_size;
	unsigned wasted;

	best = NULL;
	best_size = ~0UL;

	list_for_each(list, free_stack) {
		entry = list_entry(list, struct drm_mm_node, fl_entry);
		wasted = 0;

		if (entry->size < size)
			continue;

		if (alignment) {
			register unsigned tmp = entry->start % alignment;
			if (tmp)
				wasted += alignment - tmp;
		}

		if (entry->size >= size + wasted) {
			if (!best_match)
				return entry;
			/* Track the smallest fitting node.  This must compare
			 * entry->size, not the loop-invariant request size —
			 * the old "size < best_size" test made best_match
			 * return the first fit found. */
			if (entry->size < best_size) {
				best = entry;
				best_size = entry->size;
			}
		}
	}

	return best;
}

int drm_mm_clean(struct drm_mm * mm)
{
	struct list_head *head = &mm->ml_entry;

	return (head->next->next == head);
}

/*
 * Initialize a manager covering [start, start + size) as one free node.
 * Returns 0 on success, -ENOMEM if the initial node allocation fails.
 */
int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
{
	INIT_LIST_HEAD(&mm->ml_entry);
	INIT_LIST_HEAD(&mm->fl_entry);

	/* The entire range begins life as a single free tail node. */
	return drm_mm_create_tail_node(mm, start, size);
}

EXPORT_SYMBOL(drm_mm_init);

/*
 * Tear down a manager.  Only legal once everything has been freed: if
 * more than the single initial node remains, log an error and bail out
 * without freeing anything.
 */
void drm_mm_takedown(struct drm_mm * mm)
{
	struct drm_mm_node *entry =
		list_entry(mm->fl_entry.next, struct drm_mm_node, fl_entry);

	/* Clean means both lists contain exactly this one node. */
	if (entry->ml_entry.next != &mm->ml_entry ||
	    entry->fl_entry.next != &mm->fl_entry) {
		DRM_ERROR("Memory manager not clean. Delaying takedown\n");
		return;
	}

	list_del(&entry->fl_entry);
	list_del(&entry->ml_entry);
	drm_ctl_free(entry, sizeof(*entry), DRM_MEM_MM);
}

EXPORT_SYMBOL(drm_mm_takedown);
TH) ? "dp_pix_width, " : "", (flags & MACH64_UPLOAD_SETUP_CNTL) ? "setup_cntl, " : "", (flags & MACH64_UPLOAD_MISC) ? "misc, " : "", (flags & MACH64_UPLOAD_TEXTURE) ? "texture, " : "", (flags & MACH64_UPLOAD_TEX0IMAGE) ? "tex0 image, " : "", (flags & MACH64_UPLOAD_TEX1IMAGE) ? "tex1 image, " : "", (flags & MACH64_UPLOAD_CLIPRECTS) ? "cliprects, " : ""); } /* Mach64 doesn't have hardware cliprects, just one hardware scissor, * so the GL scissor is intersected with each cliprect here */ /* This function returns 0 on success, 1 for no intersection, and * negative for an error */ static int mach64_emit_cliprect(struct drm_file *file_priv, drm_mach64_private_t * dev_priv, struct drm_clip_rect * box) { u32 sc_left_right, sc_top_bottom; struct drm_clip_rect scissor; drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv; drm_mach64_context_regs_t *regs = &sarea_priv->context_state; DMALOCALS; DRM_DEBUG("box=%p\n", box); /* Get GL scissor */ /* FIXME: store scissor in SAREA as a cliprect instead of in * hardware format, or do intersection client-side */ scissor.x1 = regs->sc_left_right & 0xffff; scissor.x2 = (regs->sc_left_right & 0xffff0000) >> 16; scissor.y1 = regs->sc_top_bottom & 0xffff; scissor.y2 = (regs->sc_top_bottom & 0xffff0000) >> 16; /* Intersect GL scissor with cliprect */ if (box->x1 > scissor.x1) scissor.x1 = box->x1; if (box->y1 > scissor.y1) scissor.y1 = box->y1; if (box->x2 < scissor.x2) scissor.x2 = box->x2; if (box->y2 < scissor.y2) scissor.y2 = box->y2; /* positive return means skip */ if (scissor.x1 >= scissor.x2) return 1; if (scissor.y1 >= scissor.y2) return 1; DMAGETPTR(file_priv, dev_priv, 2); /* returns on failure to get buffer */ sc_left_right = ((scissor.x1 << 0) | (scissor.x2 << 16)); sc_top_bottom = ((scissor.y1 << 0) | (scissor.y2 << 16)); DMAOUTREG(MACH64_SC_LEFT_RIGHT, sc_left_right); DMAOUTREG(MACH64_SC_TOP_BOTTOM, sc_top_bottom); DMAADVANCE(dev_priv, 1); return 0; } static __inline__ int mach64_emit_state(struct drm_file *file_priv, 
drm_mach64_private_t * dev_priv) { drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv; drm_mach64_context_regs_t *regs = &sarea_priv->context_state; unsigned int dirty = sarea_priv->dirty; u32 offset = ((regs->tex_size_pitch & 0xf0) >> 2); DMALOCALS; if (MACH64_VERBOSE) { mach64_print_dirty(__FUNCTION__, dirty); } else { DRM_DEBUG("dirty=0x%08x\n", dirty); } DMAGETPTR(file_priv, dev_priv, 17); /* returns on failure to get buffer */ if (dirty & MACH64_UPLOAD_MISC) { DMAOUTREG(MACH64_DP_MIX, regs->dp_mix); DMAOUTREG(MACH64_DP_SRC, regs->dp_src); DMAOUTREG(MACH64_CLR_CMP_CNTL, regs->clr_cmp_cntl); DMAOUTREG(MACH64_GUI_TRAJ_CNTL, regs->gui_traj_cntl); sarea_priv->dirty &= ~MACH64_UPLOAD_MISC; } if (dirty & MACH64_UPLOAD_DST_OFF_PITCH) { DMAOUTREG(MACH64_DST_OFF_PITCH, regs->dst_off_pitch); sarea_priv->dirty &= ~MACH64_UPLOAD_DST_OFF_PITCH; } if (dirty & MACH64_UPLOAD_Z_OFF_PITCH) { DMAOUTREG(MACH64_Z_OFF_PITCH, regs->z_off_pitch); sarea_priv->dirty &= ~MACH64_UPLOAD_Z_OFF_PITCH; } if (dirty & MACH64_UPLOAD_Z_ALPHA_CNTL) { DMAOUTREG(MACH64_Z_CNTL, regs->z_cntl); DMAOUTREG(MACH64_ALPHA_TST_CNTL, regs->alpha_tst_cntl); sarea_priv->dirty &= ~MACH64_UPLOAD_Z_ALPHA_CNTL; } if (dirty & MACH64_UPLOAD_SCALE_3D_CNTL) { DMAOUTREG(MACH64_SCALE_3D_CNTL, regs->scale_3d_cntl); sarea_priv->dirty &= ~MACH64_UPLOAD_SCALE_3D_CNTL; } if (dirty & MACH64_UPLOAD_DP_FOG_CLR) { DMAOUTREG(MACH64_DP_FOG_CLR, regs->dp_fog_clr); sarea_priv->dirty &= ~MACH64_UPLOAD_DP_FOG_CLR; } if (dirty & MACH64_UPLOAD_DP_WRITE_MASK) { DMAOUTREG(MACH64_DP_WRITE_MASK, regs->dp_write_mask); sarea_priv->dirty &= ~MACH64_UPLOAD_DP_WRITE_MASK; } if (dirty & MACH64_UPLOAD_DP_PIX_WIDTH) { DMAOUTREG(MACH64_DP_PIX_WIDTH, regs->dp_pix_width); sarea_priv->dirty &= ~MACH64_UPLOAD_DP_PIX_WIDTH; } if (dirty & MACH64_UPLOAD_SETUP_CNTL) { DMAOUTREG(MACH64_SETUP_CNTL, regs->setup_cntl); sarea_priv->dirty &= ~MACH64_UPLOAD_SETUP_CNTL; } if (dirty & MACH64_UPLOAD_TEXTURE) { DMAOUTREG(MACH64_TEX_SIZE_PITCH, regs->tex_size_pitch); 
DMAOUTREG(MACH64_TEX_CNTL, regs->tex_cntl); DMAOUTREG(MACH64_SECONDARY_TEX_OFF, regs->secondary_tex_off); DMAOUTREG(MACH64_TEX_0_OFF + offset, regs->tex_offset); sarea_priv->dirty &= ~MACH64_UPLOAD_TEXTURE; } DMAADVANCE(dev_priv, 1); sarea_priv->dirty &= MACH64_UPLOAD_CLIPRECTS; return 0; } /* ================================================================ * DMA command dispatch functions */ static int mach64_dma_dispatch_clear(struct drm_device * dev, struct drm_file *file_priv, unsigned int flags, int cx, int cy, int cw, int ch, unsigned int clear_color, unsigned int clear_depth) { drm_mach64_private_t *dev_priv = dev->dev_private; drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv; drm_mach64_context_regs_t *ctx = &sarea_priv->context_state; int nbox = sarea_priv->nbox; struct drm_clip_rect *pbox = sarea_priv->boxes; u32 fb_bpp, depth_bpp; int i; DMALOCALS; DRM_DEBUG("\n"); switch (dev_priv->fb_bpp) { case 16: fb_bpp = MACH64_DATATYPE_RGB565; break; case 32: fb_bpp = MACH64_DATATYPE_ARGB8888; break; default: return -EINVAL; } switch (dev_priv->depth_bpp) { case 16: depth_bpp = MACH64_DATATYPE_RGB565; break; case 24: case 32: depth_bpp = MACH64_DATATYPE_ARGB8888; break; default: return -EINVAL; } if (!nbox) return 0; DMAGETPTR(file_priv, dev_priv, nbox * 31); /* returns on failure to get buffer */ for (i = 0; i < nbox; i++) { int x = pbox[i].x1; int y = pbox[i].y1; int w = pbox[i].x2 - x; int h = pbox[i].y2 - y; DRM_DEBUG("dispatch clear %d,%d-%d,%d flags 0x%x\n", pbox[i].x1, pbox[i].y1, pbox[i].x2, pbox[i].y2, flags); if (flags & (MACH64_FRONT | MACH64_BACK)) { /* Setup for color buffer clears */ DMAOUTREG(MACH64_Z_CNTL, 0); DMAOUTREG(MACH64_SCALE_3D_CNTL, 0); DMAOUTREG(MACH64_SC_LEFT_RIGHT, ctx->sc_left_right); DMAOUTREG(MACH64_SC_TOP_BOTTOM, ctx->sc_top_bottom); DMAOUTREG(MACH64_CLR_CMP_CNTL, 0); DMAOUTREG(MACH64_GUI_TRAJ_CNTL, (MACH64_DST_X_LEFT_TO_RIGHT | MACH64_DST_Y_TOP_TO_BOTTOM)); DMAOUTREG(MACH64_DP_PIX_WIDTH, ((fb_bpp << 0) | (fb_bpp << 4) | 
(fb_bpp << 8) | (fb_bpp << 16) | (fb_bpp << 28))); DMAOUTREG(MACH64_DP_FRGD_CLR, clear_color); DMAOUTREG(MACH64_DP_WRITE_MASK, ctx->dp_write_mask); DMAOUTREG(MACH64_DP_MIX, (MACH64_BKGD_MIX_D | MACH64_FRGD_MIX_S)); DMAOUTREG(MACH64_DP_SRC, (MACH64_BKGD_SRC_FRGD_CLR | MACH64_FRGD_SRC_FRGD_CLR | MACH64_MONO_SRC_ONE)); } if (flags & MACH64_FRONT) { DMAOUTREG(MACH64_DST_OFF_PITCH, dev_priv->front_offset_pitch); DMAOUTREG(MACH64_DST_X_Y, (y << 16) | x); DMAOUTREG(MACH64_DST_WIDTH_HEIGHT, (h << 16) | w); } if (flags & MACH64_BACK) { DMAOUTREG(MACH64_DST_OFF_PITCH, dev_priv->back_offset_pitch); DMAOUTREG(MACH64_DST_X_Y, (y << 16) | x); DMAOUTREG(MACH64_DST_WIDTH_HEIGHT, (h << 16) | w); } if (flags & MACH64_DEPTH) { /* Setup for depth buffer clear */ DMAOUTREG(MACH64_Z_CNTL, 0); DMAOUTREG(MACH64_SCALE_3D_CNTL, 0); DMAOUTREG(MACH64_SC_LEFT_RIGHT, ctx->sc_left_right); DMAOUTREG(MACH64_SC_TOP_BOTTOM, ctx->sc_top_bottom); DMAOUTREG(MACH64_CLR_CMP_CNTL, 0); DMAOUTREG(MACH64_GUI_TRAJ_CNTL, (MACH64_DST_X_LEFT_TO_RIGHT | MACH64_DST_Y_TOP_TO_BOTTOM)); DMAOUTREG(MACH64_DP_PIX_WIDTH, ((depth_bpp << 0) | (depth_bpp << 4) | (depth_bpp << 8) | (depth_bpp << 16) | (depth_bpp << 28))); DMAOUTREG(MACH64_DP_FRGD_CLR, clear_depth); DMAOUTREG(MACH64_DP_WRITE_MASK, 0xffffffff); DMAOUTREG(MACH64_DP_MIX, (MACH64_BKGD_MIX_D | MACH64_FRGD_MIX_S)); DMAOUTREG(MACH64_DP_SRC, (MACH64_BKGD_SRC_FRGD_CLR | MACH64_FRGD_SRC_FRGD_CLR | MACH64_MONO_SRC_ONE)); DMAOUTREG(MACH64_DST_OFF_PITCH, dev_priv->depth_offset_pitch); DMAOUTREG(MACH64_DST_X_Y, (y << 16) | x); DMAOUTREG(MACH64_DST_WIDTH_HEIGHT, (h << 16) | w); } } DMAADVANCE(dev_priv, 1); return 0; } static int mach64_dma_dispatch_swap(struct drm_device * dev, struct drm_file *file_priv) { drm_mach64_private_t *dev_priv = dev->dev_private; drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv; int nbox = sarea_priv->nbox; struct drm_clip_rect *pbox = sarea_priv->boxes; u32 fb_bpp; int i; DMALOCALS; DRM_DEBUG("\n"); switch (dev_priv->fb_bpp) { case 16: 
fb_bpp = MACH64_DATATYPE_RGB565; break; case 32: default: fb_bpp = MACH64_DATATYPE_ARGB8888; break; } if (!nbox) return 0; DMAGETPTR(file_priv, dev_priv, 13 + nbox * 4); /* returns on failure to get buffer */ DMAOUTREG(MACH64_Z_CNTL, 0); DMAOUTREG(MACH64_SCALE_3D_CNTL, 0); DMAOUTREG(MACH64_SC_LEFT_RIGHT, 0 | (8191 << 16)); /* no scissor */ DMAOUTREG(MACH64_SC_TOP_BOTTOM, 0 | (16383 << 16)); DMAOUTREG(MACH64_CLR_CMP_CNTL, 0); DMAOUTREG(MACH64_GUI_TRAJ_CNTL, (MACH64_DST_X_LEFT_TO_RIGHT | MACH64_DST_Y_TOP_TO_BOTTOM)); DMAOUTREG(MACH64_DP_PIX_WIDTH, ((fb_bpp << 0) | (fb_bpp << 4) | (fb_bpp << 8) | (fb_bpp << 16) | (fb_bpp << 28))); DMAOUTREG(MACH64_DP_WRITE_MASK, 0xffffffff); DMAOUTREG(MACH64_DP_MIX, (MACH64_BKGD_MIX_D | MACH64_FRGD_MIX_S)); DMAOUTREG(MACH64_DP_SRC, (MACH64_BKGD_SRC_BKGD_CLR | MACH64_FRGD_SRC_BLIT | MACH64_MONO_SRC_ONE)); DMAOUTREG(MACH64_SRC_OFF_PITCH, dev_priv->back_offset_pitch); DMAOUTREG(MACH64_DST_OFF_PITCH, dev_priv->front_offset_pitch); for (i = 0; i < nbox; i++) { int x = pbox[i].x1; int y = pbox[i].y1; int w = pbox[i].x2 - x; int h = pbox[i].y2 - y; DRM_DEBUG("dispatch swap %d,%d-%d,%d\n", pbox[i].x1, pbox[i].y1, pbox[i].x2, pbox[i].y2); DMAOUTREG(MACH64_SRC_WIDTH1, w); DMAOUTREG(MACH64_SRC_Y_X, (x << 16) | y); DMAOUTREG(MACH64_DST_Y_X, (x << 16) | y); DMAOUTREG(MACH64_DST_WIDTH_HEIGHT, (h << 16) | w); } DMAADVANCE(dev_priv, 1); if (dev_priv->driver_mode == MACH64_MODE_DMA_ASYNC) { for (i = 0; i < MACH64_MAX_QUEUED_FRAMES - 1; i++) { dev_priv->frame_ofs[i] = dev_priv->frame_ofs[i + 1]; } dev_priv->frame_ofs[i] = GETRINGOFFSET(); dev_priv->sarea_priv->frames_queued++; } return 0; } static int mach64_do_get_frames_queued(drm_mach64_private_t * dev_priv) { drm_mach64_descriptor_ring_t *ring = &dev_priv->ring; drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv; int i, start; u32 head, tail, ofs; DRM_DEBUG("\n"); if (sarea_priv->frames_queued == 0) return 0; tail = ring->tail; mach64_ring_tick(dev_priv, ring); head = ring->head; start = 
(MACH64_MAX_QUEUED_FRAMES - DRM_MIN(MACH64_MAX_QUEUED_FRAMES, sarea_priv->frames_queued)); if (head == tail) { sarea_priv->frames_queued = 0; for (i = start; i < MACH64_MAX_QUEUED_FRAMES; i++) { dev_priv->frame_ofs[i] = ~0; } return 0; } for (i = start; i < MACH64_MAX_QUEUED_FRAMES; i++) { ofs = dev_priv->frame_ofs[i]; DRM_DEBUG("frame_ofs[%d] ofs: %d\n", i, ofs); if (ofs == ~0 || (head < tail && (ofs < head || ofs >= tail)) || (head > tail && (ofs < head && ofs >= tail))) { sarea_priv->frames_queued = (MACH64_MAX_QUEUED_FRAMES - 1) - i; dev_priv->frame_ofs[i] = ~0; } } return sarea_priv->frames_queued; } /* Copy and verify a client submited buffer. * FIXME: Make an assembly optimized version */ static __inline__ int copy_from_user_vertex(u32 *to, const u32 __user *ufrom, unsigned long bytes) { unsigned long n = bytes; /* dwords remaining in buffer */ u32 *from, *orig_from; from = drm_alloc(bytes, DRM_MEM_DRIVER); if (from == NULL) return -ENOMEM; if (DRM_COPY_FROM_USER(from, ufrom, bytes)) { drm_free(from, bytes, DRM_MEM_DRIVER); return -EFAULT; } orig_from = from; /* we'll be modifying the "from" ptr, so save it */ n >>= 2; while (n > 1) { u32 data, reg, count; data = *from++; n--; reg = le32_to_cpu(data); count = (reg >> 16) + 1; if (count <= n) { n -= count; reg &= 0xffff; /* This is an exact match of Mach64's Setup Engine registers, * excluding SETUP_CNTL (1_C1). 
*/ if ((reg >= 0x0190 && reg < 0x01c1) || (reg >= 0x01ca && reg <= 0x01cf)) { *to++ = data; memcpy(to, from, count << 2); from += count; to += count; } else { DRM_ERROR("Got bad command: 0x%04x\n", reg); drm_free(orig_from, bytes, DRM_MEM_DRIVER); return -EACCES; } } else { DRM_ERROR ("Got bad command count(=%u) dwords remaining=%lu\n", count, n); drm_free(orig_from, bytes, DRM_MEM_DRIVER); return -EINVAL; } } drm_free(orig_from, bytes, DRM_MEM_DRIVER); if (n == 0) return 0; else { DRM_ERROR("Bad buf->used(=%lu)\n", bytes); return -EINVAL; } } static int mach64_dma_dispatch_vertex(struct drm_device * dev, struct drm_file *file_priv, drm_mach64_vertex_t * vertex) { drm_mach64_private_t *dev_priv = dev->dev_private; drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv; struct drm_buf *copy_buf; void *buf = vertex->buf; unsigned long used = vertex->used; int ret = 0; int i = 0; int done = 0; int verify_ret = 0; DMALOCALS; DRM_DEBUG("buf=%p used=%lu nbox=%d\n", buf, used, sarea_priv->nbox); if (!used) goto _vertex_done; copy_buf = mach64_freelist_get(dev_priv); if (copy_buf == NULL) { DRM_ERROR("couldn't get buffer\n"); return -EAGAIN; } /* Mach64's vertex data is actually register writes. To avoid security * compromises these register writes have to be verified and copied from * user space into a private DMA buffer. 
*/ verify_ret = copy_from_user_vertex(GETBUFPTR(copy_buf), buf, used); if (verify_ret != 0) { mach64_freelist_put(dev_priv, copy_buf); goto _vertex_done; } copy_buf->used = used; DMASETPTR(copy_buf); if (sarea_priv->dirty & ~MACH64_UPLOAD_CLIPRECTS) { ret = mach64_emit_state(file_priv, dev_priv); if (ret < 0) return ret; } do { /* Emit the next cliprect */ if (i < sarea_priv->nbox) { ret = mach64_emit_cliprect(file_priv, dev_priv, &sarea_priv->boxes[i]); if (ret < 0) { /* failed to get buffer */ return ret; } else if (ret != 0) { /* null intersection with scissor */ continue; } } if ((i >= sarea_priv->nbox - 1)) done = 1; /* Add the buffer to the DMA queue */ DMAADVANCE(dev_priv, done); } while (++i < sarea_priv->nbox); if (!done) { if (copy_buf->pending) { DMADISCARDBUF(); } else { /* This buffer wasn't used (no cliprects), so place it * back on the free list */ mach64_freelist_put(dev_priv, copy_buf); } } _vertex_done: sarea_priv->dirty &= ~MACH64_UPLOAD_CLIPRECTS; sarea_priv->nbox = 0; return verify_ret; } static __inline__ int copy_from_user_blit(u32 *to, const u32 __user *ufrom, unsigned long bytes) { to = (u32 *)((char *)to + MACH64_HOSTDATA_BLIT_OFFSET); if (DRM_COPY_FROM_USER(to, ufrom, bytes)) { return -EFAULT; } return 0; }