/**
 * \file drm_ioctl.c
 * IOCTL processing for DRM
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Fri Jan  8 09:01:26 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"
#include "drm_core.h"

#include "linux/pci.h"

/**
 * Get the bus id.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg user argument, pointing to a drm_unique structure.
 * \return zero on success or a negative number on failure.
 *
 * Copies the bus id from drm_device::unique into user space.
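 *
 * A typical userspace caller issues this ioctl twice: once to learn the
 * length, then again with a buffer of that size. A minimal sketch (the
 * userspace wrapper code shown is an illustration, not part of this file):
 * \code
 * drm_unique_t u = { .unique_len = 0, .unique = NULL };
 * ioctl(fd, DRM_IOCTL_GET_UNIQUE, &u);    // first call: fetch the length only
 * u.unique = malloc(u.unique_len + 1);
 * ioctl(fd, DRM_IOCTL_GET_UNIQUE, &u);    // second call: fetch the string
 * u.unique[u.unique_len] = '\0';          // the kernel does not NUL-terminate
 * \endcode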
 */
int drm_getunique(struct inode *inode, struct file *filp,
		  unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_unique_t __user *argp = (void __user *)arg;
	drm_unique_t u;

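	/* Two-step protocol: the string is copied out only when the
	 * caller's buffer is large enough, but the true length is always
	 * reported back.
	 */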
	if (copy_from_user(&u, argp, sizeof(u)))
		return -EFAULT;
	if (u.unique_len >= dev->unique_len) {
		if (copy_to_user(u.unique, dev->unique, dev->unique_len))
			return -EFAULT;
	}
	u.unique_len = dev->unique_len;
	if (copy_to_user(argp, &u, sizeof(u)))
		return -EFAULT;
	return 0;
}

/**
 * Set the bus id.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg user argument, pointing to a drm_unique structure.
 * \return zero on success or a negative number on failure.
 *
 * Copies the bus id from userspace into drm_device::unique, and verifies that
 * it matches the device this DRM is attached to (EINVAL otherwise).  Deprecated
 * in interface version 1.1 and will return EBUSY when setversion has requested
 * version 1.1 or greater.
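 *
 * The expected string has the form "PCI:bus:slot:func", e.g. "PCI:1:0:0",
 * with the PCI domain (if nonzero) folded into the upper bits of the bus
 * number.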
 */
int drm_setunique(struct inode *inode, struct file *filp,
		  unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_unique_t u;
	int domain, bus, slot, func, ret;

	if (dev->unique_len || dev->unique)
		return -EBUSY;

	if (copy_from_user(&u, (drm_unique_t __user *) arg, sizeof(u)))
		return -EFAULT;

	if (!u.unique_len || u.unique_len > 1024)
		return -EINVAL;

	dev->unique_len = u.unique_len;
	dev->unique = drm_alloc(u.unique_len + 1, DRM_MEM_DRIVER);
	if (!dev->unique)
		return -ENOMEM;
	if (copy_from_user(dev->unique, u.unique, dev->unique_len)) {
		drm_free(dev->unique, u.unique_len + 1, DRM_MEM_DRIVER);
		dev->unique = NULL;
		dev->unique_len = 0;
		return -EFAULT;
	}

	dev->unique[dev->unique_len] = '\0';

	dev->devname = drm_alloc(strlen(dev->driver->pci_driver.name) + strlen(dev->unique) + 2,
				 DRM_MEM_DRIVER);
	if (!dev->devname)
		return -ENOMEM;

	sprintf(dev->devname, "%s@%s", dev->driver->pci_driver.name, dev->unique);

	/* Return error if the busid submitted doesn't match the device's actual
	 * busid.
	 */
	ret = sscanf(dev->unique, "PCI:%d:%d:%d", &bus, &slot, &func);
	if (ret != 3)
		return -EINVAL;
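
	/* Userspace folds the PCI domain into the upper bits of the bus
	 * number; unpack it before comparing against the actual device.
	 */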
	domain = bus >> 8;
	bus &= 0xff;

	if ((domain != drm_get_pci_domain(dev)) ||
	    (bus != dev->pdev->bus->number) ||
	    (slot != PCI_SLOT(dev->pdev->devfn)) ||
	    (func != PCI_FUNC(dev->pdev->devfn)))
		return -EINVAL;

	return 0;
}

static int drm_set_busid(drm_device_t * dev)
{
	int len;
	if (dev->unique != NULL)
		return -EBUSY;

	dev->unique_len = 40;
	dev->unique = drm_alloc(dev->unique_len + 1, DRM_MEM_DRIVER);
	if (dev->unique == NULL)
		return -ENOMEM;

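	/* Canonical busid form, e.g. "pci:0000:01:00.0"
	 * (domain:bus:slot.function).
	 */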
	len = snprintf(dev->unique, dev->unique_len, "pci:%04x:%02x:%02x.%d",
		       drm_get_pci_domain(dev),
		       dev->pdev->bus->number,
		       PCI_SLOT(dev->pdev->devfn),
		       PCI_FUNC(dev->pdev->devfn));
	if (len >= dev->unique_len)
		DRM_ERROR("buffer overflow");

	dev->devname = drm_alloc(strlen(dev->driver->pci_driver.name) + dev->unique_len + 2,
				 DRM_MEM_DRIVER);
	if (dev->devname == NULL)
		return -ENOMEM;

	sprintf(dev->devname, "%s@%s", dev->driver->pci_driver.name, dev->unique);

	return 0;
}

/**
 * Get mapping information.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg user argument, pointing to a drm_map structure.
 *
 * \return zero on success or a negative number on failure.
 *
 * Searches for the mapping at the specified index (passed in the offset
 * field) and copies its information into userspace.
 */
int drm_getmap(struct inode *inode, struct file *filp,
	       unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_map_t __user *argp = (void __user *)arg;
	drm_map_t map;
	drm_map_list_t *r_list = NULL;
	struct list_head *list;
	int idx;
	int i;

	if (copy_from_user(&map, argp, sizeof(map)))
		return -EFAULT;
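	/* On input, the offset field carries an index into the map list,
	 * not a byte offset.
	 */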
	idx = map.offset;

	mutex_lock(&dev->struct_mutex);
	if (idx < 0) {
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	i = 0;
	list_for_each(list, &dev->maplist->head) {
		if (i == idx) {
			r_list = list_entry(list, drm_map_list_t, head);
			break;
		}
		i++;
	}
	if (!r_list || !r_list->map) {
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	map.offset = r_list->map->offset;
	map.size = r_list->map->size;
	map.type = r_list->map->type;
	map.flags = r_list->map->flags;
	map.handle = (void *)(unsigned long) r_list->user_token;
	map.mtrr = r_list->map->mtrr;
	mutex_unlock(&dev->struct_mutex);

	if (copy_to_user(argp, &map, sizeof(map)))
		return -EFAULT;
	return 0;
}

/**
 * Get client information.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg user argument, pointing to a drm_client structure.
 *
 * \return zero on success or a negative number on failure.
 *
 * Searches for the client with the specified index and copies its information
 * into userspace.
 */
int drm_getclient(struct inode *inode, struct file *filp,
		  unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_client_t __user *argp = (drm_client_t __user *)arg;
	drm_client_t client;
	drm_file_t *pt;
	int idx;
	int i;

	if (copy_from_user(&client, argp, sizeof(client)))
		return -EFAULT;
	idx = client.idx;
	mutex_lock(&dev->struct_mutex);
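	/* Walk the device's list of open files to the requested index. */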
	for (i = 0, pt = dev->file_first; i < idx && pt; i++, pt = pt->next) ;

	if (!pt) {
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}
	client.auth = pt->authenticated;
	client.pid = pt->pid;
	client.uid = pt->uid;
	client.magic = pt->magic;
	client.iocs = pt->ioctl_count;
	mutex_unlock(&dev->struct_mutex);

	if (copy_to_user(argp, &client, sizeof(client)))
		return -EFAULT;
	return 0;
}

/**
 * Get statistics information.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg user argument, pointing to a drm_stats structure.
 *
 * \return zero on success or a negative number on failure.
 */
int drm_getstats(struct inode *inode, struct file *filp,
		 unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_stats_t stats;
	int i;

	memset(&stats, 0, sizeof(stats));

	mutex_lock(&dev->struct_mutex);

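	/* The lock counter is sampled live from the hardware lock word;
	 * every other counter is a plain atomic read.
	 */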
	for (i = 0; i < dev->counters; i++) {
		if (dev->types[i] == _DRM_STAT_LOCK)
			stats.data[i].value
			    = (dev->lock.hw_lock ? dev->lock.hw_lock->lock : 0);
		else
			stats.data[i].value = atomic_read(&dev->counts[i]);
		stats.data[i].type = dev->types[i];
	}

	stats.count = dev->counters;

	mutex_unlock(&dev->struct_mutex);

	if (copy_to_user((drm_stats_t __user *) arg, &stats, sizeof(stats)))
		return -EFAULT;
	return 0;
}

/**
 * Setversion ioctl.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg user argument, pointing to a drm_set_version structure.
 * \return zero on success or negative number on failure.
 *
 * Sets the requested interface version.
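 *
 * The versions currently in effect are always copied back, even when the
 * requested version is rejected. An illustrative userspace call (a sketch;
 * the values shown are an assumption):
 * \code
 * drm_set_version_t sv = {
 *     .drm_di_major = 1,  .drm_di_minor = 1,   // request core interface 1.1
 *     .drm_dd_major = -1, .drm_dd_minor = -1,  // leave the driver version alone
 * };
 * ioctl(fd, DRM_IOCTL_SET_VERSION, &sv);       // sv now holds the versions in effect
 * \endcode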
 */
int drm_setversion(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_set_version_t sv;
	drm_set_version_t retv;
	int if_version;
	drm_set_version_t __user *argp = (void __user *)data;
 
	if (copy_from_user(&sv, argp, sizeof(sv)))
		return -EFAULT;

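	/* Report the versions currently in effect before validating the
	 * request, so userspace learns them even when the request fails.
	 */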
	retv.drm_di_major = DRM_IF_MAJOR;
	retv.drm_di_minor = DRM_IF_MINOR;
	retv.drm_dd_major = dev->driver->major;
	retv.drm_dd_minor = dev->driver->minor;

	if (copy_to_user(argp, &retv, sizeof(retv)))
		return -EFAULT;

	if (sv.drm_di_major != -1) {
		if (sv.drm_di_major != DRM_IF_MAJOR ||
		    sv.drm_di_minor < 0 || sv.drm_di_minor > DRM_IF_MINOR)
			return -EINVAL;
		if_version = DRM_IF_VERSION(sv.drm_di_major, sv.drm_di_minor);
		dev->if_version = max(if_version, dev->if_version);
		if (sv.drm_di_minor >= 1) {
			/*
			 * Version 1.1 includes tying of DRM to specific device
			 */
			drm_set_busid(dev);
		}
	}

	if (sv.drm_dd_major != -1) {
		if (sv.drm_dd_major != dev->driver->major ||
		    sv.drm_dd_minor < 0 || sv.drm_dd_minor > dev->driver->minor)
			return -EINVAL;

		if (dev->driver->set_version)
			dev->driver->set_version(dev, &sv);
	}
	return 0;
}

/** No-op ioctl. */
int drm_noop(struct inode *inode, struct file *filp, unsigned int cmd,
	     unsigned long arg)
{
	DRM_DEBUG("\n");
	return 0;
}
l opt">); } drm_free(*heap, sizeof(**heap), DRM_MEM_DRIVER); *heap = NULL; } void nouveau_mem_close(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; nouveau_mem_takedown(&dev_priv->agp_heap); nouveau_mem_takedown(&dev_priv->fb_heap); if (dev_priv->pci_heap) nouveau_mem_takedown(&dev_priv->pci_heap); } /*XXX BSD needs compat functions for pci access * #define DRM_PCI_DEV struct device * #define drm_pci_get_bsf pci_get_bsf * and a small inline to do *val = pci_read_config(pdev->device, where, 4); * might work */ static int nforce_pci_fn_read_config_dword(int devfn, int where, uint32_t *val) { #ifdef __linux__ DRM_PCI_DEV *pdev; if (!(pdev = drm_pci_get_bsf(0, 0, devfn))) { DRM_ERROR("nForce PCI device function 0x%02x not found\n", devfn); return -ENODEV; } return drm_pci_read_config_dword(pdev, where, val); #else DRM_ERROR("BSD compat for checking IGP memory amount needed\n"); return 0; #endif } static void nouveau_mem_check_nforce_dimms(struct drm_device *dev) { uint32_t mem_ctrlr_pciid; nforce_pci_fn_read_config_dword(3, 0x00, &mem_ctrlr_pciid); mem_ctrlr_pciid >>= 16; if (mem_ctrlr_pciid == 0x01a9 || mem_ctrlr_pciid == 0x01ab || mem_ctrlr_pciid == 0x01ed) { uint32_t dimm[3]; int i; for (i = 0; i < 3; i++) { nforce_pci_fn_read_config_dword(2, 0x40 + i * 4, &dimm[i]); dimm[i] = (dimm[i] >> 8) & 0x4f; } if (dimm[0] + dimm[1] != dimm[2]) DRM_INFO("Your nForce DIMMs are not arranged in " "optimal banks!\n"); } } static uint32_t nouveau_mem_fb_amount_igp(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; uint32_t mem = 0; if (dev_priv->flags & NV_NFORCE) { nforce_pci_fn_read_config_dword(1, 0x7C, &mem); return (uint64_t)(((mem >> 6) & 31) + 1)*1024*1024; } if (dev_priv->flags & NV_NFORCE2) { nforce_pci_fn_read_config_dword(1, 0x84, &mem); return (uint64_t)(((mem >> 4) & 127) + 1)*1024*1024; } DRM_ERROR("impossible!\n"); return 0; } /* returns the amount of FB ram in bytes */ uint64_t nouveau_mem_fb_amount(struct drm_device *dev) { struct drm_nouveau_private *dev_priv=dev->dev_private; switch(dev_priv->card_type) { case NV_04: case NV_05: if (NV_READ(NV03_BOOT_0) & 0x00000100) { return (((NV_READ(NV03_BOOT_0) >> 12) & 0xf)*2+2)*1024*1024; } else switch(NV_READ(NV03_BOOT_0)&NV03_BOOT_0_RAM_AMOUNT) { case NV04_BOOT_0_RAM_AMOUNT_32MB: return 32*1024*1024; case NV04_BOOT_0_RAM_AMOUNT_16MB: return 16*1024*1024; case NV04_BOOT_0_RAM_AMOUNT_8MB: return 8*1024*1024; case NV04_BOOT_0_RAM_AMOUNT_4MB: return 4*1024*1024; } break; case NV_10: case NV_11: case NV_17: case NV_20: case NV_30: case NV_40: case NV_44: case NV_50: default: if (dev_priv->flags & (NV_NFORCE | NV_NFORCE2)) { return nouveau_mem_fb_amount_igp(dev); } else { uint64_t mem; mem = (NV_READ(NV10_PFB_CSTATUS) & NV10_PFB_CSTATUS_RAM_AMOUNT_MB_MASK) >> NV10_PFB_CSTATUS_RAM_AMOUNT_MB_SHIFT; return mem*1024*1024; } break; } DRM_ERROR("Unable to detect video ram size. 
Please report your setup to " DRIVER_EMAIL "\n"); return 0; } static void nouveau_mem_reset_agp(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; uint32_t saved_pci_nv_1, saved_pci_nv_19, pmc_enable; saved_pci_nv_1 = NV_READ(NV04_PBUS_PCI_NV_1); saved_pci_nv_19 = NV_READ(NV04_PBUS_PCI_NV_19); /* clear busmaster bit */ NV_WRITE(NV04_PBUS_PCI_NV_1, saved_pci_nv_1 & ~0x4); /* clear SBA and AGP bits */ NV_WRITE(NV04_PBUS_PCI_NV_19, saved_pci_nv_19 & 0xfffff0ff); /* power cycle pgraph, if enabled */ pmc_enable = NV_READ(NV03_PMC_ENABLE); if (pmc_enable & NV_PMC_ENABLE_PGRAPH) { NV_WRITE(NV03_PMC_ENABLE, pmc_enable & ~NV_PMC_ENABLE_PGRAPH); NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) | NV_PMC_ENABLE_PGRAPH); } /* and restore (gives effect of resetting AGP) */ NV_WRITE(NV04_PBUS_PCI_NV_19, saved_pci_nv_19); NV_WRITE(NV04_PBUS_PCI_NV_1, saved_pci_nv_1); } static int nouveau_mem_init_agp(struct drm_device *dev, int ttm) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_agp_info info; struct drm_agp_mode mode; int ret; nouveau_mem_reset_agp(dev); ret = drm_agp_acquire(dev); if (ret) { DRM_ERROR("Unable to acquire AGP: %d\n", ret); return ret; } ret = drm_agp_info(dev, &info); if (ret) { DRM_ERROR("Unable to get AGP info: %d\n", ret); return ret; } /* see agp.h for the AGPSTAT_* modes available */ mode.mode = info.mode; ret = drm_agp_enable(dev, mode); if (ret) { DRM_ERROR("Unable to enable AGP: %d\n", ret); return ret; } if (!ttm) { struct drm_agp_buffer agp_req; struct drm_agp_binding bind_req; agp_req.size = info.aperture_size; agp_req.type = 0; ret = drm_agp_alloc(dev, &agp_req); if (ret) { DRM_ERROR("Unable to alloc AGP: %d\n", ret); return ret; } bind_req.handle = agp_req.handle; bind_req.offset = 0; ret = drm_agp_bind(dev, &bind_req); if (ret) { DRM_ERROR("Unable to bind AGP: %d\n", ret); return ret; } } dev_priv->gart_info.type = NOUVEAU_GART_AGP; dev_priv->gart_info.aper_base = info.aperture_base; dev_priv->gart_info.aper_size = info.aperture_size; return 0; } #define HACK_OLD_MM int nouveau_mem_init_ttm(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; uint32_t vram_size, bar1_size; int ret; dev_priv->agp_heap = dev_priv->pci_heap = dev_priv->fb_heap = NULL; dev_priv->fb_phys = drm_get_resource_start(dev,1); dev_priv->gart_info.type = NOUVEAU_GART_NONE; drm_bo_driver_init(dev); /* non-mappable vram */ dev_priv->fb_available_size = nouveau_mem_fb_amount(dev); dev_priv->fb_available_size -= dev_priv->ramin_rsvd_vram; vram_size = dev_priv->fb_available_size >> PAGE_SHIFT; bar1_size = drm_get_resource_len(dev, 1) >> PAGE_SHIFT; if (bar1_size < vram_size) { if ((ret = drm_bo_init_mm(dev, DRM_BO_MEM_PRIV0, bar1_size, vram_size - bar1_size, 1))) { DRM_ERROR("Failed PRIV0 mm init: %d\n", ret); return ret; } vram_size = bar1_size; } /* mappable vram */ #ifdef HACK_OLD_MM vram_size /= 4; #endif if ((ret = drm_bo_init_mm(dev, DRM_BO_MEM_VRAM, 0, vram_size, 1))) { DRM_ERROR("Failed VRAM mm init: %d\n", ret); return ret; } /* GART */ #if !defined(__powerpc__) && !defined(__ia64__) if (drm_device_is_agp(dev) && dev->agp) { if ((ret = nouveau_mem_init_agp(dev, 1))) DRM_ERROR("Error initialising AGP: %d\n", ret); } #endif if (dev_priv->gart_info.type == NOUVEAU_GART_NONE) { if ((ret = nouveau_sgdma_init(dev))) DRM_ERROR("Error initialising PCI SGDMA: %d\n", ret); } if ((ret = drm_bo_init_mm(dev, DRM_BO_MEM_TT, 0, dev_priv->gart_info.aper_size >> PAGE_SHIFT, 1))) { DRM_ERROR("Failed TT mm init: %d\n", ret); return 
ret; } #ifdef HACK_OLD_MM vram_size <<= PAGE_SHIFT; DRM_INFO("Old MM using %dKiB VRAM\n", (vram_size * 3) >> 10); if (nouveau_mem_init_heap(&dev_priv->fb_heap, vram_size, vram_size * 3)) return -ENOMEM; #endif return 0; } int nouveau_mem_init(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; uint32_t fb_size; int ret = 0; dev_priv->agp_heap = dev_priv->pci_heap = dev_priv->fb_heap = NULL; dev_priv->fb_phys = 0; dev_priv->gart_info.type = NOUVEAU_GART_NONE; if (dev_priv->flags & (NV_NFORCE | NV_NFORCE2)) nouveau_mem_check_nforce_dimms(dev); /* setup a mtrr over the FB */ dev_priv->fb_mtrr = drm_mtrr_add(drm_get_resource_start(dev, 1), nouveau_mem_fb_amount(dev), DRM_MTRR_WC); /* Init FB */ dev_priv->fb_phys=drm_get_resource_start(dev,1); fb_size = nouveau_mem_fb_amount(dev); /* On G80, limit VRAM to 512MiB temporarily due to limits in how * we handle VRAM page tables. */ if (dev_priv->card_type >= NV_50 && fb_size > (512 * 1024 * 1024)) fb_size = (512 * 1024 * 1024); fb_size -= dev_priv->ramin_rsvd_vram; dev_priv->fb_available_size = fb_size; DRM_DEBUG("Available VRAM: %dKiB\n", fb_size>>10); if (fb_size>256*1024*1024) { /* On cards with > 256Mb, you can't map everything. * So we create a second FB heap for that type of memory */ if (nouveau_mem_init_heap(&dev_priv->fb_heap, 0, 256*1024*1024)) return -ENOMEM; if (nouveau_mem_init_heap(&dev_priv->fb_nomap_heap, 256*1024*1024, fb_size-256*1024*1024)) return -ENOMEM; } else { if (nouveau_mem_init_heap(&dev_priv->fb_heap, 0, fb_size)) return -ENOMEM; dev_priv->fb_nomap_heap=NULL; } #if !defined(__powerpc__) && !defined(__ia64__) /* Init AGP / NV50 PCIEGART */ if (drm_device_is_agp(dev) && dev->agp) { if ((ret = nouveau_mem_init_agp(dev, 0))) DRM_ERROR("Error initialising AGP: %d\n", ret); } #endif /*Note: this is *not* just NV50 code, but only used on NV50 for now */ if (dev_priv->gart_info.type == NOUVEAU_GART_NONE && dev_priv->card_type >= NV_50) { ret = nouveau_sgdma_init(dev); if (!ret) { ret = nouveau_sgdma_nottm_hack_init(dev); if (ret) nouveau_sgdma_takedown(dev); } if (ret) DRM_ERROR("Error initialising SG DMA: %d\n", ret); } if (dev_priv->gart_info.type != NOUVEAU_GART_NONE) { if (nouveau_mem_init_heap(&dev_priv->agp_heap, 0, dev_priv->gart_info.aper_size)) { if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA) { nouveau_sgdma_nottm_hack_takedown(dev); nouveau_sgdma_takedown(dev); } } } /* NV04-NV40 PCIEGART */ if (!dev_priv->agp_heap && dev_priv->card_type < NV_50) { struct drm_scatter_gather sgreq; DRM_DEBUG("Allocating sg memory for PCI DMA\n"); sgreq.size = 16 << 20; //16MB of PCI scatter-gather zone if (drm_sg_alloc(dev, &sgreq)) { DRM_ERROR("Unable to allocate %ldMB of scatter-gather" " pages for PCI DMA!",sgreq.size>>20); } else { if (nouveau_mem_init_heap(&dev_priv->pci_heap, 0, dev->sg->pages * PAGE_SIZE)) { DRM_ERROR("Unable to initialize pci_heap!"); } } } /* G8x: Allocate shared page table to map real VRAM pages into */ if (dev_priv->card_type >= NV_50) { unsigned size = ((512 * 1024 * 1024) / 65536) * 8; ret = nouveau_gpuobj_new(dev, NULL, size, 0, NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ALLOW_NO_REFS, &dev_priv->vm_vram_pt); if (ret) { DRM_ERROR("Error creating VRAM page table: %d\n", ret); return ret; } } return 0; } struct mem_block * nouveau_mem_alloc(struct drm_device *dev, int alignment, uint64_t size, int flags, struct drm_file *file_priv) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct mem_block *block; int type, tail = !(flags & NOUVEAU_MEM_USER); /* * Make things 
easier on ourselves: all allocations are page-aligned. * We need that to map allocated regions into the user space */ if (alignment < PAGE_SIZE) alignment = PAGE_SIZE; /* Align allocation sizes to 64KiB blocks on G8x. We use a 64KiB * page size in the GPU VM. */ if (flags & NOUVEAU_MEM_FB && dev_priv->card_type >= NV_50) { size = (size + 65535) & ~65535; if (alignment < 65536) alignment = 65536; } /* Further down wants alignment in pages, not bytes */ alignment >>= PAGE_SHIFT; /* * Warn about 0 sized allocations, but let it go through. It'll return 1 page */ if (size == 0) DRM_INFO("warning : 0 byte allocation\n"); /* * Keep alloc size a multiple of the page size to keep drm_addmap() happy */ if (size & (~PAGE_MASK)) size = ((size/PAGE_SIZE) + 1) * PAGE_SIZE; #define NOUVEAU_MEM_ALLOC_AGP {\ type=NOUVEAU_MEM_AGP;\ block = nouveau_mem_alloc_block(dev_priv->agp_heap, size,\ alignment, file_priv, tail); \ if (block) goto alloc_ok;\ } #define NOUVEAU_MEM_ALLOC_PCI {\ type = NOUVEAU_MEM_PCI;\ block = nouveau_mem_alloc_block(dev_priv->pci_heap, size, \ alignment, file_priv, tail); \ if ( block ) goto alloc_ok;\ } #define NOUVEAU_MEM_ALLOC_FB {\ type=NOUVEAU_MEM_FB;\ if (!(flags&NOUVEAU_MEM_MAPPED)) {\ block = nouveau_mem_alloc_block(dev_priv->fb_nomap_heap,\ size, alignment, \ file_priv, tail); \ if (block) goto alloc_ok;\ }\ block = nouveau_mem_alloc_block(dev_priv->fb_heap, size,\ alignment, file_priv, tail);\ if (block) goto alloc_ok;\ } if (flags&NOUVEAU_MEM_FB) NOUVEAU_MEM_ALLOC_FB if (flags&NOUVEAU_MEM_AGP) NOUVEAU_MEM_ALLOC_AGP if (flags&NOUVEAU_MEM_PCI) NOUVEAU_MEM_ALLOC_PCI if (flags&NOUVEAU_MEM_FB_ACCEPTABLE) NOUVEAU_MEM_ALLOC_FB if (flags&NOUVEAU_MEM_AGP_ACCEPTABLE) NOUVEAU_MEM_ALLOC_AGP if (flags&NOUVEAU_MEM_PCI_ACCEPTABLE) NOUVEAU_MEM_ALLOC_PCI return NULL; alloc_ok: block->flags=type; /* On G8x, map memory into VM */ if (block->flags & NOUVEAU_MEM_FB && dev_priv->card_type >= NV_50 && !(flags & NOUVEAU_MEM_NOVM)) { struct nouveau_gpuobj *pt = dev_priv->vm_vram_pt; unsigned offset = block->start; unsigned count = block->size / 65536; unsigned tile = 0; if (!pt) { DRM_ERROR("vm alloc without vm pt\n"); nouveau_mem_free_block(block); return NULL; } /* The tiling stuff is *not* what NVIDIA does - but both the * 2D and 3D engines seem happy with this simpler method. * Should look into why NVIDIA do what they do at some point. 
*/ if (flags & NOUVEAU_MEM_TILE) { if (flags & NOUVEAU_MEM_TILE_ZETA) tile = 0x00002800; else tile = 0x00007000; } while (count--) { unsigned pte = offset / 65536; INSTANCE_WR(pt, (pte * 2) + 0, offset | 1); INSTANCE_WR(pt, (pte * 2) + 1, 0x00000000 | tile); offset += 65536; } } else { block->flags |= NOUVEAU_MEM_NOVM; } if (flags&NOUVEAU_MEM_MAPPED) { struct drm_map_list *entry; int ret = 0; block->flags|=NOUVEAU_MEM_MAPPED; if (type == NOUVEAU_MEM_AGP) { if (dev_priv->gart_info.type != NOUVEAU_GART_SGDMA) ret = drm_addmap(dev, block->start, block->size, _DRM_AGP, 0, &block->map); else ret = drm_addmap(dev, block->start, block->size, _DRM_SCATTER_GATHER, 0, &block->map); } else if (type == NOUVEAU_MEM_FB) ret = drm_addmap(dev, block->start + dev_priv->fb_phys, block->size, _DRM_FRAME_BUFFER, 0, &block->map); else if (type == NOUVEAU_MEM_PCI) ret = drm_addmap(dev, block->start, block->size, _DRM_SCATTER_GATHER, 0, &block->map); if (ret) { nouveau_mem_free_block(block); return NULL; } entry = drm_find_matching_map(dev, block->map); if (!entry) { nouveau_mem_free_block(block); return NULL; } block->map_handle = entry->user_token; } DRM_DEBUG("allocated %lld bytes at 0x%llx type=0x%08x\n", block->size, block->start, block->flags); return block; } void nouveau_mem_free(struct drm_device* dev, struct mem_block* block) { struct drm_nouveau_private *dev_priv = dev->dev_private; DRM_DEBUG("freeing 0x%llx type=0x%08x\n", block->start, block->flags); if (block->flags&NOUVEAU_MEM_MAPPED) drm_rmmap(dev, block->map); /* G8x: Remove pages from vm */ if (block->flags & NOUVEAU_MEM_FB && dev_priv->card_type >= NV_50 && !(block->flags & NOUVEAU_MEM_NOVM)) { struct nouveau_gpuobj *pt = dev_priv->vm_vram_pt; unsigned offset = block->start; unsigned count = block->size / 65536; if (!pt) { DRM_ERROR("vm free without vm pt\n"); goto out_free; } while (count--) { unsigned pte = offset / 65536; INSTANCE_WR(pt, (pte * 2) + 0, 0); INSTANCE_WR(pt, (pte * 2) + 1, 0); offset += 65536; } } out_free: nouveau_mem_free_block(block); } /* * Ioctls */ int nouveau_ioctl_mem_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_mem_alloc *alloc = data; struct mem_block *block; NOUVEAU_CHECK_INITIALISED_WITH_RETURN; if (alloc->flags & NOUVEAU_MEM_INTERNAL) return -EINVAL; block = nouveau_mem_alloc(dev, alloc->alignment, alloc->size, alloc->flags | NOUVEAU_MEM_USER, file_priv); if (!block) return -ENOMEM; alloc->map_handle=block->map_handle; alloc->offset=block->start; alloc->flags=block->flags; if (dev_priv->card_type >= NV_50 && alloc->flags & NOUVEAU_MEM_FB) alloc->offset += 512*1024*1024; return 0; } int nouveau_ioctl_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_mem_free *memfree = data; struct mem_block *block; NOUVEAU_CHECK_INITIALISED_WITH_RETURN; if (dev_priv->card_type >= NV_50 && memfree->flags & NOUVEAU_MEM_FB) memfree->offset -= 512*1024*1024; block=NULL; if (dev_priv->fb_heap && memfree->flags & NOUVEAU_MEM_FB) block = find_block(dev_priv->fb_heap, memfree->offset); else if (dev_priv->agp_heap && memfree->flags & NOUVEAU_MEM_AGP) block = find_block(dev_priv->agp_heap, memfree->offset); else if (dev_priv->pci_heap && memfree->flags & NOUVEAU_MEM_PCI) block = find_block(dev_priv->pci_heap, memfree->offset); if (!block) return -EFAULT; if (block->file_priv != file_priv) return -EPERM; nouveau_mem_free(dev, block); return 
0; } int nouveau_ioctl_mem_tile(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_mem_tile *memtile = data; struct mem_block *block = NULL; NOUVEAU_CHECK_INITIALISED_WITH_RETURN; if (dev_priv->card_type < NV_50) return -EINVAL; if (memtile->flags & NOUVEAU_MEM_FB) { memtile->offset -= 512*1024*1024; block = find_block(dev_priv->fb_heap, memtile->offset); } if (!block) return -EINVAL; if (block->file_priv != file_priv) return -EPERM; { struct nouveau_gpuobj *pt = dev_priv->vm_vram_pt; unsigned offset = block->start + memtile->delta; unsigned count = memtile->size / 65536; unsigned tile = 0; if (memtile->flags & NOUVEAU_MEM_TILE) { if (memtile->flags & NOUVEAU_MEM_TILE_ZETA) tile = 0x00002800; else tile = 0x00007000; } while (count--) { unsigned pte = offset / 65536; INSTANCE_WR(pt, (pte * 2) + 0, offset | 1); INSTANCE_WR(pt, (pte * 2) + 1, 0x00000000 | tile); offset += 65536; } } return 0; }