path: root/shared-core/nv40_fb.c
blob: ae784cb8b70ff7da3f8be46c3a278c5127803f43
#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_drm.h"

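/*
 * PFB (memory/framebuffer controller) setup for NV40-family chipsets.
 */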
int
nv40_fb_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t fb_bar_size, tmp;
	int num_tiles;
	int i;

	/*
	 * This is strictly an NV4x register (behaviour on NV5x is unknown).
	 * The binary driver sets it to all kinds of values, which interferes
	 * with our setup; a value of 0x52802 has been observed, and on some
	 * cards the blob even sets it back to 0x1.  The blob never reads this
	 * register back, so writing 0x1 here should be safe on all cards.
	 * What it actually controls is still unknown.
	 */
	NV_WRITE(NV40_PFB_UNK_800, 0x1);

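	/* Pick the number of tile regions to program for this chipset;
	 * NV40/NV45 also clear bit 15 of NV10_PFB_CLOSE_PAGE2. */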
	switch (dev_priv->chipset) {
	case 0x40:
	case 0x45:
		tmp = NV_READ(NV10_PFB_CLOSE_PAGE2);
		NV_WRITE(NV10_PFB_CLOSE_PAGE2, tmp & ~(1<<15));
		num_tiles = NV10_PFB_TILE__SIZE;
		break;
	case 0x46: /* G72 */
	case 0x47: /* G70 */
	case 0x49: /* G71 */
	case 0x4b: /* G73 */
	case 0x4c: /* C51 (G7X version) */
		num_tiles = NV40_PFB_TILE__SIZE_1;
		break;
	default:
		num_tiles = NV40_PFB_TILE__SIZE_0;
		break;
	}

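	/* Program every tile region: zero the tile setup and set the limit
	 * to fb_bar_size.  Chipset 0x40 uses the NV10 register offsets,
	 * everything else the NV40 ones. */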
	fb_bar_size = drm_get_resource_len(dev, 0) - 1;
	switch (dev_priv->chipset) {
	case 0x40:
		for (i = 0; i < num_tiles; i++) {
			NV_WRITE(NV10_PFB_TILE(i), 0);
			NV_WRITE(NV10_PFB_TLIMIT(i), fb_bar_size);
		}
		break;
	default:
		for (i = 0; i < num_tiles; i++) {
			NV_WRITE(NV40_PFB_TILE(i), 0);
			NV_WRITE(NV40_PFB_TLIMIT(i), fb_bar_size);
		}
		break;
	}

	return 0;
}

void
nv40_fb_takedown(struct drm_device *dev)
{
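	/* Nothing to tear down. */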
}
/* Allocate and initialize a drm_device_dma structure. */
int drm_dma_setup(drm_device_t * dev)
{
	int i;

	dev->dma = drm_alloc(sizeof(*dev->dma), DRM_MEM_DRIVER);
	if (!dev->dma)
		return -ENOMEM;

	memset(dev->dma, 0, sizeof(*dev->dma));

	for (i = 0; i <= DRM_MAX_ORDER; i++)
		memset(&dev->dma->bufs[i], 0, sizeof(dev->dma->bufs[0]));

	return 0;
}

/**
 * Cleanup the DMA resources.
 *
 * \param dev DRM device.
 *
 * Free all pages associated with DMA buffers, the buffers and pages lists, and
 * finally the drm_device::dma structure itself.
 */
void drm_dma_takedown(drm_device_t * dev)
{
	drm_device_dma_t *dma = dev->dma;
	int i, j;

	if (!dma)
		return;

	/* Clear dma buffers */
	for (i = 0; i <= DRM_MAX_ORDER; i++) {
		if (dma->bufs[i].seg_count) {
			DRM_DEBUG("order %d: buf_count = %d,"
				  " seg_count = %d\n",
				  i,
				  dma->bufs[i].buf_count,
				  dma->bufs[i].seg_count);
			for (j = 0; j < dma->bufs[i].seg_count; j++) {
				if (dma->bufs[i].seglist[j]) {
					drm_pci_free(dev, dma->bufs[i].seglist[j]);
				}
			}
			drm_free(dma->bufs[i].seglist,
				 dma->bufs[i].seg_count *
				 sizeof(*dma->bufs[0].seglist), DRM_MEM_SEGS);
		}
		if (dma->bufs[i].buf_count) {
			for (j = 0; j < dma->bufs[i].buf_count; j++) {
				if (dma->bufs[i].buflist[j].dev_private) {
					drm_free(dma->bufs[i].buflist[j].dev_private,
						 dma->bufs[i].buflist[j].dev_priv_size,
						 DRM_MEM_BUFS);
				}
			}
			drm_free(dma->bufs[i].buflist,
				 dma->bufs[i].buf_count *
				 sizeof(*dma->bufs[0].buflist), DRM_MEM_BUFS);
		}
	}

	if (dma->buflist) {
		drm_free(dma->buflist,
			 dma->buf_count * sizeof(*dma->buflist), DRM_MEM_BUFS);
	}

	if (dma->pagelist) {
		drm_free(dma->pagelist,
			 dma->page_count * sizeof(*dma->pagelist),
			 DRM_MEM_PAGES);
	}
	drm_free(dev->dma, sizeof(*dev->dma), DRM_MEM_DRIVER);
	dev->dma = NULL;
}

/**
 * Free a buffer.
 *
 * \param dev DRM device.
 * \param buf buffer to free.
 *
 * Resets the fields of \p buf.
 */
void drm_free_buffer(drm_device_t * dev, drm_buf_t * buf)
{
	if (!buf)
		return;

	buf->waiting = 0;
	buf->pending = 0;
	buf->filp = NULL;
	buf->used = 0;

	if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE)
	    && waitqueue_active(&buf->dma_wait)) {
		wake_up_interruptible(&buf->dma_wait);
	}
}

/**
 * Reclaim the buffers.
 *
 * \param filp file pointer.
 *
 * Frees each buffer associated with \p filp not already on the hardware.
 */
void drm_core_reclaim_buffers(drm_device_t *dev, struct file *filp)
{
	drm_device_dma_t *dma = dev->dma;
	int i;

	if (!dma)
		return;

	for (i = 0; i < dma->buf_count; i++) {
		if (dma->buflist[i]->filp == filp) {
			switch (dma->buflist[i]->list) {
			case DRM_LIST_NONE:
				drm_free_buffer(dev, dma->buflist[i]);
				break;
			case DRM_LIST_WAIT:
				dma->buflist[i]->list = DRM_LIST_RECLAIM;
				break;
			default:
				/* Buffer already on hardware. */
				break;
			}
		}
	}
}
EXPORT_SYMBOL(drm_core_reclaim_buffers);