/* * Copyright 2007 Nouveau Project * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #ifdef HAVE_CONFIG_H #include #endif #include #include #include #include #include #include #include "nouveau_private.h" int nouveau_bo_init(struct nouveau_device *dev) { return 0; } void nouveau_bo_takedown(struct nouveau_device *dev) { } static int nouveau_bo_info(struct nouveau_bo_priv *nvbo, struct drm_nouveau_gem_info *arg) { nvbo->handle = nvbo->base.handle = arg->handle; nvbo->domain = arg->domain; nvbo->size = arg->size; nvbo->offset = arg->offset; nvbo->map_handle = arg->map_handle; nvbo->base.tile_mode = arg->tile_mode; nvbo->base.tile_flags = arg->tile_flags; return 0; } static int nouveau_bo_allocated(struct nouveau_bo_priv *nvbo) { if (nvbo->sysmem || nvbo->handle || (nvbo->flags & NOUVEAU_BO_PIN)) return 1; return 0; } static int nouveau_bo_ualloc(struct nouveau_bo_priv *nvbo) { if (nvbo->user || nvbo->sysmem) { assert(nvbo->sysmem); return 0; } nvbo->sysmem = malloc(nvbo->size); if (!nvbo->sysmem) return -ENOMEM; return 0; } static void nouveau_bo_ufree(struct nouveau_bo_priv *nvbo) { if (nvbo->sysmem) { if (!nvbo->user) free(nvbo->sysmem); nvbo->sysmem = NULL; } } static void nouveau_bo_kfree(struct nouveau_bo_priv *nvbo) { struct nouveau_device_priv *nvdev = nouveau_device(nvbo->base.device); struct drm_gem_close req; if (!nvbo->handle) return; if (nvbo->map) { munmap(nvbo->map, nvbo->size); nvbo->map = NULL; } req.handle = nvbo->handle; nvbo->handle = 0; ioctl(nvdev->fd, DRM_IOCTL_GEM_CLOSE, &req); } static int nouveau_bo_kalloc(struct nouveau_bo_priv *nvbo, struct nouveau_channel *chan) { struct nouveau_device_priv *nvdev = nouveau_device(nvbo->base.device); struct drm_nouveau_gem_new req; struct drm_nouveau_gem_info *info = &req.info; int ret; if (nvbo->handle || (nvbo->flags & NOUVEAU_BO_PIN)) return 0; req.channel_hint = chan ? 
chan->id : 0; req.align = nvbo->align; info->size = nvbo->size; info->domain = 0; if (nvbo->flags & NOUVEAU_BO_VRAM) info->domain |= NOUVEAU_GEM_DOMAIN_VRAM; if (nvbo->flags & NOUVEAU_BO_GART) info->domain |= NOUVEAU_GEM_DOMAIN_GART; if (!info->domain) { info->domain |= (NOUVEAU_GEM_DOMAIN_VRAM | NOUVEAU_GEM_DOMAIN_GART); } if (nvbo->flags & NOUVEAU_BO_MAP) info->domain |= NOUVEAU_GEM_DOMAIN_MAPPABLE; info->tile_mode = nvbo->base.tile_mode; info->tile_flags = nvbo->base.tile_flags; ret = drmCommandWriteRead(nvdev->fd, DRM_NOUVEAU_GEM_NEW, &req, sizeof(req)); if (ret) return ret; nouveau_bo_info(nvbo, &req.info); return 0; } static int nouveau_bo_kmap(struct nouveau_bo_priv *nvbo) { struct nouveau_device_priv *nvdev = nouveau_device(nvbo->base.device); if (nvbo->map) return 0; if (!nvbo->map_handle) return -EINVAL; nvbo->map = mmap(0, nvbo->size, PROT_READ | PROT_WRITE, MAP_SHARED, nvdev->fd, nvbo->map_handle); if (nvbo->map == MAP_FAILED) { nvbo->map = NULL; return -errno; } return 0; } int nouveau_bo_new_tile(struct nouveau_device *dev, uint32_t flags, int align, int size, uint32_t tile_mode, uint32_t tile_flags, struct nouveau_bo **bo) { struct nouveau_bo_priv *nvbo; int ret; if (!dev || !bo || *bo) return -EINVAL; nvbo = calloc(1, sizeof(struct nouveau_bo_priv)); if (!nvbo) return -ENOMEM; nvbo->base.device = dev; nvbo->base.size = size; nvbo->base.tile_mode = tile_mode; nvbo->base.tile_flags = tile_flags; nvbo->refcount = 1; /* Don't set NOUVEAU_BO_PIN here, or nouveau_bo_allocated() will * decided the buffer's already allocated when it's not. The * call to nouveau_bo_pin() later will set this flag. 
*/ nvbo->flags = (flags & ~NOUVEAU_BO_PIN); nvbo->size = size; nvbo->align = align; if (flags & NOUVEAU_BO_PIN) { ret = nouveau_bo_pin((void *)nvbo, nvbo->flags); if (ret) { nouveau_bo_ref(NULL, (void *)nvbo); return ret; } } *bo = &nvbo->base; return 0; } int nouveau_bo_new(struct nouveau_device *dev, uint32_t flags, int align, int size, struct nouveau_bo **bo) { uint32_t tile_flags = 0; if (flags & NOUVEAU_BO_TILED) { if (flags & NOUVEAU_BO_ZTILE) tile_flags = 0x2800; else tile_flags = 0x7000; } return nouveau_bo_new_tile(dev, flags, align, size, 0, tile_flags, bo); } int nouveau_bo_user(struct nouveau_device *dev, void *ptr, int size, struct nouveau_bo **bo) { struct nouveau_bo_priv *nvbo; int ret; ret = nouveau_bo_new(dev, NOUVEAU_BO_MAP, 0, size, bo); if (ret) return ret; nvbo = nouveau_bo(*bo); nvbo->sysmem = ptr; nvbo->user = 1; return 0; } int nouveau_bo_wrap(struct nouveau_device *dev, uint32_t handle, struct nouveau_bo **bo) { struct nouveau_device_priv *nvdev = nouveau_device(dev); struct drm_nouveau_gem_info req; struct nouveau_bo_priv *nvbo; int ret; ret = nouveau_bo_new(dev, 0, 0, 0, bo); if (ret) return ret; nvbo = nouveau_bo(*bo); req.handle = handle; ret = drmCommandWriteRead(nvdev->fd, DRM_NOUVEAU_GEM_INFO, &req, sizeof(req)); if (ret) { nouveau_bo_ref(NULL, bo); return ret; } nouveau_bo_info(nvbo, &req); nvbo->base.size = nvbo->size; return 0; } int nouveau_bo_handle_get(struct nouveau_bo *bo, uint32_t *handle) { struct nouveau_device_priv *nvdev = nouveau_device(bo->device); struct nouveau_bo_priv *nvbo = nouveau_bo(bo); int ret; if (!bo || !handle) return -EINVAL; if (!nvbo->global_handle) { struct drm_gem_flink req; ret = nouveau_bo_kalloc(nvbo, NULL); if (ret) return ret; req.handle = nvbo->handle; ret = ioctl(nvdev->fd, DRM_IOCTL_GEM_FLINK, &req); if (ret) { nouveau_bo_kfree(nvbo); return ret; } nvbo->global_handle = req.name; } *handle = nvbo->global_handle; return 0; } int nouveau_bo_handle_ref(struct nouveau_device *dev, uint32_t handle, 
struct nouveau_bo **bo) { struct nouveau_device_priv *nvdev = nouveau_device(dev); struct nouveau_bo_priv *nvbo; struct drm_gem_open req; int ret; req.name = handle; ret = ioctl(nvdev->fd, DRM_IOCTL_GEM_OPEN, &req); if (ret) { nouveau_bo_ref(NULL, bo); return ret; } ret = nouveau_bo_wrap(dev, req.handle, bo); if (ret) { nouveau_bo_ref(NULL, bo); return ret; } nvbo = nouveau_bo(*bo); nvbo->base.handle = nvbo->handle; return 0; } static void nouveau_bo_del(struct nouveau_bo **bo) { struct nouveau_bo_priv *nvbo; if (!bo || !*bo) return; nvbo = nouveau_bo(*bo); *bo = NULL; if (--nvbo->refcount) return; if (nvbo->pending) { nvbo->pending = NULL; nouveau_pushbuf_flush(nvbo->pending_channel, 0); } nouveau_bo_ufree(nvbo); nouveau_bo_kfree(nvbo); free(nvbo); } int nouveau_bo_ref(struct nouveau_bo *ref, struct nouveau_bo **pbo) { if (!pbo) return -EINVAL; if (ref) nouveau_bo(ref)->refcount++; if (*pbo) nouveau_bo_del(pbo); *pbo = ref; return 0; } static int nouveau_bo_wait(struct nouveau_bo *bo, int cpu_write, int no_wait, int no_block) { struct nouveau_device_priv *nvdev = nouveau_device(bo->device); struct nouveau_bo_priv *nvbo = nouveau_bo(bo); struct drm_nouveau_gem_cpu_prep req; int ret; if (!nvbo->global_handle && !nvbo->write_marker && !cpu_write) return 0; if (nvbo->pending && (nvbo->pending->write_domains || cpu_write)) { nvbo->pending = NULL; nouveau_pushbuf_flush(nvbo->pending_channel, 0); } req.handle = nvbo->handle; req.flags = 0; if (cpu_write) req.flags |= NOUVEAU_GEM_CPU_PREP_WRITE; if (no_wait) req.flags |= NOUVEAU_GEM_CPU_PREP_NOWAIT; if (no_block) req.flags |= NOUVEAU_GEM_CPU_PREP_NOBLOCK; do { ret = drmCommandWrite(nvdev->fd, DRM_NOUVEAU_GEM_CPU_PREP, &req, sizeof(req)); } while (ret == -EAGAIN); if (ret) return ret; if (ret == 0) nvbo->write_marker = 0; return 0; } int nouveau_bo_map_range(struct nouveau_bo *bo, uint32_t delta, uint32_t size, uint32_t flags) { struct nouveau_bo_priv *nvbo = nouveau_bo(bo); int ret; if (!nvbo || bo->map) return -EINVAL; 
if (!nouveau_bo_allocated(nvbo)) { if (nvbo->flags & (NOUVEAU_BO_VRAM | NOUVEAU_BO_GART)) { ret = nouveau_bo_kalloc(nvbo, NULL); if (ret) return ret; } if (!nouveau_bo_allocated(nvbo)) { ret = nouveau_bo_ualloc(nvbo); if (ret) return ret; } } if (nvbo->sysmem) { bo->map = (char *)nvbo->sysmem + delta; } else { ret = nouveau_bo_kmap(nvbo); if (ret) return ret; if (!(flags & NOUVEAU_BO_NOSYNC)) { ret = nouveau_bo_wait(bo, (flags & NOUVEAU_BO_WR), (flags & NOUVEAU_BO_NOWAIT), 0); if (ret) return ret; } bo->map = (char *)nvbo->map + delta; } return 0; } void nouveau_bo_map_flush(struct nouveau_bo *bo, uint32_t delta, uint32_t size) { } int nouveau_bo_map(struct nouveau_bo *bo, uint32_t flags) { return nouveau_bo_map_range(bo, 0, bo->size, flags); } void nouveau_bo_unmap(struct nouveau_bo *bo) { struct nouveau_bo_priv *nvbo = nouveau_bo(bo); if (bo->map && !nvbo->sysmem) { struct nouveau_device_priv *nvdev = nouveau_device(bo->device); struct drm_nouveau_gem_cpu_fini req; req.handle = nvbo->handle; drmCommandWrite(nvdev->fd, DRM_NOUVEAU_GEM_CPU_FINI, &req, sizeof(req)); } bo->map = NULL; } int nouveau_bo_pin(struct nouveau_bo *bo, uint32_t flags) { struct nouveau_device_priv *nvdev = nouveau_device(bo->device); struct nouveau_bo_priv *nvbo = nouveau_bo(bo); struct drm_nouveau_gem_pin req; int ret; if (nvbo->pinned) return 0; /* Ensure we have a kernel object... 
*/ if (!nvbo->flags) { if (!(flags & (NOUVEAU_BO_VRAM | NOUVEAU_BO_GART))) return -EINVAL; nvbo->flags = flags; } if (!nvbo->handle) { ret = nouveau_bo_kalloc(nvbo, NULL); if (ret) return ret; } /* Now force it to stay put :) */ req.handle = nvbo->handle; req.domain = 0; if (nvbo->flags & NOUVEAU_BO_VRAM) req.domain |= NOUVEAU_GEM_DOMAIN_VRAM; if (nvbo->flags & NOUVEAU_BO_GART) req.domain |= NOUVEAU_GEM_DOMAIN_GART; ret = drmCommandWriteRead(nvdev->fd, DRM_NOUVEAU_GEM_PIN, &req, sizeof(struct drm_nouveau_gem_pin)); if (ret) return ret; nvbo->offset = req.offset; nvbo->domain = req.domain; nvbo->pinned = 1; nvbo->flags |= NOUVEAU_BO_PIN; /* Fill in public nouveau_bo members */ if (nvbo->domain & NOUVEAU_GEM_DOMAIN_VRAM) bo->flags = NOUVEAU_BO_VRAM; if (nvbo->domain & NOUVEAU_GEM_DOMAIN_GART) bo->flags = NOUVEAU_BO_GART; bo->offset = nvbo->offset; return 0; } void nouveau_bo_unpin(struct nouveau_bo *bo) { struct nouveau_device_priv *nvdev = nouveau_device(bo->device); struct nouveau_bo_priv *nvbo = nouveau_bo(bo); struct drm_nouveau_gem_unpin req; if (!nvbo->pinned) return; req.handle = nvbo->handle; drmCommandWrite(nvdev->fd, DRM_NOUVEAU_GEM_UNPIN, &req, sizeof(req)); nvbo->pinned = bo->offset = bo->flags = 0; } int nouveau_bo_busy(struct nouveau_bo *bo, uint32_t access) { return nouveau_bo_wait(bo, (access & NOUVEAU_BO_WR), 1, 1); } uint32_t nouveau_bo_pending(struct nouveau_bo *bo) { struct nouveau_bo_priv *nvbo = nouveau_bo(bo); uint32_t flags; if (!nvbo->pending) return 0; flags = 0; if (nvbo->pending->read_domains) flags |= NOUVEAU_BO_RD; if (nvbo->pending->write_domains) flags |= NOUVEAU_BO_WR; return flags; } struct drm_nouveau_gem_pushbuf_bo * nouveau_bo_emit_buffer(struct nouveau_channel *chan, struct nouveau_bo *bo) { struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(chan->pushbuf); struct nouveau_bo_priv *nvbo = nouveau_bo(bo); struct drm_nouveau_gem_pushbuf_bo *pbbo; struct nouveau_bo *ref = NULL; int ret; if (nvbo->pending) return nvbo->pending; if 
(!nvbo->handle) { ret = nouveau_bo_kalloc(nvbo, chan); if (ret) return NULL; if (nvbo->sysmem) { void *sysmem_tmp = nvbo->sysmem; nvbo->sysmem = NULL; ret = nouveau_bo_map(bo, NOUVEAU_BO_WR); if (ret) return NULL; nvbo->sysmem = sysmem_tmp; memcpy(bo->map, nvbo->sysmem, nvbo->base.size); nouveau_bo_ufree(nvbo); nouveau_bo_unmap(bo); } } if (nvpb->nr_buffers >= NOUVEAU_GEM_MAX_BUFFERS) return NULL; pbbo = nvpb->buffers + nvpb->nr_buffers++; nvbo->pending = pbbo; nvbo->pending_channel = chan; nvbo->pending_refcnt = 0; nouveau_bo_ref(bo, &ref); pbbo->user_priv = (uint64_t)(unsigned long)ref; pbbo->handle = nvbo->handle; pbbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM | NOUVEAU_GEM_DOMAIN_GART; pbbo->read_domains = 0; pbbo->write_domains = 0; pbbo->presumed_domain = nvbo->domain; pbbo->presumed_offset = nvbo->offset; pbbo->presumed_ok = 1; return pbbo; } /a> 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418
/* sis_mm.c -- Private header for Direct Rendering Manager -*- linux-c -*-
 * Created: Mon Jan  4 10:05:05 1999 by sclin@sis.com.tw
 *
 * Copyright 2000 Silicon Integrated Systems Corp, Inc., HsinChu, Taiwan.
 * All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Sung-Ching Lin <sclin@sis.com.tw>
 *
 */

#if defined(__linux__) && defined(CONFIG_FB_SIS)
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
#include <video/sisfb.h>
#else
#include <linux/sisfb.h>
#endif
#endif
#include "drmP.h"
#include "sis_drm.h"
#include "sis_drv.h"
#include "sis_ds.h"

#define MAX_CONTEXT 100
#define VIDEO_TYPE 0
#define AGP_TYPE 1

/* Per-DRM-context bookkeeping: each active context gets a slot holding
 * one allocation set per memory type, so everything the context
 * allocated can be reclaimed in sis_final_context(). */
typedef struct {
	int used;
	int context;
	set_t *sets[2];		/* 0 for video, 1 for AGP */
} sis_context_t;

/* Fixed-size table of context slots; linear-searched by context id. */
static sis_context_t global_ppriv[MAX_CONTEXT];

/*
 * Record an allocation handle in the per-context set for the given
 * memory type (VIDEO_TYPE or AGP_TYPE).  Returns the setAdd() result,
 * or 0 when no slot matches the context.
 */
static int add_alloc_set(int context, int type, unsigned int val)
{
	int slot;

	for (slot = 0; slot < MAX_CONTEXT; slot++) {
		if (!global_ppriv[slot].used)
			continue;
		if (global_ppriv[slot].context != context)
			continue;
		return setAdd(global_ppriv[slot].sets[type], val);
	}

	return 0;
}

/*
 * Remove an allocation handle from the per-context set for the given
 * memory type.  Returns the setDel() result, or 0 when no slot matches
 * the context.
 */
static int del_alloc_set(int context, int type, unsigned int val)
{
	int slot;

	for (slot = 0; slot < MAX_CONTEXT; slot++) {
		if (!global_ppriv[slot].used)
			continue;
		if (global_ppriv[slot].context != context)
			continue;
		return setDel(global_ppriv[slot].sets[type], val);
	}

	return 0;
}

/* fb management via fb device */
#if defined(__linux__) && defined(CONFIG_FB_SIS)

/* With the sisfb kernel framebuffer driver managing video RAM, there is
 * no DRM-side heap to set up, so FB init is a no-op. */
static int sis_fb_init(DRM_IOCTL_ARGS)
{
	return 0;
}

/* DRM_SIS_FB_ALLOC (sisfb variant): allocate video RAM through the
 * sisfb kernel driver and record the allocation in the caller's
 * per-context set so it can be reclaimed when the context dies. */
static int sis_fb_alloc(DRM_IOCTL_ARGS)
{
	drm_sis_mem_t fb;
	struct sis_memreq req;
	drm_sis_mem_t __user *argp = (drm_sis_mem_t __user *)data;
	int retval = 0;

	DRM_COPY_FROM_USER_IOCTL(fb, argp, sizeof(fb));

	req.size = fb.size;
	sis_malloc(&req);	/* sisfb fills req.offset; 0 means failure */
	if (req.offset) {
		/* TODO */
		fb.offset = req.offset;
		/* "free" doubles as the handle userspace hands back to
		 * sis_fb_free(); here it is the raw offset. */
		fb.free = req.offset;
		if (!add_alloc_set(fb.context, VIDEO_TYPE, fb.free)) {
			DRM_DEBUG("adding to allocation set fails\n");
			sis_free(req.offset);
			retval = DRM_ERR(EINVAL);
		}
	} else {
		/* Allocation failed: report a zeroed block to userspace. */
		fb.offset = 0;
		fb.size = 0;
		fb.free = 0;
	}

	DRM_COPY_TO_USER_IOCTL(argp, fb, sizeof(fb));

	DRM_DEBUG("alloc fb, size = %d, offset = %ld\n", fb.size, req.offset);

	return retval;
}

/* DRM_SIS_FB_FREE (sisfb variant): release a video RAM allocation made
 * by sis_fb_alloc.
 * NOTE(review): sis_free() runs even when the handle was not found in
 * the context's set (only the return value reports the failure) —
 * confirm this unconditional free is intended. */
static int sis_fb_free(DRM_IOCTL_ARGS)
{
	drm_sis_mem_t fb;
	int retval = 0;

	DRM_COPY_FROM_USER_IOCTL(fb, (drm_sis_mem_t __user *) data, sizeof(fb));

	if (!fb.free)
		return DRM_ERR(EINVAL);

	if (!del_alloc_set(fb.context, VIDEO_TYPE, fb.free))
		retval = DRM_ERR(EINVAL);
	sis_free(fb.free);

	DRM_DEBUG("free fb, offset = 0x%lx\n", fb.free);

	return retval;
}

#else

/* Called by the X Server to initialize the FB heap.  Allocations will fail
 * unless this is called.  Offset is the beginning of the heap from the
 * framebuffer offset (MaxXFBMem in XFree86).
 *
 * Memory layout according to Thomas Winischofer:
 * |------------------|DDDDDDDDDDDDDDDDDDDDDDDDDDDDD|HHHH|CCCCCCCCCCC|
 *
 *    X driver/sisfb                                  HW-   Command-
 *  framebuffer memory           DRI heap           Cursor   queue
 */
/* DRM_SIS_FB_INIT: called by the X server to create the DRM-managed
 * video RAM heap.  Allocations fail until this runs.  Allocates the
 * device-private structure on first use.
 * Fix: return DRM_ERR(ENOMEM) instead of bare (positive) ENOMEM, for
 * consistency with every other error path in this file. */
static int sis_fb_init(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_sis_private_t *dev_priv = dev->dev_private;
	drm_sis_fb_t fb;

	DRM_COPY_FROM_USER_IOCTL(fb, (drm_sis_fb_t __user *) data, sizeof(fb));

	if (dev_priv == NULL) {
		dev->dev_private = drm_calloc(1, sizeof(drm_sis_private_t),
					      DRM_MEM_DRIVER);
		dev_priv = dev->dev_private;
		if (dev_priv == NULL)
			return DRM_ERR(ENOMEM);
	}

	/* Refuse to re-initialize an existing heap. */
	if (dev_priv->FBHeap != NULL)
		return DRM_ERR(EINVAL);

	dev_priv->FBHeap = mmInit(fb.offset, fb.size);

	DRM_DEBUG("offset = %u, size = %u\n", fb.offset, fb.size);

	return 0;
}

/* DRM_SIS_FB_ALLOC (DRM-managed heap variant): carve a block out of the
 * FB heap created by sis_fb_init and track it in the caller's context
 * set. */
static int sis_fb_alloc(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_sis_private_t *dev_priv = dev->dev_private;
	drm_sis_mem_t __user *argp = (drm_sis_mem_t __user *)data;
	drm_sis_mem_t fb;
	PMemBlock block;
	int retval = 0;

	if (dev_priv == NULL || dev_priv->FBHeap == NULL)
		return DRM_ERR(EINVAL);

	DRM_COPY_FROM_USER_IOCTL(fb, argp, sizeof(fb));

	block = mmAllocMem(dev_priv->FBHeap, fb.size, 0, 0);
	if (block) {
		/* TODO */
		fb.offset = block->ofs;
		/* The raw block pointer is handed to userspace as the
		 * "free" handle and passed back verbatim to sis_fb_free(). */
		fb.free = (unsigned long)block;
		if (!add_alloc_set(fb.context, VIDEO_TYPE, fb.free)) {
			DRM_DEBUG("adding to allocation set fails\n");
			mmFreeMem((PMemBlock) fb.free);
			retval = DRM_ERR(EINVAL);
		}
	} else {
		/* Heap exhausted: report a zeroed block to userspace. */
		fb.offset = 0;
		fb.size = 0;
		fb.free = 0;
	}

	DRM_COPY_TO_USER_IOCTL(argp, fb, sizeof(fb));

	DRM_DEBUG("alloc fb, size = %d, offset = %d\n", fb.size, fb.offset);

	return retval;
}

/* DRM_SIS_FB_FREE (DRM-managed heap variant): return a block to the FB
 * heap.  fb.free is a block pointer that originated in sis_fb_alloc;
 * it is validated only by mmBlockInHeap() and membership in the
 * context's allocation set before being freed. */
static int sis_fb_free(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_sis_private_t *dev_priv = dev->dev_private;
	drm_sis_mem_t fb;

	if (dev_priv == NULL || dev_priv->FBHeap == NULL)
		return DRM_ERR(EINVAL);

	DRM_COPY_FROM_USER_IOCTL(fb, (drm_sis_mem_t __user *) data, sizeof(fb));

	if (!mmBlockInHeap(dev_priv->FBHeap, (PMemBlock) fb.free))
		return DRM_ERR(EINVAL);

	/* Untrack before freeing; unknown handles are rejected here. */
	if (!del_alloc_set(fb.context, VIDEO_TYPE, fb.free))
		return DRM_ERR(EINVAL);
	mmFreeMem((PMemBlock) fb.free);

	DRM_DEBUG("free fb, free = 0x%lx\n", fb.free);

	return 0;
}

#endif

/* agp memory management */

/* DRM_SIS_AGP_INIT: create the DRM-managed AGP heap.  Allocates the
 * device-private structure on first use.
 * Fix: return DRM_ERR(ENOMEM) instead of bare (positive) ENOMEM, for
 * consistency with every other error path in this file. */
static int sis_ioctl_agp_init(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_sis_private_t *dev_priv = dev->dev_private;
	drm_sis_agp_t agp;

	if (dev_priv == NULL) {
		dev->dev_private = drm_calloc(1, sizeof(drm_sis_private_t),
					      DRM_MEM_DRIVER);
		dev_priv = dev->dev_private;
		if (dev_priv == NULL)
			return DRM_ERR(ENOMEM);
	}

	/* Refuse to re-initialize an existing heap. */
	if (dev_priv->AGPHeap != NULL)
		return DRM_ERR(EINVAL);

	DRM_COPY_FROM_USER_IOCTL(agp, (drm_sis_agp_t __user *) data,
				 sizeof(agp));

	dev_priv->AGPHeap = mmInit(agp.offset, agp.size);

	DRM_DEBUG("offset = %u, size = %u\n", agp.offset, agp.size);

	return 0;
}

/* DRM_SIS_AGP_ALLOC: carve a block out of the AGP heap and track it in
 * the caller's context set.
 * Fix: on set-tracking failure return DRM_ERR(EINVAL) instead of the
 * raw -1 (which an ioctl caller would see as -EPERM), matching
 * sis_fb_alloc(). */
static int sis_ioctl_agp_alloc(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_sis_private_t *dev_priv = dev->dev_private;
	drm_sis_mem_t __user *argp = (drm_sis_mem_t __user *)data;
	drm_sis_mem_t agp;
	PMemBlock block;
	int retval = 0;

	if (dev_priv == NULL || dev_priv->AGPHeap == NULL)
		return DRM_ERR(EINVAL);

	DRM_COPY_FROM_USER_IOCTL(agp, argp, sizeof(agp));

	block = mmAllocMem(dev_priv->AGPHeap, agp.size, 0, 0);
	if (block) {
		/* TODO */
		agp.offset = block->ofs;
		/* The raw block pointer is handed to userspace as the
		 * "free" handle, passed back to sis_ioctl_agp_free(). */
		agp.free = (unsigned long)block;
		if (!add_alloc_set(agp.context, AGP_TYPE, agp.free)) {
			DRM_DEBUG("adding to allocation set fails\n");
			mmFreeMem((PMemBlock) agp.free);
			retval = DRM_ERR(EINVAL);
		}
	} else {
		/* Heap exhausted: report a zeroed block to userspace. */
		agp.offset = 0;
		agp.size = 0;
		agp.free = 0;
	}

	DRM_COPY_TO_USER_IOCTL(argp, agp, sizeof(agp));

	DRM_DEBUG("alloc agp, size = %d, offset = %d\n", agp.size, agp.offset);

	return retval;
}

/* DRM_SIS_AGP_FREE: return a block to the AGP heap.
 * Fix: validate/untrack the handle via del_alloc_set() BEFORE freeing
 * it, matching sis_fb_free().  The original freed first and then
 * returned EINVAL for untracked handles, so an unknown-but-in-heap
 * handle was freed anyway. */
static int sis_ioctl_agp_free(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_sis_private_t *dev_priv = dev->dev_private;
	drm_sis_mem_t agp;

	if (dev_priv == NULL || dev_priv->AGPHeap == NULL)
		return DRM_ERR(EINVAL);

	DRM_COPY_FROM_USER_IOCTL(agp, (drm_sis_mem_t __user *) data,
				 sizeof(agp));

	if (!mmBlockInHeap(dev_priv->AGPHeap, (PMemBlock) agp.free))
		return DRM_ERR(EINVAL);

	/* Untrack before freeing; unknown handles are rejected here. */
	if (!del_alloc_set(agp.context, AGP_TYPE, agp.free))
		return DRM_ERR(EINVAL);
	mmFreeMem((PMemBlock) agp.free);

	DRM_DEBUG("free agp, free = 0x%lx\n", agp.free);

	return 0;
}

/* Context-creation hook: claim (or reuse) a global_ppriv slot for this
 * context and create its two allocation sets.  Returns 1 on success,
 * 0 when the table is full or a setInit() failed.
 * NOTE(review): if only one setInit() succeeds, the slot stays marked
 * used with a NULL set — confirm callers treat 0 as fatal. */
int sis_init_context(struct drm_device *dev, int context)
{
	int i;

	/* Already have a slot for this context? */
	for (i = 0; i < MAX_CONTEXT; i++) {
		if (global_ppriv[i].used &&
		    (global_ppriv[i].context == context))
			break;
	}

	if (i >= MAX_CONTEXT) {
		/* No: claim the first unused slot. */
		for (i = 0; i < MAX_CONTEXT; i++) {
			if (!global_ppriv[i].used) {
				global_ppriv[i].context = context;
				global_ppriv[i].used = 1;
				global_ppriv[i].sets[0] = setInit();
				global_ppriv[i].sets[1] = setInit();
				DRM_DEBUG("init allocation set, socket=%d, "
					  "context = %d\n", i, context);
				break;
			}
		}
		if ((i >= MAX_CONTEXT) || (global_ppriv[i].sets[0] == NULL) ||
		    (global_ppriv[i].sets[1] == NULL)) {
			return 0;
		}
	}

	return 1;
}

/* Context-destruction hook: free every video and AGP allocation the
 * context still holds, destroy its sets, and release the slot.
 * Always returns 1 (no match is not an error). */
int sis_final_context(struct drm_device *dev, int context)
{
	int i;

	for (i = 0; i < MAX_CONTEXT; i++) {
		if (global_ppriv[i].used &&
		    (global_ppriv[i].context == context))
			break;
	}

	if (i < MAX_CONTEXT) {
		set_t *set;
		ITEM_TYPE item;
		int retval;

		DRM_DEBUG("find socket %d, context = %d\n", i, context);

		/* Video Memory */
		set = global_ppriv[i].sets[0];
		retval = setFirst(set, &item);
		while (retval) {
			DRM_DEBUG("free video memory 0x%lx\n", item);
			/* Items are sisfb offsets or heap block pointers,
			 * depending on which allocator built this file. */
#if defined(__linux__) && defined(CONFIG_FB_SIS)
			sis_free(item);
#else
			mmFreeMem((PMemBlock) item);
#endif
			retval = setNext(set, &item);
		}
		setDestroy(set);

		/* AGP Memory */
		set = global_ppriv[i].sets[1];
		retval = setFirst(set, &item);
		while (retval) {
			DRM_DEBUG("free agp memory 0x%lx\n", item);
			mmFreeMem((PMemBlock) item);
			retval = setNext(set, &item);
		}
		setDestroy(set);

		global_ppriv[i].used = 0;
	}

	return 1;
}

/* Ioctl dispatch table, indexed by DRM_IOCTL_NR.  Heap-init calls are
 * restricted to the authenticated DRM master running as root; plain
 * alloc/free only require authentication. */
drm_ioctl_desc_t sis_ioctls[] = {
	[DRM_IOCTL_NR(DRM_SIS_FB_ALLOC)] = {sis_fb_alloc, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_SIS_FB_FREE)] = {sis_fb_free, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_SIS_AGP_INIT)] = {sis_ioctl_agp_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
	[DRM_IOCTL_NR(DRM_SIS_AGP_ALLOC)] = {sis_ioctl_agp_alloc, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_SIS_AGP_FREE)] = {sis_ioctl_agp_free, DRM_AUTH},
	[DRM_IOCTL_NR(DRM_SIS_FB_INIT)] = {sis_fb_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}
};

int sis_max_ioctl = DRM_ARRAY_SIZE(sis_ioctls);