summaryrefslogtreecommitdiff
path: root/shared-core/nv04_instmem.c
AgeCommit message (Collapse)Author
2007-11-14nouveau: funcs to determine active channel on PFIFO.Ben Skeggs
2007-11-05drm: remove lots of spurious whitespace.Dave Airlie
Kernel "cleanfile" script run.
2007-08-31nouveau: give nv03 the last cut.Stephane Marchesin
2007-08-15nouveau: Workaround mysterious PRAMIN clobbering by the card.Ben Skeggs
2007-08-15nouveau: Catch all NV4x chips instead of just NV_40.Ben Skeggs
2007-08-06nouveau: Give DRM its own gpu channelBen Skeggs
If your card doesn't have working context switching, it is now broken.
2007-08-06nouveau: Various internal and external API changesBen Skeggs
1. DRM_NOUVEAU_GPUOBJ_FREE Used to free GPU objects. The obvious usage case is for Gr objects, but notifiers can also be destroyed in the same way. GPU objects gain a destructor method and private data fields with this change, so other specialised cases (like notifiers) can be implemented on top of gpuobjs. 2. DRM_NOUVEAU_CHANNEL_FREE 3. DRM_NOUVEAU_CARD_INIT Ideally we'd do init during module load, but this isn't currently possible. Doing init during firstopen() is bad as X has a love of opening/closing the DRM many times during startup. Once the modesetting-101 branch is merged this can go away. IRQs are enabled in nouveau_card_init() now, rather than having the X server call drmCtlInstHandler(). We'll need this for when we give the kernel module its own channel. 4. DRM_NOUVEAU_GETPARAM Add CHIPSET_ID value, which will return the chipset id derived from NV_PMC_BOOT_0. 4. Use list_* in a few places, rather than home-brewed stuff.
2007-07-20Remove DRM_ERR OS macro.Eric Anholt
This was used to make all ioctl handlers return -errno on linux and errno on *BSD. Instead, just return -errno in shared code, and flip sign on return from shared code to *BSD code.
2007-07-14nouveau: nv10 and nv11/15 are differentPatrice Mandin
2007-07-13nouveau: nuke internal typedefs, and drm_device_t use.Ben Skeggs
2007-07-09nouveau/nv50: Initial channel/object supportBen Skeggs
Should be OK on G84 for a single channel, multiple channels *almost* work. Untested on G80.
/a> 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200
/**************************************************************************
 * Copyright (c) 2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * An allocate-fence manager implementation intended for sets of base-registers
 * or tiling-registers.
 */

#include "drmP.h"

/*
 * Allocate a register compatible with @data and queue it on the unfenced
 * list, to be fenced later by drm_regs_fence().
 *
 * Search order:
 *   1. unfenced list - reuse a register already queued for the next fence;
 *      the requested @fence_type is OR'ed into its pending type.
 *   2. lru list      - reuse a fenced register whose fence class matches and
 *      whose current fence type is a subset of @fence_type.
 *   3. free list     - take any idle register.
 *   4. lru list      - unless @no_wait, block on the oldest fence (the lru
 *      head), drop the fence and recycle that register.
 *
 * Returns 0 with *@reg pointing at the chosen register on success,
 * -EBUSY when nothing is available (or @no_wait was set and a wait would
 * have been needed), or the error from drm_fence_object_wait().
 * *@reg is set to NULL on every failure path.
 */

int drm_regs_alloc(struct drm_reg_manager *manager,
		   const void *data,
		   uint32_t fence_class,
		   uint32_t fence_type,
		   int interruptible, int no_wait, struct drm_reg **reg)
{
	struct drm_reg *entry, *next_entry;
	int ret;

	*reg = NULL;

	/*
	 * Search the unfenced list.
	 */

	list_for_each_entry(entry, &manager->unfenced, head) {
		if (manager->reg_reusable(entry, data)) {
			/* Accumulate the type; the register stays queued. */
			entry->new_fence_type |= fence_type;
			goto out;
		}
	}

	/*
	 * Search the lru list.
	 */

	list_for_each_entry_safe(entry, next_entry, &manager->lru, head) {
		struct drm_fence_object *fence = entry->fence;
		/*
		 * Reusable only if the existing fence's type bits are all
		 * contained in the requested @fence_type (subset test).
		 */
		if (fence->fence_class == fence_class &&
		    (entry->fence_type & fence_type) == entry->fence_type &&
		    manager->reg_reusable(entry, data)) {
			list_del(&entry->head);
			entry->new_fence_type = fence_type;
			list_add_tail(&entry->head, &manager->unfenced);
			goto out;
		}
	}

	/*
	 * Search the free list.
	 */

	/* Takes the first free entry; the loop body always exits via goto. */
	list_for_each_entry(entry, &manager->free, head) {
		list_del(&entry->head);
		entry->new_fence_type = fence_type;
		list_add_tail(&entry->head, &manager->unfenced);
		goto out;
	}

	if (no_wait)
		return -EBUSY;

	/*
	 * Go back to the lru list and try to expire fences.
	 */

	/* Only the lru head is tried; the body exits via goto or return. */
	list_for_each_entry_safe(entry, next_entry, &manager->lru, head) {
		BUG_ON(!entry->fence);
		ret = drm_fence_object_wait(entry->fence, 0, !interruptible,
					    entry->fence_type);
		if (ret)
			return ret;

		drm_fence_usage_deref_unlocked(&entry->fence);
		list_del(&entry->head);
		entry->new_fence_type = fence_type;
		list_add_tail(&entry->head, &manager->unfenced);
		goto out;
	}

	/*
	 * Oops. All registers are used up :(.
	 */

	return -EBUSY;
out:
	*reg = entry;
	return 0;
}
EXPORT_SYMBOL(drm_regs_alloc);

/*
 * Fence, or un-queue, everything on the unfenced list.
 *
 * @fence != NULL: each unfenced register drops its old fence reference
 * (if any), takes a reference on @fence, promotes new_fence_type to
 * fence_type and moves to the tail of the lru list.  @fence must support
 * all bits in new_fence_type (BUG otherwise).
 *
 * @fence == NULL: the pending allocation is abandoned; registers go back
 * to the lru list (if they still hold an old fence) or to the free list.
 * The reverse walk combined with list_add() appears intended to preserve
 * the registers' relative order on the destination lists.
 */

void drm_regs_fence(struct drm_reg_manager *manager,
		    struct drm_fence_object *fence)
{
	struct drm_reg *entry;
	struct drm_reg *next_entry;

	if (!fence) {

		/*
		 * Old fence (if any) is still valid.
		 * Put back on free and lru lists.
		 */

		list_for_each_entry_safe_reverse(entry, next_entry,
						 &manager->unfenced, head) {
			list_del(&entry->head);
			list_add(&entry->head, (entry->fence) ?
				 &manager->lru : &manager->free);
		}
	} else {

		/*
		 * Fence with a new fence and put on lru list.
		 */

		list_for_each_entry_safe(entry, next_entry, &manager->unfenced,
					 head) {
			list_del(&entry->head);
			/* Swap the old fence reference for the new one. */
			if (entry->fence)
				drm_fence_usage_deref_unlocked(&entry->fence);
			drm_fence_reference_unlocked(&entry->fence, fence);

			entry->fence_type = entry->new_fence_type;
			/* The fence must cover every requested type bit. */
			BUG_ON((entry->fence_type & fence->type) !=
			       entry->fence_type);

			list_add_tail(&entry->head, &manager->lru);
		}
	}
}
EXPORT_SYMBOL(drm_regs_fence);

/*
 * Destroy every register owned by @manager.
 *
 * Unfenced registers are first flushed back onto the free/lru lists via
 * drm_regs_fence(manager, NULL).  Free registers are destroyed outright.
 * Registers still on the lru list are lazily waited on (wait errors are
 * deliberately ignored during teardown), their fence reference dropped,
 * and then destroyed through the manager's reg_destroy() hook.
 */
void drm_regs_free(struct drm_reg_manager *manager)
{
	struct drm_reg *reg, *tmp;

	/* Abandon any allocation still pending on the unfenced list. */
	drm_regs_fence(manager, NULL);

	list_for_each_entry_safe(reg, tmp, &manager->free, head) {
		list_del(&reg->head);
		manager->reg_destroy(reg);
	}

	list_for_each_entry_safe(reg, tmp, &manager->lru, head) {
		/* Best effort: ignore wait failures, we are tearing down. */
		(void)drm_fence_object_wait(reg->fence, 1, 1,
					    reg->fence_type);
		list_del(&reg->head);
		drm_fence_usage_deref_unlocked(&reg->fence);
		manager->reg_destroy(reg);
	}
}
EXPORT_SYMBOL(drm_regs_free);

/*
 * Hand a newly created register over to @manager.
 *
 * The register is placed at the tail of the free list with no fence
 * attached, ready to be picked up by drm_regs_alloc().
 */
void drm_regs_add(struct drm_reg_manager *manager, struct drm_reg *reg)
{
	list_add_tail(&reg->head, &manager->free);
	reg->fence = NULL;
}
EXPORT_SYMBOL(drm_regs_add);

/*
 * Initialize a register manager.
 *
 * Sets up the three (initially empty) register lists and installs the
 * caller-supplied hooks:
 *   @reg_reusable - decides whether an existing register is compatible
 *                   with a new allocation request.
 *   @reg_destroy  - frees a register when the manager is torn down.
 */
void drm_regs_init(struct drm_reg_manager *manager,
		   int (*reg_reusable) (const struct drm_reg *, const void *),
		   void (*reg_destroy) (struct drm_reg *))
{
	manager->reg_reusable = reg_reusable;
	manager->reg_destroy = reg_destroy;

	INIT_LIST_HEAD(&manager->unfenced);
	INIT_LIST_HEAD(&manager->lru);
	INIT_LIST_HEAD(&manager->free);
}
EXPORT_SYMBOL(drm_regs_init);