/*
 * Copyright 2007 Dave Airlied
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors: Dave Airlied
 *	    Ben Skeggs
 *	    Jeremy Kolb
 */

#include "drmP.h"
#include "nouveau_drm.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"

static struct drm_ttm_backend *
nouveau_bo_create_ttm_backend_entry(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	switch (dev_priv->gart_info.type) {
	case NOUVEAU_GART_AGP:
		return drm_agp_init_ttm(dev);
	case NOUVEAU_GART_SGDMA:
		return nouveau_sgdma_init_ttm(dev);
	default:
		DRM_ERROR("Unknown GART type %d\n", dev_priv->gart_info.type);
		break;
	}

	return NULL;
}

static int
nouveau_bo_fence_type(struct drm_buffer_object *bo,
		      uint32_t *fclass, uint32_t *type)
{
	/* When we get called, *fclass is set to the requested fence class */

	if (bo->mem.proposed_flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE))
		*type = 3;
	else
		*type = 1;

	return 0;
}

static int
nouveau_bo_invalidate_caches(struct drm_device *dev, uint64_t buffer_flags)
{
	/* We'll do this from user space. */
	return 0;
}
static int
nouveau_bo_init_mem_type(struct drm_device *dev, uint32_t type,
			 struct drm_mem_type_manager *man)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	switch (type) {
	case DRM_BO_MEM_LOCAL:
		man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
			     _DRM_FLAG_MEMTYPE_CACHED;
		man->drm_bus_maptype = 0;
		break;
	case DRM_BO_MEM_VRAM:
		man->flags = _DRM_FLAG_MEMTYPE_FIXED |
			     _DRM_FLAG_MEMTYPE_MAPPABLE |
			     _DRM_FLAG_NEEDS_IOREMAP;
		man->io_addr = NULL;
		man->drm_bus_maptype = _DRM_FRAME_BUFFER;
		man->io_offset = drm_get_resource_start(dev, 1);
		man->io_size = drm_get_resource_len(dev, 1);
		if (man->io_size > nouveau_mem_fb_amount(dev))
			man->io_size = nouveau_mem_fb_amount(dev);
		break;
	case DRM_BO_MEM_PRIV0:
		/* Unmappable VRAM */
		man->flags = _DRM_FLAG_MEMTYPE_CMA;
		man->drm_bus_maptype = 0;
		break;
	case DRM_BO_MEM_TT:
		switch (dev_priv->gart_info.type) {
		case NOUVEAU_GART_AGP:
			man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
				     _DRM_FLAG_MEMTYPE_CSELECT |
				     _DRM_FLAG_NEEDS_IOREMAP;
			man->drm_bus_maptype = _DRM_AGP;
			break;
		case NOUVEAU_GART_SGDMA:
			man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
				     _DRM_FLAG_MEMTYPE_CSELECT |
				     _DRM_FLAG_MEMTYPE_CMA;
			man->drm_bus_maptype = _DRM_SCATTER_GATHER;
			break;
		default:
			DRM_ERROR("Unknown GART type: %d\n",
				  dev_priv->gart_info.type);
			return -EINVAL;
		}

		man->io_offset = dev_priv->gart_info.aper_base;
		man->io_size = dev_priv->gart_info.aper_size;
		man->io_addr = NULL;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}

	return 0;
}

static uint64_t
nouveau_bo_evict_flags(struct drm_buffer_object *bo)
{
	switch (bo->mem.mem_type) {
	case DRM_BO_MEM_LOCAL:
	case DRM_BO_MEM_TT:
		return DRM_BO_FLAG_MEM_LOCAL;
	default:
		return DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_CACHED;
	}

	return 0;
}

/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
 * DRM_BO_MEM_{VRAM,PRIV0,TT} directly.
 */
static int
nouveau_bo_move_m2mf(struct drm_buffer_object *bo, int evict, int no_wait,
		     struct drm_bo_mem_reg *new_mem)
{
	struct drm_device *dev = bo->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_drm_channel *dchan = &dev_priv->channel;
	struct drm_bo_mem_reg *old_mem = &bo->mem;
	uint32_t srch, dsth, page_count;
	uint32_t src_offset, dst_offset;

	/* Can happen during init/takedown */
	if (!dchan->chan)
		return -EINVAL;

	srch = old_mem->mem_type == DRM_BO_MEM_TT ? NvDmaTT : NvDmaFB;
	dsth = new_mem->mem_type == DRM_BO_MEM_TT ? NvDmaTT : NvDmaFB;
	if (srch != dchan->m2mf_dma_source || dsth != dchan->m2mf_dma_destin) {
		dchan->m2mf_dma_source = srch;
		dchan->m2mf_dma_destin = dsth;

		BEGIN_RING(NvSubM2MF,
			   NV_MEMORY_TO_MEMORY_FORMAT_SET_DMA_SOURCE, 2);
		OUT_RING  (dchan->m2mf_dma_source);
		OUT_RING  (dchan->m2mf_dma_destin);
	}

	src_offset = old_mem->mm_node->start << PAGE_SHIFT;
	dst_offset = new_mem->mm_node->start << PAGE_SHIFT;
	page_count = new_mem->num_pages;
	while (page_count) {
		/* The method takes at most 2047 lines per submission. */
		int line_count = (page_count > 2047) ? 2047 : page_count;

		BEGIN_RING(NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
		OUT_RING  (src_offset);
		OUT_RING  (dst_offset);
		OUT_RING  (PAGE_SIZE); /* src_pitch */
		OUT_RING  (PAGE_SIZE); /* dst_pitch */
		OUT_RING  (PAGE_SIZE); /* line_length */
		OUT_RING  (line_count);
		OUT_RING  ((1<<8)|(1<<0));
		OUT_RING  (0);
		BEGIN_RING(NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING  (0);

		/* Advance past the chunk just queued, otherwise copies
		 * larger than 2047 pages would transfer the same pages
		 * over and over. */
		page_count -= line_count;
		src_offset += line_count << PAGE_SHIFT;
		dst_offset += line_count << PAGE_SHIFT;
	}

	return drm_bo_move_accel_cleanup(bo, evict, no_wait, dchan->chan->id,
					 DRM_FENCE_TYPE_EXE, 0, new_mem);
}

/* Flip pages into the GART and move if we can. */
static int
nouveau_bo_move_flipd(struct drm_buffer_object *bo, int evict, int no_wait,
		      struct drm_bo_mem_reg *new_mem)
{
	struct drm_device *dev = bo->dev;
	struct drm_bo_mem_reg tmp_mem;
	int ret;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	tmp_mem.proposed_flags = (DRM_BO_FLAG_MEM_TT |
				  DRM_BO_FLAG_CACHED |
				  DRM_BO_FLAG_FORCE_CACHING);

	ret = drm_bo_mem_space(bo, &tmp_mem, no_wait);
	if (ret)
		return ret;

	ret = drm_ttm_bind(bo->ttm, &tmp_mem);
	if (ret)
		goto out_cleanup;

	ret = nouveau_bo_move_m2mf(bo, 1, no_wait, &tmp_mem);
	if (ret)
		goto out_cleanup;

	ret = drm_bo_move_ttm(bo, evict, no_wait, new_mem);

out_cleanup:
	if (tmp_mem.mm_node) {
		mutex_lock(&dev->struct_mutex);
		if (tmp_mem.mm_node != bo->pinned_node)
			drm_mm_put_block(tmp_mem.mm_node);
		tmp_mem.mm_node = NULL;
		mutex_unlock(&dev->struct_mutex);
	}

	return ret;
}

static int
nouveau_bo_move(struct drm_buffer_object *bo, int evict, int no_wait,
		struct drm_bo_mem_reg *new_mem)
{
	struct drm_bo_mem_reg *old_mem = &bo->mem;

	if (new_mem->mem_type == DRM_BO_MEM_LOCAL) {
		if (old_mem->mem_type == DRM_BO_MEM_LOCAL)
			return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
		if (nouveau_bo_move_flipd(bo, evict, no_wait, new_mem))
			return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
	} else if (old_mem->mem_type == DRM_BO_MEM_LOCAL) {
		if (1 /*nouveau_bo_move_flips(bo, evict, no_wait, new_mem)*/)
			return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
	} else {
		if (nouveau_bo_move_m2mf(bo, evict, no_wait, new_mem))
			return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
	}

	return 0;
}

static void
nouveau_bo_flush_ttm(struct drm_ttm *ttm)
{
}

static uint32_t nouveau_mem_prios[] = {
	DRM_BO_MEM_PRIV0,
	DRM_BO_MEM_VRAM,
	DRM_BO_MEM_TT,
	DRM_BO_MEM_LOCAL
};

static uint32_t nouveau_busy_prios[] = {
	DRM_BO_MEM_TT,
	DRM_BO_MEM_PRIV0,
	DRM_BO_MEM_VRAM,
	DRM_BO_MEM_LOCAL
};

struct drm_bo_driver nouveau_bo_driver = {
	.mem_type_prio = nouveau_mem_prios,
	.mem_busy_prio = nouveau_busy_prios,
	.num_mem_type_prio = sizeof(nouveau_mem_prios)/sizeof(uint32_t),
	.num_mem_busy_prio = sizeof(nouveau_busy_prios)/sizeof(uint32_t),
	.create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry,
	.fence_type = nouveau_bo_fence_type,
	.invalidate_caches = nouveau_bo_invalidate_caches,
	.init_mem_type = nouveau_bo_init_mem_type,
	.evict_flags = nouveau_bo_evict_flags,
	.move = nouveau_bo_move,
	.ttm_cache_flush = nouveau_bo_flush_ttm,
	.command_stream_barrier = NULL
};
/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/
/*
 * Simple memory MANager interface that keeps track of allocated regions on a
 * per "owner" basis. All regions associated with an "owner" can be released
 * with a simple call, typically when the "owner" exits. The owner is any
 * "unsigned long" identifier, typically a pointer to a file private
 * struct or a context identifier.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#ifndef DRM_SMAN_H
#define DRM_SMAN_H

#include "drmP.h"
#include "drm_hashtab.h"

/*
 * A class that is an abstraction of a simple memory allocator.
 * The sman implementation provides a default such allocator
 * using the drm_mm.c implementation, but the user can replace it;
 * see the SiS implementation, which may use the SiS FB kernel module
 * for memory management. A sketch of a custom allocator follows the
 * struct definition below.
 */

struct drm_sman_mm {
	/* private info. If allocated, needs to be destroyed by the destroy
	   function */
	void *private;

	/* Allocate a memory block with given size and alignment.
	   Return an opaque reference to the memory block */

	void *(*allocate) (void *private, unsigned long size,
			   unsigned alignment);

	/* Free a memory block. "ref" is the opaque reference that we got from
	   the "alloc" function */

	void (*free) (void *private, void *ref);

	/* Free all resources associated with this allocator */

	void (*destroy) (void *private);

	/* Return a memory offset from the opaque reference returned from the
	   "alloc" function */

	unsigned long (*offset) (void *private, void *ref);
};
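
/*
 * Example: filling in a customized drm_sman_mm. This is only a sketch;
 * the my_pool_* helpers are hypothetical stand-ins for a real backing
 * allocator (such as the SiS FB one), shown to illustrate how the hooks
 * fit together and how "ref" relates to "offset":
 *
 *	static void *my_allocate(void *private, unsigned long size,
 *				 unsigned alignment)
 *	{
 *		return my_pool_carve(private, size, alignment);
 *	}
 *
 *	static void my_free(void *private, void *ref)
 *	{
 *		my_pool_release(private, ref);
 *	}
 *
 *	static void my_destroy(void *private)
 *	{
 *		my_pool_shutdown(private);
 *	}
 *
 *	static unsigned long my_offset(void *private, void *ref)
 *	{
 *		return my_pool_start_of(private, ref);
 *	}
 *
 * The filled-in struct is then handed to drm_sman_set_manager (declared
 * below), which copies it.
 */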

struct drm_memblock_item {
	struct list_head owner_list;
	struct drm_hash_item user_hash;
	void *mm_info;
	struct drm_sman_mm *mm;
	struct drm_sman *sman;
};

struct drm_sman {
	struct drm_sman_mm *mm;
	int num_managers;
	struct drm_open_hash owner_hash_tab;
	struct drm_open_hash user_hash_tab;
	struct list_head owner_items;
};

/*
 * Take down a memory manager. This function should only be called after a
 * successful init and after a call to drm_sman_cleanup.
 */

extern void drm_sman_takedown(struct drm_sman * sman);
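
/*
 * Example: the required shutdown ordering at driver unload (a sketch;
 * assumes the engine has already been idled so that drm_sman_cleanup,
 * declared at the end of this file, can free any remaining blocks):
 *
 *	drm_sman_cleanup(&sman);
 *	drm_sman_takedown(&sman);
 */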

/*
 * Allocate structures for a manager.
 * num_managers is the number of memory pools to manage (VRAM, AGP, ...).
 * user_order is the log2 of the number of buckets in the user hash table.
 *	    Set this to approximately log2 of the max number of memory
 *	    regions that will be allocated for _all_ pools together.
 * owner_order is the log2 of the number of buckets in the owner hash table.
 *	    Set this to approximately log2 of the number of client file
 *	    connections that will be using the manager.
 */

extern int drm_sman_init(struct drm_sman * sman, unsigned int num_managers,
			 unsigned int user_order, unsigned int owner_order);

/*
 * Initialize a drm_mm.c allocator. Should be called only once for each
 * manager unless a customized allocator is used.
 */

extern int drm_sman_set_range(struct drm_sman * sman, unsigned int manager,
			      unsigned long start, unsigned long size);
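
/*
 * Example: a typical two-pool setup, mirroring the SiS driver's parameters
 * (a sketch; the vram_size/agp_size variables are illustrative, and sizes
 * are in whatever units the caller chooses to allocate in). user_order 12
 * sizes the user hash for roughly 2^12 regions across both pools, and
 * owner_order 8 for roughly 2^8 clients:
 *
 *	struct drm_sman sman;
 *	int ret;
 *
 *	ret = drm_sman_init(&sman, 2, 12, 8);
 *	if (ret)
 *		return ret;
 *	ret = drm_sman_set_range(&sman, 0, 0, vram_size);
 *	if (!ret)
 *		ret = drm_sman_set_range(&sman, 1, 0, agp_size);
 */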

/*
 * Initialize a customized allocator for one of the managers.
 * (See the SiS module). The object pointed to by "allocator" is copied,
 * so it can be destroyed after this call.
 */

extern int drm_sman_set_manager(struct drm_sman * sman, unsigned int manager,
				struct drm_sman_mm * allocator);

/*
 * Allocate a memory block. Alignment is not implemented yet.
 */

extern struct drm_memblock_item *drm_sman_alloc(struct drm_sman * sman,
						unsigned int manager,
						unsigned long size,
						unsigned alignment,
						unsigned long owner);
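
/*
 * Example: carving a block out of pool 0 on behalf of a client, using the
 * file private pointer as the owner key (a sketch; file_priv and the error
 * handling style are illustrative):
 *
 *	struct drm_memblock_item *item;
 *
 *	item = drm_sman_alloc(&sman, 0, size, 0, (unsigned long)file_priv);
 *	if (!item)
 *		return -ENOMEM;
 *
 * The device offset of the block comes from the allocator hook:
 *
 *	offset = item->mm->offset(item->mm->private, item->mm_info);
 *
 * and item->user_hash.key is the handle to return to user space and later
 * pass to drm_sman_free_key (declared below).
 */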
/*
 * Free a memory block identified by its user hash key.
 */

extern int drm_sman_free_key(struct drm_sman * sman, unsigned int key);

/*
 * Returns 1 iff there are no stale memory blocks associated with this owner.
 * Typically called to determine if we need to idle the hardware and call
 * drm_sman_owner_cleanup. If there are no stale memory blocks, it removes all
 * resources associated with owner.
 */

extern int drm_sman_owner_clean(struct drm_sman * sman, unsigned long owner);

/*
 * Frees all stale memory blocks associated with this owner. Note that this
 * requires the hardware to be finished with all blocks, so the graphics
 * engine should be idled before this call is made. This function also frees
 * any resources associated with "owner" and should be called when the owner
 * is not going to be referenced anymore.
 */

extern void drm_sman_owner_cleanup(struct drm_sman * sman, unsigned long owner);
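
/*
 * Example: per-client teardown (a sketch; engine_idle is a hypothetical
 * stand-in for whatever idles the hardware, and any locking around the
 * sman is the caller's responsibility):
 *
 *	if (!drm_sman_owner_clean(&sman, (unsigned long)file_priv)) {
 *		engine_idle(dev);
 *		drm_sman_owner_cleanup(&sman, (unsigned long)file_priv);
 *	}
 */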

/*
 * Frees all stale memory blocks associated with the memory manager.
 * See the note on idling above.
 */

extern void drm_sman_cleanup(struct drm_sman * sman);

#endif