From 6a9eb08a872ac0388aad2c901888888964f14559 Mon Sep 17 00:00:00 2001
From: Eric Anholt
Date: Tue, 3 Jun 2008 09:27:37 -0700
Subject: Import bufmgr code to libdrm. Not yet hooked up to the build.

---
 libdrm/intel/intel_bufmgr_gem.c | 847 ++++++++++++++++++++++++++++++++++++++++
 1 file changed, 847 insertions(+)
 create mode 100644 libdrm/intel/intel_bufmgr_gem.c

(limited to 'libdrm/intel/intel_bufmgr_gem.c')

diff --git a/libdrm/intel/intel_bufmgr_gem.c b/libdrm/intel/intel_bufmgr_gem.c
new file mode 100644
index 00000000..3c1c3157
--- /dev/null
+++ b/libdrm/intel/intel_bufmgr_gem.c
@@ -0,0 +1,847 @@
+/**************************************************************************
+ *
+ * Copyright © 2007 Red Hat Inc.
+ * Copyright © 2007 Intel Corporation
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellström
+ *          Keith Whitwell
+ *          Eric Anholt
+ *          Dave Airlie
+ */
+
+#include <xf86drm.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <assert.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+
+#include "errno.h"
+#include "mtypes.h"
+#include "dri_bufmgr.h"
+#include "string.h"
+#include "imports.h"
+
+#include "i915_drm.h"
+
+#include "intel_bufmgr_gem.h"
+
+#define DBG(...) do {                                  \
+   if (bufmgr_gem->bufmgr.debug)                       \
+      fprintf(stderr, __VA_ARGS__);                    \
+} while (0)
+
+struct intel_validate_entry {
+    dri_bo *bo;
+    struct drm_i915_op_arg bo_arg;
+};
+
+struct dri_gem_bo_bucket_entry {
+   uint32_t gem_handle;
+   uint32_t last_offset;
+   struct dri_gem_bo_bucket_entry *next;
+};
+
+struct dri_gem_bo_bucket {
+   struct dri_gem_bo_bucket_entry *head;
+   struct dri_gem_bo_bucket_entry **tail;
+   /**
+    * Limit on the number of entries in this bucket.
+    *
+    * 0 means that caching at this bucket size is disabled.
+    * -1 means that there is no limit to caching at this size.
+    */
+   int max_entries;
+   int num_entries;
+};
+
+/* Arbitrarily chosen, 16 means that the maximum size we'll cache for reuse
+ * is 1 << 16 pages, or 256MB.
+ */ +#define INTEL_GEM_BO_BUCKETS 16 +typedef struct _dri_bufmgr_gem { + dri_bufmgr bufmgr; + + int fd; + + uint32_t max_relocs; + + struct drm_i915_gem_exec_object *exec_objects; + dri_bo **exec_bos; + int exec_size; + int exec_count; + + /** Array of lists of cached gem objects of power-of-two sizes */ + struct dri_gem_bo_bucket cache_bucket[INTEL_GEM_BO_BUCKETS]; + + struct drm_i915_gem_execbuffer exec_arg; +} dri_bufmgr_gem; + +typedef struct _dri_bo_gem { + dri_bo bo; + + int refcount; + GLboolean mapped; + uint32_t gem_handle; + const char *name; + + /** + * Index of the buffer within the validation list while preparing a + * batchbuffer execution. + */ + int validate_index; + + /** + * Tracks whether set_domain to CPU is current + * Set when set_domain has been called + * Cleared when a batch has been submitted + */ + GLboolean cpu_domain_set; + + /** Array passed to the DRM containing relocation information. */ + struct drm_i915_gem_relocation_entry *relocs; + /** Array of bos corresponding to relocs[i].target_handle */ + dri_bo **reloc_target_bo; + /** Number of entries in relocs */ + int reloc_count; + /** Mapped address for the buffer */ + void *virtual; +} dri_bo_gem; + +static int +logbase2(int n) +{ + GLint i = 1; + GLint log2 = 0; + + while (n > i) { + i *= 2; + log2++; + } + + return log2; +} + +static struct dri_gem_bo_bucket * +dri_gem_bo_bucket_for_size(dri_bufmgr_gem *bufmgr_gem, unsigned long size) +{ + int i; + + /* We only do buckets in power of two increments */ + if ((size & (size - 1)) != 0) + return NULL; + + /* We should only see sizes rounded to pages. */ + assert((size % 4096) == 0); + + /* We always allocate in units of pages */ + i = ffs(size / 4096) - 1; + if (i >= INTEL_GEM_BO_BUCKETS) + return NULL; + + return &bufmgr_gem->cache_bucket[i]; +} + + +static void dri_gem_dump_validation_list(dri_bufmgr_gem *bufmgr_gem) +{ + int i, j; + + for (i = 0; i < bufmgr_gem->exec_count; i++) { + dri_bo *bo = bufmgr_gem->exec_bos[i]; + dri_bo_gem *bo_gem = (dri_bo_gem *)bo; + + if (bo_gem->relocs == NULL) { + DBG("%2d: %d (%s)\n", i, bo_gem->gem_handle, bo_gem->name); + continue; + } + + for (j = 0; j < bo_gem->reloc_count; j++) { + dri_bo *target_bo = bo_gem->reloc_target_bo[j]; + dri_bo_gem *target_gem = (dri_bo_gem *)target_bo; + + DBG("%2d: %d (%s)@0x%08llx -> %d (%s)@0x%08lx + 0x%08x\n", + i, + bo_gem->gem_handle, bo_gem->name, bo_gem->relocs[j].offset, + target_gem->gem_handle, target_gem->name, target_bo->offset, + bo_gem->relocs[j].delta); + } + } +} + +/** + * Adds the given buffer to the list of buffers to be validated (moved into the + * appropriate memory type) with the next batch submission. + * + * If a buffer is validated multiple times in a batch submission, it ends up + * with the intersection of the memory type flags and the union of the + * access flags. + */ +static void +intel_add_validate_buffer(dri_bo *bo) +{ + dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr; + dri_bo_gem *bo_gem = (dri_bo_gem *)bo; + int index; + + if (bo_gem->validate_index != -1) + return; + + /* Extend the array of validation entries as necessary. 
*/ + if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) { + int new_size = bufmgr_gem->exec_size * 2; + + if (new_size == 0) + new_size = 5; + + bufmgr_gem->exec_objects = + realloc(bufmgr_gem->exec_objects, + sizeof(*bufmgr_gem->exec_objects) * new_size); + bufmgr_gem->exec_bos = + realloc(bufmgr_gem->exec_bos, + sizeof(*bufmgr_gem->exec_bos) * new_size); + bufmgr_gem->exec_size = new_size; + } + + index = bufmgr_gem->exec_count; + bo_gem->validate_index = index; + /* Fill in array entry */ + bufmgr_gem->exec_objects[index].handle = bo_gem->gem_handle; + bufmgr_gem->exec_objects[index].relocation_count = bo_gem->reloc_count; + bufmgr_gem->exec_objects[index].relocs_ptr = (uintptr_t)bo_gem->relocs; + bufmgr_gem->exec_objects[index].alignment = 0; + bufmgr_gem->exec_objects[index].offset = 0; + bufmgr_gem->exec_bos[index] = bo; + dri_bo_reference(bo); + bufmgr_gem->exec_count++; +} + + +#define RELOC_BUF_SIZE(x) ((I915_RELOC_HEADER + x * I915_RELOC0_STRIDE) * \ + sizeof(uint32_t)) + +static int +intel_setup_reloc_list(dri_bo *bo) +{ + dri_bo_gem *bo_gem = (dri_bo_gem *)bo; + dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr; + + bo_gem->relocs = malloc(bufmgr_gem->max_relocs * + sizeof(struct drm_i915_gem_relocation_entry)); + bo_gem->reloc_target_bo = malloc(bufmgr_gem->max_relocs * sizeof(dri_bo *)); + + return 0; +} + +static dri_bo * +dri_gem_bo_alloc(dri_bufmgr *bufmgr, const char *name, + unsigned long size, unsigned int alignment, + uint64_t location_mask) +{ + dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bufmgr; + dri_bo_gem *bo_gem; + unsigned int page_size = getpagesize(); + int ret; + struct dri_gem_bo_bucket *bucket; + GLboolean alloc_from_cache = GL_FALSE; + + bo_gem = calloc(1, sizeof(*bo_gem)); + if (!bo_gem) + return NULL; + + /* Round the allocated size up to a power of two number of pages. */ + bo_gem->bo.size = 1 << logbase2(size); + if (bo_gem->bo.size < page_size) + bo_gem->bo.size = page_size; + bucket = dri_gem_bo_bucket_for_size(bufmgr_gem, bo_gem->bo.size); + + /* If we don't have caching at this size, don't actually round the + * allocation up. 
+ */ + if (bucket == NULL || bucket->max_entries == 0) { + bo_gem->bo.size = size; + if (bo_gem->bo.size < page_size) + bo_gem->bo.size = page_size; + } + + /* Get a buffer out of the cache if available */ + if (bucket != NULL && bucket->num_entries > 0) { + struct dri_gem_bo_bucket_entry *entry = bucket->head; + struct drm_i915_gem_busy busy; + + busy.handle = entry->gem_handle; + ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy); + alloc_from_cache = (ret == 0 && busy.busy == 0); + + if (alloc_from_cache) { + bucket->head = entry->next; + if (entry->next == NULL) + bucket->tail = &bucket->head; + bucket->num_entries--; + + bo_gem->gem_handle = entry->gem_handle; + bo_gem->bo.offset = entry->last_offset; + free(entry); + } + } + + if (!alloc_from_cache) { + struct drm_gem_create create; + + memset(&create, 0, sizeof(create)); + create.size = bo_gem->bo.size; + + ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CREATE, &create); + bo_gem->gem_handle = create.handle; + if (ret != 0) { + free(bo_gem); + return NULL; + } + } + + bo_gem->bo.virtual = NULL; + bo_gem->bo.bufmgr = bufmgr; + bo_gem->name = name; + bo_gem->refcount = 1; + bo_gem->validate_index = -1; + + DBG("bo_create: buf %d (%s) %ldb\n", + bo_gem->gem_handle, bo_gem->name, size); + + return &bo_gem->bo; +} + +/* Our GEM backend doesn't allow creation of static buffers, as that requires + * privelege for the non-fake case, and the lock in the fake case where we were + * working around the X Server not creating buffers and passing handles to us. + */ +static dri_bo * +dri_gem_bo_alloc_static(dri_bufmgr *bufmgr, const char *name, + unsigned long offset, unsigned long size, void *virtual, + uint64_t location_mask) +{ + return NULL; +} + +/** + * Returns a dri_bo wrapping the given buffer object handle. + * + * This can be used when one application needs to pass a buffer object + * to another. 
+ */ +dri_bo * +intel_gem_bo_create_from_handle(dri_bufmgr *bufmgr, const char *name, + unsigned int handle) +{ + dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bufmgr; + dri_bo_gem *bo_gem; + int ret; + struct drm_gem_open open_arg; + + bo_gem = calloc(1, sizeof(*bo_gem)); + if (!bo_gem) + return NULL; + + memset(&open_arg, 0, sizeof(open_arg)); + open_arg.name = handle; + ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_OPEN, &open_arg); + if (ret != 0) { + fprintf(stderr, "Couldn't reference %s handle 0x%08x: %s\n", + name, handle, strerror(-ret)); + free(bo_gem); + return NULL; + } + bo_gem->bo.size = open_arg.size; + bo_gem->bo.offset = 0; + bo_gem->bo.virtual = NULL; + bo_gem->bo.bufmgr = bufmgr; + bo_gem->name = name; + bo_gem->refcount = 1; + bo_gem->validate_index = -1; + bo_gem->gem_handle = open_arg.handle; + + DBG("bo_create_from_handle: %d (%s)\n", handle, bo_gem->name); + + return &bo_gem->bo; +} + +static void +dri_gem_bo_reference(dri_bo *bo) +{ + dri_bo_gem *bo_gem = (dri_bo_gem *)bo; + + bo_gem->refcount++; +} + +static void +dri_gem_bo_unreference(dri_bo *bo) +{ + dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr; + dri_bo_gem *bo_gem = (dri_bo_gem *)bo; + + if (!bo) + return; + + if (--bo_gem->refcount == 0) { + struct dri_gem_bo_bucket *bucket; + int ret; + + if (bo_gem->mapped) + munmap (bo_gem->virtual, bo->size); + + if (bo_gem->relocs != NULL) { + int i; + + /* Unreference all the target buffers */ + for (i = 0; i < bo_gem->reloc_count; i++) + dri_bo_unreference(bo_gem->reloc_target_bo[i]); + free(bo_gem->reloc_target_bo); + free(bo_gem->relocs); + } + + bucket = dri_gem_bo_bucket_for_size(bufmgr_gem, bo->size); + /* Put the buffer into our internal cache for reuse if we can. */ + if (bucket != NULL && + (bucket->max_entries == -1 || + (bucket->max_entries > 0 && + bucket->num_entries < bucket->max_entries))) + { + struct dri_gem_bo_bucket_entry *entry; + + entry = calloc(1, sizeof(*entry)); + entry->gem_handle = bo_gem->gem_handle; + entry->last_offset = bo->offset; + + entry->next = NULL; + *bucket->tail = entry; + bucket->tail = &entry->next; + bucket->num_entries++; + } else { + struct drm_gem_close close; + + /* Close this object */ + close.handle = bo_gem->gem_handle; + ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close); + if (ret != 0) { + fprintf(stderr, + "DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n", + bo_gem->gem_handle, bo_gem->name, strerror(-ret)); + } + } + + DBG("bo_unreference final: %d (%s)\n", + bo_gem->gem_handle, bo_gem->name); + + free(bo); + return; + } +} + +static int +dri_gem_bo_map(dri_bo *bo, GLboolean write_enable) +{ + dri_bufmgr_gem *bufmgr_gem; + dri_bo_gem *bo_gem = (dri_bo_gem *)bo; + struct drm_gem_set_domain set_domain; + int ret; + + bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr; + + /* Allow recursive mapping. Mesa may recursively map buffers with + * nested display loops. 
+ */ + if (!bo_gem->mapped) { + + assert(bo->virtual == NULL); + + DBG("bo_map: %d (%s)\n", bo_gem->gem_handle, bo_gem->name); + + if (bo_gem->virtual == NULL) { + struct drm_gem_mmap mmap_arg; + + memset(&mmap_arg, 0, sizeof(mmap_arg)); + mmap_arg.handle = bo_gem->gem_handle; + mmap_arg.offset = 0; + mmap_arg.size = bo->size; + ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_MMAP, &mmap_arg); + if (ret != 0) { + fprintf(stderr, "%s:%d: Error mapping buffer %d (%s): %s .\n", + __FILE__, __LINE__, + bo_gem->gem_handle, bo_gem->name, strerror(errno)); + } + bo_gem->virtual = (void *)(uintptr_t)mmap_arg.addr_ptr; + } + bo->virtual = bo_gem->virtual; + bo_gem->mapped = GL_TRUE; + DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name, bo_gem->virtual); + } + + if (!bo_gem->cpu_domain_set) { + set_domain.handle = bo_gem->gem_handle; + set_domain.read_domains = DRM_GEM_DOMAIN_CPU; + set_domain.write_domain = write_enable ? DRM_GEM_DOMAIN_CPU : 0; + ret = ioctl (bufmgr_gem->fd, DRM_IOCTL_GEM_SET_DOMAIN, &set_domain); + if (ret != 0) { + fprintf (stderr, "%s:%d: Error setting memory domains %d (%08x %08x): %s .\n", + __FILE__, __LINE__, + bo_gem->gem_handle, set_domain.read_domains, set_domain.write_domain, + strerror (errno)); + } + bo_gem->cpu_domain_set = GL_TRUE; + } + + return 0; +} + +static int +dri_gem_bo_unmap(dri_bo *bo) +{ + dri_bo_gem *bo_gem = (dri_bo_gem *)bo; + + if (bo == NULL) + return 0; + + assert(bo_gem->mapped); + + return 0; +} + +static int +dri_gem_bo_subdata (dri_bo *bo, unsigned long offset, + unsigned long size, const void *data) +{ + dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr; + dri_bo_gem *bo_gem = (dri_bo_gem *)bo; + struct drm_gem_pwrite pwrite; + int ret; + + memset (&pwrite, 0, sizeof (pwrite)); + pwrite.handle = bo_gem->gem_handle; + pwrite.offset = offset; + pwrite.size = size; + pwrite.data_ptr = (uint64_t) (uintptr_t) data; + ret = ioctl (bufmgr_gem->fd, DRM_IOCTL_GEM_PWRITE, &pwrite); + if (ret != 0) { + fprintf (stderr, "%s:%d: Error writing data to buffer %d: (%d %d) %s .\n", + __FILE__, __LINE__, + bo_gem->gem_handle, (int) offset, (int) size, + strerror (errno)); + } + return 0; +} + +static int +dri_gem_bo_get_subdata (dri_bo *bo, unsigned long offset, + unsigned long size, void *data) +{ + dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr; + dri_bo_gem *bo_gem = (dri_bo_gem *)bo; + struct drm_gem_pread pread; + int ret; + + memset (&pread, 0, sizeof (pread)); + pread.handle = bo_gem->gem_handle; + pread.offset = offset; + pread.size = size; + pread.data_ptr = (uint64_t) (uintptr_t) data; + ret = ioctl (bufmgr_gem->fd, DRM_IOCTL_GEM_PREAD, &pread); + if (ret != 0) { + fprintf (stderr, "%s:%d: Error reading data from buffer %d: (%d %d) %s .\n", + __FILE__, __LINE__, + bo_gem->gem_handle, (int) offset, (int) size, + strerror (errno)); + } + return 0; +} + +static void +dri_gem_bo_wait_rendering(dri_bo *bo) +{ + dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr; + dri_bo_gem *bo_gem = (dri_bo_gem *)bo; + struct drm_gem_set_domain set_domain; + int ret; + + set_domain.handle = bo_gem->gem_handle; + set_domain.read_domains = DRM_GEM_DOMAIN_CPU; + set_domain.write_domain = 0; + ret = ioctl (bufmgr_gem->fd, DRM_IOCTL_GEM_SET_DOMAIN, &set_domain); + if (ret != 0) { + fprintf (stderr, "%s:%d: Error setting memory domains %d (%08x %08x): %s .\n", + __FILE__, __LINE__, + bo_gem->gem_handle, set_domain.read_domains, set_domain.write_domain, + strerror (errno)); + } +} + +static void +dri_bufmgr_gem_destroy(dri_bufmgr *bufmgr) +{ + 
dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bufmgr; + int i; + + free(bufmgr_gem->exec_objects); + free(bufmgr_gem->exec_bos); + + /* Free any cached buffer objects we were going to reuse */ + for (i = 0; i < INTEL_GEM_BO_BUCKETS; i++) { + struct dri_gem_bo_bucket *bucket = &bufmgr_gem->cache_bucket[i]; + struct dri_gem_bo_bucket_entry *entry; + + while ((entry = bucket->head) != NULL) { + struct drm_gem_close close; + int ret; + + bucket->head = entry->next; + if (entry->next == NULL) + bucket->tail = &bucket->head; + bucket->num_entries--; + + /* Close this object */ + close.handle = entry->gem_handle; + ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close); + if (ret != 0) { + fprintf(stderr, "DRM_IOCTL_GEM_CLOSE failed: %s\n", + strerror(-ret)); + } + + free(entry); + } + } + + free(bufmgr); +} + +/** + * Adds the target buffer to the validation list and adds the relocation + * to the reloc_buffer's relocation list. + * + * The relocation entry at the given offset must already contain the + * precomputed relocation value, because the kernel will optimize out + * the relocation entry write when the buffer hasn't moved from the + * last known offset in target_bo. + */ +static int +dri_gem_emit_reloc(dri_bo *bo, uint32_t read_domains, uint32_t write_domain, + uint32_t delta, uint32_t offset, dri_bo *target_bo) +{ + dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr; + dri_bo_gem *bo_gem = (dri_bo_gem *)bo; + dri_bo_gem *target_bo_gem = (dri_bo_gem *)target_bo; + + /* Create a new relocation list if needed */ + if (bo_gem->relocs == NULL) + intel_setup_reloc_list(bo); + + /* Check overflow */ + assert(bo_gem->reloc_count < bufmgr_gem->max_relocs); + + /* Check args */ + assert (offset <= bo->size - 4); + assert ((write_domain & (write_domain-1)) == 0); + + bo_gem->relocs[bo_gem->reloc_count].offset = offset; + bo_gem->relocs[bo_gem->reloc_count].delta = delta; + bo_gem->relocs[bo_gem->reloc_count].target_handle = + target_bo_gem->gem_handle; + bo_gem->relocs[bo_gem->reloc_count].read_domains = read_domains; + bo_gem->relocs[bo_gem->reloc_count].write_domain = write_domain; + bo_gem->relocs[bo_gem->reloc_count].presumed_offset = target_bo->offset; + + bo_gem->reloc_target_bo[bo_gem->reloc_count] = target_bo; + dri_bo_reference(target_bo); + + bo_gem->reloc_count++; + return 0; +} + +/** + * Walk the tree of relocations rooted at BO and accumulate the list of + * validations to be performed and update the relocation buffers with + * index values into the validation list. + */ +static void +dri_gem_bo_process_reloc(dri_bo *bo) +{ + dri_bo_gem *bo_gem = (dri_bo_gem *)bo; + int i; + + if (bo_gem->relocs == NULL) + return; + + for (i = 0; i < bo_gem->reloc_count; i++) { + dri_bo *target_bo = bo_gem->reloc_target_bo[i]; + + /* Continue walking the tree depth-first. */ + dri_gem_bo_process_reloc(target_bo); + + /* Add the target to the validate list */ + intel_add_validate_buffer(target_bo); + } +} + +static void * +dri_gem_process_reloc(dri_bo *batch_buf) +{ + dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *) batch_buf->bufmgr; + + /* Update indices and set up the validate list. */ + dri_gem_bo_process_reloc(batch_buf); + + /* Add the batch buffer to the validation list. There are no relocations + * pointing to it. 
+ */ + intel_add_validate_buffer(batch_buf); + + bufmgr_gem->exec_arg.buffers_ptr = (uintptr_t)bufmgr_gem->exec_objects; + bufmgr_gem->exec_arg.buffer_count = bufmgr_gem->exec_count; + bufmgr_gem->exec_arg.batch_start_offset = 0; + bufmgr_gem->exec_arg.batch_len = 0; /* written in intel_exec_ioctl */ + + return &bufmgr_gem->exec_arg; +} + +static void +intel_update_buffer_offsets (dri_bufmgr_gem *bufmgr_gem) +{ + int i; + + for (i = 0; i < bufmgr_gem->exec_count; i++) { + dri_bo *bo = bufmgr_gem->exec_bos[i]; + dri_bo_gem *bo_gem = (dri_bo_gem *)bo; + + /* Update the buffer offset */ + if (bufmgr_gem->exec_objects[i].offset != bo->offset) { + DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n", + bo_gem->gem_handle, bo_gem->name, bo->offset, + bufmgr_gem->exec_objects[i].offset); + bo->offset = bufmgr_gem->exec_objects[i].offset; + } + } +} + +static void +dri_gem_post_submit(dri_bo *batch_buf) +{ + dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)batch_buf->bufmgr; + int i; + + intel_update_buffer_offsets (bufmgr_gem); + + if (bufmgr_gem->bufmgr.debug) + dri_gem_dump_validation_list(bufmgr_gem); + + for (i = 0; i < bufmgr_gem->exec_count; i++) { + dri_bo *bo = bufmgr_gem->exec_bos[i]; + dri_bo_gem *bo_gem = (dri_bo_gem *)bo; + + /* Need to call set_domain on next bo_map */ + bo_gem->cpu_domain_set = GL_FALSE; + + /* Disconnect the buffer from the validate list */ + bo_gem->validate_index = -1; + dri_bo_unreference(bo); + bufmgr_gem->exec_bos[i] = NULL; + } + bufmgr_gem->exec_count = 0; +} + +/** + * Enables unlimited caching of buffer objects for reuse. + * + * This is potentially very memory expensive, as the cache at each bucket + * size is only bounded by how many buffers of that size we've managed to have + * in flight at once. + */ +void +intel_gem_enable_bo_reuse(dri_bufmgr *bufmgr) +{ + dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bufmgr; + int i; + + for (i = 0; i < INTEL_GEM_BO_BUCKETS; i++) { + bufmgr_gem->cache_bucket[i].max_entries = -1; + } +} + +/* + * + */ +static int +dri_gem_check_aperture_space(dri_bo *bo) +{ + return 0; +} + +/** + * Initializes the GEM buffer manager, which uses the kernel to allocate, map, + * and manage map buffer objections. + * + * \param fd File descriptor of the opened DRM device. + */ +dri_bufmgr * +intel_bufmgr_gem_init(int fd, int batch_size) +{ + dri_bufmgr_gem *bufmgr_gem; + int i; + + bufmgr_gem = calloc(1, sizeof(*bufmgr_gem)); + bufmgr_gem->fd = fd; + + /* Let's go with one relocation per every 2 dwords (but round down a bit + * since a power of two will mean an extra page allocation for the reloc + * buffer). + * + * Every 4 was too few for the blender benchmark. 
+ */ + bufmgr_gem->max_relocs = batch_size / sizeof(uint32_t) / 2 - 2; + + bufmgr_gem->bufmgr.bo_alloc = dri_gem_bo_alloc; + bufmgr_gem->bufmgr.bo_alloc_static = dri_gem_bo_alloc_static; + bufmgr_gem->bufmgr.bo_reference = dri_gem_bo_reference; + bufmgr_gem->bufmgr.bo_unreference = dri_gem_bo_unreference; + bufmgr_gem->bufmgr.bo_map = dri_gem_bo_map; + bufmgr_gem->bufmgr.bo_unmap = dri_gem_bo_unmap; + bufmgr_gem->bufmgr.bo_subdata = dri_gem_bo_subdata; + bufmgr_gem->bufmgr.bo_get_subdata = dri_gem_bo_get_subdata; + bufmgr_gem->bufmgr.bo_wait_rendering = dri_gem_bo_wait_rendering; + bufmgr_gem->bufmgr.destroy = dri_bufmgr_gem_destroy; + bufmgr_gem->bufmgr.emit_reloc = dri_gem_emit_reloc; + bufmgr_gem->bufmgr.process_relocs = dri_gem_process_reloc; + bufmgr_gem->bufmgr.post_submit = dri_gem_post_submit; + bufmgr_gem->bufmgr.debug = GL_FALSE; + bufmgr_gem->bufmgr.check_aperture_space = dri_gem_check_aperture_space; + /* Initialize the linked lists for BO reuse cache. */ + for (i = 0; i < INTEL_GEM_BO_BUCKETS; i++) + bufmgr_gem->cache_bucket[i].tail = &bufmgr_gem->cache_bucket[i].head; + + return &bufmgr_gem->bufmgr; +} + -- cgit v1.2.3 From c4857429c716f35e1fa054d1990cae28055d96d7 Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Tue, 3 Jun 2008 10:20:49 -0700 Subject: Fix and hook up bufmgr code to the build. --- libdrm/intel/intel_bufmgr_gem.c | 66 ++++++++++++++++++++--------------------- 1 file changed, 32 insertions(+), 34 deletions(-) (limited to 'libdrm/intel/intel_bufmgr_gem.c') diff --git a/libdrm/intel/intel_bufmgr_gem.c b/libdrm/intel/intel_bufmgr_gem.c index 3c1c3157..20f39b5f 100644 --- a/libdrm/intel/intel_bufmgr_gem.c +++ b/libdrm/intel/intel_bufmgr_gem.c @@ -44,15 +44,12 @@ #include #include "errno.h" -#include "mtypes.h" #include "dri_bufmgr.h" +#include "intel_bufmgr.h" #include "string.h" -#include "imports.h" #include "i915_drm.h" -#include "intel_bufmgr_gem.h" - #define DBG(...) do { \ if (bufmgr_gem->bufmgr.debug) \ fprintf(stderr, __VA_ARGS__); \ @@ -89,9 +86,11 @@ struct dri_gem_bo_bucket { typedef struct _dri_bufmgr_gem { dri_bufmgr bufmgr; + struct intel_bufmgr intel_bufmgr; + int fd; - uint32_t max_relocs; + int max_relocs; struct drm_i915_gem_exec_object *exec_objects; dri_bo **exec_bos; @@ -108,7 +107,8 @@ typedef struct _dri_bo_gem { dri_bo bo; int refcount; - GLboolean mapped; + /** Boolean whether the mmap ioctl has been called for this buffer yet. */ + int mapped; uint32_t gem_handle; const char *name; @@ -119,11 +119,11 @@ typedef struct _dri_bo_gem { int validate_index; /** - * Tracks whether set_domain to CPU is current + * Boolean whether set_domain to CPU is current * Set when set_domain has been called * Cleared when a batch has been submitted */ - GLboolean cpu_domain_set; + int cpu_domain_set; /** Array passed to the DRM containing relocation information. 
*/ struct drm_i915_gem_relocation_entry *relocs; @@ -138,8 +138,8 @@ typedef struct _dri_bo_gem { static int logbase2(int n) { - GLint i = 1; - GLint log2 = 0; + int i = 1; + int log2 = 0; while (n > i) { i *= 2; @@ -262,15 +262,14 @@ intel_setup_reloc_list(dri_bo *bo) static dri_bo * dri_gem_bo_alloc(dri_bufmgr *bufmgr, const char *name, - unsigned long size, unsigned int alignment, - uint64_t location_mask) + unsigned long size, unsigned int alignment) { dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bufmgr; dri_bo_gem *bo_gem; unsigned int page_size = getpagesize(); int ret; struct dri_gem_bo_bucket *bucket; - GLboolean alloc_from_cache = GL_FALSE; + int alloc_from_cache = 0; bo_gem = calloc(1, sizeof(*bo_gem)); if (!bo_gem) @@ -338,18 +337,6 @@ dri_gem_bo_alloc(dri_bufmgr *bufmgr, const char *name, return &bo_gem->bo; } -/* Our GEM backend doesn't allow creation of static buffers, as that requires - * privelege for the non-fake case, and the lock in the fake case where we were - * working around the X Server not creating buffers and passing handles to us. - */ -static dri_bo * -dri_gem_bo_alloc_static(dri_bufmgr *bufmgr, const char *name, - unsigned long offset, unsigned long size, void *virtual, - uint64_t location_mask) -{ - return NULL; -} - /** * Returns a dri_bo wrapping the given buffer object handle. * @@ -357,7 +344,7 @@ dri_gem_bo_alloc_static(dri_bufmgr *bufmgr, const char *name, * to another. */ dri_bo * -intel_gem_bo_create_from_handle(dri_bufmgr *bufmgr, const char *name, +intel_bo_gem_create_from_name(dri_bufmgr *bufmgr, const char *name, unsigned int handle) { dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bufmgr; @@ -465,7 +452,7 @@ dri_gem_bo_unreference(dri_bo *bo) } static int -dri_gem_bo_map(dri_bo *bo, GLboolean write_enable) +dri_gem_bo_map(dri_bo *bo, int write_enable) { dri_bufmgr_gem *bufmgr_gem; dri_bo_gem *bo_gem = (dri_bo_gem *)bo; @@ -499,7 +486,7 @@ dri_gem_bo_map(dri_bo *bo, GLboolean write_enable) bo_gem->virtual = (void *)(uintptr_t)mmap_arg.addr_ptr; } bo->virtual = bo_gem->virtual; - bo_gem->mapped = GL_TRUE; + bo_gem->mapped = 1; DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name, bo_gem->virtual); } @@ -514,7 +501,7 @@ dri_gem_bo_map(dri_bo *bo, GLboolean write_enable) bo_gem->gem_handle, set_domain.read_domains, set_domain.write_domain, strerror (errno)); } - bo_gem->cpu_domain_set = GL_TRUE; + bo_gem->cpu_domain_set = 1; } return 0; @@ -763,7 +750,7 @@ dri_gem_post_submit(dri_bo *batch_buf) dri_bo_gem *bo_gem = (dri_bo_gem *)bo; /* Need to call set_domain on next bo_map */ - bo_gem->cpu_domain_set = GL_FALSE; + bo_gem->cpu_domain_set = 0; /* Disconnect the buffer from the validate list */ bo_gem->validate_index = -1; @@ -781,7 +768,7 @@ dri_gem_post_submit(dri_bo *batch_buf) * in flight at once. 
*/ void -intel_gem_enable_bo_reuse(dri_bufmgr *bufmgr) +intel_bufmgr_gem_enable_reuse(dri_bufmgr *bufmgr) { dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bufmgr; int i; @@ -824,7 +811,6 @@ intel_bufmgr_gem_init(int fd, int batch_size) bufmgr_gem->max_relocs = batch_size / sizeof(uint32_t) / 2 - 2; bufmgr_gem->bufmgr.bo_alloc = dri_gem_bo_alloc; - bufmgr_gem->bufmgr.bo_alloc_static = dri_gem_bo_alloc_static; bufmgr_gem->bufmgr.bo_reference = dri_gem_bo_reference; bufmgr_gem->bufmgr.bo_unreference = dri_gem_bo_unreference; bufmgr_gem->bufmgr.bo_map = dri_gem_bo_map; @@ -833,11 +819,11 @@ intel_bufmgr_gem_init(int fd, int batch_size) bufmgr_gem->bufmgr.bo_get_subdata = dri_gem_bo_get_subdata; bufmgr_gem->bufmgr.bo_wait_rendering = dri_gem_bo_wait_rendering; bufmgr_gem->bufmgr.destroy = dri_bufmgr_gem_destroy; - bufmgr_gem->bufmgr.emit_reloc = dri_gem_emit_reloc; bufmgr_gem->bufmgr.process_relocs = dri_gem_process_reloc; bufmgr_gem->bufmgr.post_submit = dri_gem_post_submit; - bufmgr_gem->bufmgr.debug = GL_FALSE; + bufmgr_gem->bufmgr.debug = 0; bufmgr_gem->bufmgr.check_aperture_space = dri_gem_check_aperture_space; + bufmgr_gem->intel_bufmgr.emit_reloc = dri_gem_emit_reloc; /* Initialize the linked lists for BO reuse cache. */ for (i = 0; i < INTEL_GEM_BO_BUCKETS; i++) bufmgr_gem->cache_bucket[i].tail = &bufmgr_gem->cache_bucket[i].head; @@ -845,3 +831,15 @@ intel_bufmgr_gem_init(int fd, int batch_size) return &bufmgr_gem->bufmgr; } +int +intel_bo_emit_reloc(dri_bo *reloc_buf, + uint32_t read_domains, uint32_t write_domain, + uint32_t delta, uint32_t offset, dri_bo *target_buf) +{ + struct intel_bufmgr *intel_bufmgr; + + intel_bufmgr = (struct intel_bufmgr *)(reloc_buf->bufmgr + 1); + + return intel_bufmgr->emit_reloc(reloc_buf, read_domains, write_domain, + delta, offset, target_buf); +} -- cgit v1.2.3 From a919ff5d5ec2fe716cbf5c593be7cc0705499107 Mon Sep 17 00:00:00 2001 From: Keith Packard Date: Thu, 5 Jun 2008 15:58:09 -0700 Subject: [libdrm/intel] Reuse entire dri_bo_gem structure The code was discarding the dri_bo_gem structure and saving only the kernel handle. This lost the mmap address, causing pain when the next buffer user wanted to map the buffer. 
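
A minimal sketch of the access pattern this change speeds up, assuming the
dri_bo_alloc/dri_bo_map/dri_bo_unmap/dri_bo_unreference wrappers declared in
dri_bufmgr.h. With the whole dri_bo_gem parked in the bucket, the second
allocation below gets back the same object with its cached mmap address
still valid:

    dri_bo *bo = dri_bo_alloc(bufmgr, "scratch", 16384, 4096);
    dri_bo_map(bo, 1);           /* mmaps the object, caches ->virtual */
    memset(bo->virtual, 0, bo->size);
    dri_bo_unmap(bo);
    dri_bo_unreference(bo);      /* goes to the 16KB bucket, mapping kept */

    bo = dri_bo_alloc(bufmgr, "scratch", 16384, 4096);  /* bucket hit */
    dri_bo_map(bo, 1);           /* reuses the saved mmap, no new ioctl */
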
--- libdrm/intel/intel_bufmgr_gem.c | 76 +++++++++++++++++++++++------------------ 1 file changed, 43 insertions(+), 33 deletions(-) (limited to 'libdrm/intel/intel_bufmgr_gem.c') diff --git a/libdrm/intel/intel_bufmgr_gem.c b/libdrm/intel/intel_bufmgr_gem.c index 20f39b5f..08cb5d6b 100644 --- a/libdrm/intel/intel_bufmgr_gem.c +++ b/libdrm/intel/intel_bufmgr_gem.c @@ -55,15 +55,16 @@ fprintf(stderr, __VA_ARGS__); \ } while (0) +typedef struct _dri_bo_gem dri_bo_gem; + struct intel_validate_entry { - dri_bo *bo; + dri_bo_gem *bo_gem; struct drm_i915_op_arg bo_arg; }; struct dri_gem_bo_bucket_entry { - uint32_t gem_handle; - uint32_t last_offset; - struct dri_gem_bo_bucket_entry *next; + dri_bo_gem *bo_gem; + struct dri_gem_bo_bucket_entry *next; }; struct dri_gem_bo_bucket { @@ -103,7 +104,7 @@ typedef struct _dri_bufmgr_gem { struct drm_i915_gem_execbuffer exec_arg; } dri_bufmgr_gem; -typedef struct _dri_bo_gem { +struct _dri_bo_gem { dri_bo bo; int refcount; @@ -133,7 +134,7 @@ typedef struct _dri_bo_gem { int reloc_count; /** Mapped address for the buffer */ void *virtual; -} dri_bo_gem; +}; static int logbase2(int n) @@ -270,24 +271,21 @@ dri_gem_bo_alloc(dri_bufmgr *bufmgr, const char *name, int ret; struct dri_gem_bo_bucket *bucket; int alloc_from_cache = 0; - - bo_gem = calloc(1, sizeof(*bo_gem)); - if (!bo_gem) - return NULL; + unsigned long bo_size; /* Round the allocated size up to a power of two number of pages. */ - bo_gem->bo.size = 1 << logbase2(size); - if (bo_gem->bo.size < page_size) - bo_gem->bo.size = page_size; - bucket = dri_gem_bo_bucket_for_size(bufmgr_gem, bo_gem->bo.size); + bo_size = 1 << logbase2(size); + if (bo_size < page_size) + bo_size = page_size; + bucket = dri_gem_bo_bucket_for_size(bufmgr_gem, bo_size); /* If we don't have caching at this size, don't actually round the * allocation up. 
*/ if (bucket == NULL || bucket->max_entries == 0) { - bo_gem->bo.size = size; - if (bo_gem->bo.size < page_size) - bo_gem->bo.size = page_size; + bo_size = size; + if (bo_size < page_size) + bo_size = page_size; } /* Get a buffer out of the cache if available */ @@ -295,7 +293,9 @@ dri_gem_bo_alloc(dri_bufmgr *bufmgr, const char *name, struct dri_gem_bo_bucket_entry *entry = bucket->head; struct drm_i915_gem_busy busy; - busy.handle = entry->gem_handle; + bo_gem = entry->bo_gem; + busy.handle = bo_gem->gem_handle; + ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy); alloc_from_cache = (ret == 0 && busy.busy == 0); @@ -305,8 +305,6 @@ dri_gem_bo_alloc(dri_bufmgr *bufmgr, const char *name, bucket->tail = &bucket->head; bucket->num_entries--; - bo_gem->gem_handle = entry->gem_handle; - bo_gem->bo.offset = entry->last_offset; free(entry); } } @@ -314,8 +312,13 @@ dri_gem_bo_alloc(dri_bufmgr *bufmgr, const char *name, if (!alloc_from_cache) { struct drm_gem_create create; + bo_gem = calloc(1, sizeof(*bo_gem)); + if (!bo_gem) + return NULL; + + bo_gem->bo.size = bo_size; memset(&create, 0, sizeof(create)); - create.size = bo_gem->bo.size; + create.size = bo_size; ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CREATE, &create); bo_gem->gem_handle = create.handle; @@ -323,10 +326,9 @@ dri_gem_bo_alloc(dri_bufmgr *bufmgr, const char *name, free(bo_gem); return NULL; } + bo_gem->bo.bufmgr = bufmgr; } - bo_gem->bo.virtual = NULL; - bo_gem->bo.bufmgr = bufmgr; bo_gem->name = name; bo_gem->refcount = 1; bo_gem->validate_index = -1; @@ -400,9 +402,6 @@ dri_gem_bo_unreference(dri_bo *bo) struct dri_gem_bo_bucket *bucket; int ret; - if (bo_gem->mapped) - munmap (bo_gem->virtual, bo->size); - if (bo_gem->relocs != NULL) { int i; @@ -413,6 +412,9 @@ dri_gem_bo_unreference(dri_bo *bo) free(bo_gem->relocs); } + DBG("bo_unreference final: %d (%s)\n", + bo_gem->gem_handle, bo_gem->name); + bucket = dri_gem_bo_bucket_for_size(bufmgr_gem, bo->size); /* Put the buffer into our internal cache for reuse if we can. 
*/ if (bucket != NULL && @@ -422,9 +424,14 @@ dri_gem_bo_unreference(dri_bo *bo) { struct dri_gem_bo_bucket_entry *entry; + bo_gem->name = 0; + bo_gem->validate_index = -1; + bo_gem->relocs = NULL; + bo_gem->reloc_target_bo = NULL; + bo_gem->reloc_count = 0; + entry = calloc(1, sizeof(*entry)); - entry->gem_handle = bo_gem->gem_handle; - entry->last_offset = bo->offset; + entry->bo_gem = bo_gem; entry->next = NULL; *bucket->tail = entry; @@ -441,12 +448,9 @@ dri_gem_bo_unreference(dri_bo *bo) "DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n", bo_gem->gem_handle, bo_gem->name, strerror(-ret)); } + free(bo); } - DBG("bo_unreference final: %d (%s)\n", - bo_gem->gem_handle, bo_gem->name); - - free(bo); return; } } @@ -604,6 +608,7 @@ dri_bufmgr_gem_destroy(dri_bufmgr *bufmgr) while ((entry = bucket->head) != NULL) { struct drm_gem_close close; + dri_bo_gem *bo_gem; int ret; bucket->head = entry->next; @@ -611,14 +616,19 @@ dri_bufmgr_gem_destroy(dri_bufmgr *bufmgr) bucket->tail = &bucket->head; bucket->num_entries--; + bo_gem = entry->bo_gem; + if (bo_gem->mapped) + munmap (bo_gem->virtual, bo_gem->bo.size); + /* Close this object */ - close.handle = entry->gem_handle; + close.handle = bo_gem->gem_handle; ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close); if (ret != 0) { fprintf(stderr, "DRM_IOCTL_GEM_CLOSE failed: %s\n", strerror(-ret)); } + free(bo_gem); free(entry); } } -- cgit v1.2.3 From 5a55b48a410bb25666177c0ea8e5711ea2e3c795 Mon Sep 17 00:00:00 2001 From: Keith Packard Date: Thu, 5 Jun 2008 15:58:55 -0700 Subject: [libdrm/intel] Remove unused intel_validate_entry structure --- libdrm/intel/intel_bufmgr_gem.c | 5 ----- 1 file changed, 5 deletions(-) (limited to 'libdrm/intel/intel_bufmgr_gem.c') diff --git a/libdrm/intel/intel_bufmgr_gem.c b/libdrm/intel/intel_bufmgr_gem.c index 08cb5d6b..32e70912 100644 --- a/libdrm/intel/intel_bufmgr_gem.c +++ b/libdrm/intel/intel_bufmgr_gem.c @@ -57,11 +57,6 @@ typedef struct _dri_bo_gem dri_bo_gem; -struct intel_validate_entry { - dri_bo_gem *bo_gem; - struct drm_i915_op_arg bo_arg; -}; - struct dri_gem_bo_bucket_entry { dri_bo_gem *bo_gem; struct dri_gem_bo_bucket_entry *next; -- cgit v1.2.3 From 329e0862255e8ad27e2aa4e3755421a18ea1acc5 Mon Sep 17 00:00:00 2001 From: Keith Packard Date: Thu, 5 Jun 2008 16:05:35 -0700 Subject: [libdrm/intel] Eliminate extra dri_gem_bo_bucket_entry structure Place the buffer reuse links right into the dri_bo_gem object. --- libdrm/intel/intel_bufmgr_gem.c | 42 ++++++++++++++--------------------------- 1 file changed, 14 insertions(+), 28 deletions(-) (limited to 'libdrm/intel/intel_bufmgr_gem.c') diff --git a/libdrm/intel/intel_bufmgr_gem.c b/libdrm/intel/intel_bufmgr_gem.c index 32e70912..6504ad6e 100644 --- a/libdrm/intel/intel_bufmgr_gem.c +++ b/libdrm/intel/intel_bufmgr_gem.c @@ -57,14 +57,8 @@ typedef struct _dri_bo_gem dri_bo_gem; -struct dri_gem_bo_bucket_entry { - dri_bo_gem *bo_gem; - struct dri_gem_bo_bucket_entry *next; -}; - struct dri_gem_bo_bucket { - struct dri_gem_bo_bucket_entry *head; - struct dri_gem_bo_bucket_entry **tail; + dri_bo_gem *head, **tail; /** * Limit on the number of entries in this bucket. 
* @@ -129,6 +123,9 @@ struct _dri_bo_gem { int reloc_count; /** Mapped address for the buffer */ void *virtual; + + /** free list */ + dri_bo_gem *next; }; static int @@ -285,22 +282,19 @@ dri_gem_bo_alloc(dri_bufmgr *bufmgr, const char *name, /* Get a buffer out of the cache if available */ if (bucket != NULL && bucket->num_entries > 0) { - struct dri_gem_bo_bucket_entry *entry = bucket->head; struct drm_i915_gem_busy busy; - bo_gem = entry->bo_gem; + bo_gem = bucket->head; busy.handle = bo_gem->gem_handle; ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy); alloc_from_cache = (ret == 0 && busy.busy == 0); if (alloc_from_cache) { - bucket->head = entry->next; - if (entry->next == NULL) + bucket->head = bo_gem->next; + if (bo_gem->next == NULL) bucket->tail = &bucket->head; bucket->num_entries--; - - free(entry); } } @@ -417,20 +411,15 @@ dri_gem_bo_unreference(dri_bo *bo) (bucket->max_entries > 0 && bucket->num_entries < bucket->max_entries))) { - struct dri_gem_bo_bucket_entry *entry; - bo_gem->name = 0; bo_gem->validate_index = -1; bo_gem->relocs = NULL; bo_gem->reloc_target_bo = NULL; bo_gem->reloc_count = 0; - entry = calloc(1, sizeof(*entry)); - entry->bo_gem = bo_gem; - - entry->next = NULL; - *bucket->tail = entry; - bucket->tail = &entry->next; + bo_gem->next = NULL; + *bucket->tail = bo_gem; + bucket->tail = &bo_gem->next; bucket->num_entries++; } else { struct drm_gem_close close; @@ -599,19 +588,17 @@ dri_bufmgr_gem_destroy(dri_bufmgr *bufmgr) /* Free any cached buffer objects we were going to reuse */ for (i = 0; i < INTEL_GEM_BO_BUCKETS; i++) { struct dri_gem_bo_bucket *bucket = &bufmgr_gem->cache_bucket[i]; - struct dri_gem_bo_bucket_entry *entry; + dri_bo_gem *bo_gem; - while ((entry = bucket->head) != NULL) { + while ((bo_gem = bucket->head) != NULL) { struct drm_gem_close close; - dri_bo_gem *bo_gem; int ret; - bucket->head = entry->next; - if (entry->next == NULL) + bucket->head = bo_gem->next; + if (bo_gem->next == NULL) bucket->tail = &bucket->head; bucket->num_entries--; - bo_gem = entry->bo_gem; if (bo_gem->mapped) munmap (bo_gem->virtual, bo_gem->bo.size); @@ -624,7 +611,6 @@ dri_bufmgr_gem_destroy(dri_bufmgr *bufmgr) } free(bo_gem); - free(entry); } } -- cgit v1.2.3 From 500c81d194115fb3c4b97d742519689478eeb4e8 Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Fri, 6 Jun 2008 17:13:16 -0700 Subject: [gem] Don't forget to munmap in the non-bo-reuse object-freeing case. 
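
The rule this fix enforces, condensed from the dri_gem_bo_free helper the
patch adds below (error reporting elided): closing a GEM handle does not
tear down a userspace mmap of the object, so every path that frees a buffer
must munmap first.

    static void
    dri_gem_bo_free(dri_bo *bo)
    {
        dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
        dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
        struct drm_gem_close close;

        if (bo_gem->mapped)                 /* drop the CPU mapping first */
            munmap(bo_gem->virtual, bo_gem->bo.size);

        close.handle = bo_gem->gem_handle;  /* then drop the kernel handle */
        ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close);
        free(bo);
    }
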
--- libdrm/intel/intel_bufmgr_gem.c | 51 +++++++++++++++++++---------------------- 1 file changed, 24 insertions(+), 27 deletions(-) (limited to 'libdrm/intel/intel_bufmgr_gem.c') diff --git a/libdrm/intel/intel_bufmgr_gem.c b/libdrm/intel/intel_bufmgr_gem.c index 6504ad6e..a65ae982 100644 --- a/libdrm/intel/intel_bufmgr_gem.c +++ b/libdrm/intel/intel_bufmgr_gem.c @@ -378,6 +378,28 @@ dri_gem_bo_reference(dri_bo *bo) bo_gem->refcount++; } +static void +dri_gem_bo_free(dri_bo *bo) +{ + dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr; + dri_bo_gem *bo_gem = (dri_bo_gem *)bo; + struct drm_gem_close close; + int ret; + + if (bo_gem->mapped) + munmap (bo_gem->virtual, bo_gem->bo.size); + + /* Close this object */ + close.handle = bo_gem->gem_handle; + ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close); + if (ret != 0) { + fprintf(stderr, + "DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n", + bo_gem->gem_handle, bo_gem->name, strerror(-ret)); + } + free(bo); +} + static void dri_gem_bo_unreference(dri_bo *bo) { @@ -389,7 +411,6 @@ dri_gem_bo_unreference(dri_bo *bo) if (--bo_gem->refcount == 0) { struct dri_gem_bo_bucket *bucket; - int ret; if (bo_gem->relocs != NULL) { int i; @@ -422,17 +443,7 @@ dri_gem_bo_unreference(dri_bo *bo) bucket->tail = &bo_gem->next; bucket->num_entries++; } else { - struct drm_gem_close close; - - /* Close this object */ - close.handle = bo_gem->gem_handle; - ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close); - if (ret != 0) { - fprintf(stderr, - "DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n", - bo_gem->gem_handle, bo_gem->name, strerror(-ret)); - } - free(bo); + dri_gem_bo_free(bo); } return; @@ -591,26 +602,12 @@ dri_bufmgr_gem_destroy(dri_bufmgr *bufmgr) dri_bo_gem *bo_gem; while ((bo_gem = bucket->head) != NULL) { - struct drm_gem_close close; - int ret; - bucket->head = bo_gem->next; if (bo_gem->next == NULL) bucket->tail = &bucket->head; bucket->num_entries--; - if (bo_gem->mapped) - munmap (bo_gem->virtual, bo_gem->bo.size); - - /* Close this object */ - close.handle = bo_gem->gem_handle; - ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close); - if (ret != 0) { - fprintf(stderr, "DRM_IOCTL_GEM_CLOSE failed: %s\n", - strerror(-ret)); - } - - free(bo_gem); + dri_gem_bo_free(&bo_gem->bo); } } -- cgit v1.2.3 From 2655005762b8915d5f44d1d1ee7e6c2eb34841d7 Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Wed, 11 Jun 2008 14:42:40 -0700 Subject: [gem] Move potentially device-specific ioctls to the intel driver. This is the create (may want location flags), pread/pwrite/mmap (performance tuning hints), and set_domain (will 32 bits be enough for everyone?) ioctls. Left in the generic set are just flink/open/close. The 2D driver must be updated for this change, and API but not ABI is broken for 3D. The driver version is bumped to mark this. 
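
A hedged sketch of an allocation after this split, using the struct and
ioctl names from the hunks below (fd is assumed to be an open DRM device
descriptor): object creation now goes through the driver-private ioctl,
while only flink/open/close remain generic.

    struct drm_i915_gem_create create;
    uint32_t handle = 0;

    memset(&create, 0, sizeof(create));
    create.size = 4096;    /* device-specific: may grow location flags */
    if (ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create) == 0)
        handle = create.handle;  /* still works with generic DRM_IOCTL_GEM_FLINK */
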
--- libdrm/intel/intel_bufmgr_gem.c | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) (limited to 'libdrm/intel/intel_bufmgr_gem.c') diff --git a/libdrm/intel/intel_bufmgr_gem.c b/libdrm/intel/intel_bufmgr_gem.c index a65ae982..e057d949 100644 --- a/libdrm/intel/intel_bufmgr_gem.c +++ b/libdrm/intel/intel_bufmgr_gem.c @@ -299,7 +299,7 @@ dri_gem_bo_alloc(dri_bufmgr *bufmgr, const char *name, } if (!alloc_from_cache) { - struct drm_gem_create create; + struct drm_i915_gem_create create; bo_gem = calloc(1, sizeof(*bo_gem)); if (!bo_gem) @@ -309,7 +309,7 @@ dri_gem_bo_alloc(dri_bufmgr *bufmgr, const char *name, memset(&create, 0, sizeof(create)); create.size = bo_size; - ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CREATE, &create); + ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CREATE, &create); bo_gem->gem_handle = create.handle; if (ret != 0) { free(bo_gem); @@ -455,7 +455,7 @@ dri_gem_bo_map(dri_bo *bo, int write_enable) { dri_bufmgr_gem *bufmgr_gem; dri_bo_gem *bo_gem = (dri_bo_gem *)bo; - struct drm_gem_set_domain set_domain; + struct drm_i915_gem_set_domain set_domain; int ret; bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr; @@ -470,13 +470,13 @@ dri_gem_bo_map(dri_bo *bo, int write_enable) DBG("bo_map: %d (%s)\n", bo_gem->gem_handle, bo_gem->name); if (bo_gem->virtual == NULL) { - struct drm_gem_mmap mmap_arg; + struct drm_i915_gem_mmap mmap_arg; memset(&mmap_arg, 0, sizeof(mmap_arg)); mmap_arg.handle = bo_gem->gem_handle; mmap_arg.offset = 0; mmap_arg.size = bo->size; - ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_MMAP, &mmap_arg); + ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg); if (ret != 0) { fprintf(stderr, "%s:%d: Error mapping buffer %d (%s): %s .\n", __FILE__, __LINE__, @@ -491,9 +491,9 @@ dri_gem_bo_map(dri_bo *bo, int write_enable) if (!bo_gem->cpu_domain_set) { set_domain.handle = bo_gem->gem_handle; - set_domain.read_domains = DRM_GEM_DOMAIN_CPU; - set_domain.write_domain = write_enable ? DRM_GEM_DOMAIN_CPU : 0; - ret = ioctl (bufmgr_gem->fd, DRM_IOCTL_GEM_SET_DOMAIN, &set_domain); + set_domain.read_domains = I915_GEM_DOMAIN_CPU; + set_domain.write_domain = write_enable ? 
I915_GEM_DOMAIN_CPU : 0; + ret = ioctl (bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain); if (ret != 0) { fprintf (stderr, "%s:%d: Error setting memory domains %d (%08x %08x): %s .\n", __FILE__, __LINE__, @@ -525,7 +525,7 @@ dri_gem_bo_subdata (dri_bo *bo, unsigned long offset, { dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr; dri_bo_gem *bo_gem = (dri_bo_gem *)bo; - struct drm_gem_pwrite pwrite; + struct drm_i915_gem_pwrite pwrite; int ret; memset (&pwrite, 0, sizeof (pwrite)); @@ -533,7 +533,7 @@ dri_gem_bo_subdata (dri_bo *bo, unsigned long offset, pwrite.offset = offset; pwrite.size = size; pwrite.data_ptr = (uint64_t) (uintptr_t) data; - ret = ioctl (bufmgr_gem->fd, DRM_IOCTL_GEM_PWRITE, &pwrite); + ret = ioctl (bufmgr_gem->fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite); if (ret != 0) { fprintf (stderr, "%s:%d: Error writing data to buffer %d: (%d %d) %s .\n", __FILE__, __LINE__, @@ -549,7 +549,7 @@ dri_gem_bo_get_subdata (dri_bo *bo, unsigned long offset, { dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr; dri_bo_gem *bo_gem = (dri_bo_gem *)bo; - struct drm_gem_pread pread; + struct drm_i915_gem_pread pread; int ret; memset (&pread, 0, sizeof (pread)); @@ -557,7 +557,7 @@ dri_gem_bo_get_subdata (dri_bo *bo, unsigned long offset, pread.offset = offset; pread.size = size; pread.data_ptr = (uint64_t) (uintptr_t) data; - ret = ioctl (bufmgr_gem->fd, DRM_IOCTL_GEM_PREAD, &pread); + ret = ioctl (bufmgr_gem->fd, DRM_IOCTL_I915_GEM_PREAD, &pread); if (ret != 0) { fprintf (stderr, "%s:%d: Error reading data from buffer %d: (%d %d) %s .\n", __FILE__, __LINE__, @@ -572,13 +572,13 @@ dri_gem_bo_wait_rendering(dri_bo *bo) { dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr; dri_bo_gem *bo_gem = (dri_bo_gem *)bo; - struct drm_gem_set_domain set_domain; + struct drm_i915_gem_set_domain set_domain; int ret; set_domain.handle = bo_gem->gem_handle; - set_domain.read_domains = DRM_GEM_DOMAIN_CPU; + set_domain.read_domains = I915_GEM_DOMAIN_CPU; set_domain.write_domain = 0; - ret = ioctl (bufmgr_gem->fd, DRM_IOCTL_GEM_SET_DOMAIN, &set_domain); + ret = ioctl (bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain); if (ret != 0) { fprintf (stderr, "%s:%d: Error setting memory domains %d (%08x %08x): %s .\n", __FILE__, __LINE__, -- cgit v1.2.3 From e558e1d7dacafa1e7f9681f1eaec072d663287de Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Fri, 13 Jun 2008 10:04:14 -0700 Subject: [gem] Catch -EINTR from blocking ioctls and restart them. Thanks to Thomas Hellstrom for catching the issue, no thanks to the kernel developer who authoritatively told me that they would get restarted on their own. --- libdrm/intel/intel_bufmgr_gem.c | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) (limited to 'libdrm/intel/intel_bufmgr_gem.c') diff --git a/libdrm/intel/intel_bufmgr_gem.c b/libdrm/intel/intel_bufmgr_gem.c index e057d949..5a28bd14 100644 --- a/libdrm/intel/intel_bufmgr_gem.c +++ b/libdrm/intel/intel_bufmgr_gem.c @@ -493,7 +493,10 @@ dri_gem_bo_map(dri_bo *bo, int write_enable) set_domain.handle = bo_gem->gem_handle; set_domain.read_domains = I915_GEM_DOMAIN_CPU; set_domain.write_domain = write_enable ? 
I915_GEM_DOMAIN_CPU : 0; - ret = ioctl (bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain); + do { + ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, + &set_domain); + } while (ret == -1 && errno == EINTR); if (ret != 0) { fprintf (stderr, "%s:%d: Error setting memory domains %d (%08x %08x): %s .\n", __FILE__, __LINE__, @@ -533,7 +536,9 @@ dri_gem_bo_subdata (dri_bo *bo, unsigned long offset, pwrite.offset = offset; pwrite.size = size; pwrite.data_ptr = (uint64_t) (uintptr_t) data; - ret = ioctl (bufmgr_gem->fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite); + do { + ret = ioctl (bufmgr_gem->fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite); + } while (ret == -1 && errno == EINTR); if (ret != 0) { fprintf (stderr, "%s:%d: Error writing data to buffer %d: (%d %d) %s .\n", __FILE__, __LINE__, @@ -557,7 +562,9 @@ dri_gem_bo_get_subdata (dri_bo *bo, unsigned long offset, pread.offset = offset; pread.size = size; pread.data_ptr = (uint64_t) (uintptr_t) data; - ret = ioctl (bufmgr_gem->fd, DRM_IOCTL_I915_GEM_PREAD, &pread); + do { + ret = ioctl (bufmgr_gem->fd, DRM_IOCTL_I915_GEM_PREAD, &pread); + } while (ret == -1 && errno == EINTR); if (ret != 0) { fprintf (stderr, "%s:%d: Error reading data from buffer %d: (%d %d) %s .\n", __FILE__, __LINE__, -- cgit v1.2.3 From 52e5d24fae4af6f2f4a5304a516c8c5ab347a11b Mon Sep 17 00:00:00 2001 From: Keith Packard Date: Fri, 20 Jun 2008 00:19:42 -0700 Subject: [intel-gem] Add DRM_IOCTL_I915_GEM_SW_FINISH to flag CPU writes When a software fallback has completed, usermode must notify the kernel so that any scanout buffers can be synchronized. This ioctl should be called whenever a fallback completes to flush CPU and chipset caches. --- libdrm/intel/intel_bufmgr_gem.c | 39 ++++++++++++++++++++++++++------------- 1 file changed, 26 insertions(+), 13 deletions(-) (limited to 'libdrm/intel/intel_bufmgr_gem.c') diff --git a/libdrm/intel/intel_bufmgr_gem.c b/libdrm/intel/intel_bufmgr_gem.c index 5a28bd14..b970eacf 100644 --- a/libdrm/intel/intel_bufmgr_gem.c +++ b/libdrm/intel/intel_bufmgr_gem.c @@ -109,11 +109,11 @@ struct _dri_bo_gem { int validate_index; /** - * Boolean whether set_domain to CPU is current - * Set when set_domain has been called - * Cleared when a batch has been submitted + * Boolean whether we've started swrast + * Set when the buffer has been mapped + * Cleared when the buffer is unmapped */ - int cpu_domain_set; + int swrast; /** Array passed to the DRM containing relocation information. */ struct drm_i915_gem_relocation_entry *relocs; @@ -485,25 +485,27 @@ dri_gem_bo_map(dri_bo *bo, int write_enable) bo_gem->virtual = (void *)(uintptr_t)mmap_arg.addr_ptr; } bo->virtual = bo_gem->virtual; + bo_gem->swrast = 0; bo_gem->mapped = 1; DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name, bo_gem->virtual); } - if (!bo_gem->cpu_domain_set) { + if (!bo_gem->swrast) { set_domain.handle = bo_gem->gem_handle; set_domain.read_domains = I915_GEM_DOMAIN_CPU; - set_domain.write_domain = write_enable ? 
I915_GEM_DOMAIN_CPU : 0; + if (write_enable) + set_domain.write_domain = I915_GEM_DOMAIN_CPU; + else + set_domain.write_domain = 0; do { ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain); } while (ret == -1 && errno == EINTR); if (ret != 0) { - fprintf (stderr, "%s:%d: Error setting memory domains %d (%08x %08x): %s .\n", - __FILE__, __LINE__, - bo_gem->gem_handle, set_domain.read_domains, set_domain.write_domain, - strerror (errno)); + fprintf (stderr, "%s:%d: Error setting swrast %d: %s\n", + __FILE__, __LINE__, bo_gem->gem_handle, strerror (errno)); } - bo_gem->cpu_domain_set = 1; + bo_gem->swrast = 1; } return 0; @@ -512,13 +514,24 @@ dri_gem_bo_map(dri_bo *bo, int write_enable) static int dri_gem_bo_unmap(dri_bo *bo) { + dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr; dri_bo_gem *bo_gem = (dri_bo_gem *)bo; + struct drm_i915_gem_sw_finish sw_finish; + int ret; if (bo == NULL) return 0; assert(bo_gem->mapped); + if (bo_gem->swrast) { + sw_finish.handle = bo_gem->gem_handle; + do { + ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SW_FINISH, + &sw_finish); + } while (ret == -1 && errno == EINTR); + bo_gem->swrast = 0; + } return 0; } @@ -744,8 +757,8 @@ dri_gem_post_submit(dri_bo *batch_buf) dri_bo *bo = bufmgr_gem->exec_bos[i]; dri_bo_gem *bo_gem = (dri_bo_gem *)bo; - /* Need to call set_domain on next bo_map */ - bo_gem->cpu_domain_set = 0; + /* Need to call swrast on next bo_map */ + bo_gem->swrast = 0; /* Disconnect the buffer from the validate list */ bo_gem->validate_index = -1; -- cgit v1.2.3 From 5540457fa5bf291e88efb23721b5ac71379c6a6e Mon Sep 17 00:00:00 2001 From: Keith Packard Date: Mon, 23 Jun 2008 11:21:30 -0700 Subject: [intel-gem] Use I915_GEM_DOMAIN_GTT in dri_gem_bo_wait_rendering. I915_GEM_DOMAIN_CPU is very expensive to wait for -- it generally requires clflushing the frame buffer. 
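
The wait idiom after this change, as a minimal sketch (set_domain usage as
in the hunk below): pulling the buffer into the GTT read domain still
blocks until rendering to it finishes, but skips the clflush pass that
CPU-domain coherency would force on a framebuffer-sized object.

    struct drm_i915_gem_set_domain set_domain;

    memset(&set_domain, 0, sizeof(set_domain));
    set_domain.handle = bo_gem->gem_handle;
    set_domain.read_domains = I915_GEM_DOMAIN_GTT;  /* was I915_GEM_DOMAIN_CPU */
    set_domain.write_domain = 0;
    ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain);
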
--- libdrm/intel/intel_bufmgr_gem.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'libdrm/intel/intel_bufmgr_gem.c') diff --git a/libdrm/intel/intel_bufmgr_gem.c b/libdrm/intel/intel_bufmgr_gem.c index b970eacf..cdc2a7ac 100644 --- a/libdrm/intel/intel_bufmgr_gem.c +++ b/libdrm/intel/intel_bufmgr_gem.c @@ -596,7 +596,7 @@ dri_gem_bo_wait_rendering(dri_bo *bo) int ret; set_domain.handle = bo_gem->gem_handle; - set_domain.read_domains = I915_GEM_DOMAIN_CPU; + set_domain.read_domains = I915_GEM_DOMAIN_GTT; set_domain.write_domain = 0; ret = ioctl (bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain); if (ret != 0) { -- cgit v1.2.3 From 8e41ce17b4ab72f526cc6e9acd75c3fa81a60433 Mon Sep 17 00:00:00 2001 From: Keith Packard Date: Mon, 4 Aug 2008 00:34:08 -0700 Subject: Expose pin/unpin/set_tiling/flink APIs --- libdrm/intel/intel_bufmgr_gem.c | 131 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 131 insertions(+) (limited to 'libdrm/intel/intel_bufmgr_gem.c') diff --git a/libdrm/intel/intel_bufmgr_gem.c b/libdrm/intel/intel_bufmgr_gem.c index cdc2a7ac..22f8695d 100644 --- a/libdrm/intel/intel_bufmgr_gem.c +++ b/libdrm/intel/intel_bufmgr_gem.c @@ -768,6 +768,81 @@ dri_gem_post_submit(dri_bo *batch_buf) bufmgr_gem->exec_count = 0; } +static int +dri_gem_pin(dri_bo *bo, uint32_t alignment) +{ + dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr; + dri_bo_gem *bo_gem = (dri_bo_gem *)bo; + struct drm_i915_gem_pin pin; + int ret; + + pin.handle = bo_gem->gem_handle; + pin.alignment = alignment; + + ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_PIN, &pin); + if (ret != 0) + return -errno; + + bo->offset = pin.offset; + return 0; +} + +static int +dri_gem_unpin(dri_bo *bo) +{ + dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr; + dri_bo_gem *bo_gem = (dri_bo_gem *)bo; + struct drm_i915_gem_unpin unpin; + int ret; + + unpin.handle = bo_gem->gem_handle; + + ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_UNPIN, &unpin); + if (ret != 0) + return -errno; + + return 0; +} + +static int +dri_gem_set_tiling(dri_bo *bo, uint32_t *tiling_mode) +{ + dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr; + dri_bo_gem *bo_gem = (dri_bo_gem *)bo; + struct drm_i915_gem_set_tiling set_tiling; + int ret; + + set_tiling.handle = bo_gem->gem_handle; + set_tiling.tiling_mode = *tiling_mode; + + ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SET_TILING, &set_tiling); + if (ret != 0) { + *tiling_mode = I915_TILING_NONE; + return -errno; + } + + *tiling_mode = set_tiling.tiling_mode; + return 0; +} + +static int +dri_gem_flink(dri_bo *bo, uint32_t *name) +{ + dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr; + dri_bo_gem *bo_gem = (dri_bo_gem *)bo; + struct drm_gem_flink flink; + int ret; + + flink.handle = bo_gem->gem_handle; + + ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_FLINK, &flink); + if (ret != 0) + return -errno; + + *name = flink.name; + return 0; +} + /** * Enables unlimited caching of buffer objects for reuse. * @@ -832,6 +907,10 @@ intel_bufmgr_gem_init(int fd, int batch_size) bufmgr_gem->bufmgr.debug = 0; bufmgr_gem->bufmgr.check_aperture_space = dri_gem_check_aperture_space; bufmgr_gem->intel_bufmgr.emit_reloc = dri_gem_emit_reloc; + bufmgr_gem->intel_bufmgr.pin = dri_gem_pin; + bufmgr_gem->intel_bufmgr.unpin = dri_gem_unpin; + bufmgr_gem->intel_bufmgr.set_tiling = dri_gem_set_tiling; + bufmgr_gem->intel_bufmgr.flink = dri_gem_flink; /* Initialize the linked lists for BO reuse cache. 
*/ for (i = 0; i < INTEL_GEM_BO_BUCKETS; i++) bufmgr_gem->cache_bucket[i].tail = &bufmgr_gem->cache_bucket[i].head; @@ -851,3 +930,55 @@ intel_bo_emit_reloc(dri_bo *reloc_buf, return intel_bufmgr->emit_reloc(reloc_buf, read_domains, write_domain, delta, offset, target_buf); } + +int +intel_bo_pin(dri_bo *bo, uint32_t alignment) +{ + struct intel_bufmgr *intel_bufmgr; + + intel_bufmgr = (struct intel_bufmgr *)(bo->bufmgr + 1); + + if (intel_bufmgr->pin) + return intel_bufmgr->pin(bo, alignment); + + return 0; +} + +int +intel_bo_unpin(dri_bo *bo) +{ + struct intel_bufmgr *intel_bufmgr; + + intel_bufmgr = (struct intel_bufmgr *)(bo->bufmgr + 1); + + if (intel_bufmgr->unpin) + return intel_bufmgr->unpin(bo); + + return 0; +} + +int intel_bo_set_tiling(dri_bo *bo, uint32_t *tiling_mode) +{ + struct intel_bufmgr *intel_bufmgr; + + intel_bufmgr = (struct intel_bufmgr *)(bo->bufmgr + 1); + + if (intel_bufmgr->set_tiling) + return intel_bufmgr->set_tiling (bo, tiling_mode); + + *tiling_mode = I915_TILING_NONE; + return 0; +} + +int intel_bo_flink(dri_bo *bo, uint32_t *name) +{ + struct intel_bufmgr *intel_bufmgr; + + intel_bufmgr = (struct intel_bufmgr *)(bo->bufmgr + 1); + + if (intel_bufmgr->flink) + return intel_bufmgr->flink (bo, name); + + return -ENODEV; +} + -- cgit v1.2.3 From 5968e061db90451b19c3948bbd91c6d5ac9af941 Mon Sep 17 00:00:00 2001 From: Keith Packard Date: Thu, 7 Aug 2008 15:26:30 -0700 Subject: Make flink save the kernel-assigned name and return it instead of creating another name --- libdrm/intel/intel_bufmgr_gem.c | 22 +++++++++++++++------- 1 file changed, 15 insertions(+), 7 deletions(-) (limited to 'libdrm/intel/intel_bufmgr_gem.c') diff --git a/libdrm/intel/intel_bufmgr_gem.c b/libdrm/intel/intel_bufmgr_gem.c index 22f8695d..f5732226 100644 --- a/libdrm/intel/intel_bufmgr_gem.c +++ b/libdrm/intel/intel_bufmgr_gem.c @@ -102,6 +102,11 @@ struct _dri_bo_gem { uint32_t gem_handle; const char *name; + /** + * Kenel-assigned global name for this object + */ + unsigned int global_name; + /** * Index of the buffer within the validation list while preparing a * batchbuffer execution. @@ -833,13 +838,16 @@ dri_gem_flink(dri_bo *bo, uint32_t *name) struct drm_gem_flink flink; int ret; - flink.handle = bo_gem->gem_handle; - - ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_FLINK, &flink); - if (ret != 0) - return -errno; - - *name = flink.name; + if (!bo_gem->global_name) { + flink.handle = bo_gem->gem_handle; + + ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_FLINK, &flink); + if (ret != 0) + return -errno; + bo_gem->gem_handle = flink.name; + } + + *name = bo_gem->gem_handle; return 0; } -- cgit v1.2.3 From 46e9274e8538e5b0517f611dca99dde611f4e95d Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Fri, 8 Aug 2008 13:13:46 -0700 Subject: Replace the check_aperture API with one we can make thread-safe. While the bufmgr isn't thread-safe at the moment, we need it to be for shared objects between contexts. --- libdrm/intel/intel_bufmgr_gem.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'libdrm/intel/intel_bufmgr_gem.c') diff --git a/libdrm/intel/intel_bufmgr_gem.c b/libdrm/intel/intel_bufmgr_gem.c index f5732226..48752f2f 100644 --- a/libdrm/intel/intel_bufmgr_gem.c +++ b/libdrm/intel/intel_bufmgr_gem.c @@ -873,7 +873,7 @@ intel_bufmgr_gem_enable_reuse(dri_bufmgr *bufmgr) * */ static int -dri_gem_check_aperture_space(dri_bo *bo) +dri_gem_check_aperture_space(dri_bo *bo_array, int count) { return 0; } -- cgit v1.2.3
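
Taken together, the series leaves a small public surface. A hedged
end-to-end sketch of its use (function names from the patches above;
drmOpen usage is illustrative, error handling is elided, and
dri_bufmgr_destroy is assumed to be the dri_bufmgr.h wrapper for the
destroy hook):

    int fd = drmOpen("i915", NULL);
    dri_bufmgr *bufmgr = intel_bufmgr_gem_init(fd, 16 * 1024);

    intel_bufmgr_gem_enable_reuse(bufmgr);   /* opt in to bucket caching */

    dri_bo *bo = dri_bo_alloc(bufmgr, "example", 8192, 4096);
    dri_bo_map(bo, 1);
    memset(bo->virtual, 0, bo->size);
    dri_bo_unmap(bo);                        /* flags the SW_FINISH flush */

    uint32_t name;
    if (intel_bo_flink(bo, &name) == 0) {
        /* another process can now call
         * intel_bo_gem_create_from_name(bufmgr, "shared", name) */
    }

    dri_bo_unreference(bo);
    dri_bufmgr_destroy(bufmgr);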