-rw-r--r--	linux-core/drmP.h	4
-rw-r--r--	linux-core/drm_memory.c	44
-rw-r--r--	linux-core/drm_proc.c	14
3 files changed, 53 insertions, 9 deletions
diff --git a/linux-core/drmP.h b/linux-core/drmP.h
index 1fea807b..2f76f3df 100644
--- a/linux-core/drmP.h
+++ b/linux-core/drmP.h
@@ -1001,8 +1001,10 @@ extern int drm_unbind_agp(DRM_AGP_MEM * handle);
extern void drm_free_memctl(size_t size);
extern int drm_alloc_memctl(size_t size);
extern void drm_query_memctl(uint64_t *cur_used,
+ uint64_t *emer_used,
uint64_t *low_threshold,
- uint64_t *high_threshold);
+ uint64_t *high_threshold,
+ uint64_t *emer_threshold);
extern void drm_init_memctl(size_t low_threshold,
size_t high_threshold,
size_t unit_size);
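
The header change widens drm_query_memctl() from three output parameters
to five. A minimal sketch of how a caller would be updated for the new
prototype; the function name and printk reporting here are illustrative,
not part of this commit:

	/* Hypothetical caller of the widened drm_query_memctl(). */
	static void report_memctl_usage(void)
	{
		uint64_t cur_used, emer_used;
		uint64_t low, high, emer;

		drm_query_memctl(&cur_used, &emer_used, &low, &high, &emer);
		printk(KERN_INFO "drm: %llu bytes used (%llu emergency), "
		       "thresholds %llu/%llu/%llu\n",
		       (unsigned long long)cur_used,
		       (unsigned long long)emer_used,
		       (unsigned long long)low,
		       (unsigned long long)high,
		       (unsigned long long)emer);
	}
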
diff --git a/linux-core/drm_memory.c b/linux-core/drm_memory.c
index 402a680f..1a6c48df 100644
--- a/linux-core/drm_memory.c
+++ b/linux-core/drm_memory.c
@@ -39,8 +39,10 @@
static struct {
spinlock_t lock;
uint64_t cur_used;
+ uint64_t emer_used;
uint64_t low_threshold;
uint64_t high_threshold;
+ uint64_t emer_threshold;
} drm_memctl = {
.lock = SPIN_LOCK_UNLOCKED
};
@@ -59,14 +61,30 @@ static inline size_t drm_size_align(size_t size)
int drm_alloc_memctl(size_t size)
{
- int ret;
+ int ret = 0;
unsigned long a_size = drm_size_align(size);
+ unsigned long new_used = drm_memctl.cur_used + a_size;
spin_lock(&drm_memctl.lock);
- ret = ((drm_memctl.cur_used + a_size) > drm_memctl.high_threshold) ?
- -ENOMEM : 0;
- if (!ret)
- drm_memctl.cur_used += a_size;
+ if (unlikely(new_used > drm_memctl.high_threshold)) {
+ if (!DRM_SUSER(DRM_CURPROC) ||
+ (new_used + drm_memctl.emer_used > drm_memctl.emer_threshold) ||
+ (a_size > 2*PAGE_SIZE)) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /*
+ * Allow small root-only allocations, even if the
+ * high threshold is exceeded.
+ */
+
+ new_used -= drm_memctl.high_threshold;
+ drm_memctl.emer_used += new_used;
+ a_size -= new_used;
+ }
+ drm_memctl.cur_used += a_size;
+out:
spin_unlock(&drm_memctl.lock);
return ret;
}
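
The arithmetic in the new branch above: when new_used would exceed
high_threshold, only the overshoot (new_used - high_threshold) is booked
against emer_used, and cur_used is charged the remainder, so cur_used is
capped at high_threshold. A self-contained userspace model of that split,
with made-up threshold values and without the kernel's locking or the
2*PAGE_SIZE size cap:

	#include <stdint.h>
	#include <stdio.h>

	static uint64_t cur_used, emer_used;
	static const uint64_t high_threshold = 100;	/* units: pages */
	static const uint64_t emer_threshold = 106;	/* high + high/16 */

	/* Models the accounting split in drm_alloc_memctl(). */
	static int model_alloc(uint64_t a_size, int is_root)
	{
		uint64_t new_used = cur_used + a_size;

		if (new_used > high_threshold) {
			if (!is_root ||
			    new_used + emer_used > emer_threshold)
				return -1;	/* -ENOMEM in the kernel */
			new_used -= high_threshold;	/* the overshoot */
			emer_used += new_used;	/* booked as emergency */
			a_size -= new_used;
		}
		cur_used += a_size;	/* never exceeds high_threshold */
		return 0;
	}

	int main(void)
	{
		cur_used = 99;
		model_alloc(2, 1);	/* root allocates 2 pages at the limit */
		printf("cur_used=%llu emer_used=%llu\n",
		       (unsigned long long)cur_used,
		       (unsigned long long)emer_used);	/* 100 and 1 */
		return 0;
	}
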
@@ -77,19 +95,30 @@ void drm_free_memctl(size_t size)
unsigned long a_size = drm_size_align(size);
spin_lock(&drm_memctl.lock);
+ if (likely(a_size >= drm_memctl.emer_used)) {
+ a_size -= drm_memctl.emer_used;
+ drm_memctl.emer_used = 0;
+ } else {
+ drm_memctl.emer_used -= a_size;
+ a_size = 0;
+ }
drm_memctl.cur_used -= a_size;
spin_unlock(&drm_memctl.lock);
}
EXPORT_SYMBOL(drm_free_memctl);
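
On the free side, outstanding emergency usage is repaid before cur_used
is decremented. Note that emer_used is a single global pool, so any free,
not only the allocation that caused the overshoot, pays it down first.
Continuing the userspace model above:

	/* Models drm_free_memctl(): emergency usage is repaid first. */
	static void model_free(uint64_t a_size)
	{
		if (a_size >= emer_used) {
			a_size -= emer_used;
			emer_used = 0;
		} else {
			emer_used -= a_size;
			a_size = 0;
		}
		cur_used -= a_size;
	}

Freeing the 2-page allocation from the example clears the one page of
emer_used and subtracts the remaining page from cur_used, returning both
counters to their prior values (99 and 0).
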
void drm_query_memctl(uint64_t *cur_used,
+ uint64_t *emer_used,
uint64_t *low_threshold,
- uint64_t *high_threshold)
+ uint64_t *high_threshold,
+ uint64_t *emer_threshold)
{
spin_lock(&drm_memctl.lock);
*cur_used = drm_memctl.cur_used;
+ *emer_used = drm_memctl.emer_used;
*low_threshold = drm_memctl.low_threshold;
*high_threshold = drm_memctl.high_threshold;
+ *emer_threshold = drm_memctl.emer_threshold;
spin_unlock(&drm_memctl.lock);
}
EXPORT_SYMBOL(drm_query_memctl);
@@ -99,9 +128,12 @@ void drm_init_memctl(size_t p_low_threshold,
size_t unit_size)
{
spin_lock(&drm_memctl.lock);
+ drm_memctl.emer_used = 0;
drm_memctl.cur_used = 0;
drm_memctl.low_threshold = p_low_threshold * unit_size;
drm_memctl.high_threshold = p_high_threshold * unit_size;
+ drm_memctl.emer_threshold = (drm_memctl.high_threshold >> 4) +
+ drm_memctl.high_threshold;
spin_unlock(&drm_memctl.lock);
}
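
The derived emergency threshold adds one sixteenth of the hard threshold
on top of it, i.e. 6.25% of root-only headroom above the hard limit:

	/*
	 * Worked example: with a hard threshold of 256 pages,
	 *   emer_threshold = (256 >> 4) + 256 = 16 + 256 = 272 pages.
	 */
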
diff --git a/linux-core/drm_proc.c b/linux-core/drm_proc.c
index 67afee8e..42da5c69 100644
--- a/linux-core/drm_proc.c
+++ b/linux-core/drm_proc.c
@@ -445,9 +445,10 @@ static int drm__objects_info(char *buf, char **start, off_t offset, int request,
struct drm_buffer_manager *bm = &dev->bm;
struct drm_fence_manager *fm = &dev->fm;
uint64_t used_mem;
+ uint64_t used_emer;
uint64_t low_mem;
uint64_t high_mem;
-
+ uint64_t emer_mem;
if (offset > DRM_PROC_LIMIT) {
*eof = 1;
@@ -476,7 +477,7 @@ static int drm__objects_info(char *buf, char **start, off_t offset, int request,
DRM_PROC_PRINT("Buffer objects are not supported by this driver.\n");
}
- drm_query_memctl(&used_mem, &low_mem, &high_mem);
+ drm_query_memctl(&used_mem, &used_emer, &low_mem, &high_mem, &emer_mem);
if (used_mem > 16*PAGE_SIZE) {
DRM_PROC_PRINT("Used object memory is %lu pages.\n",
@@ -485,10 +486,19 @@ static int drm__objects_info(char *buf, char **start, off_t offset, int request,
DRM_PROC_PRINT("Used object memory is %lu bytes.\n",
(unsigned long) used_mem);
}
+ if (used_emer > 16*PAGE_SIZE) {
+ DRM_PROC_PRINT("Used emergency memory is %lu pages.\n",
+ (unsigned long) (used_emer >> PAGE_SHIFT));
+ } else {
+ DRM_PROC_PRINT("Used emergency memory is %lu bytes.\n\n",
+ (unsigned long) used_emer);
+ }
DRM_PROC_PRINT("Soft object memory usage threshold is %lu pages.\n",
(unsigned long) (low_mem >> PAGE_SHIFT));
DRM_PROC_PRINT("Hard object memory usage threshold is %lu pages.\n",
(unsigned long) (high_mem >> PAGE_SHIFT));
+ DRM_PROC_PRINT("Emergency root only memory usage threshold is %lu pages.\n",
+ (unsigned long) (emer_mem >> PAGE_SHIFT));
DRM_PROC_PRINT("\n");