-rw-r--r--  libdrm/intel/intel_bufmgr_gem.c  46
1 file changed, 15 insertions(+), 31 deletions(-)
diff --git a/libdrm/intel/intel_bufmgr_gem.c b/libdrm/intel/intel_bufmgr_gem.c
index e88a6c3f..70d0c855 100644
--- a/libdrm/intel/intel_bufmgr_gem.c
+++ b/libdrm/intel/intel_bufmgr_gem.c
@@ -78,6 +78,7 @@ struct drm_intel_gem_bo_bucket {
      */
     int max_entries;
     int num_entries;
+    unsigned long size;
 };
 
 /* Only cache objects up to 64MB. Bigger than that, and the rounding of the
@@ -203,39 +204,20 @@ drm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t *tiling_mode,
 static void
 drm_intel_gem_bo_unreference(drm_intel_bo *bo);
 
-static int
-logbase2(int n)
-{
-    int i = 1;
-    int log2 = 0;
-
-    while (n > i) {
-        i *= 2;
-        log2++;
-    }
-
-    return log2;
-}
-
 static struct drm_intel_gem_bo_bucket *
 drm_intel_gem_bo_bucket_for_size(drm_intel_bufmgr_gem *bufmgr_gem,
                                  unsigned long size)
 {
     int i;
 
-    /* We only do buckets in power of two increments */
-    if ((size & (size - 1)) != 0)
-        return NULL;
-
-    /* We should only see sizes rounded to pages. */
-    assert((size % 4096) == 0);
-
-    /* We always allocate in units of pages */
-    i = ffs(size / 4096) - 1;
-    if (i >= DRM_INTEL_GEM_BO_BUCKETS)
-        return NULL;
+    for (i = 0; i < DRM_INTEL_GEM_BO_BUCKETS; i++) {
+        struct drm_intel_gem_bo_bucket *bucket = &bufmgr_gem->cache_bucket[i];
+        if (bucket->size >= size) {
+            return bucket;
+        }
+    }
 
-    return &bufmgr_gem->cache_bucket[i];
+    return NULL;
 }
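With this change the lookup no longer requires power-of-two sizes: it walks the cache buckets in ascending size order and returns the first bucket big enough for the request, or NULL when even the largest bucket is too small. A minimal standalone sketch of the same scan, using a hypothetical bucket array and NUM_BUCKETS constant rather than the real bufmgr_gem types:

    #include <stddef.h>

    #define NUM_BUCKETS 14          /* stand-in for DRM_INTEL_GEM_BO_BUCKETS */

    struct bucket {
        unsigned long size;         /* largest allocation this bucket caches */
    };

    /* Return the first bucket whose size can hold the request, or NULL when
     * the request exceeds every cached size.  Assumes the array is sorted by
     * ascending size, as the init loop at the end of this patch guarantees.
     */
    static struct bucket *
    bucket_for_size(struct bucket *buckets, unsigned long size)
    {
        int i;

        for (i = 0; i < NUM_BUCKETS; i++) {
            if (buckets[i].size >= size)
                return &buckets[i];
        }

        return NULL;
    }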
@@ -345,10 +327,7 @@ drm_intel_gem_bo_alloc_internal(drm_intel_bufmgr *bufmgr, const char *name,
     unsigned long bo_size;
 
     /* Round the allocated size up to a power of two number of pages. */
-    bo_size = 1 << logbase2(size);
-    if (bo_size < page_size)
-        bo_size = page_size;
-    bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, bo_size);
+    bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, size);
 
     /* If we don't have caching at this size, don't actually round the
      * allocation up.
@@ -357,6 +336,8 @@ drm_intel_gem_bo_alloc_internal(drm_intel_bufmgr *bufmgr, const char *name,
         bo_size = size;
         if (bo_size < page_size)
             bo_size = page_size;
+    } else {
+        bo_size = bucket->size;
     }
 
     pthread_mutex_lock(&bufmgr_gem->lock);
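The allocation path now delegates the rounding decision to the bucket: when a bucket exists, the BO is sized up to that bucket's size so it can later be reused from the cache; when no bucket fits, the requested size is used as-is, clamped to at least one page. A sketch of that decision under the same assumptions as above (hypothetical pick_bo_size() helper, 4096-byte page):

    /* Decide the actual BO size for a request of `size` bytes, mirroring the
     * logic in drm_intel_gem_bo_alloc_internal() after this patch.
     */
    static unsigned long
    pick_bo_size(struct bucket *buckets, unsigned long size)
    {
        const unsigned long page_size = 4096;   /* assumed page size */
        struct bucket *bucket = bucket_for_size(buckets, size);

        if (bucket == NULL) {
            /* No cache bucket covers this size: don't round up beyond the
             * request, but never allocate less than a page.
             */
            unsigned long bo_size = size;
            if (bo_size < page_size)
                bo_size = page_size;
            return bo_size;
        }

        /* Cached size: round up to the bucket so the BO is reusable. */
        return bucket->size;
    }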
@@ -1412,6 +1393,7 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size)
     struct drm_i915_gem_get_aperture aperture;
     drm_i915_getparam_t gp;
     int ret, i;
+    unsigned long size;
 
     bufmgr_gem = calloc(1, sizeof(*bufmgr_gem));
     bufmgr_gem->fd = fd;
@@ -1483,8 +1465,10 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size)
     bufmgr_gem->bufmgr.bo_disable_reuse = drm_intel_gem_bo_disable_reuse;
     bufmgr_gem->bufmgr.get_pipe_from_crtc_id = drm_intel_gem_get_pipe_from_crtc_id;
     /* Initialize the linked lists for BO reuse cache. */
-    for (i = 0; i < DRM_INTEL_GEM_BO_BUCKETS; i++)
+    for (i = 0, size = 4096; i < DRM_INTEL_GEM_BO_BUCKETS; i++, size *= 2) {
         DRMINITLISTHEAD(&bufmgr_gem->cache_bucket[i].head);
+        bufmgr_gem->cache_bucket[i].size = size;
+    }
 
     return &bufmgr_gem->bufmgr;
 }
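Finally, the init loop seeds the bucket sizes as a doubling ladder starting at one page, so bucket i serves allocations of up to 4096 << i bytes; this is what lets the linear scan above stop at the first bucket that is large enough. The same initialization, again with the hypothetical names used in the earlier sketches:

    /* Fill in the size ladder: 4 KB, 8 KB, 16 KB, ... one bucket per
     * power-of-two size, matching the loop added to drm_intel_bufmgr_gem_init().
     */
    static void
    init_bucket_sizes(struct bucket *buckets)
    {
        unsigned long size;
        int i;

        for (i = 0, size = 4096; i < NUM_BUCKETS; i++, size *= 2)
            buckets[i].size = size;
    }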