author    Jesse Barnes <jbarnes@virtuousgeek.org>  2009-03-26 16:43:00 -0700
committer Jesse Barnes <jbarnes@virtuousgeek.org>  2009-03-26 16:43:00 -0700
commit    e2d7dfb61ad7a97367f050150160c205614d152e
tree      eb50b755d5c6b6d981984d19cf7a450a39cd7151
parent    c9cfeaa5542e08381a8144d0a5bd96036a19082f
libdrm/intel: support GTT maps correctly
libdrm has some support for GTT mapping already, but there are bugs in it (no surprise, since it hasn't been used much).

In fixing bug 20803, I found that sharing bo_gem->virtual between the two mapping paths was a bad idea: a previously mapped object might not end up getting GTT mapped, leading to corruption. So this patch splits the field in two according to use, taking care to unmap both at free time (while preserving the map caching).

There's still a risk we might run out of mappings (there's a sysctl tunable for the max number of mappings per process, defaulted to 64k or so it looks like), but at least GTT maps will work with these changes (and some others fixing PAT breakage in the kernel).

Reviewed-by: Eric Anholt <eric@anholt.net>
Signed-off-by: Jesse Barnes <jbarnes@virtuousgeek.org>
 libdrm/intel/intel_bufmgr.h     |  1 +
 libdrm/intel/intel_bufmgr_gem.c | 57 ++++++++++++++++++++++++++++++-----------
 2 files changed, 42 insertions(+), 16 deletions(-)
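For context (not part of the patch), here is a minimal sketch of how a caller might exercise both mapping paths after this change. It assumes an Intel GPU exposed at /dev/dri/card0 and abbreviates most error handling; the buffer name and sizes are arbitrary.

/* Hedged usage sketch: map the same BO through the CPU path and then
 * the GTT path. After this patch the two mappings are cached in
 * separate fields (mem_virtual vs. gtt_virtual), so alternating
 * between them no longer hands back a stale pointer. */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include "intel_bufmgr.h"

int main(void)
{
    int fd = open("/dev/dri/card0", O_RDWR);
    if (fd < 0)
        return 1;

    drm_intel_bufmgr *bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
    if (bufmgr == NULL)
        return 1;

    drm_intel_bo *bo = drm_intel_bo_alloc(bufmgr, "test bo", 4096, 4096);

    /* CPU mapping: populates the cached mem_virtual on first use. */
    if (drm_intel_bo_map(bo, 1) == 0) {
        memset(bo->virtual, 0xa5, 4096);
        drm_intel_bo_unmap(bo);
    }

    /* GTT mapping: populates gtt_virtual independently. Both cached
     * pointers persist until drm_intel_gem_bo_free() unmaps them. */
    if (drm_intel_gem_bo_map_gtt(bo) == 0) {
        memset(bo->virtual, 0x5a, 4096);
        drm_intel_gem_bo_unmap_gtt(bo);
    }

    drm_intel_bo_unreference(bo);
    drm_intel_bufmgr_destroy(bufmgr);
    close(fd);
    return 0;
}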
diff --git a/libdrm/intel/intel_bufmgr.h b/libdrm/intel/intel_bufmgr.h
index 111d2afa..542dc06f 100644
--- a/libdrm/intel/intel_bufmgr.h
+++ b/libdrm/intel/intel_bufmgr.h
@@ -115,6 +115,7 @@ drm_intel_bo *drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
unsigned int handle);
void drm_intel_bufmgr_gem_enable_reuse(drm_intel_bufmgr *bufmgr);
int drm_intel_gem_bo_map_gtt(drm_intel_bo *bo);
+int drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo);
void drm_intel_gem_bo_start_gtt_access(drm_intel_bo *bo, int write_enable);
/* drm_intel_bufmgr_fake.c */
diff --git a/libdrm/intel/intel_bufmgr_gem.c b/libdrm/intel/intel_bufmgr_gem.c
index 9e49d7c9..e48778c3 100644
--- a/libdrm/intel/intel_bufmgr_gem.c
+++ b/libdrm/intel/intel_bufmgr_gem.c
@@ -145,7 +145,9 @@ struct _drm_intel_bo_gem {
/** Number of entries in relocs */
int reloc_count;
/** Mapped address for the buffer, saved across map/unmap cycles */
- void *virtual;
+ void *mem_virtual;
+ /** GTT virtual address for the buffer, saved across map/unmap cycles */
+ void *gtt_virtual;
/** BO cache list */
drmMMListHead head;
@@ -524,8 +526,10 @@ drm_intel_gem_bo_free(drm_intel_bo *bo)
struct drm_gem_close close;
int ret;
- if (bo_gem->virtual)
- munmap (bo_gem->virtual, bo_gem->bo.size);
+ if (bo_gem->mem_virtual)
+ munmap (bo_gem->mem_virtual, bo_gem->bo.size);
+ if (bo_gem->gtt_virtual)
+ munmap (bo_gem->gtt_virtual, bo_gem->bo.size);
/* Close this object */
memset(&close, 0, sizeof(close));
@@ -609,7 +613,7 @@ drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
/* Allow recursive mapping. Mesa may recursively map buffers with
* nested display loops.
*/
- if (!bo_gem->virtual) {
+ if (!bo_gem->mem_virtual) {
struct drm_i915_gem_mmap mmap_arg;
DBG("bo_map: %d (%s)\n", bo_gem->gem_handle, bo_gem->name);
@@ -626,12 +630,12 @@ drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
pthread_mutex_unlock(&bufmgr_gem->lock);
return ret;
}
- bo_gem->virtual = (void *)(uintptr_t)mmap_arg.addr_ptr;
+ bo_gem->mem_virtual = (void *)(uintptr_t)mmap_arg.addr_ptr;
bo_gem->swrast = 0;
}
DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
- bo_gem->virtual);
- bo->virtual = bo_gem->virtual;
+ bo_gem->mem_virtual);
+ bo->virtual = bo_gem->mem_virtual;
if (bo_gem->global_name != 0 || !bo_gem->swrast) {
set_domain.handle = bo_gem->gem_handle;
@@ -669,7 +673,7 @@ drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
pthread_mutex_lock(&bufmgr_gem->lock);
/* Get a mapping of the buffer if we haven't before. */
- if (bo_gem->virtual == NULL) {
+ if (bo_gem->gtt_virtual == NULL) {
struct drm_i915_gem_mmap_gtt mmap_arg;
DBG("bo_map_gtt: %d (%s)\n", bo_gem->gem_handle, bo_gem->name);
@@ -690,10 +694,10 @@ drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
}
/* and mmap it */
- bo_gem->virtual = mmap(0, bo->size, PROT_READ | PROT_WRITE,
- MAP_SHARED, bufmgr_gem->fd,
- mmap_arg.offset);
- if (bo_gem->virtual == MAP_FAILED) {
+ bo_gem->gtt_virtual = mmap(0, bo->size, PROT_READ | PROT_WRITE,
+ MAP_SHARED, bufmgr_gem->fd,
+ mmap_arg.offset);
+ if (bo_gem->gtt_virtual == MAP_FAILED) {
fprintf(stderr,
"%s:%d: Error mapping buffer %d (%s): %s .\n",
__FILE__, __LINE__,
@@ -704,10 +708,10 @@ drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
}
}
- bo->virtual = bo_gem->virtual;
+ bo->virtual = bo_gem->gtt_virtual;
DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
- bo_gem->virtual);
+ bo_gem->gtt_virtual);
/* Now move it to the GTT domain so that the CPU caches are flushed */
set_domain.handle = bo_gem->gem_handle;
@@ -719,7 +723,7 @@ drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
} while (ret == -1 && errno == EINTR);
if (ret != 0) {
- fprintf (stderr, "%s:%d: Error setting swrast %d: %s\n",
+ fprintf (stderr, "%s:%d: Error setting domain %d: %s\n",
__FILE__, __LINE__, bo_gem->gem_handle, strerror (errno));
}
@@ -728,6 +732,26 @@ drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
return 0;
}
+int
+drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
+ struct drm_i915_gem_sw_finish sw_finish;
+ int ret = 0;
+
+ if (bo == NULL)
+ return 0;
+
+ assert(bo_gem->gtt_virtual != NULL);
+
+ pthread_mutex_lock(&bufmgr_gem->lock);
+ bo->virtual = NULL;
+ pthread_mutex_unlock(&bufmgr_gem->lock);
+
+ return ret;
+}
+
static int
drm_intel_gem_bo_unmap(drm_intel_bo *bo)
{
@@ -739,7 +763,7 @@ drm_intel_gem_bo_unmap(drm_intel_bo *bo)
if (bo == NULL)
return 0;
- assert(bo_gem->virtual != NULL);
+ assert(bo_gem->mem_virtual != NULL);
pthread_mutex_lock(&bufmgr_gem->lock);
if (bo_gem->swrast) {
@@ -750,6 +774,7 @@ drm_intel_gem_bo_unmap(drm_intel_bo *bo)
} while (ret == -1 && errno == EINTR);
bo_gem->swrast = 0;
}
+ bo->virtual = NULL;
pthread_mutex_unlock(&bufmgr_gem->lock);
return 0;
}
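On the mapping-limit caveat in the commit message: the per-process cap being referred to is the Linux vm.max_map_count sysctl, which defaults to 65530 (the "64k or so" above). Since each cached CPU and GTT mapping now counts separately against that limit, a hedged sketch for checking the current value:

/* Hedged sketch: read the kernel's per-process mapping limit
 * (vm.max_map_count) that bounds how many cached mmaps a process
 * can hold before GTT or CPU maps start failing. */
#include <stdio.h>

int main(void)
{
    FILE *f = fopen("/proc/sys/vm/max_map_count", "r");
    long limit;

    if (f && fscanf(f, "%ld", &limit) == 1)
        printf("max mappings per process: %ld\n", limit);
    if (f)
        fclose(f);
    return 0;
}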