about summary refs log tree commit diff
path: root/linux-core
diff options
context:
space:
mode:
authorKeith Packard <keithp@keithp.com>2008-05-06 14:43:49 -0700
committerKeith Packard <keithp@keithp.com>2008-05-06 14:43:49 -0700
commit631e86c5c4ad9b2cdd40749ea3b351204a362c80 (patch)
treed7c7aff2bafaca4a86ea09f64ff7c7c539dbfd0f /linux-core
parent8551bfc6dba03dcd9d182b2099a0906153ecfa01 (diff)
Start coding up memory domains
Diffstat (limited to 'linux-core')
-rw-r--r--linux-core/drmP.h24
-rw-r--r--linux-core/drm_gem.c38
2 files changed, 61 insertions, 1 deletion
diff --git a/linux-core/drmP.h b/linux-core/drmP.h
index ffeafc18..cdeecc30 100644
--- a/linux-core/drmP.h
+++ b/linux-core/drmP.h
@@ -643,6 +643,15 @@ struct drm_gem_object {
*/
int name;
+ /**
+ * Memory domains. These monitor which caches contain read/write data
+ * related to the object. When transitioning from one set of domains
+ * to another, the driver is called to ensure that caches are suitably
+ * flushed and invalidated
+ */
+ uint32_t read_domains;
+ uint32_t write_domain;
+
void *driver_private;
};
@@ -942,6 +951,8 @@ struct drm_device {
spinlock_t object_name_lock;
struct idr object_name_idr;
atomic_t object_count;
+ uint32_t invalidate_domains; /* domains pending invalidation */
+ uint32_t flush_domains; /* domains pending flush */
/*@} */
};
@@ -1321,6 +1332,7 @@ static inline struct drm_memrange *drm_get_mm(struct drm_memrange_node *block)
return block->mm;
}
+/* Graphics Execution Manager library functions (drm_gem.c) */
int
drm_gem_init (struct drm_device *dev);
@@ -1330,7 +1342,6 @@ drm_gem_object_free (struct kref *kref);
void
drm_gem_object_handle_free (struct kref *kref);
-/* Graphics Execution Manager library functions (drm_gem.c) */
static inline void drm_gem_object_reference(struct drm_gem_object *obj)
{
kref_get(&obj->refcount);
@@ -1385,6 +1396,17 @@ int drm_gem_open_ioctl(struct drm_device *dev, void *data,
void drm_gem_open(struct drm_device *dev, struct drm_file *file_private);
void drm_gem_release(struct drm_device *dev, struct drm_file *file_private);
+
+/*
+ * Given the new read/write domains for an object,
+ * compute the invalidate/flush domains for the whole device.
+ *
+ */
+int drm_gem_object_set_domain (struct drm_gem_object *object,
+ uint32_t read_domains,
+ uint32_t write_domains);
+
+
extern void drm_core_ioremap(struct drm_map *map, struct drm_device *dev);
extern void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev);
diff --git a/linux-core/drm_gem.c b/linux-core/drm_gem.c
index 80d5a350..929c008f 100644
--- a/linux-core/drm_gem.c
+++ b/linux-core/drm_gem.c
@@ -539,3 +539,41 @@ drm_gem_object_handle_free (struct kref *kref)
}
EXPORT_SYMBOL(drm_gem_object_handle_free);
+
+/*
+ * Set the next domain for the specified object. This
+ * may not actually perform the necessary flushing/invaliding though,
+ * as that may want to be batched with other set_domain operations
+ */
+int drm_gem_object_set_domain (struct drm_gem_object *obj,
+ uint32_t read_domains,
+ uint32_t write_domain)
+{
+ struct drm_device *dev = obj->dev;
+ uint32_t invalidate_domains = 0;
+ uint32_t flush_domains = 0;
+
+ /*
+ * Flush the current write domain if
+ * the new read domains don't match. Invalidate
+ * any read domains which differ from the old
+ * write domain
+ */
+ if (obj->write_domain && obj->write_domain != read_domains)
+ {
+ flush_domains |= obj->write_domain;
+ invalidate_domains |= read_domains & ~obj->write_domain;
+ }
+ /*
+ * Invalidate any read caches which may have
+ * stale data. That is, any new read domains.
+ */
+ invalidate_domains |= read_domains & ~obj->read_domains;
+ obj->write_domain = write_domain;
+ obj->read_domain = read_domains;
+ if ((flush_domains | invalidate_domains) & DRM_GEM_DOMAIN_CPU)
+ drm_gem_object_clflush (obj);
+ dev->invalidate_domains |= invalidate_domains & ~DRM_GEM_DOMAIN_CPU;
+ dev->flush_domains |= flush_domains & ~DRM_GEM_DOMAIN_CPU;
+}
+EXPORT_SYMBOL(drm_gem_object_set_domain);