author     Kristian Høgsberg <krh@redhat.com>  2008-08-01 13:35:56 -0400
committer  Kristian Høgsberg <krh@redhat.com>  2008-08-01 13:35:56 -0400
commit     086716c8e2516dd71e94ebda03e20943188a5e5e
tree       860462e19e868694f0e8838bc7418cc69fab0391
parent     5052e966ec7fe5146c2d73b90482003619add5da
parent     ccbaad52f79162a77d98d0dde00681b1dbf14165
Merge commit 'origin/drm-gem' into modesetting-gem
Conflicts:
	linux-core/Makefile.kernel
	linux-core/drmP.h
	linux-core/drm_mm.c
	linux-core/drm_stub.c
	linux-core/i915_gem.c
	linux-core/i915_opregion.c
	shared-core/i915_dma.c
	shared-core/i915_drv.h
	shared-core/i915_irq.c
-rw-r--r--  linux-core/Makefile.kernel                                     |    5
-rw-r--r--  linux-core/drmP.h                                              |   47
-rw-r--r--  linux-core/drm_bo.c                                            |   38
-rw-r--r--  linux-core/drm_bo_move.c                                       |    2
-rw-r--r--  linux-core/drm_drv.c                                           |    2
-rw-r--r--  linux-core/drm_mm.c (renamed from linux-core/drm_memrange.c)   |   86
-rw-r--r--  linux-core/drm_objects.h                                       |    6
-rw-r--r--  linux-core/drm_proc.c                                          |   40
-rw-r--r--  linux-core/drm_sman.c                                          |   22
-rw-r--r--  linux-core/drm_sman.h                                          |    4
-rw-r--r--  linux-core/drm_stub.c                                          |    7
-rw-r--r--  linux-core/i915_drv.c                                          |  196
-rw-r--r--  linux-core/i915_gem.c                                          |  876
-rw-r--r--  linux-core/i915_gem_debug.c                                    |  202
-rw-r--r--  linux-core/i915_gem_proc.c                                     |  293
-rw-r--r--  linux-core/i915_gem_tiling.c                                   |  309
-rw-r--r--  linux-core/i915_opregion.c                                     |   19
-rw-r--r--  linux-core/nouveau_bo.c                                        |    2
-rw-r--r--  linux-core/nouveau_sgdma.c                                     |    2
-rw-r--r--  linux-core/radeon_buffer.c                                     |    2
-rw-r--r--  shared-core/i915_dma.c                                         |   61
-rw-r--r--  shared-core/i915_drm.h                                         |   65
-rw-r--r--  shared-core/i915_drv.h                                         |  276
-rw-r--r--  shared-core/i915_init.c                                        |  107
-rw-r--r--  shared-core/i915_irq.c                                         |    7
-rw-r--r--  tests/gem_mmap.c                                               |    2
-rw-r--r--  tests/gem_readwrite.c                                          |    9
27 files changed, 1544 insertions(+), 1143 deletions(-)
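
The bulk of this merge is mechanical: the drm_memrange allocator is renamed back to drm_mm (types, functions, and the file itself), and the i915 GEM code is split out into i915_gem_debug.c, i915_gem_proc.c and i915_gem_tiling.c. A minimal sketch of the renamed allocator interface, assuming only the declarations added to drmP.h below (error handling mostly elided):

	struct drm_mm mm;
	struct drm_mm_node *node;

	/* Manage a 1 MiB range starting at offset 0. */
	if (drm_mm_init(&mm, 0, 1024 * 1024))
		return -ENOMEM;

	/* Two-step allocation: find a suitable hole first... */
	node = drm_mm_search_free(&mm, 4096, 0 /* alignment */, 0 /* first fit */);
	if (node)
		/* ...then carve the block out of it. */
		node = drm_mm_get_block(node, 4096, 0);

	/* node->start is the allocated offset within the range. */

	drm_mm_put_block(node);		/* merges with free neighbours */
	drm_mm_takedown(&mm);		/* legal only once the range is clean */
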
diff --git a/linux-core/Makefile.kernel b/linux-core/Makefile.kernel
index b09697d3..44c7a8d2 100644
--- a/linux-core/Makefile.kernel
+++ b/linux-core/Makefile.kernel
@@ -12,7 +12,7 @@ drm-objs := drm_auth.o drm_bufs.o drm_context.o drm_dma.o drm_drawable.o \
drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
drm_sysfs.o drm_pci.o drm_agpsupport.o drm_scatter.o \
drm_memory_debug.o ati_pcigart.o drm_sman.o \
- drm_hashtab.o drm_memrange.o drm_object.o drm_compat.o \
+ drm_hashtab.o drm_mm.o drm_object.o drm_compat.o \
drm_fence.o drm_ttm.o drm_bo.o drm_bo_move.o drm_bo_lock.o \
drm_crtc.o drm_edid.o drm_modes.o drm_crtc_helper.o \
drm_regman.o drm_vm_nopage_compat.o drm_gem.o
@@ -22,7 +22,8 @@ mga-objs := mga_drv.o mga_dma.o mga_state.o mga_warp.o mga_irq.o
i810-objs := i810_drv.o i810_dma.o
i915-objs := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_fence.o \
i915_buffer.o i915_compat.o i915_execbuf.o i915_suspend.o \
- i915_gem.o i915_opregion.o \
+ i915_opregion.o \
+ i915_gem.o i915_gem_debug.o i915_gem_proc.o i915_gem_tiling.o \
intel_display.o intel_crt.o intel_lvds.o intel_bios.o \
intel_sdvo.o intel_modes.o intel_i2c.o i915_init.o intel_fb.o \
intel_tv.o intel_dvo.o dvo_ch7xxx.o \
diff --git a/linux-core/drmP.h b/linux-core/drmP.h
index 1e5838ee..bfe2ae1a 100644
--- a/linux-core/drmP.h
+++ b/linux-core/drmP.h
@@ -480,11 +480,6 @@ struct drm_lock_data {
uint32_t kernel_waiters;
uint32_t user_waiters;
int idle_has_lock;
- /**
- * Boolean signaling that the lock is held on behalf of the
- * file_priv client by the kernel in an ioctl handler.
- */
- int kernel_held;
};
/**
@@ -560,17 +555,17 @@ struct drm_sigdata {
* Generic memory manager structs
*/
-struct drm_memrange_node {
+struct drm_mm_node {
struct list_head fl_entry;
struct list_head ml_entry;
int free;
unsigned long start;
unsigned long size;
- struct drm_memrange *mm;
+ struct drm_mm *mm;
void *private;
};
-struct drm_memrange {
+struct drm_mm {
struct list_head fl_entry;
struct list_head ml_entry;
};
@@ -586,7 +581,7 @@ struct drm_map_list {
uint64_t user_token;
struct drm_master *master; /** if this map is associated with a specific
master */
- struct drm_memrange_node *file_offset_node;
+ struct drm_mm_node *file_offset_node;
};
typedef struct drm_map drm_local_map_t;
@@ -905,7 +900,7 @@ struct drm_device {
struct list_head maplist; /**< Linked list of regions */
int map_count; /**< Number of mappable regions */
struct drm_open_hash map_hash; /**< User token hash table for maps */
- struct drm_memrange offset_manager; /**< User token manager */
+ struct drm_mm offset_manager; /**< User token manager */
struct drm_open_hash object_hash; /**< User token hash table for objects */
struct address_space *dev_mapping; /**< For unmap_mapping_range() */
struct page *ttm_dummy_page;
@@ -1422,26 +1417,22 @@ extern int drm_sysfs_connector_add(struct drm_connector *connector);
extern void drm_sysfs_connector_remove(struct drm_connector *connector);
/*
- * Basic memory manager support (drm_memrange.c)
+ * Basic memory manager support (drm_mm.c)
*/
-extern struct drm_memrange_node *drm_memrange_get_block(struct drm_memrange_node * parent,
- unsigned long size,
- unsigned alignment);
-extern void drm_memrange_put_block(struct drm_memrange_node *cur);
-extern struct drm_memrange_node *drm_memrange_search_free(const struct drm_memrange *mm,
- unsigned long size,
- unsigned alignment, int best_match);
-extern int drm_memrange_init(struct drm_memrange *mm,
- unsigned long start, unsigned long size);
-extern void drm_memrange_takedown(struct drm_memrange *mm);
-extern int drm_memrange_clean(struct drm_memrange *mm);
-extern unsigned long drm_memrange_tail_space(struct drm_memrange *mm);
-extern int drm_memrange_remove_space_from_tail(struct drm_memrange *mm,
- unsigned long size);
-extern int drm_memrange_add_space_to_tail(struct drm_memrange *mm,
- unsigned long size);
-static inline struct drm_memrange *drm_get_mm(struct drm_memrange_node *block)
+extern struct drm_mm_node * drm_mm_get_block(struct drm_mm_node * parent, unsigned long size,
+ unsigned alignment);
+extern void drm_mm_put_block(struct drm_mm_node *cur);
+extern struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm, unsigned long size,
+ unsigned alignment, int best_match);
+extern int drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size);
+extern void drm_mm_takedown(struct drm_mm *mm);
+extern int drm_mm_clean(struct drm_mm *mm);
+extern unsigned long drm_mm_tail_space(struct drm_mm *mm);
+extern int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size);
+extern int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size);
+
+static inline struct drm_mm *drm_get_mm(struct drm_mm_node *block)
{
return block->mm;
}
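
Besides the allocation entry points, the header also carries over the tail-space helpers. A hedged sketch of growing and then shrinking the managed range in place (hypothetical 64 KiB delta, return codes unchecked for brevity):

	/* Extend the managed range by 64 KiB past the current end... */
	drm_mm_add_space_to_tail(&mm, 64 * 1024);

	/* ...and hand it back, which only succeeds while the tail
	 * node is still free. */
	if (drm_mm_tail_space(&mm) >= 64 * 1024)
		drm_mm_remove_space_from_tail(&mm, 64 * 1024);
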
diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c
index 0021530b..ace6734a 100644
--- a/linux-core/drm_bo.c
+++ b/linux-core/drm_bo.c
@@ -417,14 +417,14 @@ static void drm_bo_cleanup_refs(struct drm_buffer_object *bo, int remove_all)
if (!bo->fence) {
list_del_init(&bo->lru);
if (bo->mem.mm_node) {
- drm_memrange_put_block(bo->mem.mm_node);
+ drm_mm_put_block(bo->mem.mm_node);
if (bo->pinned_node == bo->mem.mm_node)
bo->pinned_node = NULL;
bo->mem.mm_node = NULL;
}
list_del_init(&bo->pinned_lru);
if (bo->pinned_node) {
- drm_memrange_put_block(bo->pinned_node);
+ drm_mm_put_block(bo->pinned_node);
bo->pinned_node = NULL;
}
list_del_init(&bo->ddestroy);
@@ -790,7 +790,7 @@ out:
mutex_lock(&dev->struct_mutex);
if (evict_mem.mm_node) {
if (evict_mem.mm_node != bo->pinned_node)
- drm_memrange_put_block(evict_mem.mm_node);
+ drm_mm_put_block(evict_mem.mm_node);
evict_mem.mm_node = NULL;
}
drm_bo_add_to_lru(bo);
@@ -809,7 +809,7 @@ static int drm_bo_mem_force_space(struct drm_device *dev,
struct drm_bo_mem_reg *mem,
uint32_t mem_type, int no_wait)
{
- struct drm_memrange_node *node;
+ struct drm_mm_node *node;
struct drm_buffer_manager *bm = &dev->bm;
struct drm_buffer_object *entry;
struct drm_mem_type_manager *man = &bm->man[mem_type];
@@ -819,7 +819,7 @@ static int drm_bo_mem_force_space(struct drm_device *dev,
mutex_lock(&dev->struct_mutex);
do {
- node = drm_memrange_search_free(&man->manager, num_pages,
+ node = drm_mm_search_free(&man->manager, num_pages,
mem->page_alignment, 1);
if (node)
break;
@@ -845,7 +845,7 @@ static int drm_bo_mem_force_space(struct drm_device *dev,
return -ENOMEM;
}
- node = drm_memrange_get_block(node, num_pages, mem->page_alignment);
+ node = drm_mm_get_block(node, num_pages, mem->page_alignment);
if (unlikely(!node)) {
mutex_unlock(&dev->struct_mutex);
return -ENOMEM;
@@ -923,7 +923,7 @@ int drm_bo_mem_space(struct drm_buffer_object *bo,
int type_found = 0;
int type_ok = 0;
int has_eagain = 0;
- struct drm_memrange_node *node = NULL;
+ struct drm_mm_node *node = NULL;
int ret;
mem->mm_node = NULL;
@@ -951,10 +951,10 @@ int drm_bo_mem_space(struct drm_buffer_object *bo,
mutex_lock(&dev->struct_mutex);
if (man->has_type && man->use_type) {
type_found = 1;
- node = drm_memrange_search_free(&man->manager, mem->num_pages,
+ node = drm_mm_search_free(&man->manager, mem->num_pages,
mem->page_alignment, 1);
if (node)
- node = drm_memrange_get_block(node, mem->num_pages,
+ node = drm_mm_get_block(node, mem->num_pages,
mem->page_alignment);
}
mutex_unlock(&dev->struct_mutex);
@@ -1339,7 +1339,7 @@ out_unlock:
if (ret || !move_unfenced) {
if (mem.mm_node) {
if (mem.mm_node != bo->pinned_node)
- drm_memrange_put_block(mem.mm_node);
+ drm_mm_put_block(mem.mm_node);
mem.mm_node = NULL;
}
drm_bo_add_to_lru(bo);
@@ -1431,7 +1431,7 @@ static int drm_buffer_object_validate(struct drm_buffer_object *bo,
if (bo->pinned_node != bo->mem.mm_node) {
if (bo->pinned_node != NULL)
- drm_memrange_put_block(bo->pinned_node);
+ drm_mm_put_block(bo->pinned_node);
bo->pinned_node = bo->mem.mm_node;
}
@@ -1442,7 +1442,7 @@ static int drm_buffer_object_validate(struct drm_buffer_object *bo,
mutex_lock(&dev->struct_mutex);
if (bo->pinned_node != bo->mem.mm_node)
- drm_memrange_put_block(bo->pinned_node);
+ drm_mm_put_block(bo->pinned_node);
list_del_init(&bo->pinned_lru);
bo->pinned_node = NULL;
@@ -2082,7 +2082,7 @@ static int drm_bo_leave_list(struct drm_buffer_object *bo,
if (bo->pinned_node == bo->mem.mm_node)
bo->pinned_node = NULL;
if (bo->pinned_node != NULL) {
- drm_memrange_put_block(bo->pinned_node);
+ drm_mm_put_block(bo->pinned_node);
bo->pinned_node = NULL;
}
mutex_unlock(&dev->struct_mutex);
@@ -2223,8 +2223,8 @@ int drm_bo_clean_mm(struct drm_device *dev, unsigned mem_type, int kern_clean)
drm_bo_force_list_clean(dev, &man->lru, mem_type, 1, 0, 0);
drm_bo_force_list_clean(dev, &man->pinned, mem_type, 1, 0, 1);
- if (drm_memrange_clean(&man->manager)) {
- drm_memrange_takedown(&man->manager);
+ if (drm_mm_clean(&man->manager)) {
+ drm_mm_takedown(&man->manager);
} else {
ret = -EBUSY;
}
@@ -2295,7 +2295,7 @@ int drm_bo_init_mm(struct drm_device *dev, unsigned type,
DRM_ERROR("Zero size memory manager type %d\n", type);
return ret;
}
- ret = drm_memrange_init(&man->manager, p_offset, p_size);
+ ret = drm_mm_init(&man->manager, p_offset, p_size);
if (ret)
return ret;
}
@@ -2722,7 +2722,7 @@ void drm_bo_takedown_vm_locked(struct drm_buffer_object *bo)
list->user_token = 0;
}
if (list->file_offset_node) {
- drm_memrange_put_block(list->file_offset_node);
+ drm_mm_put_block(list->file_offset_node);
list->file_offset_node = NULL;
}
@@ -2766,7 +2766,7 @@ static int drm_bo_setup_vm_locked(struct drm_buffer_object *bo)
atomic_inc(&bo->usage);
map->handle = (void *)bo;
- list->file_offset_node = drm_memrange_search_free(&dev->offset_manager,
+ list->file_offset_node = drm_mm_search_free(&dev->offset_manager,
bo->mem.num_pages, 0, 0);
if (unlikely(!list->file_offset_node)) {
@@ -2774,7 +2774,7 @@ static int drm_bo_setup_vm_locked(struct drm_buffer_object *bo)
return -ENOMEM;
}
- list->file_offset_node = drm_memrange_get_block(list->file_offset_node,
+ list->file_offset_node = drm_mm_get_block(list->file_offset_node,
bo->mem.num_pages, 0);
if (unlikely(!list->file_offset_node)) {
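
drm_bo_mem_force_space() keeps its pre-existing allocate-or-evict shape, just with the renamed calls; schematically (a sketch, with locking, LRU bookkeeping and error paths elided):

	do {
		node = drm_mm_search_free(&man->manager, num_pages,
					  mem->page_alignment, 1);
		if (node)
			break;
		/* No hole large enough: evict the least recently used
		 * buffer of this memory type, then retry the search. */
	} while (!list_empty(&man->lru));

	if (node)
		node = drm_mm_get_block(node, num_pages, mem->page_alignment);
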
diff --git a/linux-core/drm_bo_move.c b/linux-core/drm_bo_move.c
index 9147a475..5c290af2 100644
--- a/linux-core/drm_bo_move.c
+++ b/linux-core/drm_bo_move.c
@@ -41,7 +41,7 @@ static void drm_bo_free_old_node(struct drm_buffer_object *bo)
if (old_mem->mm_node && (old_mem->mm_node != bo->pinned_node)) {
mutex_lock(&bo->dev->struct_mutex);
- drm_memrange_put_block(old_mem->mm_node);
+ drm_mm_put_block(old_mem->mm_node);
mutex_unlock(&bo->dev->struct_mutex);
}
old_mem->mm_node = NULL;
diff --git a/linux-core/drm_drv.c b/linux-core/drm_drv.c
index 36c0a14d..be441b00 100644
--- a/linux-core/drm_drv.c
+++ b/linux-core/drm_drv.c
@@ -428,7 +428,7 @@ static void drm_cleanup(struct drm_device * dev)
drm_ctxbitmap_cleanup(dev);
drm_ht_remove(&dev->map_hash);
- drm_memrange_takedown(&dev->offset_manager);
+ drm_mm_takedown(&dev->offset_manager);
drm_ht_remove(&dev->object_hash);
drm_put_minor(dev, &dev->primary);
diff --git a/linux-core/drm_memrange.c b/linux-core/drm_mm.c
index 5921eff8..8d7390d4 100644
--- a/linux-core/drm_memrange.c
+++ b/linux-core/drm_mm.c
@@ -44,26 +44,26 @@
#include "drmP.h"
#include <linux/slab.h>
-unsigned long drm_memrange_tail_space(struct drm_memrange *mm)
+unsigned long drm_mm_tail_space(struct drm_mm *mm)
{
struct list_head *tail_node;
- struct drm_memrange_node *entry;
+ struct drm_mm_node *entry;
tail_node = mm->ml_entry.prev;
- entry = list_entry(tail_node, struct drm_memrange_node, ml_entry);
+ entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
if (!entry->free)
return 0;
return entry->size;
}
-int drm_memrange_remove_space_from_tail(struct drm_memrange *mm, unsigned long size)
+int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size)
{
struct list_head *tail_node;
- struct drm_memrange_node *entry;
+ struct drm_mm_node *entry;
tail_node = mm->ml_entry.prev;
- entry = list_entry(tail_node, struct drm_memrange_node, ml_entry);
+ entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
if (!entry->free)
return -ENOMEM;
@@ -75,13 +75,13 @@ int drm_memrange_remove_space_from_tail(struct drm_memrange *mm, unsigned long s
}
-static int drm_memrange_create_tail_node(struct drm_memrange *mm,
+static int drm_mm_create_tail_node(struct drm_mm *mm,
unsigned long start,
unsigned long size)
{
- struct drm_memrange_node *child;
+ struct drm_mm_node *child;
- child = (struct drm_memrange_node *)
+ child = (struct drm_mm_node *)
drm_ctl_alloc(sizeof(*child), DRM_MEM_MM);
if (!child)
return -ENOMEM;
@@ -98,26 +98,26 @@ static int drm_memrange_create_tail_node(struct drm_memrange *mm,
}
-int drm_memrange_add_space_to_tail(struct drm_memrange *mm, unsigned long size)
+int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size)
{
struct list_head *tail_node;
- struct drm_memrange_node *entry;
+ struct drm_mm_node *entry;
tail_node = mm->ml_entry.prev;
- entry = list_entry(tail_node, struct drm_memrange_node, ml_entry);
+ entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
if (!entry->free) {
- return drm_memrange_create_tail_node(mm, entry->start + entry->size, size);
+ return drm_mm_create_tail_node(mm, entry->start + entry->size, size);
}
entry->size += size;
return 0;
}
-static struct drm_memrange_node *drm_memrange_split_at_start(struct drm_memrange_node *parent,
+static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
unsigned long size)
{
- struct drm_memrange_node *child;
+ struct drm_mm_node *child;
- child = (struct drm_memrange_node *)
+ child = (struct drm_mm_node *)
drm_ctl_alloc(sizeof(*child), DRM_MEM_MM);
if (!child)
return NULL;
@@ -137,19 +137,19 @@ static struct drm_memrange_node *drm_memrange_split_at_start(struct drm_memrange
return child;
}
-struct drm_memrange_node *drm_memrange_get_block(struct drm_memrange_node * parent,
+struct drm_mm_node *drm_mm_get_block(struct drm_mm_node * parent,
unsigned long size, unsigned alignment)
{
- struct drm_memrange_node *align_splitoff = NULL;
- struct drm_memrange_node *child;
+ struct drm_mm_node *align_splitoff = NULL;
+ struct drm_mm_node *child;
unsigned tmp = 0;
if (alignment)
tmp = parent->start % alignment;
if (tmp) {
- align_splitoff = drm_memrange_split_at_start(parent, alignment - tmp);
+ align_splitoff = drm_mm_split_at_start(parent, alignment - tmp);
if (!align_splitoff)
return NULL;
}
@@ -159,41 +159,41 @@ struct drm_memrange_node *drm_memrange_get_block(struct drm_memrange_node * pare
parent->free = 0;
return parent;
} else {
- child = drm_memrange_split_at_start(parent, size);
+ child = drm_mm_split_at_start(parent, size);
}
if (align_splitoff)
- drm_memrange_put_block(align_splitoff);
+ drm_mm_put_block(align_splitoff);
return child;
}
-EXPORT_SYMBOL(drm_memrange_get_block);
+EXPORT_SYMBOL(drm_mm_get_block);
/*
* Put a block. Merge with the previous and / or next block if they are free.
* Otherwise add to the free stack.
*/
-void drm_memrange_put_block(struct drm_memrange_node * cur)
+void drm_mm_put_block(struct drm_mm_node * cur)
{
- struct drm_memrange *mm = cur->mm;
+ struct drm_mm *mm = cur->mm;
struct list_head *cur_head = &cur->ml_entry;
struct list_head *root_head = &mm->ml_entry;
- struct drm_memrange_node *prev_node = NULL;
- struct drm_memrange_node *next_node;
+ struct drm_mm_node *prev_node = NULL;
+ struct drm_mm_node *next_node;
int merged = 0;
if (cur_head->prev != root_head) {
- prev_node = list_entry(cur_head->prev, struct drm_memrange_node, ml_entry);
+ prev_node = list_entry(cur_head->prev, struct drm_mm_node, ml_entry);
if (prev_node->free) {
prev_node->size += cur->size;
merged = 1;
}
}
if (cur_head->next != root_head) {
- next_node = list_entry(cur_head->next, struct drm_memrange_node, ml_entry);
+ next_node = list_entry(cur_head->next, struct drm_mm_node, ml_entry);
if (next_node->free) {
if (merged) {
prev_node->size += next_node->size;
@@ -216,16 +216,16 @@ void drm_memrange_put_block(struct drm_memrange_node * cur)
drm_ctl_free(cur, sizeof(*cur), DRM_MEM_MM);
}
}
-EXPORT_SYMBOL(drm_memrange_put_block);
+EXPORT_SYMBOL(drm_mm_put_block);
-struct drm_memrange_node *drm_memrange_search_free(const struct drm_memrange * mm,
+struct drm_mm_node *drm_mm_search_free(const struct drm_mm * mm,
unsigned long size,
unsigned alignment, int best_match)
{
struct list_head *list;
const struct list_head *free_stack = &mm->fl_entry;
- struct drm_memrange_node *entry;
- struct drm_memrange_node *best;
+ struct drm_mm_node *entry;
+ struct drm_mm_node *best;
unsigned long best_size;
unsigned wasted;
@@ -233,7 +233,7 @@ struct drm_memrange_node *drm_memrange_search_free(const struct drm_memrange * m
best_size = ~0UL;
list_for_each(list, free_stack) {
- entry = list_entry(list, struct drm_memrange_node, fl_entry);
+ entry = list_entry(list, struct drm_mm_node, fl_entry);
wasted = 0;
if (entry->size < size)
@@ -258,31 +258,31 @@ struct drm_memrange_node *drm_memrange_search_free(const struct drm_memrange * m
return best;
}
-EXPORT_SYMBOL(drm_memrange_search_free);
+EXPORT_SYMBOL(drm_mm_search_free);
-int drm_memrange_clean(struct drm_memrange * mm)
+int drm_mm_clean(struct drm_mm * mm)
{
struct list_head *head = &mm->ml_entry;
return (head->next->next == head);
}
-int drm_memrange_init(struct drm_memrange * mm, unsigned long start, unsigned long size)
+int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
{
INIT_LIST_HEAD(&mm->ml_entry);
INIT_LIST_HEAD(&mm->fl_entry);
- return drm_memrange_create_tail_node(mm, start, size);
+ return drm_mm_create_tail_node(mm, start, size);
}
-EXPORT_SYMBOL(drm_memrange_init);
+EXPORT_SYMBOL(drm_mm_init);
-void drm_memrange_takedown(struct drm_memrange * mm)
+void drm_mm_takedown(struct drm_mm * mm)
{
struct list_head *bnode = mm->fl_entry.next;
- struct drm_memrange_node *entry;
+ struct drm_mm_node *entry;
- entry = list_entry(bnode, struct drm_memrange_node, fl_entry);
+ entry = list_entry(bnode, struct drm_mm_node, fl_entry);
if (entry->ml_entry.next != &mm->ml_entry ||
entry->fl_entry.next != &mm->fl_entry) {
@@ -294,4 +294,4 @@ void drm_memrange_takedown(struct drm_memrange * mm)
list_del(&entry->ml_entry);
drm_ctl_free(entry, sizeof(*entry), DRM_MEM_MM);
}
-EXPORT_SYMBOL(drm_memrange_takedown);
+EXPORT_SYMBOL(drm_mm_takedown);
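
Worth noting how drm_mm_get_block() satisfies an alignment request: it splits the misaligned head off the parent hole and releases it again afterwards. As a worked example, a hole starting at 0x1003 with a 0x1000 alignment request gives tmp = 0x1003 % 0x1000 = 3, so a throw-away block of 0x1000 - 3 = 0xffd bytes is split off and the block actually returned starts at 0x2000:

	/* Inside drm_mm_get_block(), per the code above: */
	tmp = parent->start % alignment;	/* 0x1003 % 0x1000 == 3 */
	if (tmp)
		align_splitoff = drm_mm_split_at_start(parent, alignment - tmp);
	/* parent now starts at 0x2000; the 0xffd-byte head block is
	 * handed back via drm_mm_put_block(align_splitoff) afterwards. */
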
diff --git a/linux-core/drm_objects.h b/linux-core/drm_objects.h
index 96cfc113..d8e8ee32 100644
--- a/linux-core/drm_objects.h
+++ b/linux-core/drm_objects.h
@@ -417,7 +417,7 @@ extern int drm_ttm_destroy(struct drm_ttm *ttm);
*/
struct drm_bo_mem_reg {
- struct drm_memrange_node *mm_node;
+ struct drm_mm_node *mm_node;
unsigned long size;
unsigned long num_pages;
uint32_t page_alignment;
@@ -498,7 +498,7 @@ struct drm_buffer_object {
unsigned long num_pages;
/* For pinned buffers */
- struct drm_memrange_node *pinned_node;
+ struct drm_mm_node *pinned_node;
uint32_t pinned_mem_type;
struct list_head pinned_lru;
@@ -533,7 +533,7 @@ struct drm_mem_type_manager {
int has_type;
int use_type;
int kern_init_type;
- struct drm_memrange manager;
+ struct drm_mm manager;
struct list_head lru;
struct list_head pinned;
uint32_t flags;
diff --git a/linux-core/drm_proc.c b/linux-core/drm_proc.c
index 127a7987..e66edfb3 100644
--- a/linux-core/drm_proc.c
+++ b/linux-core/drm_proc.c
@@ -598,20 +598,20 @@ struct drm_gem_name_info_data {
int eof;
};
-static int drm_gem_one_name_info (int id, void *ptr, void *data)
+static int drm_gem_one_name_info(int id, void *ptr, void *data)
{
struct drm_gem_object *obj = ptr;
struct drm_gem_name_info_data *nid = data;
- DRM_INFO ("name %d size %d\n", obj->name, obj->size);
+ DRM_INFO("name %d size %d\n", obj->name, obj->size);
if (nid->eof)
return 0;
-
- nid->len += sprintf (&nid->buf[nid->len],
- "%6d%9d%8d%9d\n",
- obj->name, obj->size,
- atomic_read(&obj->handlecount.refcount),
- atomic_read(&obj->refcount.refcount));
+
+ nid->len += sprintf(&nid->buf[nid->len],
+ "%6d%9d%8d%9d\n",
+ obj->name, obj->size,
+ atomic_read(&obj->handlecount.refcount),
+ atomic_read(&obj->refcount.refcount));
if (nid->len > DRM_PROC_LIMIT) {
nid->eof = 1;
return 0;
@@ -622,20 +622,20 @@ static int drm_gem_one_name_info (int id, void *ptr, void *data)
static int drm_gem_name_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data)
{
- struct drm_minor *minor = (struct drm_minor *) data;
+ struct drm_minor *minor = (struct drm_minor *) data;
struct drm_device *dev = minor->dev;
struct drm_gem_name_info_data nid;
-
+
if (offset > DRM_PROC_LIMIT) {
*eof = 1;
return 0;
}
- nid.len = sprintf (buf, " name size handles refcount\n");
+ nid.len = sprintf(buf, " name size handles refcount\n");
nid.buf = buf;
nid.eof = 0;
- idr_for_each (&dev->object_name_idr, drm_gem_one_name_info, &nid);
-
+ idr_for_each(&dev->object_name_idr, drm_gem_one_name_info, &nid);
+
*start = &buf[offset];
*eof = 0;
if (nid.len > request + offset)
@@ -647,10 +647,10 @@ static int drm_gem_name_info(char *buf, char **start, off_t offset,
static int drm_gem_object_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data)
{
- struct drm_minor *minor = (struct drm_minor *) data;
+ struct drm_minor *minor = (struct drm_minor *) data;
struct drm_device *dev = minor->dev;
int len = 0;
-
+
if (offset > DRM_PROC_LIMIT) {
*eof = 1;
return 0;
@@ -658,11 +658,11 @@ static int drm_gem_object_info(char *buf, char **start, off_t offset,
*start = &buf[offset];
*eof = 0;
- DRM_PROC_PRINT("%d objects\n", atomic_read (&dev->object_count));
- DRM_PROC_PRINT("%d object bytes\n", atomic_read (&dev->object_memory));
- DRM_PROC_PRINT("%d pinned\n", atomic_read (&dev->pin_count));
- DRM_PROC_PRINT("%d pin bytes\n", atomic_read (&dev->pin_memory));
- DRM_PROC_PRINT("%d gtt bytes\n", atomic_read (&dev->gtt_memory));
+ DRM_PROC_PRINT("%d objects\n", atomic_read(&dev->object_count));
+ DRM_PROC_PRINT("%d object bytes\n", atomic_read(&dev->object_memory));
+ DRM_PROC_PRINT("%d pinned\n", atomic_read(&dev->pin_count));
+ DRM_PROC_PRINT("%d pin bytes\n", atomic_read(&dev->pin_memory));
+ DRM_PROC_PRINT("%d gtt bytes\n", atomic_read(&dev->gtt_memory));
DRM_PROC_PRINT("%d gtt total\n", dev->gtt_total);
if (len > request + offset)
return request;
diff --git a/linux-core/drm_sman.c b/linux-core/drm_sman.c
index 7c16f685..8421a939 100644
--- a/linux-core/drm_sman.c
+++ b/linux-core/drm_sman.c
@@ -88,34 +88,34 @@ EXPORT_SYMBOL(drm_sman_init);
static void *drm_sman_mm_allocate(void *private, unsigned long size,
unsigned alignment)
{
- struct drm_memrange *mm = (struct drm_memrange *) private;
- struct drm_memrange_node *tmp;
+ struct drm_mm *mm = (struct drm_mm *) private;
+ struct drm_mm_node *tmp;
- tmp = drm_memrange_search_free(mm, size, alignment, 1);
+ tmp = drm_mm_search_free(mm, size, alignment, 1);
if (!tmp) {
return NULL;
}
- tmp = drm_memrange_get_block(tmp, size, alignment);
+ tmp = drm_mm_get_block(tmp, size, alignment);
return tmp;
}
static void drm_sman_mm_free(void *private, void *ref)
{
- struct drm_memrange_node *node = (struct drm_memrange_node *) ref;
+ struct drm_mm_node *node = (struct drm_mm_node *) ref;
- drm_memrange_put_block(node);
+ drm_mm_put_block(node);
}
static void drm_sman_mm_destroy(void *private)
{
- struct drm_memrange *mm = (struct drm_memrange *) private;
- drm_memrange_takedown(mm);
+ struct drm_mm *mm = (struct drm_mm *) private;
+ drm_mm_takedown(mm);
drm_free(mm, sizeof(*mm), DRM_MEM_MM);
}
static unsigned long drm_sman_mm_offset(void *private, void *ref)
{
- struct drm_memrange_node *node = (struct drm_memrange_node *) ref;
+ struct drm_mm_node *node = (struct drm_mm_node *) ref;
return node->start;
}
@@ -124,7 +124,7 @@ drm_sman_set_range(struct drm_sman * sman, unsigned int manager,
unsigned long start, unsigned long size)
{
struct drm_sman_mm *sman_mm;
- struct drm_memrange *mm;
+ struct drm_mm *mm;
int ret;
BUG_ON(manager >= sman->num_managers);
@@ -135,7 +135,7 @@ drm_sman_set_range(struct drm_sman * sman, unsigned int manager,
return -ENOMEM;
}
sman_mm->private = mm;
- ret = drm_memrange_init(mm, start, size);
+ ret = drm_mm_init(mm, start, size);
if (ret) {
drm_free(mm, sizeof(*mm), DRM_MEM_MM);
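
The four static helpers above form the drm_mm-backed default backend of the sman; a replacement allocator only has to provide the same hooks. A hedged sketch of a custom allocate callback with the signature used by drm_sman_mm_allocate() above (drop-in behaviour, error handling trimmed):

	static void *my_allocate(void *private, unsigned long size,
				 unsigned alignment)
	{
		struct drm_mm *mm = private;
		struct drm_mm_node *node;

		node = drm_mm_search_free(mm, size, alignment, 1);
		return node ? drm_mm_get_block(node, size, alignment) : NULL;
	}
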
diff --git a/linux-core/drm_sman.h b/linux-core/drm_sman.h
index 0299776c..39a39fef 100644
--- a/linux-core/drm_sman.h
+++ b/linux-core/drm_sman.h
@@ -45,7 +45,7 @@
/*
* A class that is an abstraction of a simple memory allocator.
* The sman implementation provides a default such allocator
- * using the drm_memrange.c implementation. But the user can replace it.
+ * using the drm_mm.c implementation. But the user can replace it.
* See the SiS implementation, which may use the SiS FB kernel module
* for memory management.
*/
@@ -116,7 +116,7 @@ extern int drm_sman_init(struct drm_sman * sman, unsigned int num_managers,
unsigned int user_order, unsigned int owner_order);
/*
- * Initialize a drm_memrange.c allocator. Should be called only once for each
+ * Initialize a drm_mm.c allocator. Should be called only once for each
* manager unless a customized allocator is used.
*/
diff --git a/linux-core/drm_stub.c b/linux-core/drm_stub.c
index c62b901d..1676b2a9 100644
--- a/linux-core/drm_stub.c
+++ b/linux-core/drm_stub.c
@@ -200,16 +200,15 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
if (drm_ht_create(&dev->map_hash, DRM_MAP_HASH_ORDER))
return -ENOMEM;
-
- if (drm_memrange_init(&dev->offset_manager, DRM_FILE_PAGE_OFFSET_START,
- DRM_FILE_PAGE_OFFSET_SIZE)) {
+ if (drm_mm_init(&dev->offset_manager, DRM_FILE_PAGE_OFFSET_START,
+ DRM_FILE_PAGE_OFFSET_SIZE)) {
drm_ht_remove(&dev->map_hash);
return -ENOMEM;
}
if (drm_ht_create(&dev->object_hash, DRM_OBJECT_HASH_ORDER)) {
drm_ht_remove(&dev->map_hash);
- drm_memrange_takedown(&dev->offset_manager);
+ drm_mm_takedown(&dev->offset_manager);
return -ENOMEM;
}
diff --git a/linux-core/i915_drv.c b/linux-core/i915_drv.c
index a3b35358..2e67eb71 100644
--- a/linux-core/i915_drv.c
+++ b/linux-core/i915_drv.c
@@ -73,202 +73,6 @@ static struct drm_bo_driver i915_bo_driver = {
};
#endif /* ttm */
-static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (pipe == PIPE_A)
- return (I915_READ(DPLL_A) & DPLL_VCO_ENABLE);
- else
- return (I915_READ(DPLL_B) & DPLL_VCO_ENABLE);
-}
-
-static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B);
- u32 *array;
- int i;
-
- if (!i915_pipe_enabled(dev, pipe))
- return;
-
- if (pipe == PIPE_A)
- array = dev_priv->save_palette_a;
- else
- array = dev_priv->save_palette_b;
-
- for(i = 0; i < 256; i++)
- array[i] = I915_READ(reg + (i << 2));
-}
-
-static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B);
- u32 *array;
- int i;
-
- if (!i915_pipe_enabled(dev, pipe))
- return;
-
- if (pipe == PIPE_A)
- array = dev_priv->save_palette_a;
- else
- array = dev_priv->save_palette_b;
-
- for(i = 0; i < 256; i++)
- I915_WRITE(reg + (i << 2), array[i]);
-}
-
-static u8 i915_read_indexed(u16 index_port, u16 data_port, u8 reg)
-{
- outb(reg, index_port);
- return inb(data_port);
-}
-
-static u8 i915_read_ar(u16 st01, u8 reg, u16 palette_enable)
-{
- inb(st01);
- outb(palette_enable | reg, VGA_AR_INDEX);
- return inb(VGA_AR_DATA_READ);
-}
-
-static void i915_write_ar(u16 st01, u8 reg, u8 val, u16 palette_enable)
-{
- inb(st01);
- outb(palette_enable | reg, VGA_AR_INDEX);
- outb(val, VGA_AR_DATA_WRITE);
-}
-
-static void i915_write_indexed(u16 index_port, u16 data_port, u8 reg, u8 val)
-{
- outb(reg, index_port);
- outb(val, data_port);
-}
-
-static void i915_save_vga(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int i;
- u16 cr_index, cr_data, st01;
-
- /* VGA color palette registers */
- dev_priv->saveDACMASK = inb(VGA_DACMASK);
- /* DACCRX automatically increments during read */
- outb(0, VGA_DACRX);
- /* Read 3 bytes of color data from each index */
- for (i = 0; i < 256 * 3; i++)
- dev_priv->saveDACDATA[i] = inb(VGA_DACDATA);
-
- /* MSR bits */
- dev_priv->saveMSR = inb(VGA_MSR_READ);
- if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) {
- cr_index = VGA_CR_INDEX_CGA;
- cr_data = VGA_CR_DATA_CGA;
- st01 = VGA_ST01_CGA;
- } else {
- cr_index = VGA_CR_INDEX_MDA;
- cr_data = VGA_CR_DATA_MDA;
- st01 = VGA_ST01_MDA;
- }
-
- /* CRT controller regs */
- i915_write_indexed(cr_index, cr_data, 0x11,
- i915_read_indexed(cr_index, cr_data, 0x11) &
- (~0x80));
- for (i = 0; i <= 0x24; i++)
- dev_priv->saveCR[i] =
- i915_read_indexed(cr_index, cr_data, i);
- /* Make sure we don't turn off CR group 0 writes */
- dev_priv->saveCR[0x11] &= ~0x80;
-
- /* Attribute controller registers */
- inb(st01);
- dev_priv->saveAR_INDEX = inb(VGA_AR_INDEX);
- for (i = 0; i <= 0x14; i++)
- dev_priv->saveAR[i] = i915_read_ar(st01, i, 0);
- inb(st01);
- outb(dev_priv->saveAR_INDEX, VGA_AR_INDEX);
- inb(st01);
-
- /* Graphics controller registers */
- for (i = 0; i < 9; i++)
- dev_priv->saveGR[i] =
- i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, i);
-
- dev_priv->saveGR[0x10] =
- i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x10);
- dev_priv->saveGR[0x11] =
- i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x11);
- dev_priv->saveGR[0x18] =
- i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x18);
-
- /* Sequencer registers */
- for (i = 0; i < 8; i++)
- dev_priv->saveSR[i] =
- i915_read_indexed(VGA_SR_INDEX, VGA_SR_DATA, i);
-}
-
-static void i915_restore_vga(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int i;
- u16 cr_index, cr_data, st01;
-
- /* MSR bits */
- outb(dev_priv->saveMSR, VGA_MSR_WRITE);
- if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) {
- cr_index = VGA_CR_INDEX_CGA;
- cr_data = VGA_CR_DATA_CGA;
- st01 = VGA_ST01_CGA;
- } else {
- cr_index = VGA_CR_INDEX_MDA;
- cr_data = VGA_CR_DATA_MDA;
- st01 = VGA_ST01_MDA;
- }
-
- /* Sequencer registers, don't write SR07 */
- for (i = 0; i < 7; i++)
- i915_write_indexed(VGA_SR_INDEX, VGA_SR_DATA, i,
- dev_priv->saveSR[i]);
-
- /* CRT controller regs */
- /* Enable CR group 0 writes */
- i915_write_indexed(cr_index, cr_data, 0x11, dev_priv->saveCR[0x11]);
- for (i = 0; i <= 0x24; i++)
- i915_write_indexed(cr_index, cr_data, i, dev_priv->saveCR[i]);
-
- /* Graphics controller regs */
- for (i = 0; i < 9; i++)
- i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, i,
- dev_priv->saveGR[i]);
-
- i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x10,
- dev_priv->saveGR[0x10]);
- i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x11,
- dev_priv->saveGR[0x11]);
- i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x18,
- dev_priv->saveGR[0x18]);
-
- /* Attribute controller registers */
- inb(st01); /* switch back to index mode */
- for (i = 0; i <= 0x14; i++)
- i915_write_ar(st01, i, dev_priv->saveAR[i], 0);
- inb(st01); /* switch back to index mode */
- outb(dev_priv->saveAR_INDEX | 0x20, VGA_AR_INDEX);
- inb(st01);
-
- /* VGA color palette registers */
- outb(dev_priv->saveDACMASK, VGA_DACMASK);
- /* DACWX automatically increments during write */
- outb(0, VGA_DACWX);
- /* Write 3 bytes of color data to each index */
- for (i = 0; i < 256 * 3; i++)
- outb(dev_priv->saveDACDATA[i], VGA_DACDATA);
-
-}
-
static int i915_suspend(struct drm_device *dev, pm_message_t state)
{
struct drm_i915_private *dev_priv = dev->dev_private;
diff --git a/linux-core/i915_gem.c b/linux-core/i915_gem.c
index 63f4b91d..9d935be2 100644
--- a/linux-core/i915_gem.c
+++ b/linux-core/i915_gem.c
@@ -30,33 +30,26 @@
#include "drm_compat.h"
#include "i915_drm.h"
#include "i915_drv.h"
+#include <linux/swap.h>
-#define WATCH_COHERENCY 0
-#define WATCH_BUF 0
-#define WATCH_EXEC 0
-#define WATCH_LRU 0
-#define WATCH_RELOC 0
-#define WATCH_INACTIVE 0
-#define WATCH_PWRITE 0
-
-#if WATCH_BUF | WATCH_EXEC | WATCH_PWRITE
-static void
-i915_gem_dump_object(struct drm_gem_object *obj, int len,
- const char *where, uint32_t mark);
-#endif
-
static int
i915_gem_object_set_domain(struct drm_gem_object *obj,
uint32_t read_domains,
uint32_t write_domain);
+static int
+i915_gem_object_set_domain_range(struct drm_gem_object *obj,
+ uint64_t offset,
+ uint64_t size,
+ uint32_t read_domains,
+ uint32_t write_domain);
int
i915_gem_set_domain(struct drm_gem_object *obj,
struct drm_file *file_priv,
uint32_t read_domains,
uint32_t write_domain);
-
-static void
-i915_gem_clflush_object(struct drm_gem_object *obj);
+static int i915_gem_object_get_page_list(struct drm_gem_object *obj);
+static void i915_gem_object_free_page_list(struct drm_gem_object *obj);
+static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
int i915_gem_do_init(struct drm_device *dev, unsigned long start,
unsigned long end)
@@ -69,8 +62,8 @@ int i915_gem_do_init(struct drm_device *dev, unsigned long start,
return -EINVAL;
}
- drm_memrange_init(&dev_priv->mm.gtt_space, start,
- end - start);
+ drm_mm_init(&dev_priv->mm.gtt_space, start,
+ end - start);
dev->gtt_total = (uint32_t) (end - start);
@@ -134,22 +127,35 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
{
struct drm_i915_gem_pread *args = data;
struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj_priv;
ssize_t read;
loff_t offset;
int ret;
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (obj == NULL)
+ return -EBADF;
+ obj_priv = obj->driver_private;
+
+ /* Bounds check source.
+ *
+ * XXX: This could use review for overflow issues...
+ */
+ if (args->offset > obj->size || args->size > obj->size ||
+ args->offset + args->size > obj->size) {
+ drm_gem_object_unreference(obj);
return -EINVAL;
+ }
mutex_lock(&dev->struct_mutex);
- ret = i915_gem_set_domain(obj, file_priv,
- I915_GEM_DOMAIN_CPU, 0);
- if (ret) {
+
+ ret = i915_gem_object_set_domain_range(obj, args->offset, args->size,
+ I915_GEM_DOMAIN_CPU, 0);
+ if (ret != 0) {
drm_gem_object_unreference(obj);
mutex_unlock(&dev->struct_mutex);
return ret;
}
+
offset = args->offset;
read = vfs_read(obj->filp, (char __user *)(uintptr_t)args->data_ptr,
@@ -171,18 +177,12 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
#include "drm_compat.h"
-/**
- * Writes data to the object referenced by handle.
- *
- * On error, the contents of the buffer that were to be modified are undefined.
- */
-int
-i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+static int
+i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
+ struct drm_i915_gem_pwrite *args,
+ struct drm_file *file_priv)
{
- struct drm_i915_gem_pwrite *args = data;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
ssize_t remain;
loff_t offset;
char __user *user_data;
@@ -192,18 +192,6 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
unsigned long pfn;
unsigned long unwritten;
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
- if (obj == NULL)
- return -EINVAL;
-
- /** Bounds check destination.
- *
- * XXX: This could use review for overflow issues...
- */
- if (args->offset > obj->size || args->size > obj->size ||
- args->offset + args->size > obj->size)
- return -EFAULT;
-
user_data = (char __user *) (uintptr_t) args->data_ptr;
remain = args->size;
if (!access_ok(VERIFY_READ, user_data, remain))
@@ -213,7 +201,6 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
mutex_lock(&dev->struct_mutex);
ret = i915_gem_object_pin(obj, 0);
if (ret) {
- drm_gem_object_unreference(obj);
mutex_unlock(&dev->struct_mutex);
return ret;
}
@@ -221,14 +208,13 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
if (ret)
goto fail;
-
+
obj_priv = obj->driver_private;
offset = obj_priv->gtt_offset + args->offset;
obj_priv->dirty = 1;
-
+
while (remain > 0) {
-
- /** Operation in this page
+ /* Operation in this page
*
* i = page number
* o = offset within page
@@ -241,7 +227,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
l = PAGE_SIZE - o;
pfn = (dev->agp->base >> PAGE_SHIFT) + i;
-
+
#ifdef DRM_KMAP_ATOMIC_PROT_PFN
/* kmap_atomic can't map IO pages on non-HIGHMEM kernels
*/
@@ -251,7 +237,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
DRM_INFO("pwrite i %d o %d l %d pfn %ld vaddr %p\n",
i, o, l, pfn, vaddr);
#endif
- unwritten = __copy_from_user_inatomic_nocache(vaddr + o, user_data, l);
+ unwritten = __copy_from_user_inatomic_nocache(vaddr + o,
+ user_data, l);
kunmap_atomic(vaddr, KM_USER0);
if (unwritten)
@@ -259,7 +246,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
{
vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
#if WATCH_PWRITE
- DRM_INFO("pwrite slow i %d o %d l %d pfn %ld vaddr %p\n",
+ DRM_INFO("pwrite slow i %d o %d l %d "
+ "pfn %ld vaddr %p\n",
i, o, l, pfn, vaddr);
#endif
if (vaddr == NULL) {
@@ -288,14 +276,96 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
#endif
fail:
- i915_gem_object_unpin (obj);
- drm_gem_object_unreference(obj);
+ i915_gem_object_unpin(obj);
mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
+int
+i915_gem_shmem_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
+ struct drm_i915_gem_pwrite *args,
+ struct drm_file *file_priv)
+{
+ int ret;
+ loff_t offset;
+ ssize_t written;
+
+ mutex_lock(&dev->struct_mutex);
+
+ ret = i915_gem_set_domain(obj, file_priv,
+ I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
+ if (ret) {
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+ }
+
+ offset = args->offset;
+
+ written = vfs_write(obj->filp,
+ (char __user *)(uintptr_t) args->data_ptr,
+ args->size, &offset);
+ if (written != args->size) {
+ mutex_unlock(&dev->struct_mutex);
+ if (written < 0)
+ return written;
+ else
+ return -EINVAL;
+ }
+
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+
+/**
+ * Writes data to the object referenced by handle.
+ *
+ * On error, the contents of the buffer that were to be modified are undefined.
+ */
+int
+i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_i915_gem_pwrite *args = data;
+ struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj_priv;
+ int ret = 0;
+
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ if (obj == NULL)
+ return -EBADF;
+ obj_priv = obj->driver_private;
+
+ /* Bounds check destination.
+ *
+ * XXX: This could use review for overflow issues...
+ */
+ if (args->offset > obj->size || args->size > obj->size ||
+ args->offset + args->size > obj->size) {
+ drm_gem_object_unreference(obj);
+ return -EINVAL;
+ }
+
+ /* We can only do the GTT pwrite on untiled buffers, as otherwise
+ * it would end up going through the fenced access, and we'll get
+ * different detiling behavior between reading and writing.
+ * pread/pwrite currently are reading and writing from the CPU
+ * perspective, requiring manual detiling by the client.
+ */
+ if (obj_priv->tiling_mode == I915_TILING_NONE &&
+ dev->gtt_total != 0)
+ ret = i915_gem_gtt_pwrite(dev, obj, args, file_priv);
+ else
+ ret = i915_gem_shmem_pwrite(dev, obj, args, file_priv);
+
#if WATCH_PWRITE
if (ret)
DRM_INFO("pwrite failed %d\n", ret);
#endif
+
+ drm_gem_object_unreference(obj);
+
return ret;
}
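
From userspace both pwrite paths look identical; a hypothetical caller fills struct drm_i915_gem_pwrite (field names as consumed by the handler above; DRM_IOCTL_I915_GEM_PWRITE assumed from shared-core/i915_drm.h) and lets the kernel choose the GTT or shmem route based on the tiling mode:

	struct drm_i915_gem_pwrite pw = {
		.handle   = handle,		/* GEM handle of the object */
		.offset   = 0,			/* byte offset into the object */
		.size     = sizeof(buf),
		.data_ptr = (uintptr_t)buf,	/* user pointer, passed as u64 */
	};

	ret = ioctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pw);
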
@@ -315,7 +385,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (obj == NULL)
- return -EINVAL;
+ return -EBADF;
mutex_lock(&dev->struct_mutex);
ret = i915_gem_set_domain(obj, file_priv,
@@ -344,21 +414,20 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (obj == NULL) {
mutex_unlock(&dev->struct_mutex);
- return -EINVAL;
+ return -EBADF;
}
#if WATCH_BUF
DRM_INFO("%s: sw_finish %d (%p)\n",
__func__, args->handle, obj);
#endif
- obj_priv = obj->driver_private;
-
- /** Pinned buffers may be scanout, so flush the cache
- */
- if ((obj->write_domain & I915_GEM_DOMAIN_CPU) && obj_priv->pin_count) {
- i915_gem_clflush_object(obj);
- drm_agp_chipset_flush(dev);
- }
+ obj_priv = obj->driver_private;
+
+ /* Pinned buffers may be scanout, so flush the cache */
+ if ((obj->write_domain & I915_GEM_DOMAIN_CPU) && obj_priv->pin_count) {
+ i915_gem_clflush_object(obj);
+ drm_agp_chipset_flush(dev);
+ }
drm_gem_object_unreference(obj);
mutex_unlock(&dev->struct_mutex);
return ret;
@@ -385,7 +454,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (obj == NULL)
- return -EINVAL;
+ return -EBADF;
offset = args->offset;
@@ -448,25 +517,6 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj)
&dev_priv->mm.active_list);
}
-#if WATCH_INACTIVE
-static void
-i915_verify_inactive(struct drm_device *dev, char *file, int line)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
-
- list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
- obj = obj_priv->obj;
- if (obj_priv->pin_count || obj_priv->active || (obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)))
- DRM_ERROR("inactive %p (p %d a %d w %x) %s:%d\n",
- obj,
- obj_priv->pin_count, obj_priv->active, obj->write_domain, file, line);
- }
-}
-#else
-#define i915_verify_inactive(dev,file,line)
-#endif
static void
i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
@@ -522,7 +572,7 @@ i915_add_request(struct drm_device *dev, uint32_t flush_domains)
OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
OUT_RING(seqno);
- OUT_RING(GFX_OP_USER_INTERRUPT);
+ OUT_RING(MI_USER_INTERRUPT);
ADVANCE_LP_RING();
DRM_DEBUG("%d\n", seqno);
@@ -534,7 +584,7 @@ i915_add_request(struct drm_device *dev, uint32_t flush_domains)
list_add_tail(&request->list, &dev_priv->mm.request_list);
if (was_empty)
- schedule_delayed_work (&dev_priv->mm.retire_work, HZ);
+ schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
return seqno;
}
@@ -544,7 +594,6 @@ i915_add_request(struct drm_device *dev, uint32_t flush_domains)
* Ensures that all commands in the ring are finished
* before signalling the CPU
*/
-
uint32_t
i915_retire_commands(struct drm_device *dev)
{
@@ -661,7 +710,8 @@ i915_gem_retire_requests(struct drm_device *dev)
list);
retiring_seqno = request->seqno;
- if (i915_seqno_passed(seqno, retiring_seqno) || dev_priv->mm.wedged) {
+ if (i915_seqno_passed(seqno, retiring_seqno) ||
+ dev_priv->mm.wedged) {
i915_gem_retire_request(dev, request);
list_del(&request->list);
@@ -684,7 +734,7 @@ i915_gem_retire_work_handler(struct work_struct *work)
mutex_lock(&dev->struct_mutex);
i915_gem_retire_requests(dev);
if (!list_empty(&dev_priv->mm.request_list))
- schedule_delayed_work (&dev_priv->mm.retire_work, HZ);
+ schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
mutex_unlock(&dev->struct_mutex);
}
@@ -705,7 +755,8 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno)
i915_user_irq_on(dev);
ret = wait_event_interruptible(dev_priv->irq_queue,
i915_seqno_passed(i915_get_gem_seqno(dev),
- seqno) || dev_priv->mm.wedged);
+ seqno) ||
+ dev_priv->mm.wedged);
i915_user_irq_off(dev);
dev_priv->mm.waiting_gem_seqno = 0;
}
@@ -744,7 +795,8 @@ i915_gem_flush(struct drm_device *dev,
if (flush_domains & I915_GEM_DOMAIN_CPU)
drm_agp_chipset_flush(dev);
- if ((invalidate_domains|flush_domains) & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)) {
+ if ((invalidate_domains | flush_domains) & ~(I915_GEM_DOMAIN_CPU |
+ I915_GEM_DOMAIN_GTT)) {
/*
* read/write caches:
*
@@ -871,7 +923,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
*/
ret = i915_gem_object_wait_rendering(obj);
if (ret) {
- DRM_ERROR ("wait_rendering failed: %d\n", ret);
+ DRM_ERROR("wait_rendering failed: %d\n", ret);
return ret;
}
@@ -901,8 +953,8 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
if (obj_priv->gtt_space) {
atomic_dec(&dev->gtt_count);
atomic_sub(obj->size, &dev->gtt_memory);
-
- drm_memrange_put_block(obj_priv->gtt_space);
+
+ drm_mm_put_block(obj_priv->gtt_space);
obj_priv->gtt_space = NULL;
}
@@ -913,83 +965,6 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
return 0;
}
-#if WATCH_BUF | WATCH_EXEC | WATCH_PWRITE
-static void
-i915_gem_dump_page(struct page *page, uint32_t start, uint32_t end,
- uint32_t bias, uint32_t mark)
-{
- uint32_t *mem = kmap_atomic(page, KM_USER0);
- int i;
- for (i = start; i < end; i += 4)
- DRM_INFO("%08x: %08x%s\n",
- (int) (bias + i), mem[i / 4],
- (bias + i == mark) ? " ********" : "");
- kunmap_atomic(mem, KM_USER0);
- /* give syslog time to catch up */
- msleep(1);
-}
-
-static void
-i915_gem_dump_object(struct drm_gem_object *obj, int len,
- const char *where, uint32_t mark)
-{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
- int page;
-
- DRM_INFO("%s: object at offset %08x\n", where, obj_priv->gtt_offset);
- for (page = 0; page < (len + PAGE_SIZE-1) / PAGE_SIZE; page++) {
- int page_len, chunk, chunk_len;
-
- page_len = len - page * PAGE_SIZE;
- if (page_len > PAGE_SIZE)
- page_len = PAGE_SIZE;
-
- for (chunk = 0; chunk < page_len; chunk += 128) {
- chunk_len = page_len - chunk;
- if (chunk_len > 128)
- chunk_len = 128;
- i915_gem_dump_page(obj_priv->page_list[page],
- chunk, chunk + chunk_len,
- obj_priv->gtt_offset +
- page * PAGE_SIZE,
- mark);
- }
- }
-}
-#endif
-
-#if WATCH_LRU
-static void
-i915_dump_lru(struct drm_device *dev, const char *where)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv;
-
- DRM_INFO("active list %s {\n", where);
- list_for_each_entry(obj_priv, &dev_priv->mm.active_list,
- list)
- {
- DRM_INFO(" %p: %08x\n", obj_priv,
- obj_priv->last_rendering_seqno);
- }
- DRM_INFO("}\n");
- DRM_INFO("flushing list %s {\n", where);
- list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list,
- list)
- {
- DRM_INFO(" %p: %08x\n", obj_priv,
- obj_priv->last_rendering_seqno);
- }
- DRM_INFO("}\n");
- DRM_INFO("inactive %s {\n", where);
- list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
- DRM_INFO(" %p: %08x\n", obj_priv,
- obj_priv->last_rendering_seqno);
- }
- DRM_INFO("}\n");
-}
-#endif
-
static int
i915_gem_evict_something(struct drm_device *dev)
{
@@ -1063,7 +1038,8 @@ i915_gem_evict_something(struct drm_device *dev)
continue;
}
- DRM_ERROR("inactive empty %d request empty %d flushing empty %d\n",
+ DRM_ERROR("inactive empty %d request empty %d "
+ "flushing empty %d\n",
list_empty(&dev_priv->mm.inactive_list),
list_empty(&dev_priv->mm.request_list),
list_empty(&dev_priv->mm.flushing_list));
@@ -1084,7 +1060,7 @@ i915_gem_object_get_page_list(struct drm_gem_object *obj)
struct inode *inode;
struct page *page;
int ret;
-
+
if (obj_priv->page_list)
return 0;
@@ -1110,7 +1086,7 @@ i915_gem_object_get_page_list(struct drm_gem_object *obj)
page = NULL;
}
ret = shmem_getpage(inode, i, &page, SGP_DIRTY, NULL);
-
+
if (ret) {
DRM_ERROR("shmem_getpage failed: %d\n", ret);
i915_gem_object_free_page_list(obj);
@@ -1132,7 +1108,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
struct drm_device *dev = obj->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = obj->driver_private;
- struct drm_memrange_node *free_space;
+ struct drm_mm_node *free_space;
int page_count, ret;
if (alignment == 0)
@@ -1143,13 +1119,11 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
}
search_free:
- free_space = drm_memrange_search_free(&dev_priv->mm.gtt_space,
- obj->size,
- alignment, 0);
+ free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
+ obj->size, alignment, 0);
if (free_space != NULL) {
- obj_priv->gtt_space =
- drm_memrange_get_block(free_space, obj->size,
- alignment);
+ obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
+ alignment);
if (obj_priv->gtt_space != NULL) {
obj_priv->gtt_space->private = obj;
obj_priv->gtt_offset = obj_priv->gtt_space->start;
@@ -1183,7 +1157,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
#endif
ret = i915_gem_object_get_page_list(obj);
if (ret) {
- drm_memrange_put_block(obj_priv->gtt_space);
+ drm_mm_put_block(obj_priv->gtt_space);
obj_priv->gtt_space = NULL;
return ret;
}
@@ -1198,7 +1172,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
obj_priv->gtt_offset);
if (obj_priv->agp_mem == NULL) {
i915_gem_object_free_page_list(obj);
- drm_memrange_put_block(obj_priv->gtt_space);
+ drm_mm_put_block(obj_priv->gtt_space);
obj_priv->gtt_space = NULL;
return -ENOMEM;
}
@@ -1215,7 +1189,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
return 0;
}
-static void
+void
i915_gem_clflush_object(struct drm_gem_object *obj)
{
struct drm_i915_gem_object *obj_priv = obj->driver_private;
@@ -1354,8 +1328,8 @@ i915_gem_object_set_domain(struct drm_gem_object *obj,
#if WATCH_BUF
DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
- __func__, obj,
- obj->read_domains, read_domains,
+ __func__, obj,
+ obj->read_domains, read_domains,
obj->write_domain, write_domain);
#endif
/*
@@ -1393,7 +1367,8 @@ i915_gem_object_set_domain(struct drm_gem_object *obj,
* flushed before the cpu cache is invalidated
*/
if ((invalidate_domains & I915_GEM_DOMAIN_CPU) &&
- (flush_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT))) {
+ (flush_domains & ~(I915_GEM_DOMAIN_CPU |
+ I915_GEM_DOMAIN_GTT))) {
ret = i915_gem_object_wait_rendering(obj);
if (ret)
return ret;
@@ -1403,7 +1378,17 @@ i915_gem_object_set_domain(struct drm_gem_object *obj,
if ((write_domain | flush_domains) != 0)
obj->write_domain = write_domain;
+
+ /* If we're invalidating the CPU domain, clear the per-page CPU
+ * domain list as well.
+ */
+ if (obj_priv->page_cpu_valid != NULL &&
+ (obj->read_domains & I915_GEM_DOMAIN_CPU) &&
+ ((read_domains & I915_GEM_DOMAIN_CPU) == 0)) {
+ memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE);
+ }
obj->read_domains = read_domains;
+
dev->invalidate_domains |= invalidate_domains;
dev->flush_domains |= flush_domains;
#if WATCH_BUF
@@ -1416,6 +1401,57 @@ i915_gem_object_set_domain(struct drm_gem_object *obj,
}
/**
+ * Set the read/write domain on a range of the object.
+ *
+ * Currently only implemented for CPU reads, otherwise drops to normal
+ * i915_gem_object_set_domain().
+ */
+static int
+i915_gem_object_set_domain_range(struct drm_gem_object *obj,
+ uint64_t offset,
+ uint64_t size,
+ uint32_t read_domains,
+ uint32_t write_domain)
+{
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ int ret, i;
+
+ if (obj->read_domains & I915_GEM_DOMAIN_CPU)
+ return 0;
+
+ if (read_domains != I915_GEM_DOMAIN_CPU ||
+ write_domain != 0)
+ return i915_gem_object_set_domain(obj,
+ read_domains, write_domain);
+
+ /* Wait on any GPU rendering to the object to be flushed. */
+ if (obj->write_domain & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) {
+ ret = i915_gem_object_wait_rendering(obj);
+ if (ret)
+ return ret;
+ }
+
+ if (obj_priv->page_cpu_valid == NULL) {
+ obj_priv->page_cpu_valid = drm_calloc(1, obj->size / PAGE_SIZE,
+ DRM_MEM_DRIVER);
+ }
+
+ /* Flush the cache on any pages that are still invalid from the CPU's
+ * perspective.
+ */
+ for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE; i++) {
+ if (obj_priv->page_cpu_valid[i])
+ continue;
+
+ drm_ttm_cache_flush(obj_priv->page_list + i, 1);
+
+ obj_priv->page_cpu_valid[i] = 1;
+ }
+
+ return 0;
+}
+
+/**
* Once all of the objects have been set in the proper domain,
* perform the necessary flush and invalidate operations.
*
@@ -1447,76 +1483,6 @@ i915_gem_dev_set_domain(struct drm_device *dev)
return flush_domains;
}
-#if WATCH_COHERENCY
-static void
-i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)
-{
- struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
- int page;
- uint32_t *gtt_mapping;
- uint32_t *backing_map = NULL;
- int bad_count = 0;
-
- DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %dkb):\n",
- __func__, obj, obj_priv->gtt_offset, handle,
- obj->size / 1024);
-
- gtt_mapping = ioremap(dev->agp->base + obj_priv->gtt_offset,
- obj->size);
- if (gtt_mapping == NULL) {
- DRM_ERROR("failed to map GTT space\n");
- return;
- }
-
- for (page = 0; page < obj->size / PAGE_SIZE; page++) {
- int i;
-
- backing_map = kmap_atomic(obj_priv->page_list[page], KM_USER0);
-
- if (backing_map == NULL) {
- DRM_ERROR("failed to map backing page\n");
- goto out;
- }
-
- for (i = 0; i < PAGE_SIZE / 4; i++) {
- uint32_t cpuval = backing_map[i];
- uint32_t gttval = readl(gtt_mapping +
- page * 1024 + i);
-
- if (cpuval != gttval) {
- DRM_INFO("incoherent CPU vs GPU at 0x%08x: "
- "0x%08x vs 0x%08x\n",
- (int)(obj_priv->gtt_offset +
- page * PAGE_SIZE + i * 4),
- cpuval, gttval);
- if (bad_count++ >= 8) {
- DRM_INFO("...\n");
- goto out;
- }
- }
- }
- kunmap_atomic(backing_map, KM_USER0);
- backing_map = NULL;
- }
-
- out:
- if (backing_map != NULL)
- kunmap_atomic(backing_map, KM_USER0);
- iounmap(gtt_mapping);
-
- /* give syslog time to catch up */
- msleep(1);
-
- /* Directly flush the object, since we just loaded values with the CPU
- * from the backing pages and we don't want to disturb the cache
- * management that we're trying to observe.
- */
-
- i915_gem_clflush_object(obj);
-}
-#endif
-
/**
* Pin an object to the GTT and evaluate the relocations landing in it.
*/
@@ -1561,7 +1527,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
reloc.target_handle);
if (target_obj == NULL) {
i915_gem_object_unpin(obj);
- return -EINVAL;
+ return -EBADF;
}
target_obj_priv = target_obj->driver_private;
@@ -1784,7 +1750,8 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
mutex_lock(&dev->struct_mutex);
seqno = i915_file_priv->mm.last_gem_throttle_seqno;
- i915_file_priv->mm.last_gem_throttle_seqno = i915_file_priv->mm.last_gem_seqno;
+ i915_file_priv->mm.last_gem_throttle_seqno =
+ i915_file_priv->mm.last_gem_seqno;
if (seqno)
ret = i915_wait_request(dev, seqno);
mutex_unlock(&dev->struct_mutex);
@@ -1841,7 +1808,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
mutex_unlock(&dev->struct_mutex);
return -EIO;
}
-
+
if (dev_priv->mm.suspended) {
DRM_ERROR("Execbuf while VT-switched.\n");
mutex_unlock(&dev->struct_mutex);
@@ -1862,7 +1829,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
if (object_list[i] == NULL) {
DRM_ERROR("Invalid object handle %d at index %d\n",
exec_list[i].handle, i);
- ret = -EINVAL;
+ ret = -EBADF;
goto err;
}
@@ -2022,7 +1989,9 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
if (obj_priv->pin_count == 1) {
atomic_inc(&dev->pin_count);
atomic_add(obj->size, &dev->pin_memory);
- if (!obj_priv->active && (obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)) == 0 &&
+ if (!obj_priv->active &&
+ (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
+ I915_GEM_DOMAIN_GTT)) == 0 &&
!list_empty(&obj_priv->list))
list_del_init(&obj_priv->list);
}
@@ -2048,7 +2017,9 @@ i915_gem_object_unpin(struct drm_gem_object *obj)
* the inactive list
*/
if (obj_priv->pin_count == 0) {
- if (!obj_priv->active && (obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)) == 0)
+ if (!obj_priv->active &&
+ (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
+ I915_GEM_DOMAIN_GTT)) == 0)
list_move_tail(&obj_priv->list,
&dev_priv->mm.inactive_list);
atomic_dec(&dev->pin_count);
@@ -2073,7 +2044,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
args->handle);
mutex_unlock(&dev->struct_mutex);
- return -EINVAL;
+ return -EBADF;
}
obj_priv = obj->driver_private;
@@ -2084,7 +2055,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
return ret;
}
- /** XXX - flush the CPU caches for pinned objects
+ /* XXX - flush the CPU caches for pinned objects
* as the X server doesn't manage domains yet
*/
if (obj->write_domain & I915_GEM_DOMAIN_CPU) {
@@ -2113,7 +2084,7 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
args->handle);
mutex_unlock(&dev->struct_mutex);
- return -EINVAL;
+ return -EBADF;
}
i915_gem_object_unpin(obj);
@@ -2137,7 +2108,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
args->handle);
mutex_unlock(&dev->struct_mutex);
- return -EINVAL;
+ return -EBADF;
}
obj_priv = obj->driver_private;
@@ -2187,6 +2158,7 @@ void i915_gem_free_object(struct drm_gem_object *obj)
i915_gem_object_unbind(obj);
+ drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER);
drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
}
@@ -2206,7 +2178,7 @@ i915_gem_set_domain(struct drm_gem_object *obj,
if (ret)
return ret;
flush_domains = i915_gem_dev_set_domain(obj->dev);
-
+
if (flush_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT))
(void) i915_add_request(dev, flush_domains);
@@ -2267,7 +2239,8 @@ i915_gem_idle(struct drm_device *dev)
*/
i915_gem_flush(dev, ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT),
~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
- seqno = i915_add_request(dev, ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
+ seqno = i915_add_request(dev, ~(I915_GEM_DOMAIN_CPU |
+ I915_GEM_DOMAIN_GTT));
if (seqno == 0) {
mutex_unlock(&dev->struct_mutex);
@@ -2317,6 +2290,56 @@ i915_gem_idle(struct drm_device *dev)
return 0;
}
+static int
+i915_gem_init_hws(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj_priv;
+ int ret;
+
+ /* If we need a physical address for the status page, it's already
+ * initialized at driver load time.
+ */
+ if (!I915_NEED_GFX_HWS(dev))
+ return 0;
+
+ obj = drm_gem_object_alloc(dev, 4096);
+ if (obj == NULL) {
+ DRM_ERROR("Failed to allocate status page\n");
+ return -ENOMEM;
+ }
+ obj_priv = obj->driver_private;
+
+ ret = i915_gem_object_pin(obj, 4096);
+ if (ret != 0) {
+ drm_gem_object_unreference(obj);
+ return ret;
+ }
+
+ dev_priv->status_gfx_addr = obj_priv->gtt_offset;
+ dev_priv->hws_map.offset = dev->agp->base + obj_priv->gtt_offset;
+ dev_priv->hws_map.size = 4096;
+ dev_priv->hws_map.type = 0;
+ dev_priv->hws_map.flags = 0;
+ dev_priv->hws_map.mtrr = 0;
+
+ drm_core_ioremap(&dev_priv->hws_map, dev);
+ if (dev_priv->hws_map.handle == NULL) {
+ DRM_ERROR("Failed to map status page.\n");
+ memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
+ drm_gem_object_unreference(obj);
+ return -EINVAL;
+ }
+ dev_priv->hws_obj = obj;
+ dev_priv->hw_status_page = dev_priv->hws_map.handle;
+ memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
+ I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
+ DRM_DEBUG("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
+
+ return 0;
+}
+
int
i915_gem_init_ringbuffer(struct drm_device *dev)
{
@@ -2325,6 +2348,10 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
struct drm_i915_gem_object *obj_priv;
int ret;
+ ret = i915_gem_init_hws(dev);
+ if (ret != 0)
+ return ret;
+
obj = drm_gem_object_alloc(dev, 128 * 1024);
if (obj == NULL) {
DRM_ERROR("Failed to allocate ringbuffer\n");
@@ -2360,15 +2387,16 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
/* Stop the ring if it's running. */
I915_WRITE(PRB0_CTL, 0);
- I915_WRITE(PRB0_HEAD, 0);
- I915_WRITE(PRB0_TAIL, 0);
+ I915_WRITE(PRB0_HEAD, 0);
+ I915_WRITE(PRB0_TAIL, 0);
I915_WRITE(PRB0_START, 0);
/* Initialize the ring. */
I915_WRITE(PRB0_START, obj_priv->gtt_offset);
- I915_WRITE(PRB0_CTL, (((obj->size - 4096) & RING_NR_PAGES) |
- RING_NO_REPORT |
- RING_VALID));
+ I915_WRITE(PRB0_CTL,
+ ((obj->size - 4096) & RING_NR_PAGES) |
+ RING_NO_REPORT |
+ RING_VALID);
/* Update our cache of the ring state */
i915_kernel_lost_context(dev);
@@ -2388,8 +2416,18 @@ i915_gem_cleanup_ringbuffer(struct drm_device *dev)
i915_gem_object_unpin(dev_priv->ring.ring_obj);
drm_gem_object_unreference(dev_priv->ring.ring_obj);
-
+ dev_priv->ring.ring_obj = NULL;
memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
+
+ if (dev_priv->hws_obj != NULL) {
+ i915_gem_object_unpin(dev_priv->hws_obj);
+ drm_gem_object_unreference(dev_priv->hws_obj);
+ dev_priv->hws_obj = NULL;
+ memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
+
+ /* Write high address into HWS_PGA when disabling. */
+ I915_WRITE(HWS_PGA, 0x1ffff000);
+ }
}
int
@@ -2399,8 +2437,11 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return 0;
+
if (dev_priv->mm.wedged) {
- DRM_ERROR("Renabling wedged hardware, good luck\n");
+ DRM_ERROR("Reenabling wedged hardware, good luck\n");
dev_priv->mm.wedged = 0;
}
@@ -2424,6 +2465,9 @@ i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
{
int ret;
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return 0;
+
mutex_lock(&dev->struct_mutex);
ret = i915_gem_idle(dev);
if (ret == 0)
@@ -2433,263 +2477,6 @@ i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
return 0;
}
-static int i915_gem_active_info(char *buf, char **start, off_t offset,
- int request, int *eof, void *data)
-{
- struct drm_minor *minor = (struct drm_minor *) data;
- struct drm_device *dev = minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv;
- int len = 0;
-
- if (offset > DRM_PROC_LIMIT) {
- *eof = 1;
- return 0;
- }
-
- *start = &buf[offset];
- *eof = 0;
- DRM_PROC_PRINT("Active:\n");
- list_for_each_entry(obj_priv, &dev_priv->mm.active_list,
- list)
- {
- struct drm_gem_object *obj = obj_priv->obj;
- if (obj->name) {
- DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n",
- obj, obj->name,
- obj->read_domains, obj->write_domain,
- obj_priv->last_rendering_seqno);
- } else {
- DRM_PROC_PRINT(" %p: %08x %08x %d\n",
- obj,
- obj->read_domains, obj->write_domain,
- obj_priv->last_rendering_seqno);
- }
- }
- if (len > request + offset)
- return request;
- *eof = 1;
- return len - offset;
-}
-
-static int i915_gem_flushing_info(char *buf, char **start, off_t offset,
- int request, int *eof, void *data)
-{
- struct drm_minor *minor = (struct drm_minor *) data;
- struct drm_device *dev = minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv;
- int len = 0;
-
- if (offset > DRM_PROC_LIMIT) {
- *eof = 1;
- return 0;
- }
-
- *start = &buf[offset];
- *eof = 0;
- DRM_PROC_PRINT("Flushing:\n");
- list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list,
- list)
- {
- struct drm_gem_object *obj = obj_priv->obj;
- if (obj->name) {
- DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n",
- obj, obj->name,
- obj->read_domains, obj->write_domain,
- obj_priv->last_rendering_seqno);
- } else {
- DRM_PROC_PRINT(" %p: %08x %08x %d\n", obj,
- obj->read_domains, obj->write_domain,
- obj_priv->last_rendering_seqno);
- }
- }
- if (len > request + offset)
- return request;
- *eof = 1;
- return len - offset;
-}
-
-static int i915_gem_inactive_info(char *buf, char **start, off_t offset,
- int request, int *eof, void *data)
-{
- struct drm_minor *minor = (struct drm_minor *) data;
- struct drm_device *dev = minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv;
- int len = 0;
-
- if (offset > DRM_PROC_LIMIT) {
- *eof = 1;
- return 0;
- }
-
- *start = &buf[offset];
- *eof = 0;
- DRM_PROC_PRINT("Inactive:\n");
- list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list,
- list)
- {
- struct drm_gem_object *obj = obj_priv->obj;
- if (obj->name) {
- DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n",
- obj, obj->name,
- obj->read_domains, obj->write_domain,
- obj_priv->last_rendering_seqno);
- } else {
- DRM_PROC_PRINT(" %p: %08x %08x %d\n", obj,
- obj->read_domains, obj->write_domain,
- obj_priv->last_rendering_seqno);
- }
- }
- if (len > request + offset)
- return request;
- *eof = 1;
- return len - offset;
-}
-
-static int i915_gem_request_info(char *buf, char **start, off_t offset,
- int request, int *eof, void *data)
-{
- struct drm_minor *minor = (struct drm_minor *) data;
- struct drm_device *dev = minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_request *gem_request;
- int len = 0;
-
- if (offset > DRM_PROC_LIMIT) {
- *eof = 1;
- return 0;
- }
-
- *start = &buf[offset];
- *eof = 0;
- DRM_PROC_PRINT("Request:\n");
- list_for_each_entry(gem_request, &dev_priv->mm.request_list,
- list)
- {
- DRM_PROC_PRINT (" %d @ %d %08x\n",
- gem_request->seqno,
- (int) (jiffies - gem_request->emitted_jiffies),
- gem_request->flush_domains);
- }
- if (len > request + offset)
- return request;
- *eof = 1;
- return len - offset;
-}
-
-static int i915_gem_seqno_info(char *buf, char **start, off_t offset,
- int request, int *eof, void *data)
-{
- struct drm_minor *minor = (struct drm_minor *) data;
- struct drm_device *dev = minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- int len = 0;
-
- if (offset > DRM_PROC_LIMIT) {
- *eof = 1;
- return 0;
- }
-
- *start = &buf[offset];
- *eof = 0;
- DRM_PROC_PRINT("Current sequence: %d\n", i915_get_gem_seqno(dev));
- DRM_PROC_PRINT("Waiter sequence: %d\n", dev_priv->mm.waiting_gem_seqno);
- DRM_PROC_PRINT("IRQ sequence: %d\n", dev_priv->mm.irq_gem_seqno);
- if (len > request + offset)
- return request;
- *eof = 1;
- return len - offset;
-}
-
-
-static int i915_interrupt_info(char *buf, char **start, off_t offset,
- int request, int *eof, void *data)
-{
- struct drm_minor *minor = (struct drm_minor *) data;
- struct drm_device *dev = minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- int len = 0;
-
- if (offset > DRM_PROC_LIMIT) {
- *eof = 1;
- return 0;
- }
-
- *start = &buf[offset];
- *eof = 0;
- DRM_PROC_PRINT("Interrupt enable: %08x\n",
- I915_READ(IER));
- DRM_PROC_PRINT("Interrupt identity: %08x\n",
- I915_READ(IIR));
- DRM_PROC_PRINT("Interrupt mask: %08x\n",
- I915_READ(IMR));
- DRM_PROC_PRINT("Pipe A stat: %08x\n",
- I915_READ(PIPEASTAT));
- DRM_PROC_PRINT("Pipe B stat: %08x\n",
- I915_READ(PIPEBSTAT));
- DRM_PROC_PRINT("Interrupts received: %d\n",
- atomic_read(&dev_priv->irq_received));
- DRM_PROC_PRINT("Current sequence: %d\n",
- i915_get_gem_seqno(dev));
- DRM_PROC_PRINT("Waiter sequence: %d\n",
- dev_priv->mm.waiting_gem_seqno);
- DRM_PROC_PRINT("IRQ sequence: %d\n",
- dev_priv->mm.irq_gem_seqno);
- if (len > request + offset)
- return request;
- *eof = 1;
- return len - offset;
-}
-
-static struct drm_proc_list {
- const char *name; /**< file name */
- int (*f) (char *, char **, off_t, int, int *, void *); /**< proc callback*/
-} i915_gem_proc_list[] = {
- {"i915_gem_active", i915_gem_active_info},
- {"i915_gem_flushing", i915_gem_flushing_info},
- {"i915_gem_inactive", i915_gem_inactive_info},
- {"i915_gem_request", i915_gem_request_info},
- {"i915_gem_seqno", i915_gem_seqno_info},
- {"i915_gem_interrupt", i915_interrupt_info},
-};
-
-#define I915_GEM_PROC_ENTRIES ARRAY_SIZE(i915_gem_proc_list)
-
-int i915_gem_proc_init(struct drm_minor *minor)
-{
- struct proc_dir_entry *ent;
- int i, j;
-
- for (i = 0; i < I915_GEM_PROC_ENTRIES; i++) {
- ent = create_proc_entry(i915_gem_proc_list[i].name,
- S_IFREG | S_IRUGO, minor->dev_root);
- if (!ent) {
- DRM_ERROR("Cannot create /proc/dri/.../%s\n",
- i915_gem_proc_list[i].name);
- for (j = 0; j < i; j++)
- remove_proc_entry(i915_gem_proc_list[i].name,
- minor->dev_root);
- return -1;
- }
- ent->read_proc = i915_gem_proc_list[i].f;
- ent->data = minor;
- }
- return 0;
-}
-
-void i915_gem_proc_cleanup(struct drm_minor *minor)
-{
- int i;
-
- if (!minor->dev_root)
- return;
-
- for (i = 0; i < I915_GEM_PROC_ENTRIES; i++)
- remove_proc_entry(i915_gem_proc_list[i].name, minor->dev_root);
-}
-
void
i915_gem_lastclose(struct drm_device *dev)
{
@@ -2702,9 +2489,24 @@ i915_gem_lastclose(struct drm_device *dev)
ret = i915_gem_idle(dev);
if (ret)
DRM_ERROR("failed to idle hardware: %d\n", ret);
-
+
i915_gem_cleanup_ringbuffer(dev);
}
-
+
mutex_unlock(&dev->struct_mutex);
}
+
+void i915_gem_load(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ INIT_LIST_HEAD(&dev_priv->mm.active_list);
+ INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
+ INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
+ INIT_LIST_HEAD(&dev_priv->mm.request_list);
+ INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
+ i915_gem_retire_work_handler);
+ dev_priv->mm.next_gem_seqno = 1;
+
+ i915_gem_detect_bit_6_swizzle(dev);
+}
diff --git a/linux-core/i915_gem_debug.c b/linux-core/i915_gem_debug.c
new file mode 100644
index 00000000..a2d6f289
--- /dev/null
+++ b/linux-core/i915_gem_debug.c
@@ -0,0 +1,202 @@
+/*
+ * Copyright © 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Keith Packard <keithp@keithp.com>
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "drm_compat.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+
+#if WATCH_INACTIVE
+void
+i915_verify_inactive(struct drm_device *dev, char *file, int line)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj_priv;
+
+ list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
+ obj = obj_priv->obj;
+ if (obj_priv->pin_count || obj_priv->active ||
+ (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
+ I915_GEM_DOMAIN_GTT)))
+ DRM_ERROR("inactive %p (p %d a %d w %x) %s:%d\n",
+ obj,
+ obj_priv->pin_count, obj_priv->active,
+ obj->write_domain, file, line);
+ }
+}
+#endif /* WATCH_INACTIVE */
+
+
+#if WATCH_BUF | WATCH_EXEC | WATCH_PWRITE
+static void
+i915_gem_dump_page(struct page *page, uint32_t start, uint32_t end,
+ uint32_t bias, uint32_t mark)
+{
+ uint32_t *mem = kmap_atomic(page, KM_USER0);
+ int i;
+ for (i = start; i < end; i += 4)
+ DRM_INFO("%08x: %08x%s\n",
+ (int) (bias + i), mem[i / 4],
+ (bias + i == mark) ? " ********" : "");
+ kunmap_atomic(mem, KM_USER0);
+ /* give syslog time to catch up */
+ msleep(1);
+}
+
+void
+i915_gem_dump_object(struct drm_gem_object *obj, int len,
+ const char *where, uint32_t mark)
+{
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ int page;
+
+ DRM_INFO("%s: object at offset %08x\n", where, obj_priv->gtt_offset);
+ for (page = 0; page < (len + PAGE_SIZE-1) / PAGE_SIZE; page++) {
+ int page_len, chunk, chunk_len;
+
+ page_len = len - page * PAGE_SIZE;
+ if (page_len > PAGE_SIZE)
+ page_len = PAGE_SIZE;
+
+ for (chunk = 0; chunk < page_len; chunk += 128) {
+ chunk_len = page_len - chunk;
+ if (chunk_len > 128)
+ chunk_len = 128;
+ i915_gem_dump_page(obj_priv->page_list[page],
+ chunk, chunk + chunk_len,
+ obj_priv->gtt_offset +
+ page * PAGE_SIZE,
+ mark);
+ }
+ }
+}
+#endif
+
+#if WATCH_LRU
+void
+i915_dump_lru(struct drm_device *dev, const char *where)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj_priv;
+
+ DRM_INFO("active list %s {\n", where);
+ list_for_each_entry(obj_priv, &dev_priv->mm.active_list,
+ list)
+ {
+ DRM_INFO(" %p: %08x\n", obj_priv,
+ obj_priv->last_rendering_seqno);
+ }
+ DRM_INFO("}\n");
+ DRM_INFO("flushing list %s {\n", where);
+ list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list,
+ list)
+ {
+ DRM_INFO(" %p: %08x\n", obj_priv,
+ obj_priv->last_rendering_seqno);
+ }
+ DRM_INFO("}\n");
+ DRM_INFO("inactive %s {\n", where);
+ list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
+ DRM_INFO(" %p: %08x\n", obj_priv,
+ obj_priv->last_rendering_seqno);
+ }
+ DRM_INFO("}\n");
+}
+#endif
+
+
+#if WATCH_COHERENCY
+void
+i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)
+{
+ struct drm_device *dev = obj->dev;
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ int page;
+ uint32_t *gtt_mapping;
+ uint32_t *backing_map = NULL;
+ int bad_count = 0;
+
+ DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %dkb):\n",
+ __func__, obj, obj_priv->gtt_offset, handle,
+ obj->size / 1024);
+
+ gtt_mapping = ioremap(dev->agp->base + obj_priv->gtt_offset,
+ obj->size);
+ if (gtt_mapping == NULL) {
+ DRM_ERROR("failed to map GTT space\n");
+ return;
+ }
+
+ for (page = 0; page < obj->size / PAGE_SIZE; page++) {
+ int i;
+
+ backing_map = kmap_atomic(obj_priv->page_list[page], KM_USER0);
+
+ if (backing_map == NULL) {
+ DRM_ERROR("failed to map backing page\n");
+ goto out;
+ }
+
+ for (i = 0; i < PAGE_SIZE / 4; i++) {
+ uint32_t cpuval = backing_map[i];
+ uint32_t gttval = readl(gtt_mapping +
+ page * 1024 + i);
+
+ if (cpuval != gttval) {
+ DRM_INFO("incoherent CPU vs GPU at 0x%08x: "
+ "0x%08x vs 0x%08x\n",
+ (int)(obj_priv->gtt_offset +
+ page * PAGE_SIZE + i * 4),
+ cpuval, gttval);
+ if (bad_count++ >= 8) {
+ DRM_INFO("...\n");
+ goto out;
+ }
+ }
+ }
+ kunmap_atomic(backing_map, KM_USER0);
+ backing_map = NULL;
+ }
+
+ out:
+ if (backing_map != NULL)
+ kunmap_atomic(backing_map, KM_USER0);
+ iounmap(gtt_mapping);
+
+ /* give syslog time to catch up */
+ msleep(1);
+
+ /* Directly flush the object, since we just loaded values with the CPU
+ * from the backing pages and we don't want to disturb the cache
+ * management that we're trying to observe.
+ */
+
+ i915_gem_clflush_object(obj);
+}
+#endif
diff --git a/linux-core/i915_gem_proc.c b/linux-core/i915_gem_proc.c
new file mode 100644
index 00000000..2704f922
--- /dev/null
+++ b/linux-core/i915_gem_proc.c
@@ -0,0 +1,293 @@
+/*
+ * Copyright © 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ * Keith Packard <keithp@keithp.com>
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "drm_compat.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+
+static int i915_gem_active_info(char *buf, char **start, off_t offset,
+ int request, int *eof, void *data)
+{
+ struct drm_minor *minor = (struct drm_minor *) data;
+ struct drm_device *dev = minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj_priv;
+ int len = 0;
+
+ if (offset > DRM_PROC_LIMIT) {
+ *eof = 1;
+ return 0;
+ }
+
+ *start = &buf[offset];
+ *eof = 0;
+ DRM_PROC_PRINT("Active:\n");
+ list_for_each_entry(obj_priv, &dev_priv->mm.active_list,
+ list)
+ {
+ struct drm_gem_object *obj = obj_priv->obj;
+ if (obj->name) {
+ DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n",
+ obj, obj->name,
+ obj->read_domains, obj->write_domain,
+ obj_priv->last_rendering_seqno);
+ } else {
+ DRM_PROC_PRINT(" %p: %08x %08x %d\n",
+ obj,
+ obj->read_domains, obj->write_domain,
+ obj_priv->last_rendering_seqno);
+ }
+ }
+ if (len > request + offset)
+ return request;
+ *eof = 1;
+ return len - offset;
+}
+
+static int i915_gem_flushing_info(char *buf, char **start, off_t offset,
+ int request, int *eof, void *data)
+{
+ struct drm_minor *minor = (struct drm_minor *) data;
+ struct drm_device *dev = minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj_priv;
+ int len = 0;
+
+ if (offset > DRM_PROC_LIMIT) {
+ *eof = 1;
+ return 0;
+ }
+
+ *start = &buf[offset];
+ *eof = 0;
+ DRM_PROC_PRINT("Flushing:\n");
+ list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list,
+ list)
+ {
+ struct drm_gem_object *obj = obj_priv->obj;
+ if (obj->name) {
+ DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n",
+ obj, obj->name,
+ obj->read_domains, obj->write_domain,
+ obj_priv->last_rendering_seqno);
+ } else {
+ DRM_PROC_PRINT(" %p: %08x %08x %d\n", obj,
+ obj->read_domains, obj->write_domain,
+ obj_priv->last_rendering_seqno);
+ }
+ }
+ if (len > request + offset)
+ return request;
+ *eof = 1;
+ return len - offset;
+}
+
+static int i915_gem_inactive_info(char *buf, char **start, off_t offset,
+ int request, int *eof, void *data)
+{
+ struct drm_minor *minor = (struct drm_minor *) data;
+ struct drm_device *dev = minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj_priv;
+ int len = 0;
+
+ if (offset > DRM_PROC_LIMIT) {
+ *eof = 1;
+ return 0;
+ }
+
+ *start = &buf[offset];
+ *eof = 0;
+ DRM_PROC_PRINT("Inactive:\n");
+ list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list,
+ list)
+ {
+ struct drm_gem_object *obj = obj_priv->obj;
+ if (obj->name) {
+ DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n",
+ obj, obj->name,
+ obj->read_domains, obj->write_domain,
+ obj_priv->last_rendering_seqno);
+ } else {
+ DRM_PROC_PRINT(" %p: %08x %08x %d\n", obj,
+ obj->read_domains, obj->write_domain,
+ obj_priv->last_rendering_seqno);
+ }
+ }
+ if (len > request + offset)
+ return request;
+ *eof = 1;
+ return len - offset;
+}
+
+static int i915_gem_request_info(char *buf, char **start, off_t offset,
+ int request, int *eof, void *data)
+{
+ struct drm_minor *minor = (struct drm_minor *) data;
+ struct drm_device *dev = minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_request *gem_request;
+ int len = 0;
+
+ if (offset > DRM_PROC_LIMIT) {
+ *eof = 1;
+ return 0;
+ }
+
+ *start = &buf[offset];
+ *eof = 0;
+ DRM_PROC_PRINT("Request:\n");
+ list_for_each_entry(gem_request, &dev_priv->mm.request_list,
+ list)
+ {
+ DRM_PROC_PRINT(" %d @ %d %08x\n",
+ gem_request->seqno,
+ (int) (jiffies - gem_request->emitted_jiffies),
+ gem_request->flush_domains);
+ }
+ if (len > request + offset)
+ return request;
+ *eof = 1;
+ return len - offset;
+}
+
+static int i915_gem_seqno_info(char *buf, char **start, off_t offset,
+ int request, int *eof, void *data)
+{
+ struct drm_minor *minor = (struct drm_minor *) data;
+ struct drm_device *dev = minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int len = 0;
+
+ if (offset > DRM_PROC_LIMIT) {
+ *eof = 1;
+ return 0;
+ }
+
+ *start = &buf[offset];
+ *eof = 0;
+ DRM_PROC_PRINT("Current sequence: %d\n", i915_get_gem_seqno(dev));
+ DRM_PROC_PRINT("Waiter sequence: %d\n",
+ dev_priv->mm.waiting_gem_seqno);
+ DRM_PROC_PRINT("IRQ sequence: %d\n", dev_priv->mm.irq_gem_seqno);
+ if (len > request + offset)
+ return request;
+ *eof = 1;
+ return len - offset;
+}
+
+
+static int i915_interrupt_info(char *buf, char **start, off_t offset,
+ int request, int *eof, void *data)
+{
+ struct drm_minor *minor = (struct drm_minor *) data;
+ struct drm_device *dev = minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int len = 0;
+
+ if (offset > DRM_PROC_LIMIT) {
+ *eof = 1;
+ return 0;
+ }
+
+ *start = &buf[offset];
+ *eof = 0;
+ DRM_PROC_PRINT("Interrupt enable: %08x\n",
+ I915_READ(IER));
+ DRM_PROC_PRINT("Interrupt identity: %08x\n",
+ I915_READ(IIR));
+ DRM_PROC_PRINT("Interrupt mask: %08x\n",
+ I915_READ(IMR));
+ DRM_PROC_PRINT("Pipe A stat: %08x\n",
+ I915_READ(PIPEASTAT));
+ DRM_PROC_PRINT("Pipe B stat: %08x\n",
+ I915_READ(PIPEBSTAT));
+ DRM_PROC_PRINT("Interrupts received: %d\n",
+ atomic_read(&dev_priv->irq_received));
+ DRM_PROC_PRINT("Current sequence: %d\n",
+ i915_get_gem_seqno(dev));
+ DRM_PROC_PRINT("Waiter sequence: %d\n",
+ dev_priv->mm.waiting_gem_seqno);
+ DRM_PROC_PRINT("IRQ sequence: %d\n",
+ dev_priv->mm.irq_gem_seqno);
+ if (len > request + offset)
+ return request;
+ *eof = 1;
+ return len - offset;
+}
+
+static struct drm_proc_list {
+ /** file name */
+ const char *name;
+	/** proc callback */
+ int (*f) (char *, char **, off_t, int, int *, void *);
+} i915_gem_proc_list[] = {
+ {"i915_gem_active", i915_gem_active_info},
+ {"i915_gem_flushing", i915_gem_flushing_info},
+ {"i915_gem_inactive", i915_gem_inactive_info},
+ {"i915_gem_request", i915_gem_request_info},
+ {"i915_gem_seqno", i915_gem_seqno_info},
+ {"i915_gem_interrupt", i915_interrupt_info},
+};
+
+#define I915_GEM_PROC_ENTRIES ARRAY_SIZE(i915_gem_proc_list)
+
+int i915_gem_proc_init(struct drm_minor *minor)
+{
+ struct proc_dir_entry *ent;
+ int i, j;
+
+ for (i = 0; i < I915_GEM_PROC_ENTRIES; i++) {
+ ent = create_proc_entry(i915_gem_proc_list[i].name,
+ S_IFREG | S_IRUGO, minor->dev_root);
+ if (!ent) {
+ DRM_ERROR("Cannot create /proc/dri/.../%s\n",
+ i915_gem_proc_list[i].name);
+ for (j = 0; j < i; j++)
+				remove_proc_entry(i915_gem_proc_list[j].name,
+ minor->dev_root);
+ return -1;
+ }
+ ent->read_proc = i915_gem_proc_list[i].f;
+ ent->data = minor;
+ }
+ return 0;
+}
+
+void i915_gem_proc_cleanup(struct drm_minor *minor)
+{
+ int i;
+
+ if (!minor->dev_root)
+ return;
+
+ for (i = 0; i < I915_GEM_PROC_ENTRIES; i++)
+ remove_proc_entry(i915_gem_proc_list[i].name, minor->dev_root);
+}
diff --git a/linux-core/i915_gem_tiling.c b/linux-core/i915_gem_tiling.c
new file mode 100644
index 00000000..c5825fbb
--- /dev/null
+++ b/linux-core/i915_gem_tiling.c
@@ -0,0 +1,309 @@
+/*
+ * Copyright © 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+
+/** @file i915_gem_tiling.c
+ *
+ * Support for managing tiling state of buffer objects.
+ *
+ * The idea behind tiling is to increase cache hit rates by rearranging
+ * pixel data so that a group of pixel accesses are in the same cacheline.
+ * Performance improvements from doing this on the back/depth buffer are on
+ * the order of 30%.
+ *
+ * Intel architectures make this somewhat more complicated, though, by
+ * adjustments made to addressing of data when the memory is in interleaved
+ * mode (matched pairs of DIMMS) to improve memory bandwidth.
+ * For interleaved memory, the CPU sends every sequential 64 bytes
+ * to an alternate memory channel so it can get the bandwidth from both.
+ *
+ * The GPU also rearranges its accesses for increased bandwidth to interleaved
+ * memory, and it matches what the CPU does for non-tiled. However, when tiled
+ * it does it a little differently, since one walks addresses not just in the
+ * X direction but also Y. So, along with alternating channels when bit
+ * 6 of the address flips, it also alternates when other bits flip -- Bits 9
+ * (every 512 bytes, an X tile scanline) and 10 (every two X tile scanlines)
+ * are common to both the 915 and 965-class hardware.
+ *
+ * The CPU sometimes XORs in higher bits as well, to improve
+ * bandwidth doing strided access like we do so frequently in graphics. This
+ * is called "Channel XOR Randomization" in the MCH documentation. The result
+ * is that the CPU is XORing in either bit 11 or bit 17 to bit 6 of its address
+ * decode.
+ *
+ * All of this bit 6 XORing has an effect on our memory management,
+ * as we need to make sure that the 3d driver can correctly address object
+ * contents.
+ *
+ * If we don't have interleaved memory, all tiling is safe and no swizzling is
+ * required.
+ *
+ * When bit 17 is XORed in, we simply refuse to tile at all. Bit
+ * 17 is not just a page offset, so as we page an object out and back in,
+ * individual pages in it will have different bit 17 addresses, resulting in
+ * each 64 bytes being swapped with its neighbor!
+ *
+ * Otherwise, if interleaved, we have to tell the 3d driver what address
+ * swizzling it needs to do, since it's writing with the CPU to the pages
+ * (bit 6 and potentially bit 11 XORed in), and the GPU is reading from the
+ * pages (bit 6, 9, and 10 XORed in), resulting in a cumulative bit swizzling
+ * required by the CPU of XORing in bit 6, 9, 10, and potentially 11, in order
+ * to match what the GPU expects.
+ */
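
To make the cumulative swizzle concrete, here is a minimal sketch (editorial, not part of this patch) of folding a reported swizzle mode into bit 6 of a byte offset. The helper name is invented; the constants are the I915_BIT_6_SWIZZLE_* values this series adds to i915_drm.h.

	static unsigned long
	example_swizzle_bit6(unsigned long offset, uint32_t swizzle_mode)
	{
		unsigned long bit6;

		switch (swizzle_mode) {
		case I915_BIT_6_SWIZZLE_9:
			bit6 = (offset >> 9) & 1;
			break;
		case I915_BIT_6_SWIZZLE_9_10:
			bit6 = ((offset >> 9) ^ (offset >> 10)) & 1;
			break;
		case I915_BIT_6_SWIZZLE_9_11:
			bit6 = ((offset >> 9) ^ (offset >> 11)) & 1;
			break;
		case I915_BIT_6_SWIZZLE_9_10_11:
			bit6 = ((offset >> 9) ^ (offset >> 10) ^
				(offset >> 11)) & 1;
			break;
		default:	/* I915_BIT_6_SWIZZLE_NONE */
			return offset;
		}
		/* When the XOR of the high bits is set, the two 64-byte
		 * halves of the cacheline trade places.
		 */
		return offset ^ (bit6 << 6);
	}

A CPU mapping of a tiled object would apply this to every address before reading or writing, which is exactly the information the set_tiling ioctl reports back to userspace.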
+
+/**
+ * Detects bit 6 swizzling of address lookup between IGD access and CPU
+ * access through main memory.
+ */
+void
+i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct pci_dev *bridge;
+ uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
+ uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
+ int mchbar_offset;
+ char __iomem *mchbar;
+ int ret;
+
+ bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
+ if (bridge == NULL) {
+ DRM_ERROR("Couldn't get bridge device\n");
+ return;
+ }
+
+ ret = pci_enable_device(bridge);
+ if (ret != 0) {
+ DRM_ERROR("pci_enable_device failed: %d\n", ret);
+ return;
+ }
+
+ if (IS_I965G(dev))
+ mchbar_offset = 0x48;
+ else
+ mchbar_offset = 0x44;
+
+ /* Use resource 2 for our BAR that's stashed in a nonstandard location,
+	 * since the bridge would only ever use the standard BARs 0-1 (though
+	 * in practice it uses neither).
+ */
+ ret = pci_read_base(bridge, mchbar_offset, &bridge->resource[2]);
+ if (ret != 0) {
+ DRM_ERROR("pci_read_base failed: %d\n", ret);
+ return;
+ }
+
+ mchbar = ioremap(pci_resource_start(bridge, 2),
+ pci_resource_len(bridge, 2));
+ if (mchbar == NULL) {
+ DRM_ERROR("Couldn't map MCHBAR to determine tile swizzling\n");
+ return;
+ }
+
+ if (IS_I965G(dev) && !IS_I965GM(dev)) {
+ uint32_t chdecmisc;
+
+ /* On the 965, channel interleave appears to be determined by
+ * the flex bit. If flex is set, then the ranks (sides of a
+ * DIMM) of memory will be "stacked" (physical addresses walk
+ * through one rank then move on to the next, flipping channels
+ * or not depending on rank configuration). The GPU in this
+ * case does exactly the same addressing as the CPU.
+ *
+		 * Unlike the 945, channel randomization does not appear
+		 * to be available.
+ *
+ * XXX: While the G965 doesn't appear to do any interleaving
+ * when the DIMMs are not exactly matched, the G4x chipsets
+ * might be for "L-shaped" configurations, and will need to be
+ * detected.
+ *
+ * L-shaped configuration:
+ *
+ * +-----+
+ * | |
+ * |DIMM2| <-- non-interleaved
+ * +-----+
+ * +-----+ +-----+
+ * | | | |
+ * |DIMM0| |DIMM1| <-- interleaved area
+ * +-----+ +-----+
+ */
+ chdecmisc = readb(mchbar + CHDECMISC);
+
+ if (chdecmisc == 0xff) {
+ DRM_ERROR("Couldn't read from MCHBAR. "
+ "Disabling tiling.\n");
+ } else if (chdecmisc & CHDECMISC_FLEXMEMORY) {
+ swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+ swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+ } else {
+ swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+ swizzle_y = I915_BIT_6_SWIZZLE_9;
+ }
+ } else if (IS_I9XX(dev)) {
+ uint32_t dcc;
+
+ /* On 915-945 and GM965, channel interleave by the CPU is
+ * determined by DCC. The CPU will alternate based on bit 6
+ * in interleaved mode, and the GPU will then also alternate
+ * on bit 6, 9, and 10 for X, but the CPU may also optionally
+ * alternate based on bit 17 (XOR not disabled and XOR
+ * bit == 17).
+ */
+ dcc = readl(mchbar + DCC);
+ switch (dcc & DCC_ADDRESSING_MODE_MASK) {
+ case DCC_ADDRESSING_MODE_SINGLE_CHANNEL:
+ case DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC:
+ swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+ swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+ break;
+ case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED:
+ if (IS_I915G(dev) || IS_I915GM(dev) ||
+ dcc & DCC_CHANNEL_XOR_DISABLE) {
+ swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+ swizzle_y = I915_BIT_6_SWIZZLE_9;
+ } else if (IS_I965GM(dev)) {
+ /* GM965 only does bit 11-based channel
+ * randomization
+ */
+ swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
+ swizzle_y = I915_BIT_6_SWIZZLE_9_11;
+ } else {
+ /* Bit 17 or perhaps other swizzling */
+ swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
+ swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
+ }
+ break;
+ }
+ if (dcc == 0xffffffff) {
+ DRM_ERROR("Couldn't read from MCHBAR. "
+ "Disabling tiling.\n");
+ swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
+ swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
+ }
+ } else {
+ /* As far as we know, the 865 doesn't have these bit 6
+ * swizzling issues.
+ */
+ swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+ swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+ }
+
+ iounmap(mchbar);
+
+ dev_priv->mm.bit_6_swizzle_x = swizzle_x;
+ dev_priv->mm.bit_6_swizzle_y = swizzle_y;
+}
+
+/**
+ * Sets the tiling mode of an object, returning the required swizzling of
+ * bit 6 of addresses in the object.
+ */
+int
+i915_gem_set_tiling(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_i915_gem_set_tiling *args = data;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj_priv;
+
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ if (obj == NULL)
+ return -EINVAL;
+ obj_priv = obj->driver_private;
+
+ mutex_lock(&dev->struct_mutex);
+
+ if (args->tiling_mode == I915_TILING_NONE) {
+ obj_priv->tiling_mode = I915_TILING_NONE;
+ args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
+ } else {
+ if (args->tiling_mode == I915_TILING_X)
+ args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
+ else
+ args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
+ /* If we can't handle the swizzling, make it untiled. */
+ if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) {
+ args->tiling_mode = I915_TILING_NONE;
+ args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
+ }
+ }
+ obj_priv->tiling_mode = args->tiling_mode;
+
+ mutex_unlock(&dev->struct_mutex);
+
+ drm_gem_object_unreference(obj);
+
+ return 0;
+}
+
+/**
+ * Returns the current tiling mode and required bit 6 swizzling for the object.
+ */
+int
+i915_gem_get_tiling(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_i915_gem_get_tiling *args = data;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj_priv;
+
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ if (obj == NULL)
+ return -EINVAL;
+ obj_priv = obj->driver_private;
+
+ mutex_lock(&dev->struct_mutex);
+
+ args->tiling_mode = obj_priv->tiling_mode;
+ switch (obj_priv->tiling_mode) {
+ case I915_TILING_X:
+ args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
+ break;
+ case I915_TILING_Y:
+ args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
+ break;
+ case I915_TILING_NONE:
+ args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
+ break;
+ default:
+ DRM_ERROR("unknown tiling mode\n");
+ }
+
+ mutex_unlock(&dev->struct_mutex);
+
+ drm_gem_object_unreference(obj);
+
+ return 0;
+}
diff --git a/linux-core/i915_opregion.c b/linux-core/i915_opregion.c
index d015db28..1fa599ea 100644
--- a/linux-core/i915_opregion.c
+++ b/linux-core/i915_opregion.c
@@ -248,22 +248,21 @@ void opregion_enable_asle(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct opregion_asle *asle = dev_priv->opregion.asle;
- u32 mask = 0;
if (asle) {
- u32 pipeb_stats = I915_READ(PIPEBSTAT);
if (IS_MOBILE(dev)) {
+ u32 pipeb_stats = I915_READ(PIPEBSTAT);
/* Some hardware uses the legacy backlight controller
to signal interrupts, so we need to set up pipe B
to generate an IRQ on writes */
- I915_WRITE(PIPEBSTAT, pipeb_stats |=
- I915_LEGACY_BLC_EVENT_ENABLE);
- mask = I915_ASLE_INTERRUPT |
- I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
- } else
- mask = I915_ASLE_INTERRUPT;
-
- dev_priv->irq_mask_reg &= ~mask;
+ pipeb_stats |= I915_LEGACY_BLC_EVENT_ENABLE;
+ I915_WRITE(PIPEBSTAT, pipeb_stats);
+
+ dev_priv->irq_mask_reg &=
+ ~I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
+ }
+
+ dev_priv->irq_mask_reg &= ~I915_ASLE_INTERRUPT;
asle->tche = ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN |
ASLE_PFMB_EN;
diff --git a/linux-core/nouveau_bo.c b/linux-core/nouveau_bo.c
index 86347e03..ab3b23a4 100644
--- a/linux-core/nouveau_bo.c
+++ b/linux-core/nouveau_bo.c
@@ -229,7 +229,7 @@ out_cleanup:
if (tmp_mem.mm_node) {
mutex_lock(&dev->struct_mutex);
if (tmp_mem.mm_node != bo->pinned_node)
- drm_memrange_put_block(tmp_mem.mm_node);
+ drm_mm_put_block(tmp_mem.mm_node);
tmp_mem.mm_node = NULL;
mutex_unlock(&dev->struct_mutex);
}
diff --git a/linux-core/nouveau_sgdma.c b/linux-core/nouveau_sgdma.c
index 81704ea1..cc4d5a92 100644
--- a/linux-core/nouveau_sgdma.c
+++ b/linux-core/nouveau_sgdma.c
@@ -280,7 +280,7 @@ nouveau_sgdma_nottm_hack_init(struct drm_device *dev)
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct drm_ttm_backend *be;
struct drm_scatter_gather sgreq;
- struct drm_memrange_node mm_node;
+ struct drm_mm_node mm_node;
struct drm_bo_mem_reg mem;
int ret;
diff --git a/linux-core/radeon_buffer.c b/linux-core/radeon_buffer.c
index e9ba11d4..227a2fa0 100644
--- a/linux-core/radeon_buffer.c
+++ b/linux-core/radeon_buffer.c
@@ -216,7 +216,7 @@ out_cleanup:
if (tmp_mem.mm_node) {
mutex_lock(&dev->struct_mutex);
if (tmp_mem.mm_node != bo->pinned_node)
- drm_memrange_put_block(tmp_mem.mm_node);
+ drm_mm_put_block(tmp_mem.mm_node);
tmp_mem.mm_node = NULL;
mutex_unlock(&dev->struct_mutex);
}
diff --git a/shared-core/i915_dma.c b/shared-core/i915_dma.c
index 8d09828a..1fdc5e17 100644
--- a/shared-core/i915_dma.c
+++ b/shared-core/i915_dma.c
@@ -41,12 +41,12 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_ring_buffer *ring = &(dev_priv->ring);
u32 last_head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
- u32 acthd_reg = IS_I965G(dev) ? I965REG_ACTHD : I915REG_ACTHD;
+ u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
u32 last_acthd = I915_READ(acthd_reg);
u32 acthd;
int i;
- for (i = 0; i < 10000; i++) {
+ for (i = 0; i < 100000; i++) {
ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
acthd = I915_READ(acthd_reg);
ring->space = ring->head - (ring->tail + 8);
@@ -132,19 +132,6 @@ int i915_dma_cleanup(struct drm_device * dev)
dev_priv->ring.Size = 0;
}
- if (dev_priv->status_page_dmah) {
- drm_pci_free(dev, dev_priv->status_page_dmah);
- dev_priv->status_page_dmah = NULL;
- /* Need to rewrite hardware status page */
- I915_WRITE(0x02080, 0x1ffff000);
- }
-
- if (dev_priv->hws_agpoffset) {
- dev_priv->hws_agpoffset = 0;
- drm_core_ioremapfree(&dev_priv->hws_map, dev);
- I915_WRITE(0x02080, 0x1ffff000);
- }
-
return 0;
}
@@ -266,25 +253,6 @@ static int i915_initialize(struct drm_device * dev,
*/
dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A;
- /* Program Hardware Status Page */
- if (!I915_NEED_GFX_HWS(dev)) {
- dev_priv->status_page_dmah =
- drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);
-
- if (!dev_priv->status_page_dmah) {
- i915_dma_cleanup(dev);
- DRM_ERROR("Can not allocate hardware status page\n");
- return -ENOMEM;
- }
- dev_priv->hws_vaddr = dev_priv->status_page_dmah->vaddr;
- dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
-
- memset(dev_priv->hws_vaddr, 0, PAGE_SIZE);
-
- I915_WRITE(0x02080, dev_priv->dma_status_page);
- }
- DRM_DEBUG("Enabled hardware status page\n");
-
#ifdef I915_HAVE_BUFFER
if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
mutex_init(&dev_priv->cmdbuf_mutex);
@@ -320,14 +288,14 @@ static int i915_dma_resume(struct drm_device * dev)
}
/* Program Hardware Status Page */
- if (!dev_priv->hws_vaddr) {
+ if (!dev_priv->hw_status_page) {
DRM_ERROR("Can not find hardware status page\n");
return -EINVAL;
}
- DRM_DEBUG("hw status page @ %p\n", dev_priv->hws_vaddr);
+ DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);
- if (dev_priv->hws_agpoffset != 0)
- I915_WRITE(HWS_PGA, dev_priv->hws_agpoffset);
+ if (dev_priv->status_gfx_addr != 0)
+ I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
else
I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
DRM_DEBUG("Enabled hardware status page\n");
@@ -896,6 +864,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_CHIPSET_ID:
value = dev->pci_device;
break;
+ case I915_PARAM_HAS_GEM:
+ value = 1;
+ break;
default:
DRM_ERROR("Unknown parameter %d\n", param->param);
return -EINVAL;
@@ -1033,7 +1004,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
DRM_DEBUG("set status page addr 0x%08x\n", (u32)hws->addr);
- dev_priv->hws_agpoffset = hws->addr & (0x1ffff<<12);
+ dev_priv->status_gfx_addr = hws->addr & (0x1ffff<<12);
dev_priv->hws_map.offset = dev->agp->base + hws->addr;
dev_priv->hws_map.size = 4*1024;
@@ -1044,17 +1015,17 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
drm_core_ioremap(&dev_priv->hws_map, dev);
if (dev_priv->hws_map.handle == NULL) {
i915_dma_cleanup(dev);
- dev_priv->hws_agpoffset = 0;
+ dev_priv->status_gfx_addr = 0;
DRM_ERROR("can not ioremap virtual address for"
" G33 hw status page\n");
return -ENOMEM;
}
- dev_priv->hws_vaddr = dev_priv->hws_map.handle;
+ dev_priv->hw_status_page = dev_priv->hws_map.handle;
- memset(dev_priv->hws_vaddr, 0, PAGE_SIZE);
- I915_WRITE(HWS_PGA, dev_priv->hws_agpoffset);
- DRM_DEBUG("load hws at %p\n", dev_priv->hws_vaddr);
+ memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
+ I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
+ DRM_DEBUG("load hws at %p\n", dev_priv->hw_status_page);
return 0;
}
@@ -1095,6 +1066,8 @@ struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, 0),
DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, 0),
DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, 0),
+ DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, 0),
+ DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, 0),
};
int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
diff --git a/shared-core/i915_drm.h b/shared-core/i915_drm.h
index 8ba71687..53087b57 100644
--- a/shared-core/i915_drm.h
+++ b/shared-core/i915_drm.h
@@ -190,6 +190,8 @@ typedef struct drm_i915_sarea {
#define DRM_I915_GEM_MMAP 0x1e
#define DRM_I915_GEM_SET_DOMAIN 0x1f
#define DRM_I915_GEM_SW_FINISH 0x20
+#define DRM_I915_GEM_SET_TILING 0x21
+#define DRM_I915_GEM_GET_TILING 0x22
#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@@ -223,6 +225,8 @@ typedef struct drm_i915_sarea {
#define DRM_IOCTL_I915_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
#define DRM_IOCTL_I915_GEM_SET_DOMAIN DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain)
#define DRM_IOCTL_I915_GEM_SW_FINISH DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish)
+#define DRM_IOCTL_I915_GEM_SET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling)
+#define DRM_IOCTL_I915_GEM_GET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling)
/* Asynchronous page flipping:
*/
@@ -276,6 +280,7 @@ typedef struct drm_i915_irq_wait {
#define I915_PARAM_ALLOW_BATCHBUFFER 2
#define I915_PARAM_LAST_DISPATCH 3
#define I915_PARAM_CHIPSET_ID 4
+#define I915_PARAM_HAS_GEM 5
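
A hedged userspace sketch of probing this parameter before relying on the GEM object ioctls; it is editorial, not part of this patch, and fd is assumed to be an open DRM device descriptor.

	#include <sys/ioctl.h>
	#include "i915_drm.h"

	static int i915_has_gem(int fd)
	{
		int has_gem = 0;
		struct drm_i915_getparam gp = {
			.param = I915_PARAM_HAS_GEM,
			.value = &has_gem,
		};

		/* Older kernels reject the unknown param with -EINVAL;
		 * treat any failure as "no GEM".
		 */
		return ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0 && has_gem;
	}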
typedef struct drm_i915_getparam {
int param;
@@ -655,4 +660,64 @@ struct drm_i915_gem_busy {
uint32_t busy;
};
+#define I915_TILING_NONE 0
+#define I915_TILING_X 1
+#define I915_TILING_Y 2
+
+#define I915_BIT_6_SWIZZLE_NONE 0
+#define I915_BIT_6_SWIZZLE_9 1
+#define I915_BIT_6_SWIZZLE_9_10 2
+#define I915_BIT_6_SWIZZLE_9_11 3
+#define I915_BIT_6_SWIZZLE_9_10_11 4
+/* Not seen by userland */
+#define I915_BIT_6_SWIZZLE_UNKNOWN 5
+
+struct drm_i915_gem_set_tiling {
+ /** Handle of the buffer to have its tiling state updated */
+ uint32_t handle;
+
+ /**
+ * Tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
+ * I915_TILING_Y).
+ *
+ * This value is to be set on request, and will be updated by the
+ * kernel on successful return with the actual chosen tiling layout.
+ *
+ * The tiling mode may be demoted to I915_TILING_NONE when the system
+ * has bit 6 swizzling that can't be managed correctly by GEM.
+ *
+ * Buffer contents become undefined when changing tiling_mode.
+ */
+ uint32_t tiling_mode;
+
+ /**
+ * Stride in bytes for the object when in I915_TILING_X or
+ * I915_TILING_Y.
+ */
+ uint32_t stride;
+
+ /**
+ * Returned address bit 6 swizzling required for CPU access through
+ * mmap mapping.
+ */
+ uint32_t swizzle_mode;
+};
+
+struct drm_i915_gem_get_tiling {
+ /** Handle of the buffer to get tiling state for. */
+ uint32_t handle;
+
+ /**
+ * Current tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
+ * I915_TILING_Y).
+ */
+ uint32_t tiling_mode;
+
+ /**
+ * Returned address bit 6 swizzling required for CPU access through
+ * mmap mapping.
+ */
+ uint32_t swizzle_mode;
+};
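
A usage sketch for these two ioctls (editorial, not part of this patch): fd, handle, and stride are assumed to come from earlier DRM calls, and error handling is kept minimal.

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include "i915_drm.h"

	/* Ask for X tiling; return the bit 6 swizzle userspace must apply
	 * for CPU access through a mmap of the object.
	 */
	static uint32_t request_x_tiling(int fd, uint32_t handle,
					 uint32_t stride)
	{
		struct drm_i915_gem_set_tiling set_tiling = {
			.handle = handle,
			.tiling_mode = I915_TILING_X,
			.stride = stride,
		};

		if (ioctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &set_tiling))
			return I915_BIT_6_SWIZZLE_NONE;

		/* The kernel may have demoted the request to
		 * I915_TILING_NONE; tiling_mode reports what it chose.
		 */
		return set_tiling.swizzle_mode;
	}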
+
#endif /* _I915_DRM_H_ */
diff --git a/shared-core/i915_drv.h b/shared-core/i915_drv.h
index a0f73298..2d186558 100644
--- a/shared-core/i915_drv.h
+++ b/shared-core/i915_drv.h
@@ -39,7 +39,7 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20080611"
+#define DRIVER_DATE "20080730"
#if defined(__linux__)
#define I915_HAVE_FENCE
@@ -63,7 +63,7 @@
*/
#define DRIVER_MAJOR 1
#if defined(I915_HAVE_FENCE) && defined(I915_HAVE_BUFFER)
-#define DRIVER_MINOR 14
+#define DRIVER_MINOR 13
#else
#define DRIVER_MINOR 6
#endif
@@ -79,6 +79,14 @@ enum pipe {
struct drm_i915_validate_buffer;
#endif
+#define WATCH_COHERENCY 0
+#define WATCH_BUF 0
+#define WATCH_EXEC 0
+#define WATCH_LRU 0
+#define WATCH_RELOC 0
+#define WATCH_INACTIVE 0
+#define WATCH_PWRITE 0
+
struct drm_i915_ring_buffer {
int tail_mask;
unsigned long Size;
@@ -140,12 +148,12 @@ struct drm_i915_private {
struct drm_i915_ring_buffer ring;
struct drm_dma_handle *status_page_dmah;
+ void *hw_status_page;
dma_addr_t dma_status_page;
uint32_t counter;
- uint32_t hws_agpoffset;
+ uint32_t status_gfx_addr;
drm_local_map_t hws_map;
- void *hws_vaddr;
- struct drm_memrange_node *hws;
+ struct drm_gem_object *hws_obj;
unsigned int cpp;
@@ -166,7 +174,7 @@ struct drm_i915_private {
bool cursor_needs_physical;
- struct drm_memrange vram;
+ struct drm_mm vram;
#ifdef I915_HAVE_FENCE
uint32_t flush_sequence;
@@ -204,83 +212,6 @@ struct drm_i915_private {
int lvds_vbt:1;
int int_crt_support:1;
- struct {
- struct drm_memrange gtt_space;
-
- /**
- * List of objects currently involved in rendering from the
- * ringbuffer.
- *
- * A reference is held on the buffer while on this list.
- */
- struct list_head active_list;
-
- /**
- * List of objects which are not in the ringbuffer but which
- * still have a write_domain which needs to be flushed before
- * unbinding.
- *
- * A reference is held on the buffer while on this list.
- */
- struct list_head flushing_list;
-
- /**
- * LRU list of objects which are not in the ringbuffer and
- * are ready to unbind, but are still in the GTT.
- *
- * A reference is not held on the buffer while on this list,
- * as merely being GTT-bound shouldn't prevent its being
- * freed, and we'll pull it off the list in the free path.
- */
- struct list_head inactive_list;
-
- /**
- * List of breadcrumbs associated with GPU requests currently
- * outstanding.
- */
- struct list_head request_list;
-
- /**
- * We leave the user IRQ off as much as possible,
- * but this means that requests will finish and never
- * be retired once the system goes idle. Set a timer to
- * fire periodically while the ring is running. When it
- * fires, go retire requests.
- */
- struct delayed_work retire_work;
-
- uint32_t next_gem_seqno;
-
- /**
- * Waiting sequence number, if any
- */
- uint32_t waiting_gem_seqno;
-
- /**
- * Last seq seen at irq time
- */
- uint32_t irq_gem_seqno;
-
- /**
- * Flag if the X Server, and thus DRM, is not currently in
- * control of the device.
- *
- * This is set between LeaveVT and EnterVT. It needs to be
- * replaced with a semaphore. It also needs to be
- * transitioned away from for kernel modesetting.
- */
- int suspended;
-
- /**
- * Flag if the hardware appears to be wedged.
- *
- * This is set when attempts to idle the device timeout.
- * It prevents command submission from occuring and makes
- * every pending request fail
- */
- int wedged;
- } mm;
-
struct work_struct user_interrupt_task;
#ifdef __linux__
@@ -375,7 +306,89 @@ struct drm_i915_private {
u8 saveDACMASK;
u8 saveDACDATA[256*3]; /* 256 3-byte colors */
u8 saveCR[37];
-};
+
+ struct {
+ struct drm_mm gtt_space;
+
+ /**
+ * List of objects currently involved in rendering from the
+ * ringbuffer.
+ *
+ * A reference is held on the buffer while on this list.
+ */
+ struct list_head active_list;
+
+ /**
+ * List of objects which are not in the ringbuffer but which
+ * still have a write_domain which needs to be flushed before
+ * unbinding.
+ *
+ * A reference is held on the buffer while on this list.
+ */
+ struct list_head flushing_list;
+
+ /**
+ * LRU list of objects which are not in the ringbuffer and
+ * are ready to unbind, but are still in the GTT.
+ *
+ * A reference is not held on the buffer while on this list,
+ * as merely being GTT-bound shouldn't prevent its being
+ * freed, and we'll pull it off the list in the free path.
+ */
+ struct list_head inactive_list;
+
+ /**
+ * List of breadcrumbs associated with GPU requests currently
+ * outstanding.
+ */
+ struct list_head request_list;
+
+ /**
+ * We leave the user IRQ off as much as possible,
+ * but this means that requests will finish and never
+ * be retired once the system goes idle. Set a timer to
+ * fire periodically while the ring is running. When it
+ * fires, go retire requests.
+ */
+ struct delayed_work retire_work;
+
+ uint32_t next_gem_seqno;
+
+ /**
+ * Waiting sequence number, if any
+ */
+ uint32_t waiting_gem_seqno;
+
+ /**
+ * Last seq seen at irq time
+ */
+ uint32_t irq_gem_seqno;
+
+ /**
+ * Flag if the X Server, and thus DRM, is not currently in
+ * control of the device.
+ *
+ * This is set between LeaveVT and EnterVT. It needs to be
+ * replaced with a semaphore. It also needs to be
+ * transitioned away from for kernel modesetting.
+ */
+ int suspended;
+
+ /**
+ * Flag if the hardware appears to be wedged.
+ *
+ * This is set when attempts to idle the device timeout.
+	 * It prevents command submission from occurring and makes
+ * every pending request fail
+ */
+ int wedged;
+
+ /** Bit 6 swizzling required for X tiling */
+ uint32_t bit_6_swizzle_x;
+ /** Bit 6 swizzling required for Y tiling */
+ uint32_t bit_6_swizzle_y;
+ } mm;
+} drm_i915_private_t;
struct drm_i915_file_private {
struct {
@@ -396,7 +409,7 @@ struct drm_i915_gem_object {
struct drm_gem_object *obj;
/** Current space allocated to this object in the GTT, if any. */
- struct drm_memrange_node *gtt_space;
+ struct drm_mm_node *gtt_space;
/** This object's place on the active/flushing/inactive lists */
struct list_head list;
@@ -434,6 +447,15 @@ struct drm_i915_gem_object {
/** Breadcrumb of last rendering to the buffer. */
uint32_t last_rendering_seqno;
+
+ /** Current tiling mode for the object. */
+ uint32_t tiling_mode;
+
+ /**
+ * Flagging of which individual pages are valid in GEM_DOMAIN_CPU when
+ * GEM_DOMAIN_CPU is not in the object's read domain.
+ */
+ uint8_t *page_cpu_valid;
};
/**
@@ -588,6 +610,11 @@ int i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+int i915_gem_set_tiling(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_get_tiling(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+void i915_gem_load(struct drm_device *dev);
int i915_gem_proc_init(struct drm_minor *minor);
void i915_gem_proc_cleanup(struct drm_minor *minor);
int i915_gem_init_object(struct drm_gem_object *obj);
@@ -602,10 +629,27 @@ void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
int i915_gem_do_init(struct drm_device *dev, unsigned long start,
unsigned long end);
void i915_gem_retire_work_handler(struct work_struct *work);
+void i915_gem_clflush_object(struct drm_gem_object *obj);
#endif
extern unsigned int i915_fbpercrtc;
+/* i915_gem_tiling.c */
+void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
+
+/* i915_gem_debug.c */
+void i915_gem_dump_object(struct drm_gem_object *obj, int len,
+ const char *where, uint32_t mark);
+#if WATCH_INACTIVE
+void i915_verify_inactive(struct drm_device *dev, char *file, int line);
+#else
+#define i915_verify_inactive(dev,file,line)
+#endif
+void i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle);
+void i915_dump_lru(struct drm_device *dev, const char *where);
+
#ifdef __linux__
/* i915_opregion.c */
extern int intel_opregion_init(struct drm_device *dev);
@@ -686,6 +730,39 @@ void i915_ring_validate(struct drm_device *dev, const char *func, int line);
extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
+#define BREADCRUMB_BITS 31
+#define BREADCRUMB_MASK ((1U << BREADCRUMB_BITS) - 1)
+
+#define READ_BREADCRUMB(dev_priv) (((volatile u32*)(dev_priv->hw_status_page))[5])
+/**
+ * Reads a dword out of the status page, which is written to from the command
+ * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
+ * MI_STORE_DATA_IMM.
+ *
+ * The following dwords have a reserved meaning:
+ * 0: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
+ * 4: ring 0 head pointer
+ * 5: ring 1 head pointer (915-class)
+ * 6: ring 2 head pointer (915-class)
+ *
+ * The area from dword 0x10 to 0x3ff is available for driver usage.
+ */
+#define READ_HWSP(dev_priv, reg) (((volatile u32*)(dev_priv->hw_status_page))[reg])
+#define I915_GEM_HWS_INDEX 0x10
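
For illustration, a hedged sketch of consuming that reserved dword; the wrapper name is invented, but the macro and index are the ones defined above, and this mirrors what i915_get_gem_seqno() is expected to return.

	static inline uint32_t
	example_read_gem_seqno(struct drm_i915_private *dev_priv)
	{
		/* Dword 0x10 is the first driver-owned slot; an
		 * MI_STORE_DATA_INDEX from the ring writes each request's
		 * sequence number here as it completes.
		 */
		return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
	}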
+
+/* MCH MMIO space */
+/** 915-945 and GM965 MCH register controlling DRAM channel access */
+#define DCC 0x200
+#define DCC_ADDRESSING_MODE_SINGLE_CHANNEL (0 << 0)
+#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC (1 << 0)
+#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED (2 << 0)
+#define DCC_ADDRESSING_MODE_MASK (3 << 0)
+#define DCC_CHANNEL_XOR_DISABLE (1 << 10)
+
+/** 965 MCH register controlling DRAM channel configuration */
+#define CHDECMISC 0x111
+#define CHDECMISC_FLEXMEMORY (1 << 1)
+
/*
* The Bridge device's PCI config space has information about the
* fb aperture size and the amount of pre-reserved memory.
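
For context on the macros above, a minimal sketch of how the status-page dwords are read on the driver side. It assumes the dev_priv plumbing declared earlier in this header; the helper names are illustrative, not necessarily ones the driver defines.

    /* Illustrative helpers only.  The GEM sequence number lives at
     * I915_GEM_HWS_INDEX (0x10), the first dword available for driver
     * use; the legacy breadcrumb sits at dword 5 and is truncated to
     * BREADCRUMB_BITS.
     */
    static uint32_t i915_read_gem_seqno(drm_i915_private_t *dev_priv)
    {
            return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
    }

    static uint32_t i915_read_breadcrumb(drm_i915_private_t *dev_priv)
    {
            return READ_BREADCRUMB(dev_priv) & BREADCRUMB_MASK;
    }
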
@@ -793,27 +870,6 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define MI_BATCH_NON_SECURE_I965 (1<<8)
#define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0)
-#define BREADCRUMB_BITS 31
-#define BREADCRUMB_MASK ((1U << BREADCRUMB_BITS) - 1)
-
-#define READ_BREADCRUMB(dev_priv) (((volatile u32*)(dev_priv->hws_vaddr))[5])
-
-/**
- * Reads a dword out of the status page, which is written to from the command
- * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
- * MI_STORE_DATA_IMM.
- *
- * The following dwords have a reserved meaning:
- * 0: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
- * 4: ring 0 head pointer
- * 5: ring 1 head pointer (915-class)
- * 6: ring 2 head pointer (915-class)
- *
- * The area from dword 0x10 to 0x3ff is available for driver usage.
- */
-#define READ_HWSP(dev_priv, reg) (((volatile u32*)(dev_priv->hws_vaddr))[reg])
-#define I915_GEM_HWS_INDEX 0x10
-
/*
* 3D instructions used by the kernel
*/
@@ -837,6 +893,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define GFX_OP_STIPPLE ((0x3<<29)|(0x1d<<24)|(0x83<<16))
#define GFX_OP_MAP_INFO ((0x3<<29)|(0x1d<<24)|0x4)
#define GFX_OP_DESTBUFFER_VARS ((0x3<<29)|(0x1d<<24)|(0x85<<16)|0x0)
+#define GFX_OP_DESTBUFFER_INFO ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1)
#define GFX_OP_DRAWRECT_INFO ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3))
#define GFX_OP_DRAWRECT_INFO_I965 ((0x7900<<16)|0x2)
#define SRC_COPY_BLT_CMD ((2<<29)|(0x43<<22)|4)
@@ -880,8 +937,10 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define PRB1_HEAD 0x02044 /* 915+ only */
#define PRB1_START 0x02048 /* 915+ only */
#define PRB1_CTL 0x0204c /* 915+ only */
-#define I965REG_ACTHD 0x02074
+#define ACTHD_I965 0x02074
#define HWS_PGA 0x02080
+#define HWS_ADDRESS_MASK 0xfffff000
+#define HWS_START_ADDRESS_SHIFT 4
#define IPEIR 0x02088
#define NOPID 0x02094
#define HWSTAM 0x02098
@@ -911,7 +970,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define EMR 0x020b4
#define ESR 0x020b8
#define INSTPM 0x020c0
-#define I915REG_ACTHD 0x020C8
+#define ACTHD 0x020c8
#define FW_BLC 0x020d8
#define FW_BLC_SELF 0x020e0 /* 915+ only */
#define MI_ARB_STATE 0x020e4 /* 915+ only */
@@ -961,7 +1020,6 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
/*
* GPIO regs
*/
-
#define GPIOA 0x5010
#define GPIOB 0x5014
#define GPIOC 0x5018
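
The DCC and CHDECMISC definitions above exist to feed i915_gem_detect_bit_6_swizzle(). A hedged sketch of the kind of check that function makes; the helper and its decision are illustrative, not the commit's exact policy:

    /* Illustrative only: whether bit-6 swizzling applies to tiled
     * buffers depends on DRAM channel interleaving (DCC on 915/945 and
     * GM965, CHDECMISC_FLEXMEMORY on 965).
     */
    static int i915_dram_channels_interleaved(drm_i915_private_t *dev_priv)
    {
            uint32_t dcc = I915_READ(DCC);

            return (dcc & DCC_ADDRESSING_MODE_MASK) ==
                   DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED;
    }
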
diff --git a/shared-core/i915_init.c b/shared-core/i915_init.c
index 009d447b..4f2d3a4f 100644
--- a/shared-core/i915_init.c
+++ b/shared-core/i915_init.c
@@ -100,92 +100,6 @@ int i915_probe_agp(struct pci_dev *pdev, unsigned long *aperture_size,
return 0;
}
-static int i915_init_hwstatus(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_memrange_node *free_space;
- int ret = 0;
-
- /* Program Hardware Status Page */
- if (!IS_G33(dev)) {
- dev_priv->status_page_dmah =
- drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);
-
- if (!dev_priv->status_page_dmah) {
- DRM_ERROR("Can not allocate hardware status page\n");
- ret = -ENOMEM;
- goto out;
- }
- dev_priv->hws_vaddr = dev_priv->status_page_dmah->vaddr;
- dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
-
- I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
- } else {
- free_space = drm_memrange_search_free(&dev_priv->vram,
- PAGE_SIZE,
- PAGE_SIZE, 0);
- if (!free_space) {
- DRM_ERROR("No free vram available, aborting\n");
- ret = -ENOMEM;
- goto out;
- }
-
- dev_priv->hws = drm_memrange_get_block(free_space, PAGE_SIZE,
- PAGE_SIZE);
- if (!dev_priv->hws) {
- DRM_ERROR("Unable to allocate or pin hw status page\n");
- ret = -EINVAL;
- goto out;
- }
-
- dev_priv->hws_agpoffset = dev_priv->hws->start;
- dev_priv->hws_map.offset = dev->agp->base +
- dev_priv->hws->start;
- dev_priv->hws_map.size = PAGE_SIZE;
- dev_priv->hws_map.type= 0;
- dev_priv->hws_map.flags= 0;
- dev_priv->hws_map.mtrr = 0;
-
- drm_core_ioremap(&dev_priv->hws_map, dev);
- if (dev_priv->hws_map.handle == NULL) {
- dev_priv->hws_agpoffset = 0;
- DRM_ERROR("can not ioremap virtual addr for"
- "G33 hw status page\n");
- ret = -ENOMEM;
- goto out_free;
- }
- dev_priv->hws_vaddr = dev_priv->hws_map.handle;
- I915_WRITE(HWS_PGA, dev_priv->hws_agpoffset);
- }
-
- memset(dev_priv->hws_vaddr, 0, PAGE_SIZE);
-
- DRM_DEBUG("Enabled hardware status page\n");
-
- return 0;
-
-out_free:
- /* free hws */
-out:
- return ret;
-}
-
-static void i915_cleanup_hwstatus(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (!IS_G33(dev)) {
- if (dev_priv->status_page_dmah)
- drm_pci_free(dev, dev_priv->status_page_dmah);
- } else {
- if (dev_priv->hws_map.handle)
- drm_core_ioremapfree(&dev_priv->hws_map, dev);
- if (dev_priv->hws)
- drm_memrange_put_block(dev_priv->hws);
- }
- I915_WRITE(HWS_PGA, 0x1ffff000);
-}
-
static int i915_load_modeset_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -195,7 +109,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
i915_probe_agp(dev->pdev, &agp_size, &prealloc_size);
/* Basic memrange allocator for stolen space (aka vram) */
- drm_memrange_init(&dev_priv->vram, 0, prealloc_size);
+ drm_mm_init(&dev_priv->vram, 0, prealloc_size);
/* Let GEM Manage from end of prealloc space to end of aperture */
i915_gem_do_init(dev, prealloc_size, agp_size);
@@ -203,10 +117,6 @@ static int i915_load_modeset_init(struct drm_device *dev)
if (ret)
goto out;
- ret = i915_init_hwstatus(dev);
- if (ret)
- goto destroy_ringbuffer;
-
/* Allow hardware batchbuffers unless told otherwise.
*/
dev_priv->allow_batchbuffer = 1;
@@ -217,7 +127,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
if (dev_priv->wq == 0) {
DRM_DEBUG("Error\n");
ret = -EINVAL;
- goto destroy_hws;
+ goto destroy_ringbuffer;
}
ret = intel_init_bios(dev);
@@ -247,8 +157,6 @@ modeset_cleanup:
intel_modeset_cleanup(dev);
destroy_wq:
destroy_workqueue(dev_priv->wq);
-destroy_hws:
- i915_cleanup_hwstatus(dev);
destroy_ringbuffer:
i915_gem_cleanup_ringbuffer(dev);
out:
@@ -324,13 +232,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto free_priv;
}
- INIT_LIST_HEAD(&dev_priv->mm.active_list);
- INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
- INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
- INIT_LIST_HEAD(&dev_priv->mm.request_list);
- INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
- i915_gem_retire_work_handler);
- dev_priv->mm.next_gem_seqno = 1;
+ i915_gem_load(dev);
#ifdef __linux__
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
@@ -445,13 +347,12 @@ int i915_driver_unload(struct drm_device *dev)
dev_priv->sarea_bo = NULL;
}
#endif
- i915_cleanup_hwstatus(dev);
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
mutex_lock(&dev->struct_mutex);
i915_gem_cleanup_ringbuffer(dev);
mutex_unlock(&dev->struct_mutex);
- drm_memrange_takedown(&dev_priv->vram);
+ drm_mm_takedown(&dev_priv->vram);
i915_gem_lastclose(dev);
}
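
For readers tracking the drm_memrange → drm_mm rename, a minimal sketch of the renamed allocator's lifecycle as this file now uses it. The helper is illustrative; it mirrors the drm_memrange_* sequence deleted from i915_init_hwstatus() above, in the new spelling:

    /* Sketch: after drm_mm_init(&dev_priv->vram, 0, prealloc_size),
     * carve one page out of the stolen-memory range.  The caller frees
     * it with drm_mm_put_block(), and drm_mm_takedown(&dev_priv->vram)
     * tears the range down at unload.
     */
    static struct drm_mm_node *i915_vram_alloc_page(struct drm_i915_private *dev_priv)
    {
            struct drm_mm_node *free_space;

            free_space = drm_mm_search_free(&dev_priv->vram, PAGE_SIZE,
                                            PAGE_SIZE, 0);
            if (!free_space)
                    return NULL;
            return drm_mm_get_block(free_space, PAGE_SIZE, PAGE_SIZE);
    }
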
diff --git a/shared-core/i915_irq.c b/shared-core/i915_irq.c
index b4004a8f..00570e11 100644
--- a/shared-core/i915_irq.c
+++ b/shared-core/i915_irq.c
@@ -502,6 +502,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
I915_WRITE(IMR, dev_priv->irq_mask_reg);
(void) I915_READ(IMR);
}
+ return IRQ_NONE;
}
/*
@@ -768,7 +769,6 @@ int i915_enable_vblank(struct drm_device *dev, int plane)
PIPE_VBLANK_INTERRUPT_STATUS);
I915_WRITE(pipestat_reg, pipestat);
}
-
DRM_SPINLOCK(&dev_priv->user_irq_lock);
i915_enable_irq(dev_priv, mask_reg);
DRM_SPINUNLOCK(&dev_priv->user_irq_lock);
@@ -889,11 +889,6 @@ void i915_enable_interrupt (struct drm_device *dev)
opregion_enable_asle(dev);
#endif
#endif
-
- I915_WRITE(IMR, dev_priv->irq_mask_reg);
- I915_WRITE(IER, I915_INTERRUPT_ENABLE_MASK);
- (void) I915_READ (IER);
-
dev_priv->irq_enabled = 1;
}
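
The added return plugs the fall-off-the-end path in i915_driver_irq_handler(). For context, the usual Linux shared-interrupt shape, sketched here in illustrative form rather than as the handler's real body:

    /* Sketch: report IRQ_NONE when this device raised nothing, so the
     * kernel's spurious-interrupt accounting on shared lines stays
     * correct, and IRQ_HANDLED once our sources are acked and handled.
     */
    static irqreturn_t example_irq_handler(DRM_IRQ_ARGS)
    {
            struct drm_device *dev = (struct drm_device *) arg;
            struct drm_i915_private *dev_priv = dev->dev_private;
            u32 iir = I915_READ(IIR);

            if (iir == 0)
                    return IRQ_NONE;        /* not ours */
            I915_WRITE(IIR, iir);           /* ack, then dispatch sources */
            return IRQ_HANDLED;
    }
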
diff --git a/tests/gem_mmap.c b/tests/gem_mmap.c
index c3a51883..b5c15463 100644
--- a/tests/gem_mmap.c
+++ b/tests/gem_mmap.c
@@ -89,7 +89,7 @@ int main(int argc, char **argv)
mmap.size = 4096;
printf("Testing mmaping of bad object.\n");
ret = ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &mmap);
- assert(ret == -1 && errno == EINVAL);
+ assert(ret == -1 && errno == EBADF);
memset(&create, 0, sizeof(create));
create.size = OBJECT_SIZE;
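
For contrast with the bad-object check above, a sketch of the valid-handle path the rest of the test takes; the helper name and shape are assumed, not code from this commit:

    /* Sketch: map a real GEM object.  A handle the kernel cannot look
     * up now fails with errno == EBADF (it used to be EINVAL), which is
     * what the updated assert verifies.
     */
    static void *gem_mmap_object(int fd, uint32_t handle, uint64_t size)
    {
            struct drm_i915_gem_mmap arg;

            memset(&arg, 0, sizeof(arg));
            arg.handle = handle;
            arg.offset = 0;
            arg.size = size;
            if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &arg) != 0)
                    return NULL;
            return (void *)(uintptr_t) arg.addr_ptr;
    }
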
diff --git a/tests/gem_readwrite.c b/tests/gem_readwrite.c
index 54b25ea3..bd1d232b 100644
--- a/tests/gem_readwrite.c
+++ b/tests/gem_readwrite.c
@@ -94,6 +94,7 @@ int main(int argc, char **argv)
printf("Testing read beyond end of buffer.\n");
ret = do_read(fd, handle, buf, OBJECT_SIZE / 2, OBJECT_SIZE);
+ printf("%d %d\n", ret, errno);
assert(ret == -1 && errno == EINVAL);
printf("Testing full write of buffer\n");
@@ -120,6 +121,14 @@ int main(int argc, char **argv)
assert(ret == 0);
assert(memcmp(buf, expected + 512, 1024) == 0);
+ printf("Testing read of bad buffer handle\n");
+ ret = do_read(fd, 1234, buf, 0, 1024);
+ assert(ret == -1 && errno == EBADF);
+
+ printf("Testing write of bad buffer handle\n");
+ ret = do_write(fd, 1234, buf, 0, 1024);
+ assert(ret == -1 && errno == EBADF);
+
close(fd);
return 0;
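
The do_read()/do_write() helpers these new checks run through are presumably thin wrappers over the GEM pread/pwrite ioctls, along the following lines (a sketch; the real helpers are defined earlier in gem_readwrite.c):

    static int do_read(int fd, int handle, void *buf, int offset, int size)
    {
            struct drm_i915_gem_pread gem_pread;

            memset(&gem_pread, 0, sizeof(gem_pread));
            gem_pread.handle = handle;
            gem_pread.data_ptr = (uintptr_t) buf;
            gem_pread.size = size;
            gem_pread.offset = offset;
            /* Returns -1 with errno set on failure: EBADF for a bad
             * handle, EINVAL for an out-of-bounds range, matching the
             * asserts above.
             */
            return ioctl(fd, DRM_IOCTL_I915_GEM_PREAD, &gem_pread);
    }
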