 linux-core/drmP.h          |   4
 linux-core/drm_bo.c        |  70
 linux-core/drm_bo_move.c   |   3
 linux-core/drm_fence.c     |   4
 linux-core/drm_memory.c    |  51
 linux-core/drm_proc.c      |  14
 linux-core/drm_ttm.c       |  12
 shared-core/i915_dma.c     |  21
 shared-core/i915_drv.h     |  51
 shared-core/i915_irq.c     | 163
 shared-core/nouveau_fifo.c |  50
 shared-core/nouveau_mem.c  |  28
 shared-core/nv04_mc.c      |  13
 shared-core/radeon_state.c |   6
 14 files changed, 335 insertions(+), 155 deletions(-)
diff --git a/linux-core/drmP.h b/linux-core/drmP.h
index 7b21f8a1..8273c879 100644
--- a/linux-core/drmP.h
+++ b/linux-core/drmP.h
@@ -1076,8 +1076,10 @@ extern int drm_unbind_agp(DRM_AGP_MEM * handle);
extern void drm_free_memctl(size_t size);
extern int drm_alloc_memctl(size_t size);
extern void drm_query_memctl(uint64_t *cur_used,
+ uint64_t *emer_used,
uint64_t *low_threshold,
- uint64_t *high_threshold);
+ uint64_t *high_threshold,
+ uint64_t *emer_threshold);
extern void drm_init_memctl(size_t low_threshold,
size_t high_threshold,
size_t unit_size);
diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c
index 1d3f87e5..7e82080b 100644
--- a/linux-core/drm_bo.c
+++ b/linux-core/drm_bo.c
@@ -208,36 +208,35 @@ static int drm_bo_handle_move_mem(struct drm_buffer_object *bo,
if (ret)
goto out_err;
}
- }
-
- if ((bo->mem.mem_type == DRM_BO_MEM_LOCAL) && bo->ttm == NULL) {
-
- struct drm_bo_mem_reg *old_mem = &bo->mem;
- uint64_t save_flags = old_mem->flags;
- uint64_t save_proposed_flags = old_mem->proposed_flags;
- *old_mem = *mem;
- mem->mm_node = NULL;
- old_mem->proposed_flags = save_proposed_flags;
- DRM_FLAG_MASKED(save_flags, mem->flags, DRM_BO_MASK_MEMTYPE);
-
- } else if (!(old_man->flags & _DRM_FLAG_MEMTYPE_FIXED) &&
- !(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED)) {
+ if (bo->mem.mem_type == DRM_BO_MEM_LOCAL) {
+
+ struct drm_bo_mem_reg *old_mem = &bo->mem;
+ uint64_t save_flags = old_mem->flags;
+ uint64_t save_proposed_flags = old_mem->proposed_flags;
+
+ *old_mem = *mem;
+ mem->mm_node = NULL;
+ old_mem->proposed_flags = save_proposed_flags;
+ DRM_FLAG_MASKED(save_flags, mem->flags,
+ DRM_BO_MASK_MEMTYPE);
+ goto moved;
+ }
+
+ }
+ if (!(old_man->flags & _DRM_FLAG_MEMTYPE_FIXED) &&
+ !(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED))
ret = drm_bo_move_ttm(bo, evict, no_wait, mem);
-
- } else if (dev->driver->bo_driver->move) {
+ else if (dev->driver->bo_driver->move)
ret = dev->driver->bo_driver->move(bo, evict, no_wait, mem);
-
- } else {
-
+ else
ret = drm_bo_move_memcpy(bo, evict, no_wait, mem);
- }
-
if (ret)
goto out_err;
+moved:
if (old_is_pci || new_is_pci)
drm_bo_vm_post_move(bo);
@@ -789,6 +788,11 @@ static int drm_bo_mem_force_space(struct drm_device *dev,
}
node = drm_mm_get_block(node, num_pages, mem->page_alignment);
+ if (unlikely(!node)) {
+ mutex_unlock(&dev->struct_mutex);
+ return -ENOMEM;
+ }
+
mutex_unlock(&dev->struct_mutex);
mem->mm_node = node;
mem->mem_type = mem_type;
@@ -974,6 +978,20 @@ static int drm_bo_modify_proposed_flags (struct drm_buffer_object *bo,
return -EPERM;
}
+ if (likely(new_mask & DRM_BO_MASK_MEM) &&
+ (bo->mem.flags & DRM_BO_FLAG_NO_EVICT) &&
+ !DRM_SUSER(DRM_CURPROC)) {
+ if (likely(bo->mem.flags & new_flags & new_mask &
+ DRM_BO_MASK_MEM))
+ new_flags = (new_flags & ~DRM_BO_MASK_MEM) |
+ (bo->mem.flags & DRM_BO_MASK_MEM);
+ else {
+ DRM_ERROR("Incompatible memory type specification "
+ "for NO_EVICT buffer.\n");
+ return -EPERM;
+ }
+ }
+
if ((new_flags & DRM_BO_FLAG_NO_MOVE)) {
DRM_ERROR("DRM_BO_FLAG_NO_MOVE is not properly implemented yet.\n");
return -EPERM;
@@ -1482,6 +1500,9 @@ static int drm_buffer_object_validate(struct drm_buffer_object *bo,
if (ret) {
if (ret != -EAGAIN)
DRM_ERROR("Failed moving buffer.\n");
+ if (ret == -ENOMEM)
+ DRM_ERROR("Out of aperture space or "
+ "DRM memory quota.\n");
return ret;
}
}
@@ -2748,7 +2769,7 @@ static int drm_bo_setup_vm_locked(struct drm_buffer_object *bo)
list->file_offset_node = drm_mm_search_free(&dev->offset_manager,
bo->mem.num_pages, 0, 0);
- if (!list->file_offset_node) {
+ if (unlikely(!list->file_offset_node)) {
drm_bo_takedown_vm_locked(bo);
return -ENOMEM;
}
@@ -2756,6 +2777,11 @@ static int drm_bo_setup_vm_locked(struct drm_buffer_object *bo)
list->file_offset_node = drm_mm_get_block(list->file_offset_node,
bo->mem.num_pages, 0);
+ if (unlikely(!list->file_offset_node)) {
+ drm_bo_takedown_vm_locked(bo);
+ return -ENOMEM;
+ }
+
list->hash.key = list->file_offset_node->start;
if (drm_ht_insert_item(&dev->map_hash, &list->hash)) {
drm_bo_takedown_vm_locked(bo);
diff --git a/linux-core/drm_bo_move.c b/linux-core/drm_bo_move.c
index 536ff5d3..f10549ab 100644
--- a/linux-core/drm_bo_move.c
+++ b/linux-core/drm_bo_move.c
@@ -42,7 +42,6 @@ static void drm_bo_free_old_node(struct drm_buffer_object *bo)
if (old_mem->mm_node && (old_mem->mm_node != bo->pinned_node)) {
mutex_lock(&bo->dev->struct_mutex);
drm_mm_put_block(old_mem->mm_node);
- old_mem->mm_node = NULL;
mutex_unlock(&bo->dev->struct_mutex);
}
old_mem->mm_node = NULL;
@@ -57,7 +56,7 @@ int drm_bo_move_ttm(struct drm_buffer_object *bo,
uint64_t save_proposed_flags = old_mem->proposed_flags;
int ret;
- if (old_mem->mem_type == DRM_BO_MEM_TT) {
+ if (old_mem->mem_type != DRM_BO_MEM_LOCAL) {
if (evict)
drm_ttm_evict(ttm);
else
diff --git a/linux-core/drm_fence.c b/linux-core/drm_fence.c
index 9d80327f..0ca0c408 100644
--- a/linux-core/drm_fence.c
+++ b/linux-core/drm_fence.c
@@ -520,8 +520,10 @@ int drm_fence_object_create(struct drm_device *dev, uint32_t fence_class,
struct drm_fence_manager *fm = &dev->fm;
fence = drm_ctl_calloc(1, sizeof(*fence), DRM_MEM_FENCE);
- if (!fence)
+ if (!fence) {
+ DRM_ERROR("Out of memory creating fence object\n");
return -ENOMEM;
+ }
ret = drm_fence_object_init(dev, fence_class, type, flags, fence);
if (ret) {
drm_fence_usage_deref_unlocked(&fence);
diff --git a/linux-core/drm_memory.c b/linux-core/drm_memory.c
index 402a680f..12e01414 100644
--- a/linux-core/drm_memory.c
+++ b/linux-core/drm_memory.c
@@ -39,8 +39,10 @@
static struct {
spinlock_t lock;
uint64_t cur_used;
+ uint64_t emer_used;
uint64_t low_threshold;
uint64_t high_threshold;
+ uint64_t emer_threshold;
} drm_memctl = {
.lock = SPIN_LOCK_UNLOCKED
};
@@ -59,14 +61,30 @@ static inline size_t drm_size_align(size_t size)
int drm_alloc_memctl(size_t size)
{
- int ret;
+ int ret = 0;
unsigned long a_size = drm_size_align(size);
+ unsigned long new_used = drm_memctl.cur_used + a_size;
spin_lock(&drm_memctl.lock);
- ret = ((drm_memctl.cur_used + a_size) > drm_memctl.high_threshold) ?
- -ENOMEM : 0;
- if (!ret)
- drm_memctl.cur_used += a_size;
+ if (unlikely(new_used > drm_memctl.high_threshold)) {
+ if (!DRM_SUSER(DRM_CURPROC) ||
+ (new_used + drm_memctl.emer_used > drm_memctl.emer_threshold) ||
+ (a_size > 2*PAGE_SIZE)) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /*
+ * Allow small root-only allocations, even if the
+ * high threshold is exceeded.
+ */
+
+ new_used -= drm_memctl.high_threshold;
+ drm_memctl.emer_used += new_used;
+ a_size -= new_used;
+ }
+ drm_memctl.cur_used += a_size;
+out:
spin_unlock(&drm_memctl.lock);
return ret;
}
@@ -77,19 +95,30 @@ void drm_free_memctl(size_t size)
unsigned long a_size = drm_size_align(size);
spin_lock(&drm_memctl.lock);
+ if (likely(a_size >= drm_memctl.emer_used)) {
+ a_size -= drm_memctl.emer_used;
+ drm_memctl.emer_used = 0;
+ } else {
+ drm_memctl.emer_used -= a_size;
+ a_size = 0;
+ }
drm_memctl.cur_used -= a_size;
spin_unlock(&drm_memctl.lock);
}
EXPORT_SYMBOL(drm_free_memctl);
void drm_query_memctl(uint64_t *cur_used,
+ uint64_t *emer_used,
uint64_t *low_threshold,
- uint64_t *high_threshold)
+ uint64_t *high_threshold,
+ uint64_t *emer_threshold)
{
spin_lock(&drm_memctl.lock);
*cur_used = drm_memctl.cur_used;
+ *emer_used = drm_memctl.emer_used;
*low_threshold = drm_memctl.low_threshold;
*high_threshold = drm_memctl.high_threshold;
+ *emer_threshold = drm_memctl.emer_threshold;
spin_unlock(&drm_memctl.lock);
}
EXPORT_SYMBOL(drm_query_memctl);
@@ -99,9 +128,12 @@ void drm_init_memctl(size_t p_low_threshold,
size_t unit_size)
{
spin_lock(&drm_memctl.lock);
+ drm_memctl.emer_used = 0;
drm_memctl.cur_used = 0;
drm_memctl.low_threshold = p_low_threshold * unit_size;
drm_memctl.high_threshold = p_high_threshold * unit_size;
+ drm_memctl.emer_threshold = (drm_memctl.high_threshold >> 4) +
+ drm_memctl.high_threshold;
spin_unlock(&drm_memctl.lock);
}
@@ -294,7 +326,12 @@ static void *agp_remap(unsigned long offset, unsigned long size,
return NULL;
}
#endif /* agp */
-
+#else
+static void *agp_remap(unsigned long offset, unsigned long size,
+ struct drm_device * dev)
+{
+ return NULL;
+}
#endif /* debug_memory */
void drm_core_ioremap(struct drm_map *map, struct drm_device *dev)
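(Aside on the drm_memory.c hunks above: drm_alloc_memctl() now lets small, root-only allocations exceed the high threshold by charging the overshoot to a separate emergency counter, capped at the high threshold plus one sixteenth, and drm_free_memctl() drains that counter first. Below is a minimal standalone sketch of that accounting, with an illustrative page size and a plain is_privileged flag standing in for DRM_SUSER(DRM_CURPROC); it is not the driver code itself.)

/* Illustrative sketch of the memctl accounting above; not the DRM code. */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_PAGE_SIZE 4096UL

struct memctl {
    uint64_t cur_used;        /* bytes charged against the normal budget */
    uint64_t emer_used;       /* bytes charged against the emergency budget */
    uint64_t high_threshold;  /* normal hard limit */
    uint64_t emer_threshold;  /* high threshold plus ~1/16 root-only headroom */
};

/* Mirrors drm_alloc_memctl(): small privileged allocations may spill
 * into the emergency budget once the high threshold is exceeded. */
static int memctl_alloc(struct memctl *m, size_t size, int is_privileged)
{
    uint64_t new_used = m->cur_used + size;

    if (new_used > m->high_threshold) {
        if (!is_privileged ||
            new_used + m->emer_used > m->emer_threshold ||
            size > 2 * SKETCH_PAGE_SIZE)
            return -1;                       /* would be -ENOMEM */
        /* Charge only the overshoot to the emergency counter. */
        uint64_t over = new_used - m->high_threshold;
        m->emer_used += over;
        size -= over;
    }
    m->cur_used += size;
    return 0;
}

/* Mirrors drm_free_memctl(): the emergency counter is drained first. */
static void memctl_free(struct memctl *m, size_t size)
{
    if ((uint64_t)size >= m->emer_used) {
        size -= m->emer_used;
        m->emer_used = 0;
    } else {
        m->emer_used -= size;
        size = 0;
    }
    m->cur_used -= size;
}

int main(void)
{
    struct memctl m = { 0, 0, 16 * SKETCH_PAGE_SIZE, 17 * SKETCH_PAGE_SIZE };

    memctl_alloc(&m, 15 * SKETCH_PAGE_SIZE, 0);        /* fits normally */
    int r = memctl_alloc(&m, 2 * SKETCH_PAGE_SIZE, 1); /* root spills into emergency */
    printf("root spill: %d, cur=%llu emer=%llu\n", r,
           (unsigned long long)m.cur_used, (unsigned long long)m.emer_used);
    memctl_free(&m, 2 * SKETCH_PAGE_SIZE);
    return 0;
}
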
diff --git a/linux-core/drm_proc.c b/linux-core/drm_proc.c
index e10501f2..b6748b9b 100644
--- a/linux-core/drm_proc.c
+++ b/linux-core/drm_proc.c
@@ -446,9 +446,10 @@ static int drm__objects_info(char *buf, char **start, off_t offset, int request,
struct drm_buffer_manager *bm = &dev->bm;
struct drm_fence_manager *fm = &dev->fm;
uint64_t used_mem;
+ uint64_t used_emer;
uint64_t low_mem;
uint64_t high_mem;
-
+ uint64_t emer_mem;
if (offset > DRM_PROC_LIMIT) {
*eof = 1;
@@ -477,7 +478,7 @@ static int drm__objects_info(char *buf, char **start, off_t offset, int request,
DRM_PROC_PRINT("Buffer objects are not supported by this driver.\n");
}
- drm_query_memctl(&used_mem, &low_mem, &high_mem);
+ drm_query_memctl(&used_mem, &used_emer, &low_mem, &high_mem, &emer_mem);
if (used_mem > 16*PAGE_SIZE) {
DRM_PROC_PRINT("Used object memory is %lu pages.\n",
@@ -486,10 +487,19 @@ static int drm__objects_info(char *buf, char **start, off_t offset, int request,
DRM_PROC_PRINT("Used object memory is %lu bytes.\n",
(unsigned long) used_mem);
}
+ if (used_emer > 16*PAGE_SIZE) {
+ DRM_PROC_PRINT("Used emergency memory is %lu pages.\n",
+ (unsigned long) (used_emer >> PAGE_SHIFT));
+ } else {
+ DRM_PROC_PRINT("Used emergency memory is %lu bytes.\n\n",
+ (unsigned long) used_emer);
+ }
DRM_PROC_PRINT("Soft object memory usage threshold is %lu pages.\n",
(unsigned long) (low_mem >> PAGE_SHIFT));
DRM_PROC_PRINT("Hard object memory usage threshold is %lu pages.\n",
(unsigned long) (high_mem >> PAGE_SHIFT));
+ DRM_PROC_PRINT("Emergency root only memory usage threshold is %lu pages.\n",
+ (unsigned long) (emer_mem >> PAGE_SHIFT));
DRM_PROC_PRINT("\n");
diff --git a/linux-core/drm_ttm.c b/linux-core/drm_ttm.c
index cc80b132..e991254f 100644
--- a/linux-core/drm_ttm.c
+++ b/linux-core/drm_ttm.c
@@ -299,13 +299,13 @@ int drm_ttm_populate(struct drm_ttm *ttm)
return 0;
be = ttm->be;
- if (ttm->page_flags & DRM_TTM_PAGE_WRITE) {
- for (i = 0; i < ttm->num_pages; ++i) {
- page = drm_ttm_get_page(ttm, i);
- if (!page)
- return -ENOMEM;
- }
+
+ for (i = 0; i < ttm->num_pages; ++i) {
+ page = drm_ttm_get_page(ttm, i);
+ if (!page)
+ return -ENOMEM;
}
+
be->func->populate(be, ttm->num_pages, ttm->pages, ttm->dummy_read_page);
ttm->state = ttm_unbound;
return 0;
diff --git a/shared-core/i915_dma.c b/shared-core/i915_dma.c
index 8237e145..c6164b0a 100644
--- a/shared-core/i915_dma.c
+++ b/shared-core/i915_dma.c
@@ -1180,7 +1180,6 @@ static int i915_handle_copyback(struct drm_device *dev,
buffers++;
}
}
-
return err;
}
@@ -1272,7 +1271,6 @@ static int i915_execbuffer(struct drm_device *dev, void *data,
struct drm_fence_arg *fence_arg = &exec_buf->fence_arg;
int num_buffers;
int ret;
- struct drm_i915_validate_buffer *buffers;
if (!dev_priv->allow_batchbuffer) {
DRM_ERROR("Batchbuffer ioctl disabled\n");
@@ -1288,7 +1286,6 @@ static int i915_execbuffer(struct drm_device *dev, void *data,
if (exec_buf->num_buffers > dev_priv->max_validate_buffers)
return -EINVAL;
-
ret = drm_bo_read_lock(&dev->bm.bm_lock);
if (ret)
return ret;
@@ -1306,8 +1303,12 @@ static int i915_execbuffer(struct drm_device *dev, void *data,
num_buffers = exec_buf->num_buffers;
- buffers = drm_calloc(num_buffers, sizeof(struct drm_i915_validate_buffer), DRM_MEM_DRIVER);
- if (!buffers) {
+ if (!dev_priv->val_bufs) {
+ dev_priv->val_bufs =
+ vmalloc(sizeof(struct drm_i915_validate_buffer)*
+ dev_priv->max_validate_buffers);
+ }
+ if (!dev_priv->val_bufs) {
drm_bo_read_unlock(&dev->bm.bm_lock);
mutex_unlock(&dev_priv->cmdbuf_mutex);
return -ENOMEM;
@@ -1315,7 +1316,7 @@ static int i915_execbuffer(struct drm_device *dev, void *data,
/* validate buffer list + fixup relocations */
ret = i915_validate_buffer_list(file_priv, 0, exec_buf->ops_list,
- buffers, &num_buffers);
+ dev_priv->val_bufs, &num_buffers);
if (ret)
goto out_err0;
@@ -1324,7 +1325,7 @@ static int i915_execbuffer(struct drm_device *dev, void *data,
drm_agp_chipset_flush(dev);
/* submit buffer */
- batch->start = buffers[num_buffers-1].buffer->offset;
+ batch->start = dev_priv->val_bufs[num_buffers-1].buffer->offset;
DRM_DEBUG("i915 exec batchbuffer, start %x used %d cliprects %d\n",
batch->start, batch->used, batch->num_cliprects);
@@ -1341,13 +1342,11 @@ static int i915_execbuffer(struct drm_device *dev, void *data,
out_err0:
/* handle errors */
- ret = i915_handle_copyback(dev, buffers, num_buffers, ret);
+ ret = i915_handle_copyback(dev, dev_priv->val_bufs, num_buffers, ret);
mutex_lock(&dev->struct_mutex);
- i915_dereference_buffers_locked(buffers, num_buffers);
+ i915_dereference_buffers_locked(dev_priv->val_bufs, num_buffers);
mutex_unlock(&dev->struct_mutex);
- drm_free(buffers, (exec_buf->num_buffers * sizeof(struct drm_buffer_object *)), DRM_MEM_DRIVER);
-
mutex_unlock(&dev_priv->cmdbuf_mutex);
drm_bo_read_unlock(&dev->bm.bm_lock);
return ret;
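(The i915_execbuffer() hunks above stop allocating the validate-buffer array on every ioctl and instead vmalloc a per-device dev_priv->val_bufs array on first use, sized for max_validate_buffers and reused under cmdbuf_mutex. The following rough user-space sketch shows that allocate-once, reuse-under-a-lock pattern; the struct layout, calloc in place of vmalloc, and the 4096 limit are illustrative stand-ins, not the driver's definitions.)

/* Illustrative lazy-allocation pattern; stands in for the vmalloc'd
 * dev_priv->val_bufs array reused under cmdbuf_mutex in the hunk above. */
#include <pthread.h>
#include <stdlib.h>
#include <string.h>

struct validate_entry { int handle; long offset; };   /* placeholder payload */

struct device_state {
    pthread_mutex_t cmd_mutex;
    struct validate_entry *val_bufs;   /* allocated once, reused every call */
    size_t max_entries;
};

/* Returns a scratch array big enough for max_entries, allocating it on
 * first use.  Caller must hold cmd_mutex, which also serializes reuse. */
static struct validate_entry *get_val_bufs(struct device_state *dev)
{
    if (!dev->val_bufs)
        dev->val_bufs = calloc(dev->max_entries, sizeof(*dev->val_bufs));
    return dev->val_bufs;               /* NULL on allocation failure */
}

static int execbuffer(struct device_state *dev, size_t num)
{
    int ret = 0;

    if (num > dev->max_entries)
        return -1;

    pthread_mutex_lock(&dev->cmd_mutex);
    struct validate_entry *bufs = get_val_bufs(dev);
    if (!bufs) {
        ret = -1;                        /* -ENOMEM in the driver */
    } else {
        memset(bufs, 0, num * sizeof(*bufs));
        /* ... validate and submit using bufs[0..num-1] ... */
    }
    pthread_mutex_unlock(&dev->cmd_mutex);
    return ret;
}

int main(void)
{
    struct device_state dev = {
        .cmd_mutex = PTHREAD_MUTEX_INITIALIZER,
        .val_bufs = NULL,
        .max_entries = 4096,             /* e.g. I915_MAX_VALIDATE_BUFFERS */
    };

    execbuffer(&dev, 16);   /* first call allocates the array */
    execbuffer(&dev, 32);   /* later calls reuse it */
    free(dev.val_bufs);
    return 0;
}
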
diff --git a/shared-core/i915_drv.h b/shared-core/i915_drv.h
index fb855933..64faac9b 100644
--- a/shared-core/i915_drv.h
+++ b/shared-core/i915_drv.h
@@ -69,6 +69,7 @@
#ifdef I915_HAVE_BUFFER
#define I915_MAX_VALIDATE_BUFFERS 4096
+struct drm_i915_validate_buffer;
#endif
struct drm_i915_ring_buffer {
@@ -154,6 +155,7 @@ struct drm_i915_private {
unsigned int max_validate_buffers;
struct mutex cmdbuf_mutex;
size_t stolen_base;
+ struct drm_i915_validate_buffer *val_bufs;
#endif
DRM_SPINTYPE swaps_lock;
@@ -520,10 +522,23 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
/* Interrupt bits:
*/
-#define USER_INT_FLAG (1<<1)
-#define VSYNC_PIPEB_FLAG (1<<5)
-#define VSYNC_PIPEA_FLAG (1<<7)
-#define HWB_OOM_FLAG (1<<13) /* binner out of memory */
+#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18)
+#define I915_DISPLAY_PORT_INTERRUPT (1<<17)
+#define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT (1<<15)
+#define I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT (1<<14)
+#define I915_HWB_OOM_INTERRUPT (1<<13) /* binner out of memory */
+#define I915_SYNC_STATUS_INTERRUPT (1<<12)
+#define I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT (1<<11)
+#define I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT (1<<10)
+#define I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT (1<<9)
+#define I915_DISPLAY_PLANE_C_FLIP_PENDING_INTERRUPT (1<<8)
+#define I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT (1<<7)
+#define I915_DISPLAY_PIPE_A_EVENT_INTERRUPT (1<<6)
+#define I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT (1<<5)
+#define I915_DISPLAY_PIPE_B_EVENT_INTERRUPT (1<<4)
+#define I915_DEBUG_INTERRUPT (1<<2)
+#define I915_USER_INTERRUPT (1<<1)
+
#define I915REG_HWSTAM 0x02098
#define I915REG_INT_IDENTITY_R 0x020a4
@@ -630,6 +645,34 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define VCO_LOOP_DIV_BY_4M 0x00
#define VCO_LOOP_DIV_BY_16M 0x04
+#define I915_FIFO_UNDERRUN_STATUS (1UL<<31)
+#define I915_CRC_ERROR_ENABLE (1UL<<29)
+#define I915_CRC_DONE_ENABLE (1UL<<28)
+#define I915_GMBUS_EVENT_ENABLE (1UL<<27)
+#define I915_HOTPLUG_INTERRUPT_ENABLE (1UL<<26)
+#define I915_VSYNC_INTERRUPT_ENABLE (1UL<<25)
+#define I915_DISPLAY_LINE_COMPARE_ENABLE (1UL<<24)
+#define I915_DPST_EVENT_ENABLE (1UL<<23)
+#define I915_LEGACY_BLC_EVENT_ENABLE (1UL<<22)
+#define I915_ODD_FIELD_INTERRUPT_ENABLE (1UL<<21)
+#define I915_EVEN_FIELD_INTERRUPT_ENABLE (1UL<<20)
+#define I915_START_VBLANK_INTERRUPT_ENABLE (1UL<<18) /* 965 or later */
+#define I915_VBLANK_INTERRUPT_ENABLE (1UL<<17)
+#define I915_OVERLAY_UPDATED_ENABLE (1UL<<16)
+#define I915_CRC_ERROR_INTERRUPT_STATUS (1UL<<13)
+#define I915_CRC_DONE_INTERRUPT_STATUS (1UL<<12)
+#define I915_GMBUS_INTERRUPT_STATUS (1UL<<11)
+#define I915_HOTPLUG_INTERRUPT_STATUS (1UL<<10)
+#define I915_VSYNC_INTERRUPT_STATUS (1UL<<9)
+#define I915_DISPLAY_LINE_COMPARE_STATUS (1UL<<8)
+#define I915_DPST_EVENT_STATUS (1UL<<7)
+#define I915_LEGACY_BLC_EVENT_STATUS (1UL<<6)
+#define I915_ODD_FIELD_INTERRUPT_STATUS (1UL<<5)
+#define I915_EVEN_FIELD_INTERRUPT_STATUS (1UL<<4)
+#define I915_START_VBLANK_INTERRUPT_STATUS (1UL<<2) /* 965 or later */
+#define I915_VBLANK_INTERRUPT_STATUS (1UL<<1)
+#define I915_OVERLAY_UPDATED_STATUS (1UL<<0)
+
#define SRX_INDEX 0x3c4
#define SRX_DATA 0x3c5
#define SR01 1
diff --git a/shared-core/i915_irq.c b/shared-core/i915_irq.c
index 558693f1..abd8a7d3 100644
--- a/shared-core/i915_irq.c
+++ b/shared-core/i915_irq.c
@@ -30,16 +30,8 @@
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
-
#include "intel_drv.h"
-#define USER_INT_FLAG (1<<1)
-#define EVENT_PIPEB_FLAG (1<<4)
-#define VSYNC_PIPEB_FLAG (1<<5)
-#define EVENT_PIPEA_FLAG (1<<6)
-#define VSYNC_PIPEA_FLAG (1<<7)
-#define HOTPLUG_FLAG (1 << 17)
-
#define MAX_NOPID ((u32)~0)
/**
@@ -575,85 +567,90 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
struct drm_device *dev = (struct drm_device *) arg;
struct drm_i915_master_private *master_priv;
struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
- u32 temp = 0;
+ u32 iir;
u32 pipea_stats, pipeb_stats;
int hotplug = 0;
+ int vblank = 0;
/* On i8xx/i915 hw the IIR and IER are 16bit on i9xx its 32bit */
if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev))
- temp = I915_READ(I915REG_INT_IDENTITY_R);
+ iir = I915_READ(I915REG_INT_IDENTITY_R);
else
- temp = I915_READ16(I915REG_INT_IDENTITY_R);
+ iir = I915_READ16(I915REG_INT_IDENTITY_R);
- temp &= (dev_priv->irq_enable_reg | USER_INT_FLAG);
+ iir &= (dev_priv->irq_enable_reg | I915_USER_INTERRUPT);
- if (temp == 0)
+#if 0
+ DRM_DEBUG("flag=%08x\n", iir);
+#endif
+ if (iir == 0) {
+ DRM_DEBUG ("iir 0x%08x im 0x%08x ie 0x%08x pipea 0x%08x pipeb 0x%08x\n",
+ iir,
+ I915_READ(I915REG_INT_MASK_R),
+ I915_READ(I915REG_INT_ENABLE_R),
+ I915_READ(I915REG_PIPEASTAT),
+ I915_READ(I915REG_PIPEBSTAT));
return IRQ_NONE;
-
- pipea_stats = I915_READ(I915REG_PIPEASTAT);
- pipeb_stats = I915_READ(I915REG_PIPEBSTAT);
+ }
/*
* Clear the PIPE(A|B)STAT regs before the IIR otherwise
* we may get extra interrupts.
*/
- if (temp & VSYNC_PIPEA_FLAG) {
- drm_handle_vblank(dev, i915_get_plane(dev, 0));
-
- pipea_stats |= I915_VBLANK_INTERRUPT_ENABLE |
- I915_VBLANK_CLEAR;
- }
-
- if (temp & VSYNC_PIPEB_FLAG) {
- drm_handle_vblank(dev, i915_get_plane(dev, 1));
-
- pipeb_stats |= I915_VBLANK_INTERRUPT_ENABLE |
- I915_VBLANK_CLEAR;
- }
+ if (iir & I915_DISPLAY_PIPE_A_EVENT_INTERRUPT) {
+ pipea_stats = I915_READ(I915REG_PIPEASTAT);
+ if (pipea_stats & (I915_START_VBLANK_INTERRUPT_STATUS|
+ I915_VBLANK_INTERRUPT_STATUS))
+ {
+ vblank++;
+ drm_handle_vblank(dev, i915_get_plane(dev, 0));
+ }
- /* This is a global event, and not a pipe A event */
- if (temp & EVENT_PIPEA_FLAG) {
- if (pipea_stats & I915_HOTPLUG_CLEAR)
+ /* This is a global event, and not a pipe A event */
+ if (pipea_stats & I915_HOTPLUG_INTERRUPT_STATUS)
hotplug = 1;
- pipea_stats |= I915_HOTPLUG_INTERRUPT_ENABLE |
- I915_HOTPLUG_CLEAR;
-
+ I915_WRITE(I915REG_PIPEASTAT, pipea_stats);
}
- I915_WRITE(I915REG_PIPEASTAT, pipea_stats);
- (void) I915_READ(I915REG_PIPEASTAT);
- I915_WRITE(I915REG_PIPEBSTAT, pipeb_stats);
- (void) I915_READ(I915REG_PIPEBSTAT);
+ if (iir & I915_DISPLAY_PIPE_B_EVENT_INTERRUPT) {
+ pipeb_stats = I915_READ(I915REG_PIPEBSTAT);
+ if (pipeb_stats & (I915_START_VBLANK_INTERRUPT_STATUS|
+ I915_VBLANK_INTERRUPT_STATUS))
+ {
+ vblank++;
+ drm_handle_vblank(dev, i915_get_plane(dev, 1));
+ }
+ I915_WRITE(I915REG_PIPEBSTAT, pipeb_stats);
+ }
/* Clear the generated interrupt */
if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev)) {
- I915_WRITE(I915REG_INT_IDENTITY_R, temp);
+ I915_WRITE(I915REG_INT_IDENTITY_R, iir);
(void) I915_READ(I915REG_INT_IDENTITY_R);
} else {
- I915_WRITE16(I915REG_INT_IDENTITY_R, temp);
+ I915_WRITE16(I915REG_INT_IDENTITY_R, iir);
(void) I915_READ16(I915REG_INT_IDENTITY_R);
}
-
if (dev->primary->master) {
master_priv = dev->primary->master->driver_priv;
master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
}
- if (temp & USER_INT_FLAG) {
+ if (iir & I915_USER_INTERRUPT) {
DRM_WAKEUP(&dev_priv->irq_queue);
#ifdef I915_HAVE_FENCE
i915_fence_handler(dev);
#endif
}
- if (temp & (VSYNC_PIPEA_FLAG | VSYNC_PIPEB_FLAG)) {
+ if (vblank) {
if (dev_priv->swaps_pending > 0)
drm_locked_tasklet(dev, i915_vblank_tasklet);
}
- if ((temp & HOTPLUG_FLAG) || hotplug) {
+ if ((iir & I915_DISPLAY_PORT_INTERRUPT) || hotplug) {
u32 temp2 = 0;
DRM_INFO("Hotplug event received\n");
@@ -697,7 +694,7 @@ void i915_user_irq_on(struct drm_device *dev)
DRM_SPINLOCK(&dev_priv->user_irq_lock);
if (dev_priv->irq_enabled && (++dev_priv->user_irq_refcount == 1)){
- dev_priv->irq_enable_reg |= USER_INT_FLAG;
+ dev_priv->irq_enable_reg |= I915_USER_INTERRUPT;
if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev))
I915_WRITE(I915REG_INT_ENABLE_R, dev_priv->irq_enable_reg);
else
@@ -713,7 +710,7 @@ void i915_user_irq_off(struct drm_device *dev)
DRM_SPINLOCK(&dev_priv->user_irq_lock);
if (dev_priv->irq_enabled && (--dev_priv->user_irq_refcount == 0)) {
- // dev_priv->irq_enable_reg &= ~USER_INT_FLAG;
+ // dev_priv->irq_enable_reg &= ~I915_USER_INTERRUPT;
// if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev))
// I915_WRITE(I915REG_INT_ENABLE_R, dev_priv->irq_enable_reg);
// else
@@ -799,13 +796,17 @@ int i915_enable_vblank(struct drm_device *dev, int plane)
{
struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
int pipe = i915_get_pipe(dev, plane);
-
+ u32 pipestat_reg = 0;
+ u32 pipestat;
+
switch (pipe) {
case 0:
- dev_priv->irq_enable_reg |= VSYNC_PIPEA_FLAG;
+ pipestat_reg = I915REG_PIPEASTAT;
+ dev_priv->irq_enable_reg |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
break;
case 1:
- dev_priv->irq_enable_reg |= VSYNC_PIPEB_FLAG;
+ pipestat_reg = I915REG_PIPEBSTAT;
+ dev_priv->irq_enable_reg |= I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
break;
default:
DRM_ERROR("tried to enable vblank on non-existent pipe %d\n",
@@ -813,11 +814,31 @@ int i915_enable_vblank(struct drm_device *dev, int plane)
break;
}
+ if (pipestat_reg)
+ {
+ pipestat = I915_READ (pipestat_reg);
+ /*
+ * Older chips didn't have the start vblank interrupt,
+ * so fall back to the plain vblank interrupt on those.
+ */
+ if (IS_I965G (dev))
+ pipestat |= I915_START_VBLANK_INTERRUPT_ENABLE;
+ else
+ pipestat |= I915_VBLANK_INTERRUPT_ENABLE;
+ /*
+ * Clear any pending status
+ */
+ pipestat |= (I915_START_VBLANK_INTERRUPT_STATUS |
+ I915_VBLANK_INTERRUPT_STATUS);
+ I915_WRITE(pipestat_reg, pipestat);
+ }
+
if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev))
I915_WRITE(I915REG_INT_ENABLE_R, dev_priv->irq_enable_reg);
else
I915_WRITE16(I915REG_INT_ENABLE_R, dev_priv->irq_enable_reg);
+
return 0;
}
@@ -825,13 +846,17 @@ void i915_disable_vblank(struct drm_device *dev, int plane)
{
struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
int pipe = i915_get_pipe(dev, plane);
+ u32 pipestat_reg = 0;
+ u32 pipestat;
switch (pipe) {
case 0:
- dev_priv->irq_enable_reg &= ~VSYNC_PIPEA_FLAG;
+ pipestat_reg = I915REG_PIPEASTAT;
+ dev_priv->irq_enable_reg &= ~I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
break;
case 1:
- dev_priv->irq_enable_reg &= ~VSYNC_PIPEB_FLAG;
+ pipestat_reg = I915REG_PIPEBSTAT;
+ dev_priv->irq_enable_reg &= ~I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
break;
default:
DRM_ERROR("tried to disable vblank on non-existent pipe %d\n",
@@ -843,6 +868,19 @@ void i915_disable_vblank(struct drm_device *dev, int plane)
I915_WRITE(I915REG_INT_ENABLE_R, dev_priv->irq_enable_reg);
else
I915_WRITE16(I915REG_INT_ENABLE_R, dev_priv->irq_enable_reg);
+
+ if (pipestat_reg)
+ {
+ pipestat = I915_READ (pipestat_reg);
+ pipestat &= ~(I915_START_VBLANK_INTERRUPT_ENABLE |
+ I915_VBLANK_INTERRUPT_ENABLE);
+ /*
+ * Clear any pending status
+ */
+ pipestat |= (I915_START_VBLANK_INTERRUPT_STATUS |
+ I915_VBLANK_INTERRUPT_STATUS);
+ I915_WRITE(pipestat_reg, pipestat);
+ }
}
void i915_enable_interrupt (struct drm_device *dev)
@@ -850,20 +888,20 @@ void i915_enable_interrupt (struct drm_device *dev)
struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
struct drm_output *o;
- dev_priv->irq_enable_reg |= USER_INT_FLAG;
+ dev_priv->irq_enable_reg |= I915_USER_INTERRUPT;
if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev)) {
if (dev->mode_config.num_output)
- dev_priv->irq_enable_reg |= HOTPLUG_FLAG;
+ dev_priv->irq_enable_reg |= I915_DISPLAY_PORT_INTERRUPT;
} else {
if (dev->mode_config.num_output)
- dev_priv->irq_enable_reg |= EVENT_PIPEA_FLAG | EVENT_PIPEB_FLAG;
+ dev_priv->irq_enable_reg |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
/* Enable global interrupts for hotplug - not a pipeA event */
I915_WRITE(I915REG_PIPEASTAT, I915_READ(I915REG_PIPEASTAT) | I915_HOTPLUG_INTERRUPT_ENABLE | I915_HOTPLUG_CLEAR);
}
- if (dev_priv->irq_enable_reg & (HOTPLUG_FLAG | EVENT_PIPEA_FLAG | EVENT_PIPEB_FLAG)) {
+ if (dev_priv->irq_enable_reg & (I915_DISPLAY_PORT_INTERRUPT | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT)) {
u32 temp = 0;
if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev)) {
@@ -953,9 +991,9 @@ int i915_vblank_pipe_get(struct drm_device *dev, void *data,
flag = I915_READ16(I915REG_INT_ENABLE_R);
pipe->pipe = 0;
- if (flag & VSYNC_PIPEA_FLAG)
+ if (flag & I915_DISPLAY_PIPE_A_EVENT_INTERRUPT)
pipe->pipe |= DRM_I915_VBLANK_PIPE_A;
- if (flag & VSYNC_PIPEB_FLAG)
+ if (flag & I915_DISPLAY_PIPE_B_EVENT_INTERRUPT)
pipe->pipe |= DRM_I915_VBLANK_PIPE_B;
return 0;
@@ -1128,8 +1166,6 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
{
struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
- I915_WRITE(I915REG_PIPEASTAT, 0xffff);
- I915_WRITE(I915REG_PIPEBSTAT, 0xffff);
I915_WRITE16(I915REG_HWSTAM, 0xeffe);
if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev)) {
I915_WRITE(I915REG_INT_MASK_R, 0x0);
@@ -1185,9 +1221,10 @@ void i915_driver_irq_uninstall(struct drm_device * dev)
dev_priv->irq_enabled = 0;
-
- I915_WRITE(I915REG_PIPEASTAT, 0xffff);
- I915_WRITE(I915REG_PIPEBSTAT, 0xffff);
+ temp = I915_READ(I915REG_PIPEASTAT);
+ I915_WRITE(I915REG_PIPEASTAT, temp);
+ temp = I915_READ(I915REG_PIPEBSTAT);
+ I915_WRITE(I915REG_PIPEBSTAT, temp);
if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev)) {
I915_WRITE(I915REG_HWSTAM, 0xffffffff);
I915_WRITE(I915REG_INT_MASK_R, 0xffffffff);
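(The rewritten interrupt handler above only touches PIPE(A|B)STAT for pipes whose event bit is set in IIR, and it clears pending status by writing the read value back before acknowledging IIR, so the interrupt is not immediately re-raised. A minimal user-space sketch of that write-one-to-clear pattern follows; the fake register file and accessors are stand-ins, though the two status bit positions match the definitions in the hunks above.)

/* Illustrative write-one-to-clear status handling, as in the PIPESTAT
 * hunks above.  fake_regs[] stands in for MMIO space. */
#include <stdint.h>
#include <stdio.h>

#define PIPEASTAT              0          /* index into the fake register file */
#define VBLANK_STATUS          (1u << 1)
#define START_VBLANK_STATUS    (1u << 2)

static uint32_t fake_regs[4];

static uint32_t reg_read(uint32_t reg)              { return fake_regs[reg]; }
static void     reg_write(uint32_t reg, uint32_t v) { fake_regs[reg] &= ~v; } /* W1C */

/* Reads the pipe status, reports whether a vblank was pending, and writes
 * the value back so the set status bits are cleared.  In the driver this
 * happens before IIR is acknowledged. */
static int ack_pipe_vblank(uint32_t pipestat_reg)
{
    uint32_t status = reg_read(pipestat_reg);
    int vblank = !!(status & (VBLANK_STATUS | START_VBLANK_STATUS));

    reg_write(pipestat_reg, status);
    return vblank;
}

int main(void)
{
    fake_regs[PIPEASTAT] = VBLANK_STATUS;            /* pretend a vblank fired */
    printf("vblank pending: %d\n", ack_pipe_vblank(PIPEASTAT));
    printf("status after ack: 0x%x\n", fake_regs[PIPEASTAT]);
    return 0;
}
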
diff --git a/shared-core/nouveau_fifo.c b/shared-core/nouveau_fifo.c
index 056cb6d0..d8fda277 100644
--- a/shared-core/nouveau_fifo.c
+++ b/shared-core/nouveau_fifo.c
@@ -390,6 +390,34 @@ nouveau_fifo_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
return 0;
}
+static int
+nouveau_channel_idle(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_engine *engine = &dev_priv->Engine;
+ uint32_t caches;
+ int idle;
+
+ caches = NV_READ(NV03_PFIFO_CACHES);
+ NV_WRITE(NV03_PFIFO_CACHES, caches & ~1);
+
+ if (engine->fifo.channel_id(dev) != chan->id) {
+ struct nouveau_gpuobj *ramfc = chan->ramfc->gpuobj;
+
+ if (INSTANCE_RD(ramfc, 0) != INSTANCE_RD(ramfc, 1))
+ idle = 0;
+ else
+ idle = 1;
+ } else {
+ idle = (NV_READ(NV04_PFIFO_CACHE1_DMA_GET) ==
+ NV_READ(NV04_PFIFO_CACHE1_DMA_PUT));
+ }
+
+ NV_WRITE(NV03_PFIFO_CACHES, caches);
+ return idle;
+}
+
/* stops a fifo */
void nouveau_fifo_free(struct nouveau_channel *chan)
{
@@ -400,22 +428,9 @@ void nouveau_fifo_free(struct nouveau_channel *chan)
DRM_INFO("%s: freeing fifo %d\n", __func__, chan->id);
- /* Disable channel switching, if this channel isn't currenly
- * active re-enable it if there's still pending commands.
- * We really should do a manual context switch here, but I'm
- * not sure I trust our ability to do this reliably yet..
- */
- NV_WRITE(NV03_PFIFO_CACHES, 0);
- if (engine->fifo.channel_id(dev) != chan->id &&
- NV_READ(chan->get) != NV_READ(chan->put)) {
- NV_WRITE(NV03_PFIFO_CACHES, 1);
- }
-
/* Give the channel a chance to idle, wait 2s (hopefully) */
t_start = engine->timer.read(dev);
- while (NV_READ(chan->get) != NV_READ(chan->put) ||
- NV_READ(NV03_PFIFO_CACHE1_GET) !=
- NV_READ(NV03_PFIFO_CACHE1_PUT)) {
+ while (!nouveau_channel_idle(chan)) {
if (engine->timer.read(dev) - t_start > 2000000000ULL) {
DRM_ERROR("Failed to idle channel %d before destroy."
"Prepare for strangeness..\n", chan->id);
@@ -433,13 +448,6 @@ void nouveau_fifo_free(struct nouveau_channel *chan)
NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000000);
NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000000);
- /* stop the fifo, otherwise it could be running and
- * it will crash when removing gpu objects
- *XXX: from real-world evidence, absolutely useless..
- */
- NV_WRITE(chan->get, chan->pushbuf_base);
- NV_WRITE(chan->put, chan->pushbuf_base);
-
// FIXME XXX needs more code
engine->fifo.destroy_context(chan);
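(nouveau_fifo_free() above now polls the new nouveau_channel_idle() helper for up to two seconds before tearing the channel down, warning and carrying on if the timeout expires. A small sketch of that poll-until-idle-or-timeout pattern follows; the CPU clock and the counter-based predicate are stand-ins for the engine timer and the FIFO registers.)

/* Illustrative poll-with-timeout loop, mirroring the idle wait above. */
#include <stdint.h>
#include <time.h>

static uint64_t now_ns(void)
{
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

/* Polls is_idle(ctx) until it reports idle or timeout_ns elapses.
 * Returns 1 on idle, 0 on timeout (the driver warns and proceeds). */
static int wait_for_idle(int (*is_idle)(void *), void *ctx, uint64_t timeout_ns)
{
    uint64_t start = now_ns();

    while (!is_idle(ctx)) {
        if (now_ns() - start > timeout_ns)
            return 0;
    }
    return 1;
}

/* Toy predicate: pretends the unit goes idle after a few polls. */
static int counter_idle(void *ctx)
{
    int *remaining = ctx;
    return --(*remaining) <= 0;
}

int main(void)
{
    int polls = 5;
    return wait_for_idle(counter_idle, &polls, 2000000000ULL) ? 0 : 1;
}
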
diff --git a/shared-core/nouveau_mem.c b/shared-core/nouveau_mem.c
index 80b2990d..4e80ca46 100644
--- a/shared-core/nouveau_mem.c
+++ b/shared-core/nouveau_mem.c
@@ -300,6 +300,32 @@ uint64_t nouveau_mem_fb_amount(struct drm_device *dev)
return 0;
}
+static void nouveau_mem_reset_agp(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t saved_pci_nv_1, saved_pci_nv_19, pmc_enable;
+
+ saved_pci_nv_1 = NV_READ(NV04_PBUS_PCI_NV_1);
+ saved_pci_nv_19 = NV_READ(NV04_PBUS_PCI_NV_19);
+
+ /* clear busmaster bit */
+ NV_WRITE(NV04_PBUS_PCI_NV_1, saved_pci_nv_1 & ~0x4);
+ /* clear SBA and AGP bits */
+ NV_WRITE(NV04_PBUS_PCI_NV_19, saved_pci_nv_19 & 0xfffff0ff);
+
+ /* power cycle pgraph, if enabled */
+ pmc_enable = NV_READ(NV03_PMC_ENABLE);
+ if (pmc_enable & NV_PMC_ENABLE_PGRAPH) {
+ NV_WRITE(NV03_PMC_ENABLE, pmc_enable & ~NV_PMC_ENABLE_PGRAPH);
+ NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) |
+ NV_PMC_ENABLE_PGRAPH);
+ }
+
+ /* and restore (gives effect of resetting AGP) */
+ NV_WRITE(NV04_PBUS_PCI_NV_19, saved_pci_nv_19);
+ NV_WRITE(NV04_PBUS_PCI_NV_1, saved_pci_nv_1);
+}
+
static int
nouveau_mem_init_agp(struct drm_device *dev, int ttm)
{
@@ -308,6 +334,8 @@ nouveau_mem_init_agp(struct drm_device *dev, int ttm)
struct drm_agp_mode mode;
int ret;
+ nouveau_mem_reset_agp(dev);
+
ret = drm_agp_acquire(dev);
if (ret) {
DRM_ERROR("Unable to acquire AGP: %d\n", ret);
diff --git a/shared-core/nv04_mc.c b/shared-core/nv04_mc.c
index 766f3a33..24c1f7b3 100644
--- a/shared-core/nv04_mc.c
+++ b/shared-core/nv04_mc.c
@@ -7,25 +7,12 @@ int
nv04_mc_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- uint32_t saved_pci_nv_1, saved_pci_nv_19;
-
- saved_pci_nv_1 = NV_READ(NV04_PBUS_PCI_NV_1);
- saved_pci_nv_19 = NV_READ(NV04_PBUS_PCI_NV_19);
-
- /* clear busmaster bit */
- NV_WRITE(NV04_PBUS_PCI_NV_1, saved_pci_nv_1 & ~(0x00000001 << 2));
- /* clear SBA and AGP bits */
- NV_WRITE(NV04_PBUS_PCI_NV_19, saved_pci_nv_19 & 0xfffff0ff);
/* Power up everything, resetting each individual unit will
* be done later if needed.
*/
NV_WRITE(NV03_PMC_ENABLE, 0xFFFFFFFF);
- /* and restore (gives effect of resetting AGP) */
- NV_WRITE(NV04_PBUS_PCI_NV_19, saved_pci_nv_19);
- NV_WRITE(NV04_PBUS_PCI_NV_1, saved_pci_nv_1);
-
return 0;
}
diff --git a/shared-core/radeon_state.c b/shared-core/radeon_state.c
index 6ff952e1..7bcf210a 100644
--- a/shared-core/radeon_state.c
+++ b/shared-core/radeon_state.c
@@ -3126,12 +3126,14 @@ static int radeon_cp_setparam(struct drm_device *dev, void *data, struct drm_fil
DRM_DEBUG("color tiling disabled\n");
dev_priv->front_pitch_offset &= ~RADEON_DST_TILE_MACRO;
dev_priv->back_pitch_offset &= ~RADEON_DST_TILE_MACRO;
- dev_priv->sarea_priv->tiling_enabled = 0;
+ if (dev_priv->sarea_priv)
+ dev_priv->sarea_priv->tiling_enabled = 0;
} else if (sp->value == 1) {
DRM_DEBUG("color tiling enabled\n");
dev_priv->front_pitch_offset |= RADEON_DST_TILE_MACRO;
dev_priv->back_pitch_offset |= RADEON_DST_TILE_MACRO;
- dev_priv->sarea_priv->tiling_enabled = 1;
+ if (dev_priv->sarea_priv)
+ dev_priv->sarea_priv->tiling_enabled = 1;
}
break;
case RADEON_SETPARAM_PCIGART_LOCATION: