author    Eric Anholt <eric@anholt.net>    2008-08-08 14:05:01 -0700
committer Eric Anholt <eric@anholt.net>    2008-08-08 14:05:01 -0700
commit    e1b8e79796b172c356af98eb49107c8abbebfe5a (patch)
tree      4af674ca495ca93535f057ea4c0385c57825c2e4 /shared-core
parent    0c47151a571827905c34649208e22f8ec0175d62 (diff)
parent    46e9274e8538e5b0517f611dca99dde611f4e95d (diff)
Merge branch 'drm-gem'
Conflicts:
	shared-core/i915_dma.c

This brings in kernel support and the userland interface for Intel GEM.
Diffstat (limited to 'shared-core')
-rw-r--r--  shared-core/drm.h       |  31
-rw-r--r--  shared-core/i915_dma.c  | 260
-rw-r--r--  shared-core/i915_drm.h  | 321
-rw-r--r--  shared-core/i915_drv.h  | 325
-rw-r--r--  shared-core/i915_irq.c  | 104
5 files changed, 902 insertions, 139 deletions
diff --git a/shared-core/drm.h b/shared-core/drm.h
index 7f1ccd1d..94755e4b 100644
--- a/shared-core/drm.h
+++ b/shared-core/drm.h
@@ -959,6 +959,31 @@ struct drm_mm_info_arg {
uint64_t p_size;
};
+struct drm_gem_close {
+ /** Handle of the object to be closed. */
+ uint32_t handle;
+ uint32_t pad;
+};
+
+struct drm_gem_flink {
+ /** Handle for the object being named */
+ uint32_t handle;
+
+ /** Returned global name */
+ uint32_t name;
+};
+
+struct drm_gem_open {
+ /** Name of object being opened */
+ uint32_t name;
+
+ /** Returned handle for the object */
+ uint32_t handle;
+
+ /** Returned size of the object */
+ uint64_t size;
+};
+
/**
* \name Ioctls Definitions
*/
@@ -978,7 +1003,11 @@ struct drm_mm_info_arg {
#define DRM_IOCTL_GET_CLIENT DRM_IOWR(0x05, struct drm_client)
#define DRM_IOCTL_GET_STATS DRM_IOR( 0x06, struct drm_stats)
#define DRM_IOCTL_SET_VERSION DRM_IOWR(0x07, struct drm_set_version)
-#define DRM_IOCTL_MODESET_CTL DRM_IOW(0x08, struct drm_modeset_ctl)
+#define DRM_IOCTL_MODESET_CTL DRM_IOW(0x08, struct drm_modeset_ctl)
+
+#define DRM_IOCTL_GEM_CLOSE DRM_IOW (0x09, struct drm_gem_close)
+#define DRM_IOCTL_GEM_FLINK DRM_IOWR(0x0a, struct drm_gem_flink)
+#define DRM_IOCTL_GEM_OPEN DRM_IOWR(0x0b, struct drm_gem_open)
#define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, struct drm_unique)
#define DRM_IOCTL_AUTH_MAGIC DRM_IOW( 0x11, struct drm_auth)
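The three GEM ioctls above are the whole cross-process naming interface: FLINK publishes a global name for a process-local handle, OPEN turns a name back into a handle (and reports the object's size), and CLOSE drops a handle. A minimal userspace sketch, assuming an already-open DRM file descriptor and this drm.h; it uses plain ioctl(2), omits EINTR handling, and the function name is illustrative:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include "drm.h"

int gem_share_and_reopen(int fd, uint32_t handle)
{
	struct drm_gem_flink flink;
	struct drm_gem_open open_arg;
	struct drm_gem_close close_arg;

	memset(&flink, 0, sizeof(flink));
	flink.handle = handle;
	if (ioctl(fd, DRM_IOCTL_GEM_FLINK, &flink) != 0)
		return -1;		/* on success, flink.name holds the global name */

	memset(&open_arg, 0, sizeof(open_arg));
	open_arg.name = flink.name;	/* any client of this device can open it */
	if (ioctl(fd, DRM_IOCTL_GEM_OPEN, &open_arg) != 0)
		return -1;		/* kernel fills in handle and size */

	memset(&close_arg, 0, sizeof(close_arg));
	close_arg.handle = open_arg.handle;
	return ioctl(fd, DRM_IOCTL_GEM_CLOSE, &close_arg);
}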
diff --git a/shared-core/i915_dma.c b/shared-core/i915_dma.c
index 852b2437..5811fa3c 100644
--- a/shared-core/i915_dma.c
+++ b/shared-core/i915_dma.c
@@ -41,10 +41,14 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
drm_i915_private_t *dev_priv = dev->dev_private;
drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
u32 last_head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
+ u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
+ u32 last_acthd = I915_READ(acthd_reg);
+ u32 acthd;
int i;
- for (i = 0; i < 10000; i++) {
+ for (i = 0; i < 100000; i++) {
ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
+ acthd = I915_READ(acthd_reg);
ring->space = ring->head - (ring->tail + 8);
if (ring->space < 0)
ring->space += ring->Size;
@@ -54,13 +58,79 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
if (ring->head != last_head)
i = 0;
+ if (acthd != last_acthd)
+ i = 0;
+
last_head = ring->head;
- DRM_UDELAY(1);
+ last_acthd = acthd;
+ msleep_interruptible (10);
}
return -EBUSY;
}
+int i915_init_hardware_status(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ /* Program Hardware Status Page */
+ dev_priv->status_page_dmah =
+ drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);
+
+ if (!dev_priv->status_page_dmah) {
+ DRM_ERROR("Can not allocate hardware status page\n");
+ return -ENOMEM;
+ }
+ dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
+ dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
+
+ memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
+
+ I915_WRITE(0x02080, dev_priv->dma_status_page);
+ DRM_DEBUG("Enabled hardware status page\n");
+ return 0;
+}
+
+void i915_free_hardware_status(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ if (dev_priv->status_page_dmah) {
+ drm_pci_free(dev, dev_priv->status_page_dmah);
+ dev_priv->status_page_dmah = NULL;
+ /* Need to rewrite hardware status page */
+ I915_WRITE(0x02080, 0x1ffff000);
+ }
+
+ if (dev_priv->status_gfx_addr) {
+ dev_priv->status_gfx_addr = 0;
+ drm_core_ioremapfree(&dev_priv->hws_map, dev);
+ I915_WRITE(0x02080, 0x1ffff000);
+ }
+}
+
+#if I915_RING_VALIDATE
+/**
+ * Validate the cached ring tail value
+ *
+ * If the X server writes to the ring and DRM doesn't
+ * reload the head and tail pointers, it will end up writing
+ * data to the wrong place in the ring, causing havoc.
+ */
+void i915_ring_validate(struct drm_device *dev, const char *func, int line)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
+ u32 tail = I915_READ(LP_RING+RING_TAIL) & HEAD_ADDR;
+ u32 head = I915_READ(LP_RING+RING_HEAD) & HEAD_ADDR;
+
+ if (tail != ring->tail) {
+ DRM_ERROR("%s:%d head sw %x, hw %x. tail sw %x hw %x\n",
+ func, line,
+ ring->head, head, ring->tail, tail);
+ BUG_ON(1);
+ }
+}
+#endif
+
void i915_kernel_lost_context(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
@@ -80,7 +150,7 @@ static int i915_dma_cleanup(struct drm_device * dev)
* may not have been called from userspace and after dev_private
* is freed, it's too late.
*/
- if (dev->irq)
+ if (dev->irq_enabled)
drm_irq_uninstall(dev);
if (dev_priv->ring.virtual_start) {
@@ -90,18 +160,8 @@ static int i915_dma_cleanup(struct drm_device * dev)
dev_priv->ring.map.size = 0;
}
- if (dev_priv->status_page_dmah) {
- drm_pci_free(dev, dev_priv->status_page_dmah);
- dev_priv->status_page_dmah = NULL;
- /* Need to rewrite hardware status page */
- I915_WRITE(0x02080, 0x1ffff000);
- }
-
- if (dev_priv->status_gfx_addr) {
- dev_priv->status_gfx_addr = 0;
- drm_core_ioremapfree(&dev_priv->hws_map, dev);
- I915_WRITE(0x02080, 0x1ffff000);
- }
+ if (I915_NEED_GFX_HWS(dev))
+ i915_free_hardware_status(dev);
return 0;
}
@@ -182,14 +242,6 @@ static int i915_initialize(struct drm_device * dev,
return -EINVAL;
}
- if (init->mmio_offset != 0)
- dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset);
- if (!dev_priv->mmio_map) {
- i915_dma_cleanup(dev);
- DRM_ERROR("can not find mmio map!\n");
- return -EINVAL;
- }
-
#ifdef I915_HAVE_BUFFER
dev_priv->max_validate_buffers = I915_MAX_VALIDATE_BUFFERS;
#endif
@@ -203,27 +255,27 @@ static int i915_initialize(struct drm_device * dev,
dev_priv->sarea_priv = NULL;
}
- dev_priv->ring.Start = init->ring_start;
- dev_priv->ring.End = init->ring_end;
- dev_priv->ring.Size = init->ring_size;
- dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
+ if (init->ring_size != 0) {
+ dev_priv->ring.Size = init->ring_size;
+ dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
- dev_priv->ring.map.offset = init->ring_start;
- dev_priv->ring.map.size = init->ring_size;
- dev_priv->ring.map.type = 0;
- dev_priv->ring.map.flags = 0;
- dev_priv->ring.map.mtrr = 0;
+ dev_priv->ring.map.offset = init->ring_start;
+ dev_priv->ring.map.size = init->ring_size;
+ dev_priv->ring.map.type = 0;
+ dev_priv->ring.map.flags = 0;
+ dev_priv->ring.map.mtrr = 0;
- drm_core_ioremap(&dev_priv->ring.map, dev);
+ drm_core_ioremap(&dev_priv->ring.map, dev);
- if (dev_priv->ring.map.handle == NULL) {
- i915_dma_cleanup(dev);
- DRM_ERROR("can not ioremap virtual address for"
- " ring buffer\n");
- return -ENOMEM;
- }
+ if (dev_priv->ring.map.handle == NULL) {
+ i915_dma_cleanup(dev);
+ DRM_ERROR("can not ioremap virtual address for"
+ " ring buffer\n");
+ return -ENOMEM;
+ }
- dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
+ dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
+ }
dev_priv->cpp = init->cpp;
@@ -233,9 +285,6 @@ static int i915_initialize(struct drm_device * dev,
/* We are using separate values as placeholders for mechanisms for
* private backbuffer/depthbuffer usage.
*/
- dev_priv->use_mi_batchbuffer_start = 0;
- if (IS_I965G(dev)) /* 965 doesn't support older method */
- dev_priv->use_mi_batchbuffer_start = 1;
/* Allow hardware batchbuffers unless told otherwise.
*/
@@ -245,24 +294,6 @@ static int i915_initialize(struct drm_device * dev,
*/
dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A;
- /* Program Hardware Status Page */
- if (!I915_NEED_GFX_HWS(dev)) {
- dev_priv->status_page_dmah =
- drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);
-
- if (!dev_priv->status_page_dmah) {
- i915_dma_cleanup(dev);
- DRM_ERROR("Can not allocate hardware status page\n");
- return -ENOMEM;
- }
- dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
- dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
-
- memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
-
- I915_WRITE(0x02080, dev_priv->dma_status_page);
- }
- DRM_DEBUG("Enabled hardware status page\n");
#ifdef I915_HAVE_BUFFER
mutex_init(&dev_priv->cmdbuf_mutex);
#endif
@@ -291,11 +322,6 @@ static int i915_dma_resume(struct drm_device * dev)
return -EINVAL;
}
- if (!dev_priv->mmio_map) {
- DRM_ERROR("can not find mmio map!\n");
- return -EINVAL;
- }
-
if (dev_priv->ring.map.handle == NULL) {
DRM_ERROR("can not ioremap virtual address for"
" ring buffer\n");
@@ -459,9 +485,9 @@ static int i915_emit_cmds(struct drm_device *dev, int __user *buffer,
return 0;
}
-static int i915_emit_box(struct drm_device * dev,
- struct drm_clip_rect __user * boxes,
- int i, int DR1, int DR4)
+int i915_emit_box(struct drm_device * dev,
+ struct drm_clip_rect __user * boxes,
+ int i, int DR1, int DR4)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_clip_rect box;
@@ -517,7 +543,7 @@ void i915_emit_breadcrumb(struct drm_device *dev)
BEGIN_LP_RING(4);
OUT_RING(MI_STORE_DWORD_INDEX);
- OUT_RING(20);
+ OUT_RING(5 << MI_STORE_DWORD_INDEX_SHIFT);
OUT_RING(dev_priv->counter);
OUT_RING(0);
ADVANCE_LP_RING();
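The breadcrumb change just above is symbolic, not behavioral: MI_STORE_DWORD_INDEX takes a byte offset into the hardware status page, and dword 5 is the slot READ_BREADCRUMB reads back, so the new expression emits exactly the old literal. A one-line check of the arithmetic, using the MI_STORE_DWORD_INDEX_SHIFT definition this patch adds to i915_drv.h (C11 _Static_assert used purely for illustration):

#define MI_STORE_DWORD_INDEX_SHIFT 2
_Static_assert((5 << MI_STORE_DWORD_INDEX_SHIFT) == 20,
	       "dword index 5 is byte offset 20 into the status page");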
@@ -610,7 +636,14 @@ int i915_dispatch_batchbuffer(struct drm_device * dev,
return ret;
}
- if (dev_priv->use_mi_batchbuffer_start) {
+ if (IS_I830(dev) || IS_845G(dev)) {
+ BEGIN_LP_RING(4);
+ OUT_RING(MI_BATCH_BUFFER);
+ OUT_RING(batch->start | MI_BATCH_NON_SECURE);
+ OUT_RING(batch->start + batch->used - 4);
+ OUT_RING(0);
+ ADVANCE_LP_RING();
+ } else {
BEGIN_LP_RING(2);
if (IS_I965G(dev)) {
OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
@@ -620,14 +653,6 @@ int i915_dispatch_batchbuffer(struct drm_device * dev,
OUT_RING(batch->start | MI_BATCH_NON_SECURE);
}
ADVANCE_LP_RING();
-
- } else {
- BEGIN_LP_RING(4);
- OUT_RING(MI_BATCH_BUFFER);
- OUT_RING(batch->start | MI_BATCH_NON_SECURE);
- OUT_RING(batch->start + batch->used - 4);
- OUT_RING(0);
- ADVANCE_LP_RING();
}
}
@@ -715,9 +740,19 @@ void i915_dispatch_flip(struct drm_device * dev, int planes, int sync)
int i915_quiescent(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
+ int ret;
i915_kernel_lost_context(dev);
- return i915_wait_ring(dev, dev_priv->ring.Size - 8, __FUNCTION__);
+ ret = i915_wait_ring(dev, dev_priv->ring.Size - 8, __FUNCTION__);
+ if (ret)
+ {
+ i915_kernel_lost_context (dev);
+ DRM_ERROR ("not quiescent head %08x tail %08x space %08x\n",
+ dev_priv->ring.head,
+ dev_priv->ring.tail,
+ dev_priv->ring.space);
+ }
+ return ret;
}
static int i915_flush_ioctl(struct drm_device *dev, void *data,
@@ -854,7 +889,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
switch (param->param) {
case I915_PARAM_IRQ_ACTIVE:
- value = dev->irq ? 1 : 0;
+ value = dev->irq_enabled ? 1 : 0;
break;
case I915_PARAM_ALLOW_BATCHBUFFER:
value = dev_priv->allow_batchbuffer ? 1 : 0;
@@ -865,6 +900,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_CHIPSET_ID:
value = dev->pci_device;
break;
+ case I915_PARAM_HAS_GEM:
+ value = 1;
+ break;
default:
DRM_ERROR("Unknown parameter %d\n", param->param);
return -EINVAL;
@@ -891,8 +929,6 @@ static int i915_setparam(struct drm_device *dev, void *data,
switch (param->param) {
case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
- if (!IS_I965G(dev))
- dev_priv->use_mi_batchbuffer_start = param->value;
break;
case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
dev_priv->tex_lru_log_granularity = param->value;
@@ -1026,6 +1062,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
memset(dev_priv, 0, sizeof(drm_i915_private_t));
dev->dev_private = (void *)dev_priv;
+ dev_priv->dev = dev;
/* Add register map (needed for suspend/resume) */
base = drm_get_resource_start(dev, mmio_bar);
@@ -1034,6 +1071,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
ret = drm_addmap(dev, base, size, _DRM_REGISTERS,
_DRM_KERNEL | _DRM_DRIVER, &dev_priv->mmio_map);
+ i915_gem_load(dev);
+
#ifdef __linux__
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
intel_init_chipset_flush_compat(dev);
@@ -1043,6 +1082,13 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
#endif
#endif
+ /* Init HWS */
+ if (!I915_NEED_GFX_HWS(dev)) {
+ ret = i915_init_hardware_status(dev);
+ if(ret)
+ return ret;
+ }
+
return ret;
}
@@ -1050,8 +1096,9 @@ int i915_driver_unload(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (dev_priv->mmio_map)
- drm_rmmap(dev, dev_priv->mmio_map);
+ i915_free_hardware_status(dev);
+
+ drm_rmmap(dev, dev_priv->mmio_map);
#ifdef __linux__
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)
@@ -1083,6 +1130,7 @@ void i915_driver_lastclose(struct drm_device * dev)
dev_priv->val_bufs = NULL;
}
#endif
+ i915_gem_lastclose(dev);
if (drm_getsarea(dev) && dev_priv->sarea_priv)
i915_do_cleanup_pageflip(dev);
@@ -1106,12 +1154,38 @@ void i915_driver_lastclose(struct drm_device * dev)
i915_dma_cleanup(dev);
}
+int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv)
+{
+ struct drm_i915_file_private *i915_file_priv;
+
+ DRM_DEBUG("\n");
+ i915_file_priv = (struct drm_i915_file_private *)
+ drm_alloc(sizeof(*i915_file_priv), DRM_MEM_FILES);
+
+ if (!i915_file_priv)
+ return -ENOMEM;
+
+ file_priv->driver_priv = i915_file_priv;
+
+ i915_file_priv->mm.last_gem_seqno = 0;
+ i915_file_priv->mm.last_gem_throttle_seqno = 0;
+
+ return 0;
+}
+
void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
{
drm_i915_private_t *dev_priv = dev->dev_private;
i915_mem_release(dev, file_priv, dev_priv->agp_heap);
}
+void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
+{
+ struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
+
+ drm_free(i915_file_priv, sizeof(*i915_file_priv), DRM_MEM_FILES);
+}
+
struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF(DRM_I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
@@ -1134,6 +1208,22 @@ struct drm_ioctl_desc i915_ioctls[] = {
#ifdef I915_HAVE_BUFFER
DRM_IOCTL_DEF(DRM_I915_EXECBUFFER, i915_execbuffer, DRM_AUTH),
#endif
+ DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_I915_GEM_CREATE, i915_gem_create_ioctl, 0),
+ DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, 0),
+ DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, 0),
+ DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, 0),
+ DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, 0),
+ DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, 0),
+ DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, 0),
+ DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, 0),
};
int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
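With I915_PARAM_HAS_GEM wired into getparam above, userspace can probe for GEM before touching any of the new ioctls; a pre-GEM kernel rejects the unknown parameter. A sketch, assuming an open DRM fd and the i915_drm.h from this patch, with minimal error handling:

#include <sys/ioctl.h>
#include "i915_drm.h"

static int i915_has_gem(int fd)
{
	drm_i915_getparam_t gp;
	int value = 0;

	gp.param = I915_PARAM_HAS_GEM;
	gp.value = &value;
	if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) != 0)
		return 0;	/* older kernels return -EINVAL here */
	return value;
}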
diff --git a/shared-core/i915_drm.h b/shared-core/i915_drm.h
index 97e77428..2801687c 100644
--- a/shared-core/i915_drm.h
+++ b/shared-core/i915_drm.h
@@ -176,6 +176,22 @@ typedef struct drm_i915_sarea {
#define DRM_I915_MMIO 0x10
#define DRM_I915_HWS_ADDR 0x11
#define DRM_I915_EXECBUFFER 0x12
+#define DRM_I915_GEM_INIT 0x13
+#define DRM_I915_GEM_EXECBUFFER 0x14
+#define DRM_I915_GEM_PIN 0x15
+#define DRM_I915_GEM_UNPIN 0x16
+#define DRM_I915_GEM_BUSY 0x17
+#define DRM_I915_GEM_THROTTLE 0x18
+#define DRM_I915_GEM_ENTERVT 0x19
+#define DRM_I915_GEM_LEAVEVT 0x1a
+#define DRM_I915_GEM_CREATE 0x1b
+#define DRM_I915_GEM_PREAD 0x1c
+#define DRM_I915_GEM_PWRITE 0x1d
+#define DRM_I915_GEM_MMAP 0x1e
+#define DRM_I915_GEM_SET_DOMAIN 0x1f
+#define DRM_I915_GEM_SW_FINISH 0x20
+#define DRM_I915_GEM_SET_TILING 0x21
+#define DRM_I915_GEM_GET_TILING 0x22
#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@@ -195,6 +211,22 @@ typedef struct drm_i915_sarea {
#define DRM_IOCTL_I915_VBLANK_SWAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
#define DRM_IOCTL_I915_MMIO DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_MMIO, drm_i915_mmio)
#define DRM_IOCTL_I915_EXECBUFFER DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_EXECBUFFER, struct drm_i915_execbuffer)
+#define DRM_IOCTL_I915_GEM_INIT DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init)
+#define DRM_IOCTL_I915_GEM_EXECBUFFER DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer)
+#define DRM_IOCTL_I915_GEM_PIN DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin)
+#define DRM_IOCTL_I915_GEM_UNPIN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin)
+#define DRM_IOCTL_I915_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
+#define DRM_IOCTL_I915_GEM_THROTTLE DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE)
+#define DRM_IOCTL_I915_GEM_ENTERVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT)
+#define DRM_IOCTL_I915_GEM_LEAVEVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT)
+#define DRM_IOCTL_I915_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create)
+#define DRM_IOCTL_I915_GEM_PREAD DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread)
+#define DRM_IOCTL_I915_GEM_PWRITE DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite)
+#define DRM_IOCTL_I915_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
+#define DRM_IOCTL_I915_GEM_SET_DOMAIN DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain)
+#define DRM_IOCTL_I915_GEM_SW_FINISH DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish)
+#define DRM_IOCTL_I915_GEM_SET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling)
+#define DRM_IOCTL_I915_GEM_GET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling)
/* Asynchronous page flipping:
*/
@@ -248,6 +280,7 @@ typedef struct drm_i915_irq_wait {
#define I915_PARAM_ALLOW_BATCHBUFFER 2
#define I915_PARAM_LAST_DISPATCH 3
#define I915_PARAM_CHIPSET_ID 4
+#define I915_PARAM_HAS_GEM 5
typedef struct drm_i915_getparam {
int param;
@@ -395,4 +428,292 @@ struct drm_i915_execbuffer {
struct drm_fence_arg fence_arg;
};
+struct drm_i915_gem_init {
+ /**
+ * Beginning offset in the GTT to be managed by the DRM memory
+ * manager.
+ */
+ uint64_t gtt_start;
+ /**
+ * Ending offset in the GTT to be managed by the DRM memory
+ * manager.
+ */
+ uint64_t gtt_end;
+};
+
+struct drm_i915_gem_create {
+ /**
+ * Requested size for the object.
+ *
+ * The (page-aligned) allocated size for the object will be returned.
+ */
+ uint64_t size;
+ /**
+ * Returned handle for the object.
+ *
+ * Object handles are nonzero.
+ */
+ uint32_t handle;
+ uint32_t pad;
+};
+
+struct drm_i915_gem_pread {
+ /** Handle for the object being read. */
+ uint32_t handle;
+ uint32_t pad;
+ /** Offset into the object to read from */
+ uint64_t offset;
+ /** Length of data to read */
+ uint64_t size;
+ /** Pointer to write the data into. */
+ uint64_t data_ptr; /* void *, but pointers are not 32/64 compatible */
+};
+
+struct drm_i915_gem_pwrite {
+ /** Handle for the object being written to. */
+ uint32_t handle;
+ uint32_t pad;
+ /** Offset into the object to write to */
+ uint64_t offset;
+ /** Length of data to write */
+ uint64_t size;
+ /** Pointer to read the data from. */
+ uint64_t data_ptr; /* void *, but pointers are not 32/64 compatible */
+};
+
+struct drm_i915_gem_mmap {
+ /** Handle for the object being mapped. */
+ uint32_t handle;
+ uint32_t pad;
+ /** Offset in the object to map. */
+ uint64_t offset;
+ /**
+ * Length of data to map.
+ *
+ * The value will be page-aligned.
+ */
+ uint64_t size;
+ /** Returned pointer the data was mapped at */
+ uint64_t addr_ptr; /* void *, but pointers are not 32/64 compatible */
+};
+
+struct drm_i915_gem_set_domain {
+ /** Handle for the object */
+ uint32_t handle;
+
+ /** New read domains */
+ uint32_t read_domains;
+
+ /** New write domain */
+ uint32_t write_domain;
+};
+
+struct drm_i915_gem_sw_finish {
+ /** Handle for the object */
+ uint32_t handle;
+};
+
+struct drm_i915_gem_relocation_entry {
+ /**
+ * Handle of the buffer being pointed to by this relocation entry.
+ *
+ * It's appealing to make this be an index into the mm_validate_entry
+ * list to refer to the buffer, but this allows the driver to create
+ * a relocation list for state buffers and not re-write it per
+ * exec using the buffer.
+ */
+ uint32_t target_handle;
+
+ /**
+ * Value to be added to the offset of the target buffer to make up
+ * the relocation entry.
+ */
+ uint32_t delta;
+
+ /** Offset in the buffer the relocation entry will be written into */
+ uint64_t offset;
+
+ /**
+ * Offset value of the target buffer that the relocation entry was last
+ * written as.
+ *
+ * If the buffer has the same offset as last time, we can skip syncing
+ * and writing the relocation. This value is written back out by
+ * the execbuffer ioctl when the relocation is written.
+ */
+ uint64_t presumed_offset;
+
+ /**
+ * Target memory domains read by this operation.
+ */
+ uint32_t read_domains;
+
+ /**
+ * Target memory domains written by this operation.
+ *
+ * Note that only one domain may be written by the whole
+ * execbuffer operation, so that where there are conflicts,
+ * the application will get -EINVAL back.
+ */
+ uint32_t write_domain;
+};
+
+/** @{
+ * Intel memory domains
+ *
+ * Most of these just align with the various caches in
+ * the system and are used to flush and invalidate as
+ * objects end up cached in different domains.
+ */
+/** CPU cache */
+#define I915_GEM_DOMAIN_CPU 0x00000001
+/** Render cache, used by 2D and 3D drawing */
+#define I915_GEM_DOMAIN_RENDER 0x00000002
+/** Sampler cache, used by texture engine */
+#define I915_GEM_DOMAIN_SAMPLER 0x00000004
+/** Command queue, used to load batch buffers */
+#define I915_GEM_DOMAIN_COMMAND 0x00000008
+/** Instruction cache, used by shader programs */
+#define I915_GEM_DOMAIN_INSTRUCTION 0x00000010
+/** Vertex address cache */
+#define I915_GEM_DOMAIN_VERTEX 0x00000020
+/** GTT domain - aperture and scanout */
+#define I915_GEM_DOMAIN_GTT 0x00000040
+/** @} */
+
+struct drm_i915_gem_exec_object {
+ /**
+ * User's handle for a buffer to be bound into the GTT for this
+ * operation.
+ */
+ uint32_t handle;
+
+ /** Number of relocations to be performed on this buffer */
+ uint32_t relocation_count;
+ /**
+ * Pointer to array of struct drm_i915_gem_relocation_entry containing
+ * the relocations to be performed in this buffer.
+ */
+ uint64_t relocs_ptr;
+
+ /** Required alignment in graphics aperture */
+ uint64_t alignment;
+
+ /**
+ * Returned value of the updated offset of the object, for future
+ * presumed_offset writes.
+ */
+ uint64_t offset;
+};
+
+struct drm_i915_gem_execbuffer {
+ /**
+ * List of buffers to be validated with their relocations to be
+ * performed on them.
+ *
+ * This is a pointer to an array of struct drm_i915_gem_validate_entry.
+ *
+ * These buffers must be listed in an order such that all relocations
+ * a buffer is performing refer to buffers that have already appeared
+ * in the validate list.
+ */
+ uint64_t buffers_ptr;
+ uint32_t buffer_count;
+
+ /** Offset in the batchbuffer to start execution from. */
+ uint32_t batch_start_offset;
+ /** Bytes used in batchbuffer from batch_start_offset */
+ uint32_t batch_len;
+ uint32_t DR1;
+ uint32_t DR4;
+ uint32_t num_cliprects;
+ uint64_t cliprects_ptr; /* struct drm_clip_rect *cliprects */
+};
+
+struct drm_i915_gem_pin {
+ /** Handle of the buffer to be pinned. */
+ uint32_t handle;
+ uint32_t pad;
+
+ /** alignment required within the aperture */
+ uint64_t alignment;
+
+ /** Returned GTT offset of the buffer. */
+ uint64_t offset;
+};
+
+struct drm_i915_gem_unpin {
+ /** Handle of the buffer to be unpinned. */
+ uint32_t handle;
+ uint32_t pad;
+};
+
+struct drm_i915_gem_busy {
+ /** Handle of the buffer to check for busy */
+ uint32_t handle;
+
+ /** Return busy status (1 if busy, 0 if idle) */
+ uint32_t busy;
+};
+
+#define I915_TILING_NONE 0
+#define I915_TILING_X 1
+#define I915_TILING_Y 2
+
+#define I915_BIT_6_SWIZZLE_NONE 0
+#define I915_BIT_6_SWIZZLE_9 1
+#define I915_BIT_6_SWIZZLE_9_10 2
+#define I915_BIT_6_SWIZZLE_9_11 3
+#define I915_BIT_6_SWIZZLE_9_10_11 4
+/* Not seen by userland */
+#define I915_BIT_6_SWIZZLE_UNKNOWN 5
+
+struct drm_i915_gem_set_tiling {
+ /** Handle of the buffer to have its tiling state updated */
+ uint32_t handle;
+
+ /**
+ * Tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
+ * I915_TILING_Y).
+ *
+ * This value is to be set on request, and will be updated by the
+ * kernel on successful return with the actual chosen tiling layout.
+ *
+ * The tiling mode may be demoted to I915_TILING_NONE when the system
+ * has bit 6 swizzling that can't be managed correctly by GEM.
+ *
+ * Buffer contents become undefined when changing tiling_mode.
+ */
+ uint32_t tiling_mode;
+
+ /**
+ * Stride in bytes for the object when in I915_TILING_X or
+ * I915_TILING_Y.
+ */
+ uint32_t stride;
+
+ /**
+ * Returned address bit 6 swizzling required for CPU access through
+ * mmap mapping.
+ */
+ uint32_t swizzle_mode;
+};
+
+struct drm_i915_gem_get_tiling {
+ /** Handle of the buffer to get tiling state for. */
+ uint32_t handle;
+
+ /**
+ * Current tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
+ * I915_TILING_Y).
+ */
+ uint32_t tiling_mode;
+
+ /**
+ * Returned address bit 6 swizzling required for CPU access through
+ * mmap mapping.
+ */
+ uint32_t swizzle_mode;
+};
+
#endif /* _I915_DRM_H_ */
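Taken together, the structs above define the object lifecycle userspace drives: create an object, move data in with pwrite (or mmap it and fence CPU access through set_domain/sw_finish), then hand it to execbuffer with a relocation list. A sketch of the first two steps, assuming an open DRM fd; the helper name is illustrative and error handling is minimal:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include "i915_drm.h"

static uint32_t gem_create_and_fill(int fd, const void *data, uint64_t len)
{
	struct drm_i915_gem_create create;
	struct drm_i915_gem_pwrite pwrite;

	memset(&create, 0, sizeof(create));
	create.size = len;		/* kernel rounds the allocation up to pages */
	if (ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create) != 0)
		return 0;		/* handles are nonzero, so 0 signals failure */

	memset(&pwrite, 0, sizeof(pwrite));
	pwrite.handle = create.handle;
	pwrite.offset = 0;
	pwrite.size = len;
	pwrite.data_ptr = (uint64_t)(uintptr_t)data;	/* pointer passed as u64 */
	if (ioctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite) != 0)
		return 0;

	return create.handle;
}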
diff --git a/shared-core/i915_drv.h b/shared-core/i915_drv.h
index 421572cd..f84dcc25 100644
--- a/shared-core/i915_drv.h
+++ b/shared-core/i915_drv.h
@@ -37,7 +37,7 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20080312"
+#define DRIVER_DATE "20080730"
#if defined(__linux__)
#define I915_HAVE_FENCE
@@ -77,16 +77,23 @@ enum pipe {
struct drm_i915_validate_buffer;
#endif
+#define WATCH_COHERENCY 0
+#define WATCH_BUF 0
+#define WATCH_EXEC 0
+#define WATCH_LRU 0
+#define WATCH_RELOC 0
+#define WATCH_INACTIVE 0
+#define WATCH_PWRITE 0
+
typedef struct _drm_i915_ring_buffer {
int tail_mask;
- unsigned long Start;
- unsigned long End;
unsigned long Size;
u8 *virtual_start;
int head;
int tail;
int space;
drm_local_map_t map;
+ struct drm_gem_object *ring_obj;
} drm_i915_ring_buffer_t;
struct mem_block {
@@ -122,6 +129,8 @@ struct intel_opregion {
#endif
typedef struct drm_i915_private {
+ struct drm_device *dev;
+
drm_local_map_t *sarea;
drm_local_map_t *mmio_map;
@@ -134,13 +143,12 @@ typedef struct drm_i915_private {
uint32_t counter;
unsigned int status_gfx_addr;
drm_local_map_t hws_map;
+ struct drm_gem_object *hws_obj;
unsigned int cpp;
- int use_mi_batchbuffer_start;
wait_queue_head_t irq_queue;
atomic_t irq_received;
- atomic_t irq_emitted;
int tex_lru_log_granularity;
int allow_batchbuffer;
@@ -150,7 +158,7 @@ typedef struct drm_i915_private {
DRM_SPINTYPE user_irq_lock;
int user_irq_refcount;
int fence_irq_on;
- uint32_t irq_enable_reg;
+ uint32_t irq_mask_reg;
int irq_enabled;
#ifdef I915_HAVE_FENCE
@@ -267,8 +275,97 @@ typedef struct drm_i915_private {
u8 saveDACMASK;
u8 saveDACDATA[256*3]; /* 256 3-byte colors */
u8 saveCR[37];
+
+ struct {
+ struct drm_mm gtt_space;
+
+ /**
+ * List of objects currently involved in rendering from the
+ * ringbuffer.
+ *
+ * A reference is held on the buffer while on this list.
+ */
+ struct list_head active_list;
+
+ /**
+ * List of objects which are not in the ringbuffer but which
+ * still have a write_domain which needs to be flushed before
+ * unbinding.
+ *
+ * A reference is held on the buffer while on this list.
+ */
+ struct list_head flushing_list;
+
+ /**
+ * LRU list of objects which are not in the ringbuffer and
+ * are ready to unbind, but are still in the GTT.
+ *
+ * A reference is not held on the buffer while on this list,
+ * as merely being GTT-bound shouldn't prevent its being
+ * freed, and we'll pull it off the list in the free path.
+ */
+ struct list_head inactive_list;
+
+ /**
+ * List of breadcrumbs associated with GPU requests currently
+ * outstanding.
+ */
+ struct list_head request_list;
+
+ /**
+ * We leave the user IRQ off as much as possible,
+ * but this means that requests will finish and never
+ * be retired once the system goes idle. Set a timer to
+ * fire periodically while the ring is running. When it
+ * fires, go retire requests.
+ */
+ struct delayed_work retire_work;
+
+ uint32_t next_gem_seqno;
+
+ /**
+ * Waiting sequence number, if any
+ */
+ uint32_t waiting_gem_seqno;
+
+ /**
+ * Last seq seen at irq time
+ */
+ uint32_t irq_gem_seqno;
+
+ /**
+ * Flag if the X Server, and thus DRM, is not currently in
+ * control of the device.
+ *
+ * This is set between LeaveVT and EnterVT. It needs to be
+ * replaced with a semaphore. It also needs to be
+ * transitioned away from for kernel modesetting.
+ */
+ int suspended;
+
+ /**
+ * Flag if the hardware appears to be wedged.
+ *
+ * This is set when attempts to idle the device timeout.
+ * It prevents command submission from occurring and makes
+ * every pending request fail
+ */
+ int wedged;
+
+ /** Bit 6 swizzling required for X tiling */
+ uint32_t bit_6_swizzle_x;
+ /** Bit 6 swizzling required for Y tiling */
+ uint32_t bit_6_swizzle_y;
+ } mm;
} drm_i915_private_t;
+struct drm_i915_file_private {
+ struct {
+ uint32_t last_gem_seqno;
+ uint32_t last_gem_throttle_seqno;
+ } mm;
+};
+
enum intel_chip_family {
CHIP_I8XX = 0x01,
CHIP_I9XX = 0x02,
@@ -276,6 +373,83 @@ enum intel_chip_family {
CHIP_I965 = 0x08,
};
+/** driver private structure attached to each drm_gem_object */
+struct drm_i915_gem_object {
+ struct drm_gem_object *obj;
+
+ /** Current space allocated to this object in the GTT, if any. */
+ struct drm_mm_node *gtt_space;
+
+ /** This object's place on the active/flushing/inactive lists */
+ struct list_head list;
+
+ /**
+ * This is set if the object is on the active or flushing lists
+ * (has pending rendering), and is not set if it's on inactive (ready
+ * to be unbound).
+ */
+ int active;
+
+ /**
+ * This is set if the object has been written to since last bound
+ * to the GTT
+ */
+ int dirty;
+
+ /** AGP memory structure for our GTT binding. */
+ DRM_AGP_MEM *agp_mem;
+
+ struct page **page_list;
+
+ /**
+ * Current offset of the object in GTT space.
+ *
+ * This is the same as gtt_space->start
+ */
+ uint32_t gtt_offset;
+
+ /** Boolean whether this object has a valid gtt offset. */
+ int gtt_bound;
+
+ /** How many users have pinned this object in GTT space */
+ int pin_count;
+
+ /** Breadcrumb of last rendering to the buffer. */
+ uint32_t last_rendering_seqno;
+
+ /** Current tiling mode for the object. */
+ uint32_t tiling_mode;
+
+ /**
+ * Flagging of which individual pages are valid in GEM_DOMAIN_CPU when
+ * GEM_DOMAIN_CPU is not in the object's read domain.
+ */
+ uint8_t *page_cpu_valid;
+};
+
+/**
+ * Request queue structure.
+ *
+ * The request queue allows us to note sequence numbers that have been emitted
+ * and may be associated with active buffers to be retired.
+ *
+ * By keeping this list, we can avoid having to do questionable
+ * sequence-number comparisons on buffer last_rendering_seqnos, and associate
+ * an emission time with seqnos for tracking how far ahead of the GPU we are.
+ */
+struct drm_i915_gem_request {
+ /** GEM sequence number associated with this request. */
+ uint32_t seqno;
+
+ /** Time at which this request was emitted, in jiffies. */
+ unsigned long emitted_jiffies;
+
+ /** Cache domains that were flushed at the start of the request. */
+ uint32_t flush_domains;
+
+ struct list_head list;
+};
+
extern struct drm_ioctl_desc i915_ioctls[];
extern int i915_max_ioctl;
@@ -284,8 +458,11 @@ extern void i915_kernel_lost_context(struct drm_device * dev);
extern int i915_driver_load(struct drm_device *, unsigned long flags);
extern int i915_driver_unload(struct drm_device *);
extern void i915_driver_lastclose(struct drm_device * dev);
+extern int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv);
extern void i915_driver_preclose(struct drm_device *dev,
struct drm_file *file_priv);
+extern void i915_driver_postclose(struct drm_device *dev,
+ struct drm_file *file_priv);
extern int i915_driver_device_is_agp(struct drm_device * dev);
extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
@@ -297,6 +474,10 @@ extern int i915_dispatch_batchbuffer(struct drm_device * dev,
drm_i915_batchbuffer_t * batch);
extern int i915_quiescent(struct drm_device *dev);
+int i915_emit_box(struct drm_device * dev,
+ struct drm_clip_rect __user * boxes,
+ int i, int DR1, int DR4);
+
/* i915_irq.c */
extern int i915_irq_emit(struct drm_device *dev, void *data,
struct drm_file *file_priv);
@@ -312,6 +493,7 @@ extern int i915_vblank_pipe_set(struct drm_device *dev, void *data,
extern int i915_vblank_pipe_get(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int i915_emit_irq(struct drm_device * dev);
+extern int i915_wait_irq(struct drm_device * dev, int irq_nr);
extern int i915_enable_vblank(struct drm_device *dev, int crtc);
extern void i915_disable_vblank(struct drm_device *dev, int crtc);
extern u32 i915_get_vblank_counter(struct drm_device *dev, int crtc);
@@ -360,8 +542,68 @@ void i915_flush_ttm(struct drm_ttm *ttm);
/* i915_execbuf.c */
int i915_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+/* i915_gem.c */
+int i915_gem_init_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_create_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_pread_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_execbuffer(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_pin_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_set_tiling(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_get_tiling(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+void i915_gem_load(struct drm_device *dev);
+int i915_gem_proc_init(struct drm_minor *minor);
+void i915_gem_proc_cleanup(struct drm_minor *minor);
+int i915_gem_init_object(struct drm_gem_object *obj);
+void i915_gem_free_object(struct drm_gem_object *obj);
+int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment);
+void i915_gem_object_unpin(struct drm_gem_object *obj);
+void i915_gem_lastclose(struct drm_device *dev);
+uint32_t i915_get_gem_seqno(struct drm_device *dev);
+void i915_gem_retire_requests(struct drm_device *dev);
+void i915_gem_retire_work_handler(struct work_struct *work);
+void i915_gem_clflush_object(struct drm_gem_object *obj);
+#endif
+/* i915_gem_tiling.c */
+void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
+
+/* i915_gem_debug.c */
+void i915_gem_dump_object(struct drm_gem_object *obj, int len,
+ const char *where, uint32_t mark);
+#if WATCH_INACTIVE
+void i915_verify_inactive(struct drm_device *dev, char *file, int line);
+#else
+#define i915_verify_inactive(dev,file,line)
#endif
+void i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle);
+void i915_gem_dump_object(struct drm_gem_object *obj, int len,
+ const char *where, uint32_t mark);
+void i915_dump_lru(struct drm_device *dev, const char *where);
#ifdef __linux__
/* i915_opregion.c */
@@ -390,16 +632,25 @@ typedef boolean_t bool;
#endif
#define I915_VERBOSE 0
+#define I915_RING_VALIDATE 0
#define PRIMARY_RINGBUFFER_SIZE (128*1024)
#define RING_LOCALS unsigned int outring, ringmask, outcount; \
volatile char *virt;
+#if I915_RING_VALIDATE
+void i915_ring_validate(struct drm_device *dev, const char *func, int line);
+#define I915_RING_DO_VALIDATE(dev) i915_ring_validate(dev, __FUNCTION__, __LINE__)
+#else
+#define I915_RING_DO_VALIDATE(dev)
+#endif
+
#define BEGIN_LP_RING(n) do { \
if (I915_VERBOSE) \
DRM_DEBUG("BEGIN_LP_RING(%d)\n", \
(n)); \
+ I915_RING_DO_VALIDATE(dev); \
if (dev_priv->ring.space < (n)*4) \
i915_wait_ring(dev, (n)*4, __FUNCTION__); \
outcount = 0; \
@@ -418,6 +669,7 @@ typedef boolean_t bool;
#define ADVANCE_LP_RING() do { \
if (I915_VERBOSE) DRM_DEBUG("ADVANCE_LP_RING %x\n", outring); \
+ I915_RING_DO_VALIDATE(dev); \
dev_priv->ring.tail = outring; \
dev_priv->ring.space -= outcount * 4; \
I915_WRITE(PRB0_TAIL, outring); \
@@ -425,6 +677,39 @@ typedef boolean_t bool;
extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
+#define BREADCRUMB_BITS 31
+#define BREADCRUMB_MASK ((1U << BREADCRUMB_BITS) - 1)
+
+#define READ_BREADCRUMB(dev_priv) (((volatile u32*)(dev_priv->hw_status_page))[5])
+/**
+ * Reads a dword out of the status page, which is written to from the command
+ * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
+ * MI_STORE_DATA_IMM.
+ *
+ * The following dwords have a reserved meaning:
+ * 0: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
+ * 4: ring 0 head pointer
+ * 5: ring 1 head pointer (915-class)
+ * 6: ring 2 head pointer (915-class)
+ *
+ * The area from dword 0x10 to 0x3ff is available for driver usage.
+ */
+#define READ_HWSP(dev_priv, reg) (((volatile u32*)(dev_priv->hw_status_page))[reg])
+#define I915_GEM_HWS_INDEX 0x10
+
+/* MCH MMIO space */
+/** 915-945 and GM965 MCH register controlling DRAM channel access */
+#define DCC 0x200
+#define DCC_ADDRESSING_MODE_SINGLE_CHANNEL (0 << 0)
+#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC (1 << 0)
+#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED (2 << 0)
+#define DCC_ADDRESSING_MODE_MASK (3 << 0)
+#define DCC_CHANNEL_XOR_DISABLE (1 << 10)
+
+/** 965 MCH register controlling DRAM channel configuration */
+#define CHDECMISC 0x111
+#define CHDECMISC_FLEXMEMORY (1 << 1)
+
/*
* The Bridge device's PCI config space has information about the
* fb aperture size and the amount of pre-reserved memory.
@@ -525,33 +810,13 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1)
#define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */
#define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1)
+#define MI_STORE_DWORD_INDEX_SHIFT 2
#define MI_LOAD_REGISTER_IMM MI_INSTR(0x22, 1)
#define MI_BATCH_BUFFER MI_INSTR(0x30, 1)
#define MI_BATCH_NON_SECURE (1)
#define MI_BATCH_NON_SECURE_I965 (1<<8)
#define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0)
-#define BREADCRUMB_BITS 31
-#define BREADCRUMB_MASK ((1U << BREADCRUMB_BITS) - 1)
-
-#define READ_BREADCRUMB(dev_priv) (((volatile u32*)(dev_priv->hw_status_page))[5])
-
-/**
- * Reads a dword out of the status page, which is written to from the command
- * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
- * MI_STORE_DATA_IMM.
- *
- * The following dwords have a reserved meaning:
- * 0: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
- * 4: ring 0 head pointer
- * 5: ring 1 head pointer (915-class)
- * 6: ring 2 head pointer (915-class)
- *
- * The area from dword 0x10 to 0x3ff is available for driver usage.
- */
-#define READ_HWSP(dev_priv, reg) (((volatile u32*)(dev_priv->hw_status_page))[reg])
-#define I915_GEM_HWS_INDEX 0x10
-
/*
* 3D instructions used by the kernel
*/
@@ -574,6 +839,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define GFX_OP_STIPPLE ((0x3<<29)|(0x1d<<24)|(0x83<<16))
#define GFX_OP_MAP_INFO ((0x3<<29)|(0x1d<<24)|0x4)
#define GFX_OP_DESTBUFFER_VARS ((0x3<<29)|(0x1d<<24)|(0x85<<16)|0x0)
+#define GFX_OP_DESTBUFFER_INFO ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1)
#define GFX_OP_DRAWRECT_INFO ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3))
#define GFX_OP_DRAWRECT_INFO_I965 ((0x7900<<16)|0x2)
#define SRC_COPY_BLT_CMD ((2<<29)|(0x43<<22)|4)
@@ -617,7 +883,10 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define PRB1_HEAD 0x02044 /* 915+ only */
#define PRB1_START 0x02048 /* 915+ only */
#define PRB1_CTL 0x0204c /* 915+ only */
+#define ACTHD_I965 0x02074
#define HWS_PGA 0x02080
+#define HWS_ADDRESS_MASK 0xfffff000
+#define HWS_START_ADDRESS_SHIFT 4
#define IPEIR 0x02088
#define NOPID 0x02094
#define HWSTAM 0x02098
@@ -647,6 +916,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define EMR 0x020b4
#define ESR 0x020b8
#define INSTPM 0x020c0
+#define ACTHD 0x020c8
#define FW_BLC 0x020d8
#define FW_BLC_SELF 0x020e0 /* 915+ only */
#define MI_ARB_STATE 0x020e4 /* 915+ only */
@@ -696,7 +966,6 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
/*
* GPIO regs
*/
-
#define GPIOA 0x5010
#define GPIOB 0x5014
#define GPIOC 0x5018
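The request list and per-object last_rendering_seqno added above all lean on monotonically increasing 32-bit sequence numbers that the GPU writes into the status page, readable as READ_HWSP(dev_priv, I915_GEM_HWS_INDEX). The comparison helper itself is not part of this diff, but the standard wrap-safe test for "has seqno a passed seqno b" is signed subtraction; a sketch, with the helper name assumed:

#include <stdint.h>

static inline int i915_seqno_passed(uint32_t seq1, uint32_t seq2)
{
	/* Correct across 32-bit wraparound provided the two values
	 * are within 2^31 of each other. */
	return (int32_t)(seq1 - seq2) >= 0;
}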
diff --git a/shared-core/i915_irq.c b/shared-core/i915_irq.c
index ae4081f8..418e0ae9 100644
--- a/shared-core/i915_irq.c
+++ b/shared-core/i915_irq.c
@@ -33,6 +33,33 @@
#define MAX_NOPID ((u32)~0)
+/*
+ * These are the interrupts used by the driver
+ */
+#define I915_INTERRUPT_ENABLE_MASK (I915_USER_INTERRUPT | \
+ I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT)
+
+static inline void
+i915_enable_irq(drm_i915_private_t *dev_priv, uint32_t mask)
+{
+ if ((dev_priv->irq_mask_reg & mask) != 0) {
+ dev_priv->irq_mask_reg &= ~mask;
+ I915_WRITE(IMR, dev_priv->irq_mask_reg);
+ (void) I915_READ(IMR);
+ }
+}
+
+static inline void
+i915_disable_irq(drm_i915_private_t *dev_priv, uint32_t mask)
+{
+ if ((dev_priv->irq_mask_reg & mask) != mask) {
+ dev_priv->irq_mask_reg |= mask;
+ I915_WRITE(IMR, dev_priv->irq_mask_reg);
+ (void) I915_READ(IMR);
+ }
+}
+
/**
* i915_get_pipe - return the pipe associated with a given plane
* @dev: DRM device
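A recurring idiom in the helpers just above is writing IMR and then immediately reading it back with (void) I915_READ(IMR). MMIO writes may be posted (buffered) on their way to the device; a read from the same device forces them to complete before the CPU moves on, which matters when the next line of code assumes the interrupt mask has really changed. A generic sketch of the idiom, with illustrative names:

#include <stdint.h>

static inline void mmio_write_flushed(volatile uint32_t *reg, uint32_t val)
{
	*reg = val;	/* the write may be posted by the bus */
	(void)*reg;	/* the readback flushes it to the device */
}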
@@ -403,12 +430,23 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u32 iir;
- u32 pipea_stats, pipeb_stats;
+ u32 pipea_stats = 0, pipeb_stats = 0;
int vblank = 0;
+ if (dev->pdev->msi_enabled)
+ I915_WRITE(IMR, ~0);
iir = I915_READ(IIR);
- if (iir == 0)
+#if 0
+ DRM_DEBUG("flag=%08x\n", iir);
+#endif
+ atomic_inc(&dev_priv->irq_received);
+ if (iir == 0) {
+ if (dev->pdev->msi_enabled) {
+ I915_WRITE(IMR, dev_priv->irq_mask_reg);
+ (void) I915_READ(IMR);
+ }
return IRQ_NONE;
+ }
/*
* Clear the PIPE(A|B)STAT regs before the IIR otherwise
@@ -422,7 +460,6 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
vblank++;
drm_handle_vblank(dev, i915_get_plane(dev, 0));
}
-
I915_WRITE(PIPEASTAT, pipea_stats);
}
if (iir & I915_DISPLAY_PIPE_B_EVENT_INTERRUPT) {
@@ -462,9 +499,12 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
I915_WRITE(IIR, iir);
- (void) I915_READ(IIR);
+ if (dev->pdev->msi_enabled)
+ I915_WRITE(IMR, dev_priv->irq_mask_reg);
+ (void) I915_READ(IIR); /* Flush posted writes */
if (iir & I915_USER_INTERRUPT) {
+ dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev);
DRM_WAKEUP(&dev_priv->irq_queue);
#ifdef I915_HAVE_FENCE
i915_fence_handler(dev);
@@ -501,35 +541,40 @@ int i915_emit_irq(struct drm_device *dev)
void i915_user_irq_on(drm_i915_private_t *dev_priv)
{
DRM_SPINLOCK(&dev_priv->user_irq_lock);
- if (dev_priv->irq_enabled && (++dev_priv->user_irq_refcount == 1)){
- dev_priv->irq_enable_reg |= I915_USER_INTERRUPT;
- I915_WRITE(IER, dev_priv->irq_enable_reg);
- }
+ if (dev_priv->irq_enabled && (++dev_priv->user_irq_refcount == 1))
+ i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
DRM_SPINUNLOCK(&dev_priv->user_irq_lock);
-
}
void i915_user_irq_off(drm_i915_private_t *dev_priv)
{
DRM_SPINLOCK(&dev_priv->user_irq_lock);
- if (dev_priv->irq_enabled && (--dev_priv->user_irq_refcount == 0)) {
- // dev_priv->irq_enable_reg &= ~I915_USER_INTERRUPT;
- // I915_WRITE(IER, dev_priv->irq_enable_reg);
- }
+ BUG_ON(dev_priv->irq_enabled && dev_priv->user_irq_refcount <= 0);
+ if (dev_priv->irq_enabled && (--dev_priv->user_irq_refcount == 0))
+ i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
DRM_SPINUNLOCK(&dev_priv->user_irq_lock);
}
-static int i915_wait_irq(struct drm_device * dev, int irq_nr)
+int i915_wait_irq(struct drm_device * dev, int irq_nr)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int ret = 0;
+ if (!dev_priv) {
+ DRM_ERROR("called with no initialization\n");
+ return -EINVAL;
+ }
+
DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr,
READ_BREADCRUMB(dev_priv));
- if (READ_BREADCRUMB(dev_priv) >= irq_nr)
+ if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
+ if (dev_priv->sarea_priv)
+ dev_priv->sarea_priv->last_dispatch =
+ READ_BREADCRUMB(dev_priv);
return 0;
+ }
i915_user_irq_on(dev_priv);
DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,
@@ -594,16 +639,17 @@ int i915_enable_vblank(struct drm_device *dev, int plane)
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int pipe = i915_get_pipe(dev, plane);
u32 pipestat_reg = 0;
+ u32 mask_reg = 0;
u32 pipestat;
switch (pipe) {
case 0:
pipestat_reg = PIPEASTAT;
- dev_priv->irq_enable_reg |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
+ mask_reg |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
break;
case 1:
pipestat_reg = PIPEBSTAT;
- dev_priv->irq_enable_reg |= I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
+ mask_reg |= I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
break;
default:
DRM_ERROR("tried to enable vblank on non-existent pipe %d\n",
@@ -629,7 +675,9 @@ int i915_enable_vblank(struct drm_device *dev, int plane)
PIPE_VBLANK_INTERRUPT_STATUS);
I915_WRITE(pipestat_reg, pipestat);
}
- I915_WRITE(IER, dev_priv->irq_enable_reg);
+ DRM_SPINLOCK(&dev_priv->user_irq_lock);
+ i915_enable_irq(dev_priv, mask_reg);
+ DRM_SPINUNLOCK(&dev_priv->user_irq_lock);
return 0;
}
@@ -639,16 +687,17 @@ void i915_disable_vblank(struct drm_device *dev, int plane)
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int pipe = i915_get_pipe(dev, plane);
u32 pipestat_reg = 0;
+ u32 mask_reg = 0;
u32 pipestat;
switch (pipe) {
case 0:
pipestat_reg = PIPEASTAT;
- dev_priv->irq_enable_reg &= ~I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
+ mask_reg |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
break;
case 1:
pipestat_reg = PIPEBSTAT;
- dev_priv->irq_enable_reg &= ~I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
+ mask_reg |= I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
break;
default:
DRM_ERROR("tried to disable vblank on non-existent pipe %d\n",
@@ -656,7 +705,9 @@ void i915_disable_vblank(struct drm_device *dev, int plane)
break;
}
- I915_WRITE(IER, dev_priv->irq_enable_reg);
+ DRM_SPINLOCK(&dev_priv->user_irq_lock);
+ i915_disable_irq(dev_priv, mask_reg);
+ DRM_SPINUNLOCK(&dev_priv->user_irq_lock);
if (pipestat_reg)
{
@@ -669,14 +720,18 @@ void i915_disable_vblank(struct drm_device *dev, int plane)
pipestat |= (PIPE_START_VBLANK_INTERRUPT_STATUS |
PIPE_VBLANK_INTERRUPT_STATUS);
I915_WRITE(pipestat_reg, pipestat);
+ (void) I915_READ(pipestat_reg);
}
}
static void i915_enable_interrupt (struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-
- dev_priv->irq_enable_reg |= I915_USER_INTERRUPT;
+
+ dev_priv->irq_mask_reg = ~0;
+ I915_WRITE(IMR, dev_priv->irq_mask_reg);
+ I915_WRITE(IER, I915_INTERRUPT_ENABLE_MASK);
+ (void) I915_READ (IER);
#ifdef __linux__
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25)
@@ -684,7 +739,6 @@ static void i915_enable_interrupt (struct drm_device *dev)
#endif
#endif
- I915_WRITE(IER, dev_priv->irq_enable_reg);
dev_priv->irq_enabled = 1;
}
@@ -900,7 +954,7 @@ int i915_driver_irq_postinstall(struct drm_device * dev)
DRM_SPININIT(&dev_priv->user_irq_lock, "userirq");
dev_priv->user_irq_refcount = 0;
- dev_priv->irq_enable_reg = 0;
+ dev_priv->irq_mask_reg = ~0;
ret = drm_vblank_init(dev, num_pipes);
if (ret)