Diffstat (limited to 'linux-core')
-rw-r--r--  linux-core/Makefile                                       5
-rw-r--r--  linux-core/Makefile.kernel                                5
-rw-r--r--  linux-core/ati_pcigart.c                                104
-rw-r--r--  linux-core/drmP.h                                         4
-rw-r--r--  linux-core/drm_bo.c                                     530
-rw-r--r--  linux-core/drm_bo_lock.c                                 46
-rw-r--r--  linux-core/drm_bo_move.c                                  9
-rw-r--r--  linux-core/drm_compat.c                                   5
-rw-r--r--  linux-core/drm_compat.h                                   5
-rw-r--r--  linux-core/drm_fence.c                                    2
-rw-r--r--  linux-core/drm_fops.c                                     4
-rw-r--r--  linux-core/drm_irq.c                                      2
-rw-r--r--  linux-core/drm_lock.c                                    31
-rw-r--r--  linux-core/drm_memory.c                                  42
-rw-r--r--  linux-core/drm_objects.h                                 26
-rw-r--r--  linux-core/drm_ttm.c                                     51
-rw-r--r--  linux-core/drm_vm.c                                       8
-rw-r--r--  linux-core/i915_buffer.c                                  4
-rw-r--r--  linux-core/i915_drv.c                                     7
-rw-r--r--  linux-core/i915_execbuf.c                               921
-rw-r--r--  linux-core/i915_fence.c                                   4
-rw-r--r--  linux-core/nouveau_bo.c (renamed from linux-core/nouveau_buffer.c)  19
-rw-r--r--  linux-core/nouveau_drv.c                                  4
-rw-r--r--  linux-core/nouveau_fence.c                                3
-rw-r--r--  linux-core/via_dmablit.c                                  2
-rw-r--r--  linux-core/xgi_pcie.c                                     1
26 files changed, 1400 insertions, 444 deletions
diff --git a/linux-core/Makefile b/linux-core/Makefile
index 2f33e5df..4ac083fe 100644
--- a/linux-core/Makefile
+++ b/linux-core/Makefile
@@ -340,6 +340,11 @@ ifneq (,$(findstring i915,$(DRM_MODULES)))
CONFIG_DRM_I915 := m
endif
+GIT_REVISION := $(shell cd "$(DRMSRCDIR)" && git-describe --abbrev=17)
+ifneq ($(GIT_REVISION),)
+EXTRA_CFLAGS+=-D"GIT_REVISION=\"$(GIT_REVISION)\""
+endif
+
include $(DRMSRCDIR)/Makefile.kernel
# Dependencies
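
The GIT_REVISION define added above is injected into every object's CFLAGS as a quoted string. A minimal sketch of how a driver might consume it (hypothetical usage, not part of this patch):

#ifdef GIT_REVISION
	/* GIT_REVISION expands to a string literal from git-describe */
	DRM_INFO("drm built from git revision %s\n", GIT_REVISION);
#endif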
diff --git a/linux-core/Makefile.kernel b/linux-core/Makefile.kernel
index 28f6ec06..6903ec63 100644
--- a/linux-core/Makefile.kernel
+++ b/linux-core/Makefile.kernel
@@ -20,13 +20,14 @@ r128-objs := r128_drv.o r128_cce.o r128_state.o r128_irq.o
mga-objs := mga_drv.o mga_dma.o mga_state.o mga_warp.o mga_irq.o
i810-objs := i810_drv.o i810_dma.o
i915-objs := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_fence.o \
- i915_buffer.o intel_display.o intel_crt.o intel_lvds.o \
+ i915_buffer.o i915_execbuf.o \
+ intel_display.o intel_crt.o intel_lvds.o \
intel_sdvo.o intel_modes.o intel_i2c.o i915_init.o intel_fb.o \
intel_tv.o i915_compat.o intel_dvo.o dvo_ch7xxx.o \
dvo_ch7017.o dvo_ivch.o dvo_tfp410.o dvo_sil164.o
nouveau-objs := nouveau_drv.o nouveau_state.o nouveau_fifo.o nouveau_mem.o \
nouveau_object.o nouveau_irq.o nouveau_notifier.o nouveau_swmthd.o \
- nouveau_sgdma.o nouveau_dma.o nouveau_buffer.o nouveau_fence.o \
+ nouveau_sgdma.o nouveau_dma.o nouveau_bo.o nouveau_fence.o \
nv04_timer.o \
nv04_mc.o nv40_mc.o nv50_mc.o \
nv04_fb.o nv10_fb.o nv40_fb.o \
diff --git a/linux-core/ati_pcigart.c b/linux-core/ati_pcigart.c
index 93519e5f..beaa4424 100644
--- a/linux-core/ati_pcigart.c
+++ b/linux-core/ati_pcigart.c
@@ -34,51 +34,23 @@
#include "drmP.h"
# define ATI_PCIGART_PAGE_SIZE 4096 /**< PCI GART page size */
-
-static void *drm_ati_alloc_pcigart_table(int order)
+static int drm_ati_alloc_pcigart_table(struct drm_device *dev,
+ struct drm_ati_pcigart_info *gart_info)
{
- unsigned long address;
- struct page *page;
- int i;
+ gart_info->table_handle = drm_pci_alloc(dev, gart_info->table_size,
+ PAGE_SIZE,
+ gart_info->table_mask);
+ if (gart_info->table_handle == NULL)
+ return -ENOMEM;
- DRM_DEBUG("%d order\n", order);
-
- address = __get_free_pages(GFP_KERNEL | __GFP_COMP,
- order);
- if (address == 0UL) {
- return NULL;
- }
-
- page = virt_to_page(address);
-
- for (i = 0; i < order; i++, page++) {
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)
- get_page(page);
-#endif
- SetPageReserved(page);
- }
-
- DRM_DEBUG("returning 0x%08lx\n", address);
- return (void *)address;
+ return 0;
}
-static void drm_ati_free_pcigart_table(void *address, int order)
+static void drm_ati_free_pcigart_table(struct drm_device *dev,
+ struct drm_ati_pcigart_info *gart_info)
{
- struct page *page;
- int i;
- int num_pages = 1 << order;
- DRM_DEBUG("\n");
-
- page = virt_to_page((unsigned long)address);
-
- for (i = 0; i < num_pages; i++, page++) {
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)
- __put_page(page);
-#endif
- ClearPageReserved(page);
- }
-
- free_pages((unsigned long)address, order);
+ drm_pci_free(dev, gart_info->table_handle);
+ gart_info->table_handle = NULL;
}
int drm_ati_pcigart_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info)
@@ -86,8 +58,7 @@ int drm_ati_pcigart_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info
struct drm_sg_mem *entry = dev->sg;
unsigned long pages;
int i;
- int order;
- int num_pages, max_pages;
+ int max_pages;
/* we need to support large memory configurations */
if (!entry) {
@@ -95,15 +66,7 @@ int drm_ati_pcigart_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info
return 0;
}
- order = drm_order((gart_info->table_size + (PAGE_SIZE-1)) / PAGE_SIZE);
- num_pages = 1 << order;
-
if (gart_info->bus_addr) {
- if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) {
- pci_unmap_single(dev->pdev, gart_info->bus_addr,
- num_pages * PAGE_SIZE,
- PCI_DMA_TODEVICE);
- }
max_pages = (gart_info->table_size / sizeof(u32));
pages = (entry->pages <= max_pages)
@@ -122,10 +85,9 @@ int drm_ati_pcigart_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info
if (gart_info->gart_table_location == DRM_ATI_GART_MAIN
- && gart_info->addr) {
+ && gart_info->table_handle) {
- drm_ati_free_pcigart_table(gart_info->addr, order);
- gart_info->addr = NULL;
+ drm_ati_free_pcigart_table(dev, gart_info);
}
return 1;
@@ -137,11 +99,10 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga
struct drm_sg_mem *entry = dev->sg;
void *address = NULL;
unsigned long pages;
- u32 *pci_gart, page_base, bus_address = 0;
+ u32 *pci_gart, page_base;
+ dma_addr_t bus_address = 0;
int i, j, ret = 0;
- int order;
int max_pages;
- int num_pages;
if (!entry) {
DRM_ERROR("no scatter/gather memory!\n");
@@ -151,31 +112,14 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga
if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) {
DRM_DEBUG("PCI: no table in VRAM: using normal RAM\n");
- order = drm_order((gart_info->table_size +
- (PAGE_SIZE-1)) / PAGE_SIZE);
- num_pages = 1 << order;
- address = drm_ati_alloc_pcigart_table(order);
- if (!address) {
+ ret = drm_ati_alloc_pcigart_table(dev, gart_info);
+ if (ret) {
DRM_ERROR("cannot allocate PCI GART page!\n");
goto done;
}
- if (!dev->pdev) {
- DRM_ERROR("PCI device unknown!\n");
- goto done;
- }
-
- bus_address = pci_map_single(dev->pdev, address,
- num_pages * PAGE_SIZE,
- PCI_DMA_TODEVICE);
- if (bus_address == 0) {
- DRM_ERROR("unable to map PCIGART pages!\n");
- order = drm_order((gart_info->table_size +
- (PAGE_SIZE-1)) / PAGE_SIZE);
- drm_ati_free_pcigart_table(address, order);
- address = NULL;
- goto done;
- }
+ address = gart_info->table_handle->vaddr;
+ bus_address = gart_info->table_handle->busaddr;
} else {
address = gart_info->addr;
bus_address = gart_info->bus_addr;
@@ -224,12 +168,6 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga
}
}
- if (gart_info->gart_table_location == DRM_ATI_GART_MAIN)
- dma_sync_single_for_device(&dev->pdev->dev,
- bus_address,
- max_pages * sizeof(u32),
- PCI_DMA_TODEVICE);
-
ret = 1;
#if defined(__i386__) || defined(__x86_64__)
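
With the table now allocated through drm_pci_alloc(), the returned drm_dma_handle carries both the kernel virtual address (vaddr) and the bus address (busaddr), and callers state their addressing limit through the new table_mask field. A hedged sketch of the driver-side setup this API expects (field values are illustrative, not taken from this patch):

struct drm_ati_pcigart_info gart_info;

memset(&gart_info, 0, sizeof(gart_info));
gart_info.gart_table_location = DRM_ATI_GART_MAIN;
gart_info.table_size = 32768;              /* illustrative table size */
gart_info.table_mask = DMA_BIT_MASK(32);   /* table must sit below 4GB */

if (drm_ati_pcigart_init(dev, &gart_info) != 1)
	DRM_ERROR("PCI GART init failed\n");
/* on success, gart_info.table_handle->vaddr and ->busaddr are valid */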
diff --git a/linux-core/drmP.h b/linux-core/drmP.h
index 24f8c3d8..f5e794e5 100644
--- a/linux-core/drmP.h
+++ b/linux-core/drmP.h
@@ -52,6 +52,7 @@
#include <linux/version.h>
#include <linux/sched.h>
#include <linux/smp_lock.h> /* For (un)lock_kernel */
+#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
@@ -610,6 +611,9 @@ struct drm_ati_pcigart_info {
int gart_reg_if;
void *addr;
dma_addr_t bus_addr;
+ dma_addr_t table_mask;
+ dma_addr_t member_mask;
+ struct drm_dma_handle *table_handle;
drm_local_map_t mapping;
int table_size;
};
diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c
index 7e82080b..301f946f 100644
--- a/linux-core/drm_bo.c
+++ b/linux-core/drm_bo.c
@@ -275,30 +275,81 @@ out_err:
/*
* Call bo->mutex locked.
- * Wait until the buffer is idle.
+ * Returns -EBUSY if the buffer is currently rendered to or from. 0 otherwise.
*/
-int drm_bo_wait(struct drm_buffer_object *bo, int lazy, int ignore_signals,
- int no_wait)
+static int drm_bo_busy(struct drm_buffer_object *bo, int check_unfenced)
{
- int ret;
+ struct drm_fence_object *fence = bo->fence;
- DRM_ASSERT_LOCKED(&bo->mutex);
+ if (check_unfenced && (bo->priv_flags & _DRM_BO_FLAG_UNFENCED))
+ return -EBUSY;
- if (bo->fence) {
- if (drm_fence_object_signaled(bo->fence, bo->fence_type)) {
+ if (fence) {
+ if (drm_fence_object_signaled(fence, bo->fence_type)) {
+ drm_fence_usage_deref_unlocked(&bo->fence);
+ return 0;
+ }
+ drm_fence_object_flush(fence, DRM_FENCE_TYPE_EXE);
+ if (drm_fence_object_signaled(fence, bo->fence_type)) {
drm_fence_usage_deref_unlocked(&bo->fence);
return 0;
}
+ return -EBUSY;
+ }
+ return 0;
+}
+
+static int drm_bo_check_unfenced(struct drm_buffer_object *bo)
+{
+ int ret;
+
+ mutex_lock(&bo->mutex);
+ ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
+ mutex_unlock(&bo->mutex);
+ return ret;
+}
+
+
+/*
+ * Call bo->mutex locked.
+ * Wait until the buffer is idle.
+ */
+
+int drm_bo_wait(struct drm_buffer_object *bo, int lazy, int interruptible,
+ int no_wait, int check_unfenced)
+{
+ int ret;
+
+ DRM_ASSERT_LOCKED(&bo->mutex);
+ while(unlikely(drm_bo_busy(bo, check_unfenced))) {
if (no_wait)
return -EBUSY;
- ret = drm_fence_object_wait(bo->fence, lazy, ignore_signals,
- bo->fence_type);
- if (ret)
- return ret;
+ if (check_unfenced && (bo->priv_flags & _DRM_BO_FLAG_UNFENCED)) {
+ mutex_unlock(&bo->mutex);
+ wait_event(bo->event_queue, !drm_bo_check_unfenced(bo));
+ mutex_lock(&bo->mutex);
+ bo->priv_flags |= _DRM_BO_FLAG_UNLOCKED;
+ }
+
+ if (bo->fence) {
+ struct drm_fence_object *fence;
+ uint32_t fence_type = bo->fence_type;
+
+ drm_fence_reference_unlocked(&fence, bo->fence);
+ mutex_unlock(&bo->mutex);
+
+ ret = drm_fence_object_wait(fence, lazy, !interruptible,
+ fence_type);
+
+ drm_fence_usage_deref_unlocked(&fence);
+ mutex_lock(&bo->mutex);
+ bo->priv_flags |= _DRM_BO_FLAG_UNLOCKED;
+ if (ret)
+ return ret;
+ }
- drm_fence_usage_deref_unlocked(&bo->fence);
}
return 0;
}
@@ -314,7 +365,7 @@ static int drm_bo_expire_fence(struct drm_buffer_object *bo, int allow_errors)
unsigned long _end = jiffies + 3 * DRM_HZ;
int ret;
do {
- ret = drm_bo_wait(bo, 0, 1, 0);
+ ret = drm_bo_wait(bo, 0, 0, 0, 0);
if (ret && allow_errors)
return ret;
@@ -689,24 +740,32 @@ static int drm_bo_evict(struct drm_buffer_object *bo, unsigned mem_type,
* buffer mutex.
*/
- if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED)
- goto out;
- if (bo->mem.mem_type != mem_type)
- goto out;
-
- ret = drm_bo_wait(bo, 0, 0, no_wait);
+ do {
+ bo->priv_flags &= ~_DRM_BO_FLAG_UNLOCKED;
+
+ if (unlikely(bo->mem.flags &
+ (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT)))
+ goto out_unlock;
+ if (unlikely(bo->priv_flags & _DRM_BO_FLAG_UNFENCED))
+ goto out_unlock;
+ if (unlikely(bo->mem.mem_type != mem_type))
+ goto out_unlock;
+ ret = drm_bo_wait(bo, 0, 1, no_wait, 0);
+ if (ret)
+ goto out_unlock;
- if (ret && ret != -EAGAIN) {
- DRM_ERROR("Failed to expire fence before "
- "buffer eviction.\n");
- goto out;
- }
+ } while(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED);
evict_mem = bo->mem;
evict_mem.mm_node = NULL;
evict_mem = bo->mem;
evict_mem.proposed_flags = dev->driver->bo_driver->evict_flags(bo);
+
+ mutex_lock(&dev->struct_mutex);
+ list_del_init(&bo->lru);
+ mutex_unlock(&dev->struct_mutex);
+
ret = drm_bo_mem_space(bo, &evict_mem, no_wait);
if (ret) {
@@ -724,20 +783,21 @@ static int drm_bo_evict(struct drm_buffer_object *bo, unsigned mem_type,
goto out;
}
+ DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_EVICTED,
+ _DRM_BO_FLAG_EVICTED);
+
+out:
mutex_lock(&dev->struct_mutex);
if (evict_mem.mm_node) {
if (evict_mem.mm_node != bo->pinned_node)
drm_mm_put_block(evict_mem.mm_node);
evict_mem.mm_node = NULL;
}
- list_del(&bo->lru);
drm_bo_add_to_lru(bo);
+ BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED);
+out_unlock:
mutex_unlock(&dev->struct_mutex);
- DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_EVICTED,
- _DRM_BO_FLAG_EVICTED);
-
-out:
return ret;
}
@@ -772,8 +832,6 @@ static int drm_bo_mem_force_space(struct drm_device *dev,
atomic_inc(&entry->usage);
mutex_unlock(&dev->struct_mutex);
mutex_lock(&entry->mutex);
- BUG_ON(entry->mem.flags & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT));
-
ret = drm_bo_evict(entry, mem_type, no_wait);
mutex_unlock(&entry->mutex);
drm_bo_usage_deref_unlocked(&entry);
@@ -1039,46 +1097,23 @@ EXPORT_SYMBOL(drm_lookup_buffer_object);
/*
* Call bo->mutex locked.
- * Returns 1 if the buffer is currently rendered to or from. 0 otherwise.
+ * Returns -EBUSY if the buffer is currently rendered to or from. 0 otherwise.
* Doesn't do any fence flushing as opposed to the drm_bo_busy function.
*/
-static int drm_bo_quick_busy(struct drm_buffer_object *bo)
+static int drm_bo_quick_busy(struct drm_buffer_object *bo, int check_unfenced)
{
struct drm_fence_object *fence = bo->fence;
- BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
- if (fence) {
- if (drm_fence_object_signaled(fence, bo->fence_type)) {
- drm_fence_usage_deref_unlocked(&bo->fence);
- return 0;
- }
- return 1;
- }
- return 0;
-}
-
-/*
- * Call bo->mutex locked.
- * Returns 1 if the buffer is currently rendered to or from. 0 otherwise.
- */
-
-static int drm_bo_busy(struct drm_buffer_object *bo)
-{
- struct drm_fence_object *fence = bo->fence;
+ if (check_unfenced && (bo->priv_flags & _DRM_BO_FLAG_UNFENCED))
+ return -EBUSY;
- BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
if (fence) {
if (drm_fence_object_signaled(fence, bo->fence_type)) {
drm_fence_usage_deref_unlocked(&bo->fence);
return 0;
}
- drm_fence_object_flush(fence, DRM_FENCE_TYPE_EXE);
- if (drm_fence_object_signaled(fence, bo->fence_type)) {
- drm_fence_usage_deref_unlocked(&bo->fence);
- return 0;
- }
- return 1;
+ return -EBUSY;
}
return 0;
}
@@ -1102,68 +1137,33 @@ static int drm_bo_wait_unmapped(struct drm_buffer_object *bo, int no_wait)
{
int ret = 0;
- if ((atomic_read(&bo->mapped) >= 0) && no_wait)
- return -EBUSY;
-
- DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
- atomic_read(&bo->mapped) == -1);
+ if (likely(atomic_read(&bo->mapped) == 0))
+ return 0;
- if (ret == -EINTR)
- ret = -EAGAIN;
+ if (unlikely(no_wait))
+ return -EBUSY;
- return ret;
-}
+ do {
+ mutex_unlock(&bo->mutex);
+ ret = wait_event_interruptible(bo->event_queue,
+ atomic_read(&bo->mapped) == 0);
+ mutex_lock(&bo->mutex);
+ bo->priv_flags |= _DRM_BO_FLAG_UNLOCKED;
-static int drm_bo_check_unfenced(struct drm_buffer_object *bo)
-{
- int ret;
+ if (ret == -ERESTARTSYS)
+ ret = -EAGAIN;
+ } while((ret == 0) && atomic_read(&bo->mapped) > 0);
- mutex_lock(&bo->mutex);
- ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
- mutex_unlock(&bo->mutex);
return ret;
}
/*
- * Wait until a buffer, scheduled to be fenced moves off the unfenced list.
- * Until then, we cannot really do anything with it except delete it.
- */
-
-static int drm_bo_wait_unfenced(struct drm_buffer_object *bo, int no_wait,
- int eagain_if_wait)
-{
- int ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
-
- if (ret && no_wait)
- return -EBUSY;
- else if (!ret)
- return 0;
-
- ret = 0;
- mutex_unlock(&bo->mutex);
- DRM_WAIT_ON (ret, bo->event_queue, 3 * DRM_HZ,
- !drm_bo_check_unfenced(bo));
- mutex_lock(&bo->mutex);
- if (ret == -EINTR)
- return -EAGAIN;
- ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
- if (ret) {
- DRM_ERROR("Timeout waiting for buffer to become fenced\n");
- return -EBUSY;
- }
- if (eagain_if_wait)
- return -EAGAIN;
-
- return 0;
-}
-
-/*
* Fill in the ioctl reply argument with buffer info.
* Bo locked.
*/
-static void drm_bo_fill_rep_arg(struct drm_buffer_object *bo,
- struct drm_bo_info_rep *rep)
+void drm_bo_fill_rep_arg(struct drm_buffer_object *bo,
+ struct drm_bo_info_rep *rep)
{
if (!rep)
return;
@@ -1189,11 +1189,12 @@ static void drm_bo_fill_rep_arg(struct drm_buffer_object *bo,
rep->rep_flags = 0;
rep->page_alignment = bo->mem.page_alignment;
- if ((bo->priv_flags & _DRM_BO_FLAG_UNFENCED) || drm_bo_quick_busy(bo)) {
+ if ((bo->priv_flags & _DRM_BO_FLAG_UNFENCED) || drm_bo_quick_busy(bo, 1)) {
DRM_FLAG_MASKED(rep->rep_flags, DRM_BO_REP_BUSY,
DRM_BO_REP_BUSY);
}
}
+EXPORT_SYMBOL(drm_bo_fill_rep_arg);
/*
* Wait for buffer idle and register that we've mapped the buffer.
@@ -1219,61 +1220,33 @@ static int drm_buffer_object_map(struct drm_file *file_priv, uint32_t handle,
return -EINVAL;
mutex_lock(&bo->mutex);
- ret = drm_bo_wait_unfenced(bo, no_wait, 0);
- if (ret)
- goto out;
-
- /*
- * If this returns true, we are currently unmapped.
- * We need to do this test, because unmapping can
- * be done without the bo->mutex held.
- */
-
- while (1) {
- if (atomic_inc_and_test(&bo->mapped)) {
- if (no_wait && drm_bo_busy(bo)) {
- atomic_dec(&bo->mapped);
- ret = -EBUSY;
- goto out;
- }
- ret = drm_bo_wait(bo, 0, 0, no_wait);
- if (ret) {
- atomic_dec(&bo->mapped);
- goto out;
- }
-
- if (bo->mem.flags & DRM_BO_FLAG_CACHED_MAPPED)
- drm_bo_evict_cached(bo);
-
- break;
- } else if (bo->mem.flags & DRM_BO_FLAG_CACHED_MAPPED) {
+ do {
+ bo->priv_flags &= ~_DRM_BO_FLAG_UNLOCKED;
- /*
- * We are already mapped with different flags.
- * need to wait for unmap.
- */
+ ret = drm_bo_wait(bo, 0, 1, no_wait, 1);
+ if (unlikely(ret))
+ goto out;
- ret = drm_bo_wait_unmapped(bo, no_wait);
- if (ret)
- goto out;
+ if (bo->mem.flags & DRM_BO_FLAG_CACHED_MAPPED)
+ drm_bo_evict_cached(bo);
- continue;
- }
- break;
- }
+ } while (unlikely(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED));
+ atomic_inc(&bo->mapped);
mutex_lock(&dev->struct_mutex);
ret = drm_add_ref_object(file_priv, &bo->base, _DRM_REF_TYPE1);
mutex_unlock(&dev->struct_mutex);
if (ret) {
- if (atomic_add_negative(-1, &bo->mapped))
+ if (atomic_dec_and_test(&bo->mapped))
wake_up_all(&bo->event_queue);
} else
drm_bo_fill_rep_arg(bo, rep);
-out:
+
+ out:
mutex_unlock(&bo->mutex);
drm_bo_usage_deref_unlocked(&bo);
+
return ret;
}
@@ -1323,7 +1296,7 @@ static void drm_buffer_user_object_unmap(struct drm_file *file_priv,
BUG_ON(action != _DRM_REF_TYPE1);
- if (atomic_add_negative(-1, &bo->mapped))
+ if (atomic_dec_and_test(&bo->mapped))
wake_up_all(&bo->event_queue);
}
@@ -1339,19 +1312,8 @@ int drm_bo_move_buffer(struct drm_buffer_object *bo, uint64_t new_mem_flags,
struct drm_buffer_manager *bm = &dev->bm;
int ret = 0;
struct drm_bo_mem_reg mem;
- /*
- * Flush outstanding fences.
- */
-
- drm_bo_busy(bo);
- /*
- * Wait for outstanding fences.
- */
-
- ret = drm_bo_wait(bo, 0, 0, no_wait);
- if (ret)
- return ret;
+ BUG_ON(bo->fence != NULL);
mem.num_pages = bo->num_pages;
mem.size = mem.num_pages << PAGE_SHIFT;
@@ -1437,64 +1399,14 @@ static int drm_bo_mem_compat(struct drm_bo_mem_reg *mem)
static int drm_buffer_object_validate(struct drm_buffer_object *bo,
uint32_t fence_class,
- int move_unfenced, int no_wait)
+ int move_unfenced, int no_wait,
+ int move_buffer)
{
struct drm_device *dev = bo->dev;
struct drm_buffer_manager *bm = &dev->bm;
- struct drm_bo_driver *driver = dev->driver->bo_driver;
- uint32_t ftype;
int ret;
- DRM_DEBUG("Proposed flags 0x%016llx, Old flags 0x%016llx\n",
- (unsigned long long) bo->mem.proposed_flags,
- (unsigned long long) bo->mem.flags);
-
- ret = driver->fence_type(bo, &fence_class, &ftype);
-
- if (ret) {
- DRM_ERROR("Driver did not support given buffer permissions\n");
- return ret;
- }
-
- /*
- * We're switching command submission mechanism,
- * or cannot simply rely on the hardware serializing for us.
- *
- * Insert a driver-dependant barrier or wait for buffer idle.
- */
-
- if ((fence_class != bo->fence_class) ||
- ((ftype ^ bo->fence_type) & bo->fence_type)) {
-
- ret = -EINVAL;
- if (driver->command_stream_barrier) {
- ret = driver->command_stream_barrier(bo,
- fence_class,
- ftype,
- no_wait);
- }
- if (ret)
- ret = drm_bo_wait(bo, 0, 0, no_wait);
-
- if (ret)
- return ret;
-
- }
-
- bo->new_fence_class = fence_class;
- bo->new_fence_type = ftype;
-
- ret = drm_bo_wait_unmapped(bo, no_wait);
- if (ret) {
- DRM_ERROR("Timed out waiting for buffer unmap.\n");
- return ret;
- }
-
- /*
- * Check whether we need to move buffer.
- */
-
- if (!drm_bo_mem_compat(&bo->mem)) {
+ if (move_buffer) {
ret = drm_bo_move_buffer(bo, bo->mem.proposed_flags, no_wait,
move_unfenced);
if (ret) {
@@ -1578,6 +1490,83 @@ static int drm_buffer_object_validate(struct drm_buffer_object *bo,
return 0;
}
+/*
+ * This function is called with bo->mutex locked, but may release it
+ * temporarily to wait for events.
+ */
+
+static int drm_bo_prepare_for_validate(struct drm_buffer_object *bo,
+ uint64_t flags,
+ uint64_t mask,
+ uint32_t hint,
+ uint32_t fence_class,
+ int no_wait,
+ int *move_buffer)
+{
+ struct drm_device *dev = bo->dev;
+ struct drm_bo_driver *driver = dev->driver->bo_driver;
+ uint32_t ftype;
+
+ int ret;
+
+ DRM_DEBUG("Proposed flags 0x%016llx, Old flags 0x%016llx\n",
+ (unsigned long long) bo->mem.proposed_flags,
+ (unsigned long long) bo->mem.flags);
+
+ ret = drm_bo_modify_proposed_flags (bo, flags, mask);
+ if (ret)
+ return ret;
+
+ ret = drm_bo_wait_unmapped(bo, no_wait);
+ if (ret)
+ return ret;
+
+ ret = driver->fence_type(bo, &fence_class, &ftype);
+
+ if (ret) {
+ DRM_ERROR("Driver did not support given buffer permissions.\n");
+ return ret;
+ }
+
+ /*
+ * We're switching command submission mechanism,
+ * or cannot simply rely on the hardware serializing for us.
+ * Insert a driver-dependent barrier or wait for buffer idle.
+ */
+
+ if ((fence_class != bo->fence_class) ||
+ ((ftype ^ bo->fence_type) & bo->fence_type)) {
+
+ ret = -EINVAL;
+ if (driver->command_stream_barrier) {
+ ret = driver->command_stream_barrier(bo,
+ fence_class,
+ ftype,
+ no_wait);
+ }
+ if (ret && ret != -EAGAIN)
+ ret = drm_bo_wait(bo, 0, 1, no_wait, 1);
+
+ if (ret)
+ return ret;
+ }
+
+ bo->new_fence_class = fence_class;
+ bo->new_fence_type = ftype;
+
+ /*
+ * Check whether we need to move buffer.
+ */
+
+ *move_buffer = 0;
+ if (!drm_bo_mem_compat(&bo->mem)) {
+ *move_buffer = 1;
+ ret = drm_bo_wait(bo, 0, 1, no_wait, 1);
+ }
+
+ return ret;
+}
+
/**
* drm_bo_do_validate:
*
@@ -1610,26 +1599,34 @@ int drm_bo_do_validate(struct drm_buffer_object *bo,
{
int ret;
int no_wait = (hint & DRM_BO_HINT_DONT_BLOCK) != 0;
+ int move_buffer;
mutex_lock(&bo->mutex);
- ret = drm_bo_wait_unfenced(bo, no_wait, 0);
- if (ret)
- goto out;
+ do {
+ bo->priv_flags &= ~_DRM_BO_FLAG_UNLOCKED;
- ret = drm_bo_modify_proposed_flags (bo, flags, mask);
- if (ret)
- goto out;
+ ret = drm_bo_prepare_for_validate(bo, flags, mask, hint,
+ fence_class, no_wait,
+ &move_buffer);
+ if (ret)
+ goto out;
+
+ } while(unlikely(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED));
ret = drm_buffer_object_validate(bo,
fence_class,
!(hint & DRM_BO_HINT_DONT_FENCE),
- no_wait);
+ no_wait,
+ move_buffer);
+
+ BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED);
out:
if (rep)
drm_bo_fill_rep_arg(bo, rep);
mutex_unlock(&bo->mutex);
+
return ret;
}
EXPORT_SYMBOL(drm_bo_do_validate);
@@ -1655,22 +1652,19 @@ EXPORT_SYMBOL(drm_bo_do_validate);
* fencing mechanism. At this point, there isn't any use of this
* from the user mode code.
*
- * @use_old_fence_class: don't change fence class, pull it from the buffer object
- *
* @rep: To be stuffed with the reply from validation
- *
+ *
* @bp_rep: To be stuffed with the buffer object pointer
*
- * Perform drm_bo_do_validate on a buffer referenced by a user-space handle.
- * Some permissions checking is done on the parameters, otherwise this
- * is a thin wrapper.
+ * Perform drm_bo_do_validate on a buffer referenced by a user-space handle instead
+ * of a pointer to a buffer object. Optionally return a pointer to the buffer object.
+ * This is a convenience wrapper only.
*/
int drm_bo_handle_validate(struct drm_file *file_priv, uint32_t handle,
uint64_t flags, uint64_t mask,
uint32_t hint,
uint32_t fence_class,
- int use_old_fence_class,
struct drm_bo_info_rep *rep,
struct drm_buffer_object **bo_rep)
{
@@ -1685,17 +1679,9 @@ int drm_bo_handle_validate(struct drm_file *file_priv, uint32_t handle,
if (!bo)
return -EINVAL;
- if (use_old_fence_class)
- fence_class = bo->fence_class;
-
- /*
- * Only allow creator to change shared buffer mask.
- */
-
if (bo->base.owner != file_priv)
mask &= ~(DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE);
-
ret = drm_bo_do_validate(bo, flags, mask, hint, fence_class, rep);
if (!ret && bo_rep)
@@ -1707,6 +1693,7 @@ int drm_bo_handle_validate(struct drm_file *file_priv, uint32_t handle,
}
EXPORT_SYMBOL(drm_bo_handle_validate);
+
static int drm_bo_handle_info(struct drm_file *file_priv, uint32_t handle,
struct drm_bo_info_rep *rep)
{
@@ -1721,8 +1708,12 @@ static int drm_bo_handle_info(struct drm_file *file_priv, uint32_t handle,
return -EINVAL;
mutex_lock(&bo->mutex);
- if (!(bo->priv_flags & _DRM_BO_FLAG_UNFENCED))
- (void)drm_bo_busy(bo);
+
+ /*
+ * FIXME: Quick busy here?
+ */
+
+ drm_bo_busy(bo, 1);
drm_bo_fill_rep_arg(bo, rep);
mutex_unlock(&bo->mutex);
drm_bo_usage_deref_unlocked(&bo);
@@ -1746,15 +1737,11 @@ static int drm_bo_handle_wait(struct drm_file *file_priv, uint32_t handle,
return -EINVAL;
mutex_lock(&bo->mutex);
- ret = drm_bo_wait_unfenced(bo, no_wait, 0);
- if (ret)
- goto out;
- ret = drm_bo_wait(bo, hint & DRM_BO_HINT_WAIT_LAZY, 0, no_wait);
+ ret = drm_bo_wait(bo, hint & DRM_BO_HINT_WAIT_LAZY, 1, no_wait, 1);
if (ret)
goto out;
drm_bo_fill_rep_arg(bo, rep);
-
out:
mutex_unlock(&bo->mutex);
drm_bo_usage_deref_unlocked(&bo);
@@ -1791,7 +1778,7 @@ int drm_buffer_object_create(struct drm_device *dev,
mutex_lock(&bo->mutex);
atomic_set(&bo->usage, 1);
- atomic_set(&bo->mapped, -1);
+ atomic_set(&bo->mapped, 0);
DRM_INIT_WAITQUEUE(&bo->event_queue);
INIT_LIST_HEAD(&bo->lru);
INIT_LIST_HEAD(&bo->pinned_lru);
@@ -1833,17 +1820,18 @@ int drm_buffer_object_create(struct drm_device *dev,
goto out_err;
}
- ret = drm_buffer_object_validate(bo, 0, 0, hint & DRM_BO_HINT_DONT_BLOCK);
+ mutex_unlock(&bo->mutex);
+ ret = drm_bo_do_validate(bo, 0, 0, hint | DRM_BO_HINT_DONT_FENCE,
+ 0, NULL);
if (ret)
- goto out_err;
+ goto out_err_unlocked;
- mutex_unlock(&bo->mutex);
*buf_obj = bo;
return 0;
out_err:
mutex_unlock(&bo->mutex);
-
+out_err_unlocked:
drm_bo_usage_deref_unlocked(&bo);
return ret;
}
@@ -1929,6 +1917,7 @@ int drm_bo_setstatus_ioctl(struct drm_device *dev,
struct drm_bo_map_wait_idle_arg *arg = data;
struct drm_bo_info_req *req = &arg->d.req;
struct drm_bo_info_rep *rep = &arg->d.rep;
+ struct drm_buffer_object *bo;
int ret;
if (!dev->bm.initialized) {
@@ -1936,28 +1925,29 @@ int drm_bo_setstatus_ioctl(struct drm_device *dev,
return -EINVAL;
}
- ret = drm_bo_read_lock(&dev->bm.bm_lock);
+ ret = drm_bo_read_lock(&dev->bm.bm_lock, 1);
if (ret)
return ret;
- /*
- * validate the buffer. note that 'fence_class' will be unused
- * as we pass use_old_fence_class=1 here. Note also that
- * the libdrm API doesn't pass fence_class to the kernel,
- * so it's a good thing it isn't used here.
- */
- ret = drm_bo_handle_validate(file_priv, req->handle,
- req->flags,
- req->mask,
- req->hint | DRM_BO_HINT_DONT_FENCE,
- req->fence_class, 1,
- rep, NULL);
+ mutex_lock(&dev->struct_mutex);
+ bo = drm_lookup_buffer_object(file_priv, req->handle, 1);
+ mutex_unlock(&dev->struct_mutex);
+
+ if (!bo)
+ return -EINVAL;
+
+ if (bo->base.owner != file_priv)
+ req->mask &= ~(DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE);
+
+ ret = drm_bo_do_validate(bo, req->flags, req->mask,
+ req->hint | DRM_BO_HINT_DONT_FENCE,
+ bo->fence_class, rep);
+
+ drm_bo_usage_deref_unlocked(&bo);
(void) drm_bo_read_unlock(&dev->bm.bm_lock);
- if (ret)
- return ret;
- return 0;
+ return ret;
}
int drm_bo_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
@@ -2448,7 +2438,7 @@ int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_
return -EINVAL;
}
- ret = drm_bo_write_lock(&bm->bm_lock, file_priv);
+ ret = drm_bo_write_lock(&bm->bm_lock, 1, file_priv);
if (ret)
return ret;
@@ -2499,7 +2489,7 @@ int drm_mm_takedown_ioctl(struct drm_device *dev, void *data, struct drm_file *f
return -EINVAL;
}
- ret = drm_bo_write_lock(&bm->bm_lock, file_priv);
+ ret = drm_bo_write_lock(&bm->bm_lock, 0, file_priv);
if (ret)
return ret;
@@ -2547,7 +2537,7 @@ int drm_mm_lock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_
}
if (arg->lock_flags & DRM_BO_LOCK_UNLOCK_BM) {
- ret = drm_bo_write_lock(&dev->bm.bm_lock, file_priv);
+ ret = drm_bo_write_lock(&dev->bm.bm_lock, 1, file_priv);
if (ret)
return ret;
}
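
The do/while loops introduced above all follow one pattern: any helper that may drop bo->mutex to sleep sets _DRM_BO_FLAG_UNLOCKED on the buffer, and the caller repeats its checks until a full pass completes without the mutex having been released. Distilled into a sketch (the helper name is a placeholder, not a function in this patch):

mutex_lock(&bo->mutex);
do {
	/* clear the flag before a pass of checks that may sleep */
	bo->priv_flags &= ~_DRM_BO_FLAG_UNLOCKED;

	ret = helper_that_may_drop_mutex(bo);	/* hypothetical helper */
	if (ret)
		break;

	/* if the helper released bo->mutex, the flag was set: re-check */
} while (bo->priv_flags & _DRM_BO_FLAG_UNLOCKED);
mutex_unlock(&bo->mutex);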
diff --git a/linux-core/drm_bo_lock.c b/linux-core/drm_bo_lock.c
index 2795384e..08b1c6be 100644
--- a/linux-core/drm_bo_lock.c
+++ b/linux-core/drm_bo_lock.c
@@ -49,7 +49,7 @@
* unmappable regions to mappable. It's a bug to leave kernel space with the
* read lock held.
*
- * Both read- and write lock taking is interruptible for low signal-delivery
+ * Both read- and write lock taking may be interruptible for low signal-delivery
* latency. The locking functions will return -EAGAIN if interrupted by a
* signal.
*
@@ -68,17 +68,21 @@ void drm_bo_init_lock(struct drm_bo_lock *lock)
void drm_bo_read_unlock(struct drm_bo_lock *lock)
{
- if (unlikely(atomic_add_negative(-1, &lock->readers)))
- BUG();
- if (atomic_read(&lock->readers) == 0)
- wake_up_interruptible(&lock->queue);
+ if (atomic_dec_and_test(&lock->readers))
+ wake_up_all(&lock->queue);
}
EXPORT_SYMBOL(drm_bo_read_unlock);
-int drm_bo_read_lock(struct drm_bo_lock *lock)
+int drm_bo_read_lock(struct drm_bo_lock *lock, int interruptible)
{
while (unlikely(atomic_read(&lock->write_lock_pending) != 0)) {
int ret;
+
+ if (!interruptible) {
+ wait_event(lock->queue,
+ atomic_read(&lock->write_lock_pending) == 0);
+ continue;
+ }
ret = wait_event_interruptible
(lock->queue, atomic_read(&lock->write_lock_pending) == 0);
if (ret)
@@ -87,8 +91,13 @@ int drm_bo_read_lock(struct drm_bo_lock *lock)
while (unlikely(!atomic_add_unless(&lock->readers, 1, -1))) {
int ret;
+ if (!interruptible) {
+ wait_event(lock->queue,
+ atomic_read(&lock->readers) != -1);
+ continue;
+ }
ret = wait_event_interruptible
- (lock->queue, atomic_add_unless(&lock->readers, 1, -1));
+ (lock->queue, atomic_read(&lock->readers) != -1);
if (ret)
return -EAGAIN;
}
@@ -100,9 +109,7 @@ static int __drm_bo_write_unlock(struct drm_bo_lock *lock)
{
if (unlikely(atomic_cmpxchg(&lock->readers, -1, 0) != -1))
return -EINVAL;
- if (unlikely(atomic_cmpxchg(&lock->write_lock_pending, 1, 0) != 1))
- return -EINVAL;
- wake_up_interruptible(&lock->queue);
+ wake_up_all(&lock->queue);
return 0;
}
@@ -116,21 +123,26 @@ static void drm_bo_write_lock_remove(struct drm_file *file_priv,
BUG_ON(ret);
}
-int drm_bo_write_lock(struct drm_bo_lock *lock, struct drm_file *file_priv)
+int drm_bo_write_lock(struct drm_bo_lock *lock, int interruptible,
+ struct drm_file *file_priv)
{
int ret = 0;
struct drm_device *dev;
- if (unlikely(atomic_cmpxchg(&lock->write_lock_pending, 0, 1) != 0))
- return -EINVAL;
+ atomic_inc(&lock->write_lock_pending);
while (unlikely(atomic_cmpxchg(&lock->readers, 0, -1) != 0)) {
+ if (!interruptible) {
+ wait_event(lock->queue,
+ atomic_read(&lock->readers) == 0);
+ continue;
+ }
ret = wait_event_interruptible
- (lock->queue, atomic_cmpxchg(&lock->readers, 0, -1) == 0);
+ (lock->queue, atomic_read(&lock->readers) == 0);
if (ret) {
- atomic_set(&lock->write_lock_pending, 0);
- wake_up_interruptible(&lock->queue);
+ atomic_dec(&lock->write_lock_pending);
+ wake_up_all(&lock->queue);
return -EAGAIN;
}
}
@@ -141,6 +153,8 @@ int drm_bo_write_lock(struct drm_bo_lock *lock, struct drm_file *file_priv)
* while holding it.
*/
+ if (atomic_dec_and_test(&lock->write_lock_pending))
+ wake_up_all(&lock->queue);
dev = file_priv->minor->dev;
mutex_lock(&dev->struct_mutex);
ret = drm_add_user_object(file_priv, &lock->base, 0);
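
After this rewrite the lock reduces to a counting scheme: lock->readers holds the number of read holders, the value -1 means write-locked, and write_lock_pending is a counter, so multiple queued writers no longer trip the old cmpxchg sanity check. Interruptibility is now the caller's choice; a short usage sketch mirroring the call sites elsewhere in this patch:

/* fault handler: must not fail, take the lock uninterruptibly */
drm_bo_read_lock(&dev->bm.bm_lock, 0);
/* ... */
drm_bo_read_unlock(&dev->bm.bm_lock);

/* ioctl path: allow signals and propagate -EAGAIN to user space */
ret = drm_bo_read_lock(&dev->bm.bm_lock, 1);
if (ret)
	return ret;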
diff --git a/linux-core/drm_bo_move.c b/linux-core/drm_bo_move.c
index f10549ab..5c290af2 100644
--- a/linux-core/drm_bo_move.c
+++ b/linux-core/drm_bo_move.c
@@ -357,10 +357,11 @@ int drm_bo_move_accel_cleanup(struct drm_buffer_object *bo,
bo->mem.mm_node != NULL))
#endif
{
- ret = drm_bo_wait(bo, 0, 1, 0);
- if (ret)
- return ret;
-
+ if (bo->fence) {
+ (void) drm_fence_object_wait(bo->fence, 0, 1,
+ bo->fence_type);
+ drm_fence_usage_deref_unlocked(&bo->fence);
+ }
drm_bo_free_old_node(bo);
if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm != NULL)) {
diff --git a/linux-core/drm_compat.c b/linux-core/drm_compat.c
index 9b982662..8fa8ae02 100644
--- a/linux-core/drm_compat.c
+++ b/linux-core/drm_compat.c
@@ -213,7 +213,7 @@ static struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
unsigned long bus_size;
dev = bo->dev;
- while(drm_bo_read_lock(&dev->bm.bm_lock));
+ drm_bo_read_lock(&dev->bm.bm_lock, 0);
mutex_lock(&bo->mutex);
@@ -780,7 +780,7 @@ struct pci_dev * pci_get_bus_and_slot(unsigned int bus, unsigned int devfn)
EXPORT_SYMBOL(pci_get_bus_and_slot);
#endif
-#if defined(DRM_KMAP_ATOMIC_PROT_PFN) && defined(CONFIG_HIMEM)
+#if defined(DRM_KMAP_ATOMIC_PROT_PFN)
#define drm_kmap_get_fixmap_pte(vaddr) \
pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), vaddr), (vaddr)), (vaddr))
@@ -807,4 +807,3 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type,
EXPORT_SYMBOL(kmap_atomic_prot_pfn);
#endif
-
diff --git a/linux-core/drm_compat.h b/linux-core/drm_compat.h
index 046c7122..32e62ddb 100644
--- a/linux-core/drm_compat.h
+++ b/linux-core/drm_compat.h
@@ -352,6 +352,7 @@ static inline int kobject_uevent_env(struct kobject *kobj,
#define PM_EVENT_PRETHAW 3
#endif
+
#if (defined(CONFIG_X86) && defined(CONFIG_X86_32) && defined(CONFIG_HIMEM))
#define DRM_KMAP_ATOMIC_PROT_PFN
extern void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type,
@@ -362,4 +363,8 @@ extern void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type,
#define flush_agp_mappings() do {} while(0)
#endif
+#ifndef DMA_BIT_MASK
+#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : (1ULL<<(n)) - 1)
+#endif
+
#endif
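
The fallback matches the kernel's own definition: an n-bit all-ones mask, with the 64-bit case special-cased because 1ULL << 64 is undefined. For example:

DMA_BIT_MASK(32)	/* 0x00000000ffffffffULL, as used for a 32-bit GART table mask */
DMA_BIT_MASK(64)	/* ~0ULL, via the special case */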
diff --git a/linux-core/drm_fence.c b/linux-core/drm_fence.c
index 0ca0c408..7c78e09f 100644
--- a/linux-core/drm_fence.c
+++ b/linux-core/drm_fence.c
@@ -445,6 +445,7 @@ int drm_fence_object_emit(struct drm_fence_object *fence, uint32_t fence_flags,
fence->type = type;
fence->waiting_types = 0;
fence->signaled_types = 0;
+ fence->error = 0;
fence->sequence = sequence;
fence->native_types = native_types;
if (list_empty(&fc->ring))
@@ -482,6 +483,7 @@ static int drm_fence_object_init(struct drm_device *dev, uint32_t fence_class,
fence->signaled_types = 0;
fence->waiting_types = 0;
fence->sequence = 0;
+ fence->error = 0;
fence->dev = dev;
write_unlock_irqrestore(&fm->lock, flags);
if (fence_flags & DRM_FENCE_FLAG_EMIT) {
diff --git a/linux-core/drm_fops.c b/linux-core/drm_fops.c
index 043552fd..a9f39ac5 100644
--- a/linux-core/drm_fops.c
+++ b/linux-core/drm_fops.c
@@ -418,9 +418,9 @@ int drm_release(struct inode *inode, struct file *filp)
*/
do{
- spin_lock(&file_priv->master->lock.spinlock);
+ spin_lock_bh(&file_priv->master->lock.spinlock);
locked = file_priv->master->lock.idle_has_lock;
- spin_unlock(&file_priv->master->lock.spinlock);
+ spin_unlock_bh(&file_priv->master->lock.spinlock);
if (locked)
break;
schedule();
diff --git a/linux-core/drm_irq.c b/linux-core/drm_irq.c
index 230ef3cc..e1c93054 100644
--- a/linux-core/drm_irq.c
+++ b/linux-core/drm_irq.c
@@ -528,7 +528,7 @@ int drm_modeset_ctl(struct drm_device *dev, void *data,
int crtc, ret = 0;
u32 new;
- crtc = modeset->arg;
+ crtc = modeset->crtc;
if (crtc >= dev->num_crtcs) {
ret = -EINVAL;
goto out;
diff --git a/linux-core/drm_lock.c b/linux-core/drm_lock.c
index 08e063d8..6bbf1444 100644
--- a/linux-core/drm_lock.c
+++ b/linux-core/drm_lock.c
@@ -72,9 +72,10 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
return -EINVAL;
add_wait_queue(&master->lock.lock_queue, &entry);
- spin_lock(&master->lock.spinlock);
+ spin_lock_bh(&master->lock.spinlock);
master->lock.user_waiters++;
- spin_unlock(&master->lock.spinlock);
+ spin_unlock_bh(&master->lock.spinlock);
+
for (;;) {
__set_current_state(TASK_INTERRUPTIBLE);
if (!master->lock.hw_lock) {
@@ -96,9 +97,9 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
break;
}
}
- spin_lock(&master->lock.spinlock);
+ spin_lock_bh(&master->lock.spinlock);
master->lock.user_waiters--;
- spin_unlock(&master->lock.spinlock);
+ spin_unlock_bh(&master->lock.spinlock);
__set_current_state(TASK_RUNNING);
remove_wait_queue(&master->lock.lock_queue, &entry);
@@ -201,7 +202,7 @@ int drm_lock_take(struct drm_lock_data *lock_data,
unsigned int old, new, prev;
volatile unsigned int *lock = &lock_data->hw_lock->lock;
- spin_lock(&lock_data->spinlock);
+ spin_lock_bh(&lock_data->spinlock);
do {
old = *lock;
if (old & _DRM_LOCK_HELD)
@@ -213,7 +214,7 @@ int drm_lock_take(struct drm_lock_data *lock_data,
}
prev = cmpxchg(lock, old, new);
} while (prev != old);
- spin_unlock(&lock_data->spinlock);
+ spin_unlock_bh(&lock_data->spinlock);
if (_DRM_LOCKING_CONTEXT(old) == context) {
if (old & _DRM_LOCK_HELD) {
@@ -276,14 +277,14 @@ int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context)
unsigned int old, new, prev;
volatile unsigned int *lock = &lock_data->hw_lock->lock;
- spin_lock(&lock_data->spinlock);
+ spin_lock_bh(&lock_data->spinlock);
if (lock_data->kernel_waiters != 0) {
drm_lock_transfer(lock_data, 0);
lock_data->idle_has_lock = 1;
- spin_unlock(&lock_data->spinlock);
+ spin_unlock_bh(&lock_data->spinlock);
return 1;
}
- spin_unlock(&lock_data->spinlock);
+ spin_unlock_bh(&lock_data->spinlock);
do {
old = *lock;
@@ -348,18 +349,18 @@ void drm_idlelock_take(struct drm_lock_data *lock_data)
{
int ret = 0;
- spin_lock(&lock_data->spinlock);
+ spin_lock_bh(&lock_data->spinlock);
lock_data->kernel_waiters++;
if (!lock_data->idle_has_lock) {
- spin_unlock(&lock_data->spinlock);
+ spin_unlock_bh(&lock_data->spinlock);
ret = drm_lock_take(lock_data, DRM_KERNEL_CONTEXT);
- spin_lock(&lock_data->spinlock);
+ spin_lock_bh(&lock_data->spinlock);
if (ret == 1)
lock_data->idle_has_lock = 1;
}
- spin_unlock(&lock_data->spinlock);
+ spin_unlock_bh(&lock_data->spinlock);
}
EXPORT_SYMBOL(drm_idlelock_take);
@@ -368,7 +369,7 @@ void drm_idlelock_release(struct drm_lock_data *lock_data)
unsigned int old, prev;
volatile unsigned int *lock = &lock_data->hw_lock->lock;
- spin_lock(&lock_data->spinlock);
+ spin_lock_bh(&lock_data->spinlock);
if (--lock_data->kernel_waiters == 0) {
if (lock_data->idle_has_lock) {
do {
@@ -379,7 +380,7 @@ void drm_idlelock_release(struct drm_lock_data *lock_data)
lock_data->idle_has_lock = 0;
}
}
- spin_unlock(&lock_data->spinlock);
+ spin_unlock_bh(&lock_data->spinlock);
}
EXPORT_SYMBOL(drm_idlelock_release);
diff --git a/linux-core/drm_memory.c b/linux-core/drm_memory.c
index 12e01414..75f5b521 100644
--- a/linux-core/drm_memory.c
+++ b/linux-core/drm_memory.c
@@ -61,35 +61,39 @@ static inline size_t drm_size_align(size_t size)
int drm_alloc_memctl(size_t size)
{
- int ret = 0;
+ int ret = 0;
unsigned long a_size = drm_size_align(size);
- unsigned long new_used = drm_memctl.cur_used + a_size;
+ unsigned long new_used;
spin_lock(&drm_memctl.lock);
- if (unlikely(new_used > drm_memctl.high_threshold)) {
- if (!DRM_SUSER(DRM_CURPROC) ||
- (new_used + drm_memctl.emer_used > drm_memctl.emer_threshold) ||
- (a_size > 2*PAGE_SIZE)) {
- ret = -ENOMEM;
- goto out;
- }
-
- /*
- * Allow small root-only allocations, even if the
- * high threshold is exceeded.
- */
-
- new_used -= drm_memctl.high_threshold;
- drm_memctl.emer_used += new_used;
- a_size -= new_used;
+ new_used = drm_memctl.cur_used + a_size;
+ if (likely(new_used < drm_memctl.high_threshold)) {
+ drm_memctl.cur_used = new_used;
+ goto out;
}
- drm_memctl.cur_used += a_size;
+
+ /*
+ * Allow small allocations from root-only processes to
+ * succeed until the emergency threshold is reached.
+ */
+
+ new_used += drm_memctl.emer_used;
+ if (unlikely(!DRM_SUSER(DRM_CURPROC) ||
+ (a_size > 16*PAGE_SIZE) ||
+ (new_used > drm_memctl.emer_threshold))) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ drm_memctl.cur_used = drm_memctl.high_threshold;
+ drm_memctl.emer_used = new_used - drm_memctl.high_threshold;
out:
spin_unlock(&drm_memctl.lock);
return ret;
}
EXPORT_SYMBOL(drm_alloc_memctl);
+
void drm_free_memctl(size_t size)
{
unsigned long a_size = drm_size_align(size);
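
The reworked accounting caps cur_used at high_threshold and books any overshoot from privileged callers to emer_used. A worked example under assumed numbers (high_threshold = 1 MiB, cur_used = 1 MiB - 4 KiB, emer_used = 0) for a root-only 8 KiB request:

	new_used = cur_used + 8 KiB = 1 MiB + 4 KiB	(over high_threshold)
	request is <= 16 pages and new_used <= emer_threshold, so it succeeds:
	cur_used  = high_threshold = 1 MiB
	emer_used = new_used - high_threshold = 4 KiB

A non-root caller, or one asking for more than 16 pages, gets -ENOMEM instead.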
diff --git a/linux-core/drm_objects.h b/linux-core/drm_objects.h
index 8f81b665..7feacd33 100644
--- a/linux-core/drm_objects.h
+++ b/linux-core/drm_objects.h
@@ -310,6 +310,8 @@ struct drm_ttm_backend {
struct drm_ttm {
struct page *dummy_read_page;
struct page **pages;
+ long first_himem_page;
+ long last_lomem_page;
uint32_t page_flags;
unsigned long num_pages;
atomic_t vma_count;
@@ -317,6 +319,8 @@ struct drm_ttm {
int destroy;
uint32_t mapping_offset;
struct drm_ttm_backend *be;
+ unsigned long highest_lomem_entry;
+ unsigned long lowest_himem_entry;
enum {
ttm_bound,
ttm_evicted,
@@ -334,7 +338,7 @@ extern void drm_ttm_unbind(struct drm_ttm *ttm);
extern void drm_ttm_evict(struct drm_ttm *ttm);
extern void drm_ttm_fixup_caching(struct drm_ttm *ttm);
extern struct page *drm_ttm_get_page(struct drm_ttm *ttm, int index);
-extern void drm_ttm_cache_flush(void);
+extern void drm_ttm_cache_flush(struct page *pages[], unsigned long num_pages);
extern int drm_ttm_populate(struct drm_ttm *ttm);
extern int drm_ttm_set_user(struct drm_ttm *ttm,
struct task_struct *tsk,
@@ -512,6 +516,14 @@ struct drm_buffer_object {
#define _DRM_BO_FLAG_UNFENCED 0x00000001
#define _DRM_BO_FLAG_EVICTED 0x00000002
+/*
+ * This flag indicates that a function called with bo->mutex held has
+ * temporarily released the buffer object mutex (usually to wait for
+ * something), and thus any post-lock validation needs to be rerun.
+ */
+
+#define _DRM_BO_FLAG_UNLOCKED 0x00000004
+
struct drm_mem_type_manager {
int has_type;
int use_type;
@@ -677,8 +689,8 @@ extern int drm_buffer_object_create(struct drm_device *dev, unsigned long size,
uint32_t hint, uint32_t page_alignment,
unsigned long buffer_start,
struct drm_buffer_object **bo);
-extern int drm_bo_wait(struct drm_buffer_object *bo, int lazy, int ignore_signals,
- int no_wait);
+extern int drm_bo_wait(struct drm_buffer_object *bo, int lazy, int interruptible,
+ int no_wait, int check_unfenced);
extern int drm_bo_mem_space(struct drm_buffer_object *bo,
struct drm_bo_mem_reg *mem, int no_wait);
extern int drm_bo_move_buffer(struct drm_buffer_object *bo,
@@ -690,7 +702,7 @@ extern int drm_bo_init_mm(struct drm_device *dev, unsigned type,
int kern_init);
extern int drm_bo_handle_validate(struct drm_file *file_priv, uint32_t handle,
uint64_t flags, uint64_t mask, uint32_t hint,
- uint32_t fence_class, int use_old_fence_class,
+ uint32_t fence_class,
struct drm_bo_info_rep *rep,
struct drm_buffer_object **bo_rep);
extern struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file *file_priv,
@@ -745,6 +757,8 @@ extern int drm_bo_pfn_prot(struct drm_buffer_object *bo,
unsigned long dst_offset,
unsigned long *pfn,
pgprot_t *prot);
+extern void drm_bo_fill_rep_arg(struct drm_buffer_object *bo,
+ struct drm_bo_info_rep *rep);
/*
@@ -797,8 +811,10 @@ extern void drm_mem_reg_iounmap(struct drm_device *dev, struct drm_bo_mem_reg *
extern void drm_bo_init_lock(struct drm_bo_lock *lock);
extern void drm_bo_read_unlock(struct drm_bo_lock *lock);
-extern int drm_bo_read_lock(struct drm_bo_lock *lock);
+extern int drm_bo_read_lock(struct drm_bo_lock *lock,
+ int interruptible);
extern int drm_bo_write_lock(struct drm_bo_lock *lock,
+ int interruptible,
struct drm_file *file_priv);
extern int drm_bo_write_unlock(struct drm_bo_lock *lock,
diff --git a/linux-core/drm_ttm.c b/linux-core/drm_ttm.c
index e991254f..80a8ff5d 100644
--- a/linux-core/drm_ttm.c
+++ b/linux-core/drm_ttm.c
@@ -30,13 +30,48 @@
#include "drmP.h"
+#if defined( CONFIG_X86 ) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
+static void drm_clflush_page(struct page *page)
+{
+ uint8_t *page_virtual;
+ unsigned int i;
+
+ if (unlikely(page == NULL))
+ return;
+
+ page_virtual = kmap_atomic(page, KM_USER0);
+
+ for (i=0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
+ clflush(page_virtual + i);
+
+ kunmap_atomic(page_virtual, KM_USER0);
+}
+
+static void drm_ttm_cache_flush_clflush(struct page *pages[], unsigned long num_pages)
+{
+ unsigned long i;
+
+ mb();
+ for (i=0; i < num_pages; ++i)
+ drm_clflush_page(*pages++);
+ mb();
+}
+#endif
+
static void drm_ttm_ipi_handler(void *null)
{
flush_agp_cache();
}
-void drm_ttm_cache_flush(void)
+void drm_ttm_cache_flush(struct page *pages[], unsigned long num_pages)
{
+
+#if defined( CONFIG_X86 ) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
+ if (cpu_has_clflush) {
+ drm_ttm_cache_flush_clflush(pages, num_pages);
+ return;
+ }
+#endif
if (on_each_cpu(drm_ttm_ipi_handler, NULL, 1, 1) != 0)
DRM_ERROR("Timed out waiting for drm cache flush.\n");
}
@@ -114,7 +149,7 @@ static int drm_ttm_set_caching(struct drm_ttm *ttm, int noncached)
return 0;
if (noncached)
- drm_ttm_cache_flush();
+ drm_ttm_cache_flush(ttm->pages, ttm->num_pages);
for (i = 0; i < ttm->num_pages; ++i) {
cur_page = ttm->pages + i;
@@ -228,12 +263,16 @@ struct page *drm_ttm_get_page(struct drm_ttm *ttm, int index)
struct page *p;
struct drm_buffer_manager *bm = &ttm->dev->bm;
- p = ttm->pages[index];
- if (!p) {
+ while(NULL == (p = ttm->pages[index])) {
p = drm_ttm_alloc_page();
if (!p)
return NULL;
- ttm->pages[index] = p;
+
+ if (PageHighMem(p))
+ ttm->pages[--ttm->first_himem_page] = p;
+ else
+ ttm->pages[++ttm->last_lomem_page] = p;
+
++bm->cur_pages;
}
return p;
@@ -341,6 +380,8 @@ struct drm_ttm *drm_ttm_create(struct drm_device *dev, unsigned long size,
ttm->destroy = 0;
ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ ttm->first_himem_page = ttm->num_pages;
+ ttm->last_lomem_page = -1;
ttm->page_flags = page_flags;
diff --git a/linux-core/drm_vm.c b/linux-core/drm_vm.c
index ffda8284..b85b4c13 100644
--- a/linux-core/drm_vm.c
+++ b/linux-core/drm_vm.c
@@ -738,7 +738,7 @@ static unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma,
return NOPFN_SIGBUS;
dev = bo->dev;
- err = drm_bo_read_lock(&dev->bm.bm_lock);
+ err = drm_bo_read_lock(&dev->bm.bm_lock, 1);
if (err)
return NOPFN_REFAULT;
@@ -748,12 +748,15 @@ static unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma,
return NOPFN_REFAULT;
}
- err = drm_bo_wait(bo, 0, 0, 0);
+ err = drm_bo_wait(bo, 0, 1, 0, 1);
if (err) {
ret = (err != -EAGAIN) ? NOPFN_SIGBUS : NOPFN_REFAULT;
+ bo->priv_flags &= ~_DRM_BO_FLAG_UNLOCKED;
goto out_unlock;
}
+ bo->priv_flags &= ~_DRM_BO_FLAG_UNLOCKED;
+
/*
* If buffer happens to be in a non-mappable location,
* move it to a mappable.
@@ -806,6 +809,7 @@ static unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma,
goto out_unlock;
}
out_unlock:
+ BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED);
mutex_unlock(&bo->mutex);
drm_bo_read_unlock(&dev->bm.bm_lock);
return ret;
diff --git a/linux-core/i915_buffer.c b/linux-core/i915_buffer.c
index ba586888..4224b737 100644
--- a/linux-core/i915_buffer.c
+++ b/linux-core/i915_buffer.c
@@ -252,10 +252,10 @@ int i915_move(struct drm_buffer_object *bo,
if (old_mem->mem_type == DRM_BO_MEM_LOCAL) {
return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
} else if (new_mem->mem_type == DRM_BO_MEM_LOCAL) {
- if (0) /*i915_move_flip(bo, evict, no_wait, new_mem)*/
+ if (1) /*i915_move_flip(bo, evict, no_wait, new_mem)*/
return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
} else {
- if (0) /*i915_move_blit(bo, evict, no_wait, new_mem)*/
+ if (1) /*i915_move_blit(bo, evict, no_wait, new_mem)*/
return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
}
return 0;
diff --git a/linux-core/i915_drv.c b/linux-core/i915_drv.c
index 0e65c0cd..3fe101ff 100644
--- a/linux-core/i915_drv.c
+++ b/linux-core/i915_drv.c
@@ -285,6 +285,9 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
pci_save_state(dev->pdev);
pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);
+ /* Display arbitration control */
+ dev_priv->saveDSPARB = I915_READ(DSPARB);
+
/* Pipe & plane A info */
dev_priv->savePIPEACONF = I915_READ(PIPEACONF);
dev_priv->savePIPEASRC = I915_READ(PIPEASRC);
@@ -378,6 +381,7 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
dev_priv->saveVGACNTRL = I915_READ(VGACNTRL);
/* Clock gating state */
+ dev_priv->saveD_STATE = I915_READ(D_STATE);
dev_priv->saveDSPCLK_GATE_D = I915_READ(DSPCLK_GATE_D);
/* Cache mode state */
@@ -417,6 +421,8 @@ static int i915_resume(struct drm_device *dev)
pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB);
+ I915_WRITE(DSPARB, dev_priv->saveDSPARB);
+
/* Pipe & plane A info */
/* Prime the clock */
if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) {
@@ -536,6 +542,7 @@ static int i915_resume(struct drm_device *dev)
udelay(150);
/* Clock gating state */
+ I915_WRITE (D_STATE, dev_priv->saveD_STATE);
I915_WRITE (DSPCLK_GATE_D, dev_priv->saveDSPCLK_GATE_D);
/* Cache mode state */
diff --git a/linux-core/i915_execbuf.c b/linux-core/i915_execbuf.c
new file mode 100644
index 00000000..932882dd
--- /dev/null
+++ b/linux-core/i915_execbuf.c
@@ -0,0 +1,921 @@
+/*
+ * Copyright 2003-2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
+ * Dave Airlie
+ * Keith Packard
+ * ... ?
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+
+#if DRM_DEBUG_CODE
+#define DRM_DEBUG_RELOCATION (drm_debug != 0)
+#else
+#define DRM_DEBUG_RELOCATION 0
+#endif
+
+enum i915_buf_idle {
+ I915_RELOC_UNCHECKED,
+ I915_RELOC_IDLE,
+ I915_RELOC_BUSY
+};
+
+struct i915_relocatee_info {
+ struct drm_buffer_object *buf;
+ unsigned long offset;
+ uint32_t *data_page;
+ unsigned page_offset;
+ struct drm_bo_kmap_obj kmap;
+ int is_iomem;
+ int dst;
+ int idle;
+ int performed_ring_relocs;
+#ifdef DRM_KMAP_ATOMIC_PROT_PFN
+ unsigned long pfn;
+ pgprot_t pg_prot;
+#endif
+};
+
+struct drm_i915_validate_buffer {
+ struct drm_buffer_object *buffer;
+ int presumed_offset_correct;
+ void __user *data;
+ int ret;
+ enum i915_buf_idle idle;
+};
+
+/*
+ * I'd like to use MI_STORE_DATA_IMM here, but I can't make
+ * it work. Seems like GART writes are broken with that
+ * instruction. Also I'm not sure that MI_FLUSH will
+ * act as a memory barrier for that instruction. It will
+ * for this single dword 2D blit.
+ */
+
+static void i915_emit_ring_reloc(struct drm_device *dev, uint32_t offset,
+ uint32_t value)
+{
+ struct drm_i915_private *dev_priv =
+ (struct drm_i915_private *)dev->dev_private;
+
+ RING_LOCALS;
+ i915_kernel_lost_context(dev);
+ BEGIN_LP_RING(6);
+ OUT_RING((0x02 << 29) | (0x40 << 22) | (0x3 << 20) | (0x3));
+ OUT_RING((0x3 << 24) | (0xF0 << 16) | (0x40));
+ OUT_RING((0x1 << 16) | (0x4));
+ OUT_RING(offset);
+ OUT_RING(value);
+ OUT_RING(0);
+ ADVANCE_LP_RING();
+}
+
+static void i915_dereference_buffers_locked(struct drm_i915_validate_buffer
+ *buffers, unsigned num_buffers)
+{
+ while (num_buffers--)
+ drm_bo_usage_deref_locked(&buffers[num_buffers].buffer);
+}
+
+int i915_apply_reloc(struct drm_file *file_priv, int num_buffers,
+ struct drm_i915_validate_buffer *buffers,
+ struct i915_relocatee_info *relocatee, uint32_t * reloc)
+{
+ unsigned index;
+ unsigned long new_cmd_offset;
+ u32 val;
+ int ret, i;
+ int buf_index = -1;
+
+ /*
+ * FIXME: O(relocs * buffers) complexity.
+ */
+
+ for (i = 0; i <= num_buffers; i++)
+ if (buffers[i].buffer)
+ if (reloc[2] == buffers[i].buffer->base.hash.key)
+ buf_index = i;
+
+ if (buf_index == -1) {
+ DRM_ERROR("Illegal relocation buffer %08X\n", reloc[2]);
+ return -EINVAL;
+ }
+
+ /*
+ * Short-circuit relocations that were correctly
+ * guessed by the client
+ */
+ if (buffers[buf_index].presumed_offset_correct && !DRM_DEBUG_RELOCATION)
+ return 0;
+
+ new_cmd_offset = reloc[0];
+ if (!relocatee->data_page ||
+ !drm_bo_same_page(relocatee->offset, new_cmd_offset)) {
+ struct drm_bo_mem_reg *mem = &relocatee->buf->mem;
+
+ drm_bo_kunmap(&relocatee->kmap);
+ relocatee->data_page = NULL;
+ relocatee->offset = new_cmd_offset;
+
+ if (unlikely(relocatee->idle == I915_RELOC_UNCHECKED)) {
+ ret = drm_bo_wait(relocatee->buf, 0, 1, 0, 0);
+ if (ret)
+ return ret;
+ relocatee->idle = I915_RELOC_IDLE;
+ }
+
+ if (unlikely((mem->mem_type != DRM_BO_MEM_LOCAL) &&
+ (mem->flags & DRM_BO_FLAG_CACHED_MAPPED)))
+ drm_bo_evict_cached(relocatee->buf);
+
+ ret = drm_bo_kmap(relocatee->buf, new_cmd_offset >> PAGE_SHIFT,
+ 1, &relocatee->kmap);
+ if (ret) {
+ DRM_ERROR
+ ("Could not map command buffer to apply relocs\n %08lx",
+ new_cmd_offset);
+ return ret;
+ }
+ relocatee->data_page = drm_bmo_virtual(&relocatee->kmap,
+ &relocatee->is_iomem);
+ relocatee->page_offset = (relocatee->offset & PAGE_MASK);
+ }
+
+ val = buffers[buf_index].buffer->offset;
+ index = (reloc[0] - relocatee->page_offset) >> 2;
+
+ /* add in validate */
+ val = val + reloc[1];
+
+ if (DRM_DEBUG_RELOCATION) {
+ if (buffers[buf_index].presumed_offset_correct &&
+ relocatee->data_page[index] != val) {
+ DRM_DEBUG
+ ("Relocation mismatch source %d target %d buffer %d user %08x kernel %08x\n",
+ reloc[0], reloc[1], buf_index,
+ relocatee->data_page[index], val);
+ }
+ }
+
+ if (relocatee->is_iomem)
+ iowrite32(val, relocatee->data_page + index);
+ else
+ relocatee->data_page[index] = val;
+ return 0;
+}
+
+int i915_process_relocs(struct drm_file *file_priv,
+ uint32_t buf_handle,
+ uint32_t __user ** reloc_user_ptr,
+ struct i915_relocatee_info *relocatee,
+ struct drm_i915_validate_buffer *buffers,
+ uint32_t num_buffers)
+{
+ int ret, reloc_stride;
+ uint32_t cur_offset;
+ uint32_t reloc_count;
+ uint32_t reloc_type;
+ uint32_t reloc_buf_size;
+ uint32_t *reloc_buf = NULL;
+ int i;
+
+ /* do a copy from user from the user ptr */
+ ret = get_user(reloc_count, *reloc_user_ptr);
+ if (ret) {
+ DRM_ERROR("Could not map relocation buffer.\n");
+ goto out;
+ }
+
+ ret = get_user(reloc_type, (*reloc_user_ptr) + 1);
+ if (ret) {
+ DRM_ERROR("Could not map relocation buffer.\n");
+ goto out;
+ }
+
+ if (reloc_type != 0) {
+ DRM_ERROR("Unsupported relocation type requested\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ reloc_buf_size =
+ (I915_RELOC_HEADER +
+ (reloc_count * I915_RELOC0_STRIDE)) * sizeof(uint32_t);
+ reloc_buf = kmalloc(reloc_buf_size, GFP_KERNEL);
+ if (!reloc_buf) {
+ DRM_ERROR("Out of memory for reloc buffer\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (copy_from_user(reloc_buf, *reloc_user_ptr, reloc_buf_size)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ /* get next relocate buffer handle */
+ *reloc_user_ptr = (uint32_t *) * (unsigned long *)&reloc_buf[2];
+
+ reloc_stride = I915_RELOC0_STRIDE * sizeof(uint32_t); /* may be different for other types of relocs */
+
+ DRM_DEBUG("num relocs is %d, next is %p\n", reloc_count,
+ *reloc_user_ptr);
+
+ for (i = 0; i < reloc_count; i++) {
+ cur_offset = I915_RELOC_HEADER + (i * I915_RELOC0_STRIDE);
+
+ ret = i915_apply_reloc(file_priv, num_buffers, buffers,
+ relocatee, reloc_buf + cur_offset);
+ if (ret)
+ goto out;
+ }
+
+ out:
+ if (reloc_buf)
+ kfree(reloc_buf);
+
+ if (relocatee->data_page) {
+ drm_bo_kunmap(&relocatee->kmap);
+ relocatee->data_page = NULL;
+ }
+
+ return ret;
+}
+
+static int i915_exec_reloc(struct drm_file *file_priv, drm_handle_t buf_handle,
+ uint32_t __user * reloc_user_ptr,
+ struct drm_i915_validate_buffer *buffers,
+ uint32_t buf_count)
+{
+ struct drm_device *dev = file_priv->minor->dev;
+ struct i915_relocatee_info relocatee;
+ int ret = 0;
+ int b;
+
+ /*
+ * Short circuit relocations when all previous
+ * buffers offsets were correctly guessed by
+ * the client
+ */
+ if (!DRM_DEBUG_RELOCATION) {
+ for (b = 0; b < buf_count; b++)
+ if (!buffers[b].presumed_offset_correct)
+ break;
+
+ if (b == buf_count)
+ return 0;
+ }
+
+ memset(&relocatee, 0, sizeof(relocatee));
+ relocatee.idle = I915_RELOC_UNCHECKED;
+
+ mutex_lock(&dev->struct_mutex);
+ relocatee.buf = drm_lookup_buffer_object(file_priv, buf_handle, 1);
+ mutex_unlock(&dev->struct_mutex);
+ if (!relocatee.buf) {
+ DRM_DEBUG("relocatee buffer invalid %08x\n", buf_handle);
+ ret = -EINVAL;
+ goto out_err;
+ }
+
+ mutex_lock(&relocatee.buf->mutex);
+ while (reloc_user_ptr) {
+		ret = i915_process_relocs(file_priv, buf_handle,
+					  &reloc_user_ptr, &relocatee,
+					  buffers, buf_count);
+ if (ret) {
+ DRM_ERROR("process relocs failed\n");
+ goto out_err1;
+ }
+ }
+
+ out_err1:
+ mutex_unlock(&relocatee.buf->mutex);
+ drm_bo_usage_deref_unlocked(&relocatee.buf);
+ out_err:
+ return ret;
+}
+
+static void i915_clear_relocatee(struct i915_relocatee_info *relocatee)
+{
+ if (relocatee->data_page) {
+#ifndef DRM_KMAP_ATOMIC_PROT_PFN
+ drm_bo_kunmap(&relocatee->kmap);
+#else
+ kunmap_atomic(relocatee->data_page, KM_USER0);
+#endif
+ relocatee->data_page = NULL;
+ }
+ relocatee->buf = NULL;
+ relocatee->dst = ~0;
+}
+
+static int i915_update_relocatee(struct i915_relocatee_info *relocatee,
+ struct drm_i915_validate_buffer *buffers,
+ unsigned int dst, unsigned long dst_offset)
+{
+ int ret;
+
+ if (unlikely(dst != relocatee->dst || NULL == relocatee->buf)) {
+ i915_clear_relocatee(relocatee);
+ relocatee->dst = dst;
+ relocatee->buf = buffers[dst].buffer;
+ relocatee->idle = buffers[dst].idle;
+
+ /*
+ * Check for buffer idle. If the buffer is busy, revert to
+ * ring relocations.
+ */
+
+ if (relocatee->idle == I915_RELOC_UNCHECKED) {
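+			/*
+			 * drm_bo_wait() may sleep, so temporarily leave the
+			 * preemption-disabled section our caller runs in.
+			 */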
+ preempt_enable();
+ mutex_lock(&relocatee->buf->mutex);
+
+ ret = drm_bo_wait(relocatee->buf, 0, 1, 1, 0);
+ if (ret == 0)
+ relocatee->idle = I915_RELOC_IDLE;
+ else {
+ relocatee->idle = I915_RELOC_BUSY;
+ relocatee->performed_ring_relocs = 1;
+ }
+ mutex_unlock(&relocatee->buf->mutex);
+ preempt_disable();
+ buffers[dst].idle = relocatee->idle;
+ }
+ }
+
+ if (relocatee->idle == I915_RELOC_BUSY)
+ return 0;
+
+ if (unlikely(dst_offset > relocatee->buf->num_pages * PAGE_SIZE)) {
+ DRM_ERROR("Relocation destination out of bounds.\n");
+ return -EINVAL;
+ }
+ if (unlikely(!drm_bo_same_page(relocatee->page_offset, dst_offset) ||
+ NULL == relocatee->data_page)) {
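+		/*
+		 * The dword to patch is on a different page than the one
+		 * currently mapped (or nothing is mapped yet): remap.
+		 */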
+#ifdef DRM_KMAP_ATOMIC_PROT_PFN
+ if (NULL != relocatee->data_page) {
+ kunmap_atomic(relocatee->data_page, KM_USER0);
+ relocatee->data_page = NULL;
+ }
+ ret = drm_bo_pfn_prot(relocatee->buf, dst_offset,
+ &relocatee->pfn, &relocatee->pg_prot);
+ if (ret) {
+ DRM_ERROR("Can't map relocation destination.\n");
+ return -EINVAL;
+ }
+ relocatee->data_page =
+ kmap_atomic_prot_pfn(relocatee->pfn, KM_USER0,
+ relocatee->pg_prot);
+#else
+ if (NULL != relocatee->data_page) {
+ drm_bo_kunmap(&relocatee->kmap);
+ relocatee->data_page = NULL;
+ }
+
+ ret = drm_bo_kmap(relocatee->buf, dst_offset >> PAGE_SHIFT,
+ 1, &relocatee->kmap);
+ if (ret) {
+ DRM_ERROR("Can't map relocation destination.\n");
+ return ret;
+ }
+
+ relocatee->data_page = drm_bmo_virtual(&relocatee->kmap,
+ &relocatee->is_iomem);
+#endif
+ relocatee->page_offset = dst_offset & PAGE_MASK;
+ }
+ return 0;
+}
+
+static int i915_apply_post_reloc(uint32_t reloc[],
+ struct drm_i915_validate_buffer *buffers,
+ uint32_t num_buffers,
+ struct i915_relocatee_info *relocatee)
+{
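+	/*
+	 * Type 1 reloc entry layout:
+	 *   reloc[0]: byte offset to patch in the destination buffer
+	 *   reloc[1]: delta added to the target buffer's offset
+	 *   reloc[2]: validate-list index of the target buffer
+	 *   reloc[3]: validate-list index of the destination buffer
+	 */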
+ uint32_t reloc_buffer = reloc[2];
+ uint32_t dst_buffer = reloc[3];
+ uint32_t val;
+ uint32_t index;
+ int ret;
+
+ if (likely(buffers[reloc_buffer].presumed_offset_correct))
+ return 0;
+ if (unlikely(reloc_buffer >= num_buffers)) {
+ DRM_ERROR("Invalid reloc buffer index.\n");
+ return -EINVAL;
+ }
+ if (unlikely(dst_buffer >= num_buffers)) {
+ DRM_ERROR("Invalid dest buffer index.\n");
+ return -EINVAL;
+ }
+
+ ret = i915_update_relocatee(relocatee, buffers, dst_buffer, reloc[0]);
+ if (unlikely(ret))
+ return ret;
+
+ val = buffers[reloc_buffer].buffer->offset;
+ index = (reloc[0] - relocatee->page_offset) >> 2;
+ val = val + reloc[1];
+
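+	/*
+	 * The destination buffer is still busy on the GPU: emit the
+	 * write through the ring instead of patching it with the CPU.
+	 */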
+ if (relocatee->idle == I915_RELOC_BUSY) {
+ i915_emit_ring_reloc(relocatee->buf->dev,
+ relocatee->buf->offset + reloc[0], val);
+ return 0;
+ }
+#ifdef DRM_KMAP_ATOMIC_PROT_PFN
+ relocatee->data_page[index] = val;
+#else
+ if (likely(relocatee->is_iomem))
+ iowrite32(val, relocatee->data_page + index);
+ else
+ relocatee->data_page[index] = val;
+#endif
+
+ return 0;
+}
+
+static int i915_post_relocs(struct drm_file *file_priv,
+ uint32_t __user * new_reloc_ptr,
+ struct drm_i915_validate_buffer *buffers,
+ unsigned int num_buffers)
+{
+ uint32_t *reloc;
+ uint32_t reloc_stride = I915_RELOC0_STRIDE * sizeof(uint32_t);
+ uint32_t header_size = I915_RELOC_HEADER * sizeof(uint32_t);
+ struct i915_relocatee_info relocatee;
+ uint32_t reloc_type;
+ uint32_t num_relocs;
+ uint32_t count;
+ int ret = 0;
+ int i;
+ int short_circuit = 1;
+ uint32_t __user *reloc_ptr;
+ uint64_t new_reloc_data;
+ uint32_t reloc_buf_size;
+	uint32_t *reloc_buf = NULL;
+
+ for (i = 0; i < num_buffers; ++i) {
+ if (unlikely(!buffers[i].presumed_offset_correct)) {
+ short_circuit = 0;
+ break;
+ }
+ }
+
+ if (likely(short_circuit))
+ return 0;
+
+ memset(&relocatee, 0, sizeof(relocatee));
+
+ while (new_reloc_ptr) {
+ reloc_ptr = new_reloc_ptr;
+
+ ret = get_user(num_relocs, reloc_ptr);
+ if (unlikely(ret))
+ goto out;
+		if (unlikely(!access_ok(VERIFY_READ, reloc_ptr,
+					header_size +
+					num_relocs * reloc_stride))) {
+			/* take the out path so pending ring relocs are
+			 * still flushed */
+			ret = -EFAULT;
+			goto out;
+		}
+
+ ret = __get_user(reloc_type, reloc_ptr + 1);
+ if (unlikely(ret))
+ goto out;
+
+ if (unlikely(reloc_type != 1)) {
+ DRM_ERROR("Unsupported relocation type requested.\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = __get_user(new_reloc_data, reloc_ptr + 2);
+ new_reloc_ptr = (uint32_t __user *) (unsigned long)
+ new_reloc_data;
+
+ reloc_ptr += I915_RELOC_HEADER;
+
+ if (num_relocs == 0)
+ goto out;
+
+ reloc_buf_size =
+ (num_relocs * I915_RELOC0_STRIDE) * sizeof(uint32_t);
+ reloc_buf = kmalloc(reloc_buf_size, GFP_KERNEL);
+ if (!reloc_buf) {
+ DRM_ERROR("Out of memory for reloc buffer\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (__copy_from_user(reloc_buf, reloc_ptr, reloc_buf_size)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ reloc = reloc_buf;
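+		/*
+		 * i915_apply_post_reloc() may patch through an atomic kmap,
+		 * so keep preemption off across the loop.
+		 */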
+ preempt_disable();
+ for (count = 0; count < num_relocs; ++count) {
+ ret = i915_apply_post_reloc(reloc, buffers,
+ num_buffers, &relocatee);
+ if (unlikely(ret)) {
+ preempt_enable();
+ goto out;
+ }
+ reloc += I915_RELOC0_STRIDE;
+ }
+ preempt_enable();
+
+ if (reloc_buf) {
+ kfree(reloc_buf);
+ reloc_buf = NULL;
+ }
+ i915_clear_relocatee(&relocatee);
+ }
+
+ out:
+ /*
+ * Flush ring relocs so the command parser will pick them up.
+ */
+
+ if (relocatee.performed_ring_relocs)
+ (void)i915_emit_mi_flush(file_priv->minor->dev, 0);
+
+ i915_clear_relocatee(&relocatee);
+ if (reloc_buf) {
+ kfree(reloc_buf);
+ reloc_buf = NULL;
+ }
+
+ return ret;
+}
+
+static int i915_check_presumed(struct drm_i915_op_arg *arg,
+ struct drm_buffer_object *bo,
+ uint32_t __user * data, int *presumed_ok)
+{
+ struct drm_bo_op_req *req = &arg->d.req;
+ uint32_t hint_offset;
+ uint32_t hint = req->bo_req.hint;
+
+ *presumed_ok = 0;
+
+ if (!(hint & DRM_BO_HINT_PRESUMED_OFFSET))
+ return 0;
+ if (bo->offset == req->bo_req.presumed_offset) {
+ *presumed_ok = 1;
+ return 0;
+ }
+
+ /*
+ * We need to turn off the HINT_PRESUMED_OFFSET for this buffer in
+ * the user-space IOCTL argument list, since the buffer has moved,
+ * we're about to apply relocations and we might subsequently
+ * hit an -EAGAIN. In that case the argument list will be reused by
+ * user-space, but the presumed offset is no longer valid.
+ *
+ * Needless to say, this is a bit ugly.
+ */
+
+	hint_offset = (uint32_t *)&req->bo_req.hint - (uint32_t *)arg;
+ hint &= ~DRM_BO_HINT_PRESUMED_OFFSET;
+ return __put_user(hint, data + hint_offset);
+}
+
+/*
+ * Validate, add fence and relocate a block of bos from a userspace list
+ */
+int i915_validate_buffer_list(struct drm_file *file_priv,
+ unsigned int fence_class, uint64_t data,
+ struct drm_i915_validate_buffer *buffers,
+ uint32_t * num_buffers,
+ uint32_t __user ** post_relocs)
+{
+ struct drm_i915_op_arg arg;
+ struct drm_bo_op_req *req = &arg.d.req;
+ int ret = 0;
+ unsigned buf_count = 0;
+ uint32_t buf_handle;
+ uint32_t __user *reloc_user_ptr;
+ struct drm_i915_validate_buffer *item = buffers;
+ *post_relocs = NULL;
+
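+	/*
+	 * The ops list is chained in user space: each drm_i915_op_arg
+	 * carries a "next" pointer, and a zero pointer ends the list.
+	 */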
+ do {
+ if (buf_count >= *num_buffers) {
+ DRM_ERROR("Buffer count exceeded %d\n.", *num_buffers);
+ ret = -EINVAL;
+ goto out_err;
+ }
+ item = buffers + buf_count;
+ item->buffer = NULL;
+ item->presumed_offset_correct = 0;
+ item->idle = I915_RELOC_UNCHECKED;
+
+		if (copy_from_user(&arg, (void __user *)(unsigned long)data,
+				   sizeof(arg))) {
+ ret = -EFAULT;
+ goto out_err;
+ }
+
+ ret = 0;
+ if (req->op != drm_bo_validate) {
+			DRM_ERROR("Buffer object operation wasn't \"validate\".\n");
+ ret = -EINVAL;
+ goto out_err;
+ }
+ item->ret = 0;
+ item->data = (void __user *)(unsigned long)data;
+
+ buf_handle = req->bo_req.handle;
+ reloc_user_ptr = (uint32_t *) (unsigned long)arg.reloc_ptr;
+
+		/*
+		 * A type 1 relocation list on the first buffer means
+		 * user-space requests post-validation relocations: remember
+		 * the list here and apply it once all buffers have been
+		 * validated.
+		 */
+
+ if (unlikely((buf_count == 0) && (*post_relocs == NULL) &&
+ (reloc_user_ptr != NULL))) {
+ uint32_t reloc_type;
+
+ ret = get_user(reloc_type, reloc_user_ptr + 1);
+ if (ret)
+ goto out_err;
+
+ if (reloc_type == 1)
+ *post_relocs = reloc_user_ptr;
+
+ }
+
+ if ((*post_relocs == NULL) && (reloc_user_ptr != NULL)) {
+			ret = i915_exec_reloc(file_priv, buf_handle,
+					      reloc_user_ptr, buffers,
+					      buf_count);
+ if (ret)
+ goto out_err;
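+			/*
+			 * Make the relocation writes visible before the
+			 * buffer is validated and handed to the GPU.
+			 */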
+ DRM_MEMORYBARRIER();
+ }
+
+ ret = drm_bo_handle_validate(file_priv, req->bo_req.handle,
+ req->bo_req.flags,
+ req->bo_req.mask, req->bo_req.hint,
+ req->bo_req.fence_class,
+ NULL, &item->buffer);
+ if (ret) {
+ DRM_ERROR("error on handle validate %d\n", ret);
+ goto out_err;
+ }
+
+ buf_count++;
+
+ ret = i915_check_presumed(&arg, item->buffer,
+ (uint32_t __user *)
+ (unsigned long)data,
+ &item->presumed_offset_correct);
+ if (ret)
+ goto out_err;
+
+ data = arg.next;
+ } while (data != 0);
+ out_err:
+ *num_buffers = buf_count;
+ item->ret = (ret != -EAGAIN) ? ret : 0;
+ return ret;
+}
+
+/*
+ * Remove all buffers from the unfenced list.
+ * If the execbuffer operation was aborted, for example due to a signal,
+ * this also makes sure that buffers retain their original state and
+ * fence pointers.
+ * Copy back buffer information to user-space unless we were interrupted
+ * by a signal, in which case the IOCTL must be rerun.
+ */
+
+static int i915_handle_copyback(struct drm_device *dev,
+ struct drm_i915_validate_buffer *buffers,
+ unsigned int num_buffers, int ret)
+{
+ int err = ret;
+ int i;
+ struct drm_i915_op_arg arg;
+ struct drm_buffer_object *bo;
+
+ if (ret)
+ drm_putback_buffer_objects(dev);
+
+ if (ret != -EAGAIN) {
+ for (i = 0; i < num_buffers; ++i) {
+ arg.handled = 1;
+ arg.d.rep.ret = buffers->ret;
+ bo = buffers->buffer;
+ mutex_lock(&bo->mutex);
+ drm_bo_fill_rep_arg(bo, &arg.d.rep.bo_info);
+ mutex_unlock(&bo->mutex);
+ if (__copy_to_user(buffers->data, &arg, sizeof(arg)))
+ err = -EFAULT;
+ buffers++;
+ }
+ }
+
+ return err;
+}
+
+/*
+ * Create a fence object, and if that fails, pretend that everything is
+ * OK and just idle the GPU.
+ */
+
+void i915_fence_or_sync(struct drm_file *file_priv,
+ uint32_t fence_flags,
+ struct drm_fence_arg *fence_arg,
+ struct drm_fence_object **fence_p)
+{
+ struct drm_device *dev = file_priv->minor->dev;
+ int ret;
+ struct drm_fence_object *fence;
+
+ ret = drm_fence_buffer_objects(dev, NULL, fence_flags, NULL, &fence);
+
+ if (ret) {
+
+ /*
+ * Fence creation failed.
+ * Fall back to synchronous operation and idle the engine.
+ */
+
+ (void)i915_emit_mi_flush(dev, MI_READ_FLUSH);
+ (void)i915_quiescent(dev);
+
+ if (!(fence_flags & DRM_FENCE_FLAG_NO_USER)) {
+
+ /*
+ * Communicate to user-space that
+ * fence creation has failed and that
+ * the engine is idle.
+ */
+
+ fence_arg->handle = ~0;
+ fence_arg->error = ret;
+ }
+ drm_putback_buffer_objects(dev);
+ if (fence_p)
+ *fence_p = NULL;
+ return;
+ }
+
+ if (!(fence_flags & DRM_FENCE_FLAG_NO_USER)) {
+
+ ret = drm_fence_add_user_object(file_priv, fence,
+ fence_flags &
+ DRM_FENCE_FLAG_SHAREABLE);
+ if (!ret)
+ drm_fence_fill_arg(fence, fence_arg);
+ else {
+			/*
+			 * Fence user object creation failed.
+			 * We must idle the engine here as well, as user-
+			 * space expects a fence object to wait on. Since we
+			 * do have a fence object, wait for it to signal,
+			 * which indicates the engine is "sufficiently" idle.
+			 */
+
+ (void)drm_fence_object_wait(fence, 0, 1, fence->type);
+ drm_fence_usage_deref_unlocked(&fence);
+ fence_arg->handle = ~0;
+ fence_arg->error = ret;
+ }
+ }
+
+ if (fence_p)
+ *fence_p = fence;
+ else if (fence)
+ drm_fence_usage_deref_unlocked(&fence);
+}
+
+int i915_execbuffer(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_i915_private *dev_priv = (struct drm_i915_private *)
+ dev->dev_private;
+ struct drm_i915_master_private *master_priv =
+ (struct drm_i915_master_private *)
+ dev->primary->master->driver_priv;
+ struct drm_i915_sarea *sarea_priv = (struct drm_i915_sarea *)
+ master_priv->sarea_priv;
+ struct drm_i915_execbuffer *exec_buf = data;
+ struct drm_i915_batchbuffer *batch = &exec_buf->batch;
+ struct drm_fence_arg *fence_arg = &exec_buf->fence_arg;
+ int num_buffers;
+ int ret;
+ uint32_t __user *post_relocs;
+
+ if (!dev_priv->allow_batchbuffer) {
+ DRM_ERROR("Batchbuffer ioctl disabled\n");
+ return -EINVAL;
+ }
+
+	if (batch->num_cliprects &&
+	    DRM_VERIFYAREA_READ(batch->cliprects,
+				batch->num_cliprects *
+				sizeof(struct drm_clip_rect)))
+		return -EFAULT;
+
+ if (exec_buf->num_buffers > dev_priv->max_validate_buffers)
+ return -EINVAL;
+
+ ret = drm_bo_read_lock(&dev->bm.bm_lock, 1);
+ if (ret)
+ return ret;
+
+ /*
+ * The cmdbuf_mutex makes sure the validate-submit-fence
+ * operation is atomic.
+ */
+
+ ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
+ if (ret) {
+ drm_bo_read_unlock(&dev->bm.bm_lock);
+ return -EAGAIN;
+ }
+
+ num_buffers = exec_buf->num_buffers;
+
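+	/*
+	 * The validate-buffer array is allocated lazily on first use
+	 * and reused by subsequent execbuffer calls.
+	 */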
+ if (!dev_priv->val_bufs) {
+ dev_priv->val_bufs =
+ vmalloc(sizeof(struct drm_i915_validate_buffer) *
+ dev_priv->max_validate_buffers);
+ }
+ if (!dev_priv->val_bufs) {
+ drm_bo_read_unlock(&dev->bm.bm_lock);
+ mutex_unlock(&dev_priv->cmdbuf_mutex);
+ return -ENOMEM;
+ }
+
+ /* validate buffer list + fixup relocations */
+ ret = i915_validate_buffer_list(file_priv, 0, exec_buf->ops_list,
+ dev_priv->val_bufs, &num_buffers,
+ &post_relocs);
+ if (ret)
+ goto out_err0;
+
+ if (post_relocs) {
+ ret = i915_post_relocs(file_priv, post_relocs,
+ dev_priv->val_bufs, num_buffers);
+ if (ret)
+ goto out_err0;
+ }
+
+ /* make sure all previous memory operations have passed */
+ DRM_MEMORYBARRIER();
+
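+	/*
+	 * Without post-relocations the batchbuffer is the last buffer
+	 * on the validate list; with them, batch->start is an offset
+	 * into the first buffer.
+	 */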
+ if (!post_relocs) {
+ drm_agp_chipset_flush(dev);
+ batch->start =
+ dev_priv->val_bufs[num_buffers - 1].buffer->offset;
+ } else {
+ batch->start += dev_priv->val_bufs[0].buffer->offset;
+ }
+
+ DRM_DEBUG("i915 exec batchbuffer, start %x used %d cliprects %d\n",
+ batch->start, batch->used, batch->num_cliprects);
+
+ ret = i915_dispatch_batchbuffer(dev, batch);
+ if (ret)
+ goto out_err0;
+ if (sarea_priv)
+ sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
+ i915_fence_or_sync(file_priv, fence_arg->flags, fence_arg, NULL);
+
+ out_err0:
+ ret = i915_handle_copyback(dev, dev_priv->val_bufs, num_buffers, ret);
+ mutex_lock(&dev->struct_mutex);
+ i915_dereference_buffers_locked(dev_priv->val_bufs, num_buffers);
+ mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev_priv->cmdbuf_mutex);
+ drm_bo_read_unlock(&dev->bm.bm_lock);
+ return ret;
+}
diff --git a/linux-core/i915_fence.c b/linux-core/i915_fence.c
index f392e8e6..3ca8403f 100644
--- a/linux-core/i915_fence.c
+++ b/linux-core/i915_fence.c
@@ -162,11 +162,13 @@ static int i915_fence_emit_sequence(struct drm_device *dev, uint32_t class,
void i915_fence_handler(struct drm_device *dev)
{
+ struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
struct drm_fence_manager *fm = &dev->fm;
struct drm_fence_class_manager *fc = &fm->fence_class[0];
write_lock(&fm->lock);
- i915_fence_poll(dev, 0, fc->waiting_types);
+ if (likely(dev_priv->fence_irq_on))
+ i915_fence_poll(dev, 0, fc->waiting_types);
write_unlock(&fm->lock);
}
diff --git a/linux-core/nouveau_buffer.c b/linux-core/nouveau_bo.c
index 11549317..ab3b23a4 100644
--- a/linux-core/nouveau_buffer.c
+++ b/linux-core/nouveau_bo.c
@@ -198,8 +198,8 @@ nouveau_bo_move_m2mf(struct drm_buffer_object *bo, int evict, int no_wait,
/* Flip pages into the GART and move if we can. */
static int
-nouveau_bo_move_gart(struct drm_buffer_object *bo, int evict, int no_wait,
- struct drm_bo_mem_reg *new_mem)
+nouveau_bo_move_flipd(struct drm_buffer_object *bo, int evict, int no_wait,
+ struct drm_bo_mem_reg *new_mem)
{
struct drm_device *dev = bo->dev;
struct drm_bo_mem_reg tmp_mem;
@@ -212,11 +212,10 @@ nouveau_bo_move_gart(struct drm_buffer_object *bo, int evict, int no_wait,
DRM_BO_FLAG_FORCE_CACHING);
ret = drm_bo_mem_space(bo, &tmp_mem, no_wait);
-
if (ret)
return ret;
- ret = drm_ttm_bind (bo->ttm, &tmp_mem);
+ ret = drm_ttm_bind(bo->ttm, &tmp_mem);
if (ret)
goto out_cleanup;
@@ -234,6 +233,7 @@ out_cleanup:
tmp_mem.mm_node = NULL;
mutex_unlock(&dev->struct_mutex);
}
+
return ret;
}
@@ -246,22 +246,19 @@ nouveau_bo_move(struct drm_buffer_object *bo, int evict, int no_wait,
if (new_mem->mem_type == DRM_BO_MEM_LOCAL) {
if (old_mem->mem_type == DRM_BO_MEM_LOCAL)
return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
-#if 0
- if (!nouveau_bo_move_to_gart(bo, evict, no_wait, new_mem))
-#endif
+ if (nouveau_bo_move_flipd(bo, evict, no_wait, new_mem))
return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
}
else
if (old_mem->mem_type == DRM_BO_MEM_LOCAL) {
-#if 0
- if (nouveau_bo_move_to_gart(bo, evict, no_wait, new_mem))
-#endif
+ if (1 /*nouveau_bo_move_flips(bo, evict, no_wait, new_mem)*/)
return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
}
else {
-// if (nouveau_bo_move_m2mf(bo, evict, no_wait, new_mem))
+ if (nouveau_bo_move_m2mf(bo, evict, no_wait, new_mem))
return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
}
+
return 0;
}
diff --git a/linux-core/nouveau_drv.c b/linux-core/nouveau_drv.c
index e9623eb1..c8f57dff 100644
--- a/linux-core/nouveau_drv.c
+++ b/linux-core/nouveau_drv.c
@@ -86,7 +86,11 @@ static struct drm_driver driver = {
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
+#ifdef GIT_REVISION
+ .date = GIT_REVISION,
+#else
.date = DRIVER_DATE,
+#endif
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
diff --git a/linux-core/nouveau_fence.c b/linux-core/nouveau_fence.c
index 59dcf7d0..4ad51ae4 100644
--- a/linux-core/nouveau_fence.c
+++ b/linux-core/nouveau_fence.c
@@ -80,12 +80,11 @@ nouveau_fence_poll(struct drm_device *dev, uint32_t class, uint32_t waiting_type
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct drm_fence_class_manager *fc = &dev->fm.fence_class[class];
struct nouveau_channel *chan = dev_priv->fifos[class];
- uint32_t pending_types = 0;
DRM_DEBUG("class=%d\n", class);
DRM_DEBUG("pending: 0x%08x 0x%08x\n", waiting_types, fc->waiting_types);
- if (pending_types) {
+ if (waiting_types & DRM_FENCE_TYPE_EXE) {
uint32_t sequence = NV_READ(chan->ref_cnt);
DRM_DEBUG("got 0x%08x\n", sequence);
diff --git a/linux-core/via_dmablit.c b/linux-core/via_dmablit.c
index a6a21782..b5f9f05f 100644
--- a/linux-core/via_dmablit.c
+++ b/linux-core/via_dmablit.c
@@ -618,7 +618,7 @@ via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmabli
* (Not a big limitation anyway.)
*/
- if ((xfer->mem_stride - xfer->line_length) >= PAGE_SIZE) {
+ if ((xfer->mem_stride - xfer->line_length) > 2*PAGE_SIZE) {
DRM_ERROR("Too large system memory stride. Stride: %d, "
"Length: %d\n", xfer->mem_stride, xfer->line_length);
return -EINVAL;
diff --git a/linux-core/xgi_pcie.c b/linux-core/xgi_pcie.c
index a7d3ea24..614616a9 100644
--- a/linux-core/xgi_pcie.c
+++ b/linux-core/xgi_pcie.c
@@ -86,6 +86,7 @@ int xgi_pcie_heap_init(struct xgi_info * info)
return err;
}
+ info->gart_info.table_mask = DMA_BIT_MASK(32);
info->gart_info.gart_table_location = DRM_ATI_GART_MAIN;
info->gart_info.gart_reg_if = DRM_ATI_GART_PCI;
info->gart_info.table_size = info->dev->sg->pages * sizeof(u32);