author     Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>  2006-10-11 13:40:35 +0200
committer  Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>  2006-10-11 13:40:35 +0200
commit     f2db76e2f206d2017f710eaddc4b33add4498898 (patch)
tree       a245512bc51f70c4458af047507605a27fae5d02 /linux-core/drm_vm.c
parent     c58574c60505a699e19e1ed59e1b441be2594e53 (diff)
Big update:

- Adapt for new functions in the 2.6.19 kernel.
- Remove the ability to have multiple regions in one TTM. This simplifies a lot of code.
- Remove the ability to access TTMs from user space. We don't need it anymore without TTM regions.
- Don't change caching policy for evicted buffers. Instead, change it only when the buffer is first accessed by the CPU (on the first page fault). This tremendously speeds up eviction.

Current code is safe for kernels <= 2.6.14 and should also be OK with 2.6.19 and above.
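[Editor's note] The eviction speedup described above comes from deferring the caching-policy switch until the first CPU page fault: the new fault handler in the diff below calls drm_fixup_ttm_caching() before touching the page tables. That helper is defined outside this file; the following is only a rough sketch of the idea, where the ttm_evicted and ttm_unbound state names and the drm_set_caching() helper are assumptions for illustration, not taken from this patch:

/*
 * Sketch only: restore cached mappings lazily, on the first CPU
 * access after eviction, rather than at eviction time.
 * ttm_evicted, ttm_unbound and drm_set_caching() are assumed
 * names, not taken from this patch.
 */
static void drm_fixup_ttm_caching(drm_ttm_t *ttm)
{
	if (ttm->state == ttm_evicted) {
		drm_set_caching(ttm, 0);	/* back to cached */
		ttm->state = ttm_unbound;
	}
}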
Diffstat (limited to 'linux-core/drm_vm.c')
-rw-r--r--  linux-core/drm_vm.c  255
1 file changed, 85 insertions(+), 170 deletions(-)
diff --git a/linux-core/drm_vm.c b/linux-core/drm_vm.c
index 5fbbaadd..45951156 100644
--- a/linux-core/drm_vm.c
+++ b/linux-core/drm_vm.c
@@ -159,120 +159,48 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
}
#endif /* __OS_HAS_AGP */
-
-static int drm_ttm_remap_bound_pfn(struct vm_area_struct *vma,
- unsigned long address,
- unsigned long size)
-{
- unsigned long
- page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
- unsigned long
- num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
- drm_ttm_vma_list_t *entry = (drm_ttm_vma_list_t *)
- vma->vm_private_data;
- drm_map_t *map = entry->map;
- drm_ttm_t *ttm = (drm_ttm_t *) map->offset;
- unsigned long i, cur_pfn;
- unsigned long start = 0;
- unsigned long end = 0;
- unsigned long last_pfn = 0;
- unsigned long start_pfn = 0;
- int bound_sequence = FALSE;
- int ret = 0;
- uint32_t cur_flags;
-
- for (i=page_offset; i<page_offset + num_pages; ++i) {
- cur_flags = ttm->page_flags[i];
-
- if (!bound_sequence && (cur_flags & DRM_TTM_PAGE_UNCACHED)) {
-
- start = i;
- end = i;
- last_pfn = (cur_flags & DRM_TTM_MASK_PFN) >> PAGE_SHIFT;
- start_pfn = last_pfn;
- bound_sequence = TRUE;
-
- } else if (bound_sequence) {
-
- cur_pfn = (cur_flags & DRM_TTM_MASK_PFN) >> PAGE_SHIFT;
-
- if ( !(cur_flags & DRM_TTM_PAGE_UNCACHED) ||
- (cur_pfn != last_pfn + 1)) {
-
- ret = io_remap_pfn_range(vma,
- vma->vm_start + (start << PAGE_SHIFT),
- (ttm->aperture_base >> PAGE_SHIFT)
- + start_pfn,
- (end - start + 1) << PAGE_SHIFT,
- drm_io_prot(_DRM_AGP, vma));
-
- if (ret)
- break;
-
- bound_sequence = (cur_flags & DRM_TTM_PAGE_UNCACHED);
- if (!bound_sequence)
- continue;
-
- start = i;
- end = i;
- last_pfn = cur_pfn;
- start_pfn = last_pfn;
-
- } else {
-
- end++;
- last_pfn = cur_pfn;
-
- }
- }
- }
-
- if (!ret && bound_sequence) {
- ret = io_remap_pfn_range(vma,
- vma->vm_start + (start << PAGE_SHIFT),
- (ttm->aperture_base >> PAGE_SHIFT)
- + start_pfn,
- (end - start + 1) << PAGE_SHIFT,
- drm_io_prot(_DRM_AGP, vma));
- }
-
- if (ret) {
- DRM_ERROR("Map returned %c\n", ret);
- }
- return ret;
-}
-
-static __inline__ struct page *drm_do_vm_ttm_nopage(struct vm_area_struct *vma,
- unsigned long address)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19))
+static
+#endif
+struct page *drm_vm_ttm_fault(struct vm_area_struct *vma,
+ struct fault_data *data)
{
- drm_ttm_vma_list_t *entry = (drm_ttm_vma_list_t *)
- vma->vm_private_data;
- drm_map_t *map;
+ unsigned long address = data->address;
+ drm_local_map_t *map = (drm_local_map_t *) vma->vm_private_data;
unsigned long page_offset;
struct page *page;
drm_ttm_t *ttm;
- pgprot_t default_prot;
- uint32_t page_flags;
drm_buffer_manager_t *bm;
drm_device_t *dev;
+ unsigned long pfn;
+ int err;
+ pgprot_t pgprot;
- if (address > vma->vm_end)
- return NOPAGE_SIGBUS; /* Disallow mremap */
- if (!entry)
- return NOPAGE_OOM; /* Nothing allocated */
+ if (!map) {
+ data->type = VM_FAULT_OOM;
+ return NULL;
+ }
+
+ if (address > vma->vm_end) {
+ data->type = VM_FAULT_SIGBUS;
+ return NULL;
+ }
- map = (drm_map_t *) entry->map;
ttm = (drm_ttm_t *) map->offset;
dev = ttm->dev;
+
+ /*
+ * Perhaps retry here?
+ */
+
mutex_lock(&dev->struct_mutex);
+ drm_fixup_ttm_caching(ttm);
bm = &dev->bm;
page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
page = ttm->pages[page_offset];
- page_flags = ttm->page_flags[page_offset];
-
if (!page) {
if (bm->cur_pages >= bm->max_pages) {
DRM_ERROR("Maximum locked page count exceeded\n");
@@ -281,40 +209,65 @@ static __inline__ struct page *drm_do_vm_ttm_nopage(struct vm_area_struct *vma,
}
++bm->cur_pages;
page = ttm->pages[page_offset] = drm_alloc_gatt_pages(0);
- if (page) {
- SetPageLocked(page);
- } else {
- page = NOPAGE_OOM;
+ if (!page) {
+ data->type = VM_FAULT_OOM;
+ goto out;
}
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
+ SetPageLocked(page);
+#else
+ SetPageReserved(page);
+#endif
}
- if (page_flags & DRM_TTM_PAGE_UNCACHED) {
+ if (ttm->page_flags & DRM_TTM_PAGE_UNCACHED) {
/*
- * This makes sure we don't race with another
- * drm_ttm_remap_bound_pfn();
+ * FIXME: Check can't map aperture flag.
*/
- if (!drm_pte_is_clear(vma, address)) {
- page = NOPAGE_RETRY;
- goto out1;
- }
-
- drm_ttm_remap_bound_pfn(vma, address, PAGE_SIZE);
- page = NOPAGE_RETRY;
- goto out1;
+ pfn = ttm->aper_offset + page_offset +
+ (ttm->be->aperture_base >> PAGE_SHIFT);
+ pgprot = drm_io_prot(ttm->be->drm_map_type, vma);
+ } else {
+ pfn = page_to_pfn(page);
+ pgprot = vma->vm_page_prot;
}
- get_page(page);
- out1:
- default_prot = vm_get_page_prot(vma->vm_flags);
- vma->vm_page_prot = default_prot;
+ err = vm_insert_pfn(vma, address, pfn, pgprot);
+ if (!err && (ttm->page_flags & DRM_TTM_PAGE_UNCACHED) &&
+ ttm->num_pages > 1) {
+
+ /*
+ * FIXME: Check can't map aperture flag.
+ */
+
+ /*
+ * Since we're not racing with anybody else,
+ * we might as well populate the whole object space.
+ * Note that we're touching vma->vm_flags with this
+ * operation, but we are not changing them, so we should be
+ * OK.
+ */
+
+ BUG_ON(ttm->state == ttm_unpopulated);
+ err = io_remap_pfn_range(vma, address + PAGE_SIZE, pfn+1,
+ (ttm->num_pages - 1) * PAGE_SIZE,
+ pgprot);
+ }
+
+
+ if (!err || err == -EBUSY)
+ data->type = VM_FAULT_MINOR;
+ else
+ data->type = VM_FAULT_OOM;
out:
mutex_unlock(&dev->struct_mutex);
- return page;
+ return NULL;
}
+
/**
* \c nopage method for shared virtual memory.
*
@@ -547,14 +500,6 @@ static struct page *drm_vm_sg_nopage(struct vm_area_struct *vma,
return drm_do_vm_sg_nopage(vma, address);
}
-static struct page *drm_vm_ttm_nopage(struct vm_area_struct *vma,
- unsigned long address, int *type)
-{
- if (type)
- *type = VM_FAULT_MINOR;
- return drm_do_vm_ttm_nopage(vma, address);
-}
-
#else /* LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,0) */
@@ -582,13 +527,6 @@ static struct page *drm_vm_sg_nopage(struct vm_area_struct *vma,
return drm_do_vm_sg_nopage(vma, address);
}
-static struct page *drm_vm_ttm_nopage(struct vm_area_struct *vma,
- unsigned long address, int unused)
-{
- return drm_do_vm_ttm_nopage(vma, address);
-}
-
-
#endif
/** AGP virtual memory operations */
@@ -619,11 +557,19 @@ static struct vm_operations_struct drm_vm_sg_ops = {
.close = drm_vm_close,
};
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
static struct vm_operations_struct drm_vm_ttm_ops = {
.nopage = drm_vm_ttm_nopage,
.open = drm_vm_ttm_open_wrapper,
.close = drm_vm_ttm_close,
};
+#else
+static struct vm_operations_struct drm_vm_ttm_ops = {
+ .fault = drm_vm_ttm_fault,
+ .open = drm_vm_ttm_open_wrapper,
+ .close = drm_vm_ttm_close,
+};
+#endif
/**
* \c open method for shared virtual memory.
@@ -656,36 +602,17 @@ static void drm_vm_open(struct vm_area_struct *vma)
static int drm_vm_ttm_open(struct vm_area_struct *vma) {
- drm_ttm_vma_list_t *entry, *tmp_vma =
- (drm_ttm_vma_list_t *) vma->vm_private_data;
- drm_map_t *map;
+ drm_local_map_t *map = (drm_local_map_t *)vma->vm_private_data;
drm_ttm_t *ttm;
drm_file_t *priv = vma->vm_file->private_data;
drm_device_t *dev = priv->head->dev;
- int ret = 0;
drm_vm_open(vma);
mutex_lock(&dev->struct_mutex);
- entry = drm_calloc(1, sizeof(*entry), DRM_MEM_VMAS);
- if (entry) {
- *entry = *tmp_vma;
- map = (drm_map_t *) entry->map;
- ttm = (drm_ttm_t *) map->offset;
- if (!ret) {
- atomic_inc(&ttm->vma_count);
- INIT_LIST_HEAD(&entry->head);
- entry->vma = vma;
- entry->orig_protection = vma->vm_page_prot;
- list_add_tail(&entry->head, &ttm->vma_list->head);
- vma->vm_private_data = (void *) entry;
- DRM_DEBUG("Added VMA to ttm at 0x%016lx\n",
- (unsigned long) ttm);
- }
- } else {
- ret = -ENOMEM;
- }
+ ttm = (drm_ttm_t *) map->offset;
+ atomic_inc(&ttm->vma_count);
mutex_unlock(&dev->struct_mutex);
- return ret;
+ return 0;
}
static void drm_vm_ttm_open_wrapper(struct vm_area_struct *vma)
@@ -729,21 +656,16 @@ static void drm_vm_close(struct vm_area_struct *vma)
static void drm_vm_ttm_close(struct vm_area_struct *vma)
{
- drm_ttm_vma_list_t *ttm_vma =
- (drm_ttm_vma_list_t *) vma->vm_private_data;
- drm_map_t *map;
+ drm_local_map_t *map = (drm_local_map_t *) vma->vm_private_data;
drm_ttm_t *ttm;
drm_device_t *dev;
int ret;
drm_vm_close(vma);
- if (ttm_vma) {
- map = (drm_map_t *) ttm_vma->map;
+ if (map) {
ttm = (drm_ttm_t *) map->offset;
dev = ttm->dev;
mutex_lock(&dev->struct_mutex);
- list_del(&ttm_vma->head);
- drm_free(ttm_vma, sizeof(*ttm_vma), DRM_MEM_VMAS);
if (atomic_dec_and_test(&ttm->vma_count)) {
if (ttm->destroy) {
ret = drm_destroy_ttm(ttm);
@@ -951,17 +873,10 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
#endif
break;
case _DRM_TTM: {
- drm_ttm_vma_list_t tmp_vma;
- tmp_vma.orig_protection = vma->vm_page_prot;
- tmp_vma.map = map;
vma->vm_ops = &drm_vm_ttm_ops;
- vma->vm_private_data = (void *) &tmp_vma;
+ vma->vm_private_data = (void *) map;
vma->vm_file = filp;
vma->vm_flags |= VM_RESERVED | VM_IO;
- if (drm_ttm_remap_bound_pfn(vma,
- vma->vm_start,
- vma->vm_end - vma->vm_start))
- return -EAGAIN;
if (drm_vm_ttm_open(vma))
return -EAGAIN;
return 0;
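[Editor's note] For context, the heart of the new fault path above is: insert the single faulting PFN, then, when the TTM is bound uncached, prefault the rest of the object with one io_remap_pfn_range() call. Condensed from the fault-handler hunk above for reference; note that the four-argument, pgprot-taking vm_insert_pfn() is the variant this tree carried at the time, not the mainline signature, and error handling is elided:

/* Condensed from drm_vm_ttm_fault() above; illustration only. */
err = vm_insert_pfn(vma, address, pfn, pgprot);
if (!err && (ttm->page_flags & DRM_TTM_PAGE_UNCACHED) &&
    ttm->num_pages > 1)
	/* Map the remaining pages of the bound region in one go. */
	err = io_remap_pfn_range(vma, address + PAGE_SIZE, pfn + 1,
				 (ttm->num_pages - 1) * PAGE_SIZE,
				 pgprot);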