From 3624e43282b0c6aad32829f116fd8f7bce66fbb6 Mon Sep 17 00:00:00 2001
From: Thomas Hellstrom
Date: Fri, 20 Oct 2006 15:06:31 +0200
Subject: Bug #8707, 2.6.19-rc compatibility for memory manager code.

---
 linux-core/drm_compat.c | 53 ++++++++++++++++++++++++++++++-------------------
 linux-core/drm_compat.h | 22 +++++++++++---------
 linux-core/drm_drv.c    |  6 +++++-
 linux-core/drm_vm.c     |  8 +++++---
 4 files changed, 56 insertions(+), 33 deletions(-)

diff --git a/linux-core/drm_compat.c b/linux-core/drm_compat.c
index 90e53419..b466f8bd 100644
--- a/linux-core/drm_compat.c
+++ b/linux-core/drm_compat.c
@@ -28,6 +28,11 @@
 #include "drmP.h"
 
 #if defined(CONFIG_X86) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
+
+/*
+ * These have bad performance in the AGP module for the indicated kernel versions.
+ */
+
 int drm_map_page_into_agp(struct page *page)
 {
 	int i;
@@ -45,8 +50,14 @@ int drm_unmap_page_from_agp(struct page *page)
 	 * performance reasons
 	 */
 	return i;
 }
-#endif
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
+
+/*
+ * The protection map was exported in 2.6.19.
+ */
 
 pgprot_t vm_get_page_prot(unsigned long vm_flags)
 {
@@ -62,8 +73,17 @@ pgprot_t vm_get_page_prot(unsigned long vm_flags)
 	return protection_map[vm_flags & 0x0F];
 #endif
 };
+#endif
 
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
+
+/*
+ * VM code for kernels below 2.6.15, in which version a major VM rewrite
+ * occurred. This implements a simple, straightforward
+ * version similar to what's going to be
+ * in kernel 2.6.20+.
+ */
 
 static int drm_pte_is_clear(struct vm_area_struct *vma,
 			    unsigned long addr)
@@ -76,12 +96,7 @@ static int drm_pte_is_clear(struct vm_area_struct *vma,
 	pgd_t *pgd;
 
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
 	spin_lock(&mm->page_table_lock);
-#else
-	spinlock_t *ptl;
-#endif
-
 
 	pgd = pgd_offset(mm, addr);
 	if (pgd_none(*pgd))
 		goto unlock;
@@ -91,22 +106,13 @@ static int drm_pte_is_clear(struct vm_area_struct *vma,
 	pmd = pmd_offset(pud, addr);
 	if (pmd_none(*pmd))
 		goto unlock;
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
 	pte = pte_offset_map(pmd, addr);
-#else
-	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
-#endif
 	if (!pte)
 		goto unlock;
 	ret = pte_none(*pte);
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
 	pte_unmap(pte);
  unlock:
 	spin_unlock(&mm->page_table_lock);
-#else
-	pte_unmap_unlock(pte, ptl);
- unlock:
-#endif
 
 	return ret;
 }
@@ -121,7 +127,6 @@ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
 	return ret;
 }
 
-
 static struct {
 	spinlock_t lock;
 	struct page *dummy_page;
@@ -154,9 +159,6 @@ void free_nopage_retry(void)
 		spin_unlock(&drm_np_retry.lock);
 	}
 }
-#endif
-
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
 
 struct page *drm_vm_ttm_nopage(struct vm_area_struct *vma,
 			       unsigned long address,
@@ -186,6 +188,17 @@ struct page *drm_vm_ttm_nopage(struct vm_area_struct *vma,
 
 #ifdef DRM_ODD_MM_COMPAT
 
+/*
+ * VM compatibility code for 2.6.15-2.6.19(?). This code implements a complicated
+ * workaround for a single BUG statement in do_no_page in these versions. The
+ * tricky thing is that we need to take the mmap_sem in exclusive mode for _all_
+ * vmas mapping the ttm, before dev->struct_mutex is taken. The way we do this is
+ * to first take the dev->struct_mutex, and then trylock all mmap_sems. If this
+ * fails for a single mmap_sem, we have to release all sems and the dev->struct_mutex,
+ * release the CPU and retry. We also need to keep track of all vmas mapping the ttm.
+ * Phew.
+ */
+
 typedef struct p_mm_entry {
 	struct list_head head;
 	struct mm_struct *mm;
diff --git a/linux-core/drm_compat.h b/linux-core/drm_compat.h
index 5617fb7f..a1a94399 100644
--- a/linux-core/drm_compat.h
+++ b/linux-core/drm_compat.h
@@ -231,7 +231,7 @@ static inline int remap_pfn_range(struct vm_area_struct *vma, unsigned long from
 #include 
 #include 
 
-#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)) && \
+#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)) && \
      (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)))
 #define DRM_ODD_MM_COMPAT
 #endif
@@ -277,7 +277,18 @@ extern int drm_map_page_into_agp(struct page *page);
 #define unmap_page_from_agp drm_unmap_page_from_agp
 #endif
 
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
+extern struct page *get_nopage_retry(void);
+extern void free_nopage_retry(void);
+struct fault_data;
+extern struct page *drm_vm_ttm_fault(struct vm_area_struct *vma,
+				      struct fault_data *data);
+
+#define NOPAGE_REFAULT get_nopage_retry()
+#endif
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
 
 /*
  * Hopefully, real NOPAGE_RETRY functionality will be in 2.6.19.
@@ -295,10 +306,6 @@ struct fault_data {
 	int type;
 };
 
-extern struct page *get_nopage_retry(void);
-extern void free_nopage_retry(void);
-
-#define NOPAGE_REFAULT get_nopage_retry()
 
 extern int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
 			 unsigned long pfn, pgprot_t pgprot);
@@ -307,9 +314,6 @@ extern struct page *drm_vm_ttm_nopage(struct vm_area_struct *vma,
 				       unsigned long address,
 				       int *type);
 
-extern struct page *drm_vm_ttm_fault(struct vm_area_struct *vma,
-				      struct fault_data *data);
-
 #endif
 
 #ifdef DRM_ODD_MM_COMPAT
diff --git a/linux-core/drm_drv.c b/linux-core/drm_drv.c
index 75c89c1c..518e2aa3 100644
--- a/linux-core/drm_drv.c
+++ b/linux-core/drm_drv.c
@@ -433,7 +433,7 @@ void drm_exit(struct drm_driver *driver)
 		}
 	} else
 		pci_unregister_driver(&driver->pci_driver);
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
 	free_nopage_retry();
 #endif
 	DRM_INFO("Module unloaded\n");
@@ -472,10 +472,14 @@ static void drm_free_mem_cache(kmem_cache_t *cache,
 {
 	if (!cache)
 		return;
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
 	if (kmem_cache_destroy(cache)) {
 		DRM_ERROR("Warning! DRM is leaking %s memory.\n",
 			  name);
 	}
+#else
+	kmem_cache_destroy(cache);
+#endif
 }
 
 static void drm_free_memory_caches(void )
diff --git a/linux-core/drm_vm.c b/linux-core/drm_vm.c
index ba4b1451..fd6e89d8 100644
--- a/linux-core/drm_vm.c
+++ b/linux-core/drm_vm.c
@@ -159,7 +159,9 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
 }
 #endif /* __OS_HAS_AGP */
 
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) || \
+     LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20))
 static
 #endif
 struct page *drm_vm_ttm_fault(struct vm_area_struct *vma,
@@ -244,7 +246,7 @@ struct page *drm_vm_ttm_fault(struct vm_area_struct *vma,
 	mutex_unlock(&dev->struct_mutex);
 	return NULL;
 }
-
+#endif
 
 /**
  * \c nopage method for shared virtual memory.
@@ -535,7 +537,7 @@ static struct vm_operations_struct drm_vm_sg_ops = {
 	.close = drm_vm_close,
 };
 
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
 static struct vm_operations_struct drm_vm_ttm_ops = {
 	.nopage = drm_vm_ttm_nopage,
 	.open = drm_vm_ttm_open_wrapper,
-- 
cgit v1.2.3
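
The DRM_ODD_MM_COMPAT comment added above boils down to a lock-all-or-back-off pattern: take dev->struct_mutex, trylock the mmap_sem of every vma mapping the ttm, and if any single trylock fails, release everything (including dev->struct_mutex), give up the CPU and retry. Below is a minimal userspace sketch of that pattern using POSIX threads; it is not code from this patch, and every name in it (global_lock, mapping_lock, lock_all_mappings) is invented for illustration.

#include <pthread.h>
#include <sched.h>
#include <stdio.h>

#define N_MAPPINGS 4

static pthread_mutex_t global_lock = PTHREAD_MUTEX_INITIALIZER; /* stands in for dev->struct_mutex */
static pthread_mutex_t mapping_lock[N_MAPPINGS];                /* stand in for the per-vma mmap_sems */

/*
 * Take the global lock, then trylock every per-mapping lock.
 * If any trylock fails, drop everything we hold (including the
 * global lock), yield the CPU and start over - the same
 * back-off-and-retry idea the comment in the patch describes.
 */
static void lock_all_mappings(void)
{
	int i, locked;

	for (;;) {
		pthread_mutex_lock(&global_lock);
		locked = 0;
		for (i = 0; i < N_MAPPINGS; ++i) {
			if (pthread_mutex_trylock(&mapping_lock[i]) != 0)
				break;
			++locked;
		}
		if (locked == N_MAPPINGS)
			return;	/* all locks held; caller may proceed */

		/* Back off: release the locks we managed to take. */
		while (locked-- > 0)
			pthread_mutex_unlock(&mapping_lock[locked]);
		pthread_mutex_unlock(&global_lock);
		sched_yield();
	}
}

static void unlock_all_mappings(void)
{
	int i;

	for (i = 0; i < N_MAPPINGS; ++i)
		pthread_mutex_unlock(&mapping_lock[i]);
	pthread_mutex_unlock(&global_lock);
}

int main(void)
{
	int i;

	for (i = 0; i < N_MAPPINGS; ++i)
		pthread_mutex_init(&mapping_lock[i], NULL);

	lock_all_mappings();
	printf("all mapping locks held under the global lock\n");
	unlock_all_mappings();
	return 0;
}

Build with, for example: cc -o lock_retry lock_retry.c -lpthread (the file name is arbitrary).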