From 43a432b1559798d33970261f710030f787770231 Mon Sep 17 00:00:00 2001
From: Suresh Siddha
Date: Thu, 9 Apr 2009 14:26:47 -0700
Subject: x86, CPA: Change idmap attribute before ioremap attribute setup

Change the identity mapping to the requested attribute first, before we
set up the virtual memory mapping with the new requested attribute. This
makes sure that there is no window during which the identity-mapped
attribute can disagree with the ioremap range on the attribute type.

This also avoids doing cpa on the ioremap'ed address twice (first in
ioremap_page_range and then in ioremap_change_attr using vaddr), and
should improve ioremap performance a bit.

Signed-off-by: Suresh Siddha
Signed-off-by: Venkatesh Pallipadi
LKML-Reference: <20090409212708.373330000@intel.com>
Signed-off-by: Ingo Molnar
---
 arch/x86/mm/ioremap.c | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

(limited to 'arch')

diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 0dfa09d69e8..329387eca12 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -280,15 +280,16 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
 		return NULL;
 	area->phys_addr = phys_addr;
 	vaddr = (unsigned long) area->addr;
-	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
+
+	if (kernel_map_sync_memtype(phys_addr, size, prot_val)) {
 		free_memtype(phys_addr, phys_addr + size);
 		free_vm_area(area);
 		return NULL;
 	}
 
-	if (ioremap_change_attr(vaddr, size, prot_val) < 0) {
+	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
 		free_memtype(phys_addr, phys_addr + size);
-		vunmap(area->addr);
+		free_vm_area(area);
 		return NULL;
 	}
 
--
cgit v1.2.3
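[ Note: the reordered flow can be condensed as below. This is an
  illustrative sketch with a hypothetical function name and with
  unrelated arguments and setup elided; it is not the complete kernel
  function, but it uses only calls that appear in the hunk above: ]

static void __iomem *ioremap_order_sketch(resource_size_t phys_addr,
					  unsigned long size,
					  unsigned long prot_val, pgprot_t prot,
					  struct vm_struct *area)
{
	unsigned long vaddr = (unsigned long) area->addr;

	/* 1. Change the kernel identity mapping to the requested
	 *    attribute first ... */
	if (kernel_map_sync_memtype(phys_addr, size, prot_val))
		goto err_free;

	/* 2. ... then create the ioremap mapping already carrying that
	 *    attribute: there is no window in which the two mappings
	 *    disagree, and no second cpa pass over vaddr is needed. */
	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot))
		goto err_free;

	return (void __iomem *) area->addr;

err_free:
	free_memtype(phys_addr, phys_addr + size);
	free_vm_area(area);
	return NULL;
}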
From a5593e0b329a14dea41ea173380dbf1533de2bd2 Mon Sep 17 00:00:00 2001
From: "venkatesh.pallipadi@intel.com"
Date: Thu, 9 Apr 2009 14:26:48 -0700
Subject: x86, PAT: Change order of cpa and free in set_memory_wb

To be free of aliasing due to races, the set_memory_* interfaces should
follow this ordering: reserve the memtype, change the memtype to UC/WC,
change the memtype back to WB, and only then free the reservation.

Signed-off-by: Venkatesh Pallipadi
Signed-off-by: Suresh Siddha
LKML-Reference: <20090409212708.512280000@intel.com>
Signed-off-by: Ingo Molnar
---
 arch/x86/mm/pageattr.c | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)

(limited to 'arch')

diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index d71e1b636ce..d487eaa17bf 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -1021,15 +1021,19 @@ int _set_memory_wb(unsigned long addr, int numpages)
 
 int set_memory_wb(unsigned long addr, int numpages)
 {
+	int ret = _set_memory_wb(addr, numpages);
 	free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
-
-	return _set_memory_wb(addr, numpages);
+	return ret;
 }
 EXPORT_SYMBOL(set_memory_wb);
 
 int set_memory_array_wb(unsigned long *addr, int addrinarray)
 {
 	int i;
+	int ret;
+
+	ret = change_page_attr_clear(addr, addrinarray,
+				     __pgprot(_PAGE_CACHE_MASK), 1);
 
 	for (i = 0; i < addrinarray; i++) {
 		unsigned long start = __pa(addr[i]);
@@ -1042,8 +1046,7 @@ int set_memory_array_wb(unsigned long *addr, int addrinarray)
 		}
 		free_memtype(start, end);
 	}
-	return change_page_attr_clear(addr, addrinarray,
-				      __pgprot(_PAGE_CACHE_MASK), 1);
+	return ret;
 }
 EXPORT_SYMBOL(set_memory_array_wb);
--
cgit v1.2.3
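[ Note: in set_memory_wb() terms, the rule is "cpa back to WB first,
  free the reservation last", so a racing reserve_memtype() can never
  win the range while its page attributes are still UC/WC. A sketch of
  the teardown half, with comments added for the reasoning (hypothetical
  function name; the array variant above follows the same shape): ]

int set_memory_wb_order_sketch(unsigned long addr, int numpages)
{
	/* 1. cpa back to WB while the range is still reserved ... */
	int ret = _set_memory_wb(addr, numpages);

	/* 2. ... and only then give up the memtype reservation, so no
	 *    racing reserver can alias the range while it is non-WB. */
	free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
	return ret;
}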
From 9fa3ab390abfc8b49fc0dd7c845b0ad224ec429f Mon Sep 17 00:00:00 2001
From: "venkatesh.pallipadi@intel.com"
Date: Thu, 9 Apr 2009 14:26:49 -0700
Subject: x86, PAT: Handle faults cleanly in set_memory_ APIs

Handle faults and do proper cleanups in the set_memory_*() functions. In
some cases, these functions were not doing a proper free on their
failure paths.

With the changes to tracking the memtype of RAM pages in struct page
instead of the pat list, we do not need the changes in commit c5e147.
This patch reverts that change.

Signed-off-by: Venkatesh Pallipadi
Signed-off-by: Suresh Siddha
LKML-Reference: <20090409212708.653222000@intel.com>
Signed-off-by: Ingo Molnar
---
 arch/x86/mm/pageattr.c | 113 ++++++++++++++++++++++++++++---------------------
 1 file changed, 65 insertions(+), 48 deletions(-)

(limited to 'arch')

diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index d487eaa17bf..985eef80c55 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -945,52 +945,56 @@ int _set_memory_uc(unsigned long addr, int numpages)
 
 int set_memory_uc(unsigned long addr, int numpages)
 {
+	int ret;
+
 	/*
 	 * for now UC MINUS. see comments in ioremap_nocache()
 	 */
-	if (reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
-			    _PAGE_CACHE_UC_MINUS, NULL))
-		return -EINVAL;
+	ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
+			      _PAGE_CACHE_UC_MINUS, NULL);
+	if (ret)
+		goto out_err;
+
+	ret = _set_memory_uc(addr, numpages);
+	if (ret)
+		goto out_free;
+
+	return 0;
 
-	return _set_memory_uc(addr, numpages);
+out_free:
+	free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
+out_err:
+	return ret;
 }
 EXPORT_SYMBOL(set_memory_uc);
 
 int set_memory_array_uc(unsigned long *addr, int addrinarray)
 {
-	unsigned long start;
-	unsigned long end;
-	int i;
+	int i, j;
+	int ret;
+
 	/*
 	 * for now UC MINUS. see comments in ioremap_nocache()
 	 */
 	for (i = 0; i < addrinarray; i++) {
-		start = __pa(addr[i]);
-		for (end = start + PAGE_SIZE; i < addrinarray - 1; end += PAGE_SIZE) {
-			if (end != __pa(addr[i + 1]))
-				break;
-			i++;
-		}
-		if (reserve_memtype(start, end, _PAGE_CACHE_UC_MINUS, NULL))
-			goto out;
+		ret = reserve_memtype(__pa(addr[i]), __pa(addr[i]) + PAGE_SIZE,
+					_PAGE_CACHE_UC_MINUS, NULL);
+		if (ret)
+			goto out_free;
 	}
 
-	return change_page_attr_set(addr, addrinarray,
+	ret = change_page_attr_set(addr, addrinarray,
 				    __pgprot(_PAGE_CACHE_UC_MINUS), 1);
-out:
-	for (i = 0; i < addrinarray; i++) {
-		unsigned long tmp = __pa(addr[i]);
-
-		if (tmp == start)
-			break;
-		for (end = tmp + PAGE_SIZE; i < addrinarray - 1; end += PAGE_SIZE) {
-			if (end != __pa(addr[i + 1]))
-				break;
-			i++;
-		}
-		free_memtype(tmp, end);
-	}
-	return -EINVAL;
+	if (ret)
+		goto out_free;
+
+	return 0;
+
+out_free:
+	for (j = 0; j < i; j++)
+		free_memtype(__pa(addr[j]), __pa(addr[j]) + PAGE_SIZE);
+
+	return ret;
 }
 EXPORT_SYMBOL(set_memory_array_uc);
 
@@ -1002,14 +1006,26 @@ int _set_memory_wc(unsigned long addr, int numpages)
 
 int set_memory_wc(unsigned long addr, int numpages)
 {
+	int ret;
+
 	if (!pat_enabled)
 		return set_memory_uc(addr, numpages);
 
-	if (reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
-		_PAGE_CACHE_WC, NULL))
-		return -EINVAL;
+	ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
+		_PAGE_CACHE_WC, NULL);
+	if (ret)
+		goto out_err;
 
-	return _set_memory_wc(addr, numpages);
+	ret = _set_memory_wc(addr, numpages);
+	if (ret)
+		goto out_free;
+
+	return 0;
+
+out_free:
+	free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
+out_err:
+	return ret;
 }
 EXPORT_SYMBOL(set_memory_wc);
 
@@ -1021,9 +1037,14 @@ int _set_memory_wb(unsigned long addr, int numpages)
 
 int set_memory_wb(unsigned long addr, int numpages)
 {
-	int ret = _set_memory_wb(addr, numpages);
+	int ret;
+
+	ret = _set_memory_wb(addr, numpages);
+	if (ret)
+		return ret;
+
 	free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
-	return ret;
+	return 0;
 }
 EXPORT_SYMBOL(set_memory_wb);
 
@@ -1034,19 +1055,13 @@ int set_memory_array_wb(unsigned long *addr, int addrinarray)
 
 	ret = change_page_attr_clear(addr, addrinarray,
 				     __pgprot(_PAGE_CACHE_MASK), 1);
+	if (ret)
+		return ret;
 
-	for (i = 0; i < addrinarray; i++) {
-		unsigned long start = __pa(addr[i]);
-		unsigned long end;
+	for (i = 0; i < addrinarray; i++)
+		free_memtype(__pa(addr[i]), __pa(addr[i]) + PAGE_SIZE);
 
-		for (end = start + PAGE_SIZE; i < addrinarray - 1; end += PAGE_SIZE) {
-			if (end != __pa(addr[i + 1]))
-				break;
-			i++;
-		}
-		free_memtype(start, end);
-	}
-	return ret;
+	return 0;
 }
 EXPORT_SYMBOL(set_memory_array_wb);
 
@@ -1139,6 +1154,8 @@ int set_pages_array_wb(struct page **pages, int addrinarray)
 
 	retval = cpa_clear_pages_array(pages, addrinarray,
 			__pgprot(_PAGE_CACHE_MASK));
+	if (retval)
+		return retval;
 
 	for (i = 0; i < addrinarray; i++) {
 		start = (unsigned long)page_address(pages[i]);
@@ -1146,7 +1163,7 @@ int set_pages_array_wb(struct page **pages, int addrinarray)
 		free_memtype(start, end);
 	}
 
-	return retval;
+	return 0;
 }
 EXPORT_SYMBOL(set_pages_array_wb);
--
cgit v1.2.3
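[ Note: the repeated shape in the functions above is one reserve-then-cpa
  sequence whose failure path unwinds only what was actually reserved.
  Distilled into a single sketch (hypothetical helper, not a new kernel
  API; cache_type stands for _PAGE_CACHE_UC_MINUS or _PAGE_CACHE_WC): ]

int set_memory_pattern_sketch(unsigned long addr, int numpages,
			      unsigned long cache_type)
{
	u64 start = __pa(addr);
	u64 end = start + numpages * PAGE_SIZE;
	int ret;

	ret = reserve_memtype(start, end, cache_type, NULL);
	if (ret)
		return ret;	/* nothing reserved, nothing to undo */

	ret = change_page_attr_set(&addr, numpages,
				   __pgprot(cache_type), 0);
	if (ret)
		free_memtype(start, end);	/* undo the reservation */

	return ret;
}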
From 3869c4aa18835c8c61b44bd0f3ace36e9d3b5bd0 Mon Sep 17 00:00:00 2001
From: "venkatesh.pallipadi@intel.com"
Date: Thu, 9 Apr 2009 14:26:50 -0700
Subject: x86, PAT: Changing memtype to WC ensuring no WB alias

As per the SDM, there should not be any aliasing of a WC mapping with
any cacheable type across CPUs. That is, while one CPU is changing the
identity map memtype to _WC, no other CPU may hold a TLB entry for this
page that carries a WB attribute. The SDM suggests making the page not
present, but then we would have to handle any page faults that can
potentially happen due to these pages being not present.

Another way to deal with this, without ever having a WB mapping, is to
change the page first to UC and then to WC. This ensures that we meet
the SDM requirement of no cacheable alias to a WC page. It also has the
same or lower overhead than marking the page not present and making it
present later.

Signed-off-by: Venkatesh Pallipadi
Signed-off-by: Suresh Siddha
LKML-Reference: <20090409212708.797481000@intel.com>
Signed-off-by: Ingo Molnar
---
 arch/x86/mm/pageattr.c | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

(limited to 'arch')

diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 985eef80c55..797f9f107cb 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -1000,8 +1000,15 @@ EXPORT_SYMBOL(set_memory_array_uc);
 
 int _set_memory_wc(unsigned long addr, int numpages)
 {
-	return change_page_attr_set(&addr, numpages,
+	int ret;
+	ret = change_page_attr_set(&addr, numpages,
+		    __pgprot(_PAGE_CACHE_UC_MINUS), 0);
+
+	if (!ret) {
+		ret = change_page_attr_set(&addr, numpages,
 		    __pgprot(_PAGE_CACHE_WC), 0);
+	}
+	return ret;
 }
 
 int set_memory_wc(unsigned long addr, int numpages)
--
cgit v1.2.3
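[ Note: the guarantee can be read directly off the two-step sequence.
  A sketch using the same calls as the hunk above, with the reasoning
  spelled out in comments (hypothetical function name, not part of the
  patch): ]

int wb_to_wc_sketch(unsigned long addr, int numpages)
{
	/* Step 1: WB -> UC_MINUS. Once this completes (including the
	 * TLB/cache flushing done inside cpa), no CPU can still hold a
	 * cacheable mapping of the range. */
	int ret = change_page_attr_set(&addr, numpages,
				       __pgprot(_PAGE_CACHE_UC_MINUS), 0);

	/* Step 2: UC_MINUS -> WC. Any stale alias another CPU might
	 * still see is uncached, never WB, so the SDM rule of "no
	 * cacheable alias to a WC page" holds throughout. */
	if (!ret)
		ret = change_page_attr_set(&addr, numpages,
					   __pgprot(_PAGE_CACHE_WC), 0);
	return ret;
}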
From b6ff32d9aaeeeecf98f9a852d715569183585312 Mon Sep 17 00:00:00 2001
From: Suresh Siddha
Date: Thu, 9 Apr 2009 14:26:51 -0700
Subject: x86, PAT: Consolidate code in pat_x_mtrr_type() and reserve_memtype()

Fix pat_x_mtrr_type() to use UC_MINUS when the mtrr type returns UC.
This is to be consistent with ioremap() and ioremap_nocache(), which use
UC_MINUS.

Consolidate the code such that reserve_memtype() also uses
pat_x_mtrr_type() when the caller doesn't specify any special attribute
(i.e. a non-WB attribute).

Signed-off-by: Suresh Siddha
Signed-off-by: Venkatesh Pallipadi
LKML-Reference: <20090409212708.939936000@intel.com>
Signed-off-by: Ingo Molnar
---
 arch/x86/mm/ioremap.c |  3 ++-
 arch/x86/mm/pat.c     | 35 +++++++++++++----------------------
 2 files changed, 15 insertions(+), 23 deletions(-)

(limited to 'arch')

diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 329387eca12..d4c4b2c4dbb 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -375,7 +375,8 @@ static void __iomem *ioremap_default(resource_size_t phys_addr,
 	 * - UC_MINUS for non-WB-able memory with no other conflicting mappings
 	 * - Inherit from confliting mappings otherwise
 	 */
-	err = reserve_memtype(phys_addr, phys_addr + size, -1, &flags);
+	err = reserve_memtype(phys_addr, phys_addr + size,
+				_PAGE_CACHE_WB, &flags);
 	if (err < 0)
 		return NULL;
 
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 640339ee4fb..8d3de958050 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -182,10 +182,10 @@ static unsigned long pat_x_mtrr_type(u64 start, u64 end, unsigned long req_type)
 		u8 mtrr_type;
 
 		mtrr_type = mtrr_type_lookup(start, end);
-		if (mtrr_type == MTRR_TYPE_UNCACHABLE)
-			return _PAGE_CACHE_UC;
-		if (mtrr_type == MTRR_TYPE_WRCOMB)
-			return _PAGE_CACHE_WC;
+		if (mtrr_type != MTRR_TYPE_WRBACK)
+			return _PAGE_CACHE_UC_MINUS;
+
+		return _PAGE_CACHE_WB;
 	}
 
 	return req_type;
@@ -352,23 +352,13 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 		return 0;
 	}
 
-	if (req_type == -1) {
-		/*
-		 * Call mtrr_lookup to get the type hint. This is an
-		 * optimization for /dev/mem mmap'ers into WB memory (BIOS
-		 * tools and ACPI tools). Use WB request for WB memory and use
-		 * UC_MINUS otherwise.
-		 */
-		u8 mtrr_type = mtrr_type_lookup(start, end);
-
-		if (mtrr_type == MTRR_TYPE_WRBACK)
-			actual_type = _PAGE_CACHE_WB;
-		else
-			actual_type = _PAGE_CACHE_UC_MINUS;
-	} else {
-		actual_type = pat_x_mtrr_type(start, end,
-					      req_type & _PAGE_CACHE_MASK);
-	}
+	/*
+	 * Call mtrr_lookup to get the type hint. This is an
+	 * optimization for /dev/mem mmap'ers into WB memory (BIOS
+	 * tools and ACPI tools). Use WB request for WB memory and use
+	 * UC_MINUS otherwise.
+	 */
+	actual_type = pat_x_mtrr_type(start, end, req_type & _PAGE_CACHE_MASK);
 
 	if (new_type)
 		*new_type = actual_type;
@@ -587,7 +577,8 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
 	if (flags != -1) {
 		retval = reserve_memtype(offset, offset + size, flags, NULL);
 	} else {
-		retval = reserve_memtype(offset, offset + size, -1, &flags);
+		retval = reserve_memtype(offset, offset + size,
+					_PAGE_CACHE_WB, &flags);
 	}
 
 	if (retval < 0)
--
cgit v1.2.3
From 0c3c8a18361a636069f5a5d9d0d0f9c2124e6b94 Mon Sep 17 00:00:00 2001
From: Suresh Siddha
Date: Thu, 9 Apr 2009 14:26:52 -0700
Subject: x86, PAT: Remove duplicate memtype reserve in devmem mmap

The /dev/mem mmap code has been doing its own memtype reserve/free for a
while now. Recently we added memtype tracking in remap_pfn_range, and
/dev/mem mmap uses it indirectly. So we don't need separate tracking in
the /dev/mem code any more. That means another ~100 lines of code
removed :-).

Signed-off-by: Suresh Siddha
Signed-off-by: Venkatesh Pallipadi
LKML-Reference: <20090409212709.085210000@intel.com>
Signed-off-by: Ingo Molnar
---
 arch/x86/include/asm/pat.h |  4 ----
 arch/x86/mm/pat.c          | 60 ++--------------------------------------------
 2 files changed, 2 insertions(+), 62 deletions(-)

(limited to 'arch')

diff --git a/arch/x86/include/asm/pat.h b/arch/x86/include/asm/pat.h
index 2cd07b9422f..7af14e512f9 100644
--- a/arch/x86/include/asm/pat.h
+++ b/arch/x86/include/asm/pat.h
@@ -18,9 +18,5 @@ extern int free_memtype(u64 start, u64 end);
 extern int kernel_map_sync_memtype(u64 base, unsigned long size,
 		unsigned long flag);
 
-extern void map_devmem(unsigned long pfn, unsigned long size,
-		struct pgprot vma_prot);
-extern void unmap_devmem(unsigned long pfn, unsigned long size,
-		struct pgprot vma_prot);
 
 #endif /* _ASM_X86_PAT_H */
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 8d3de958050..cc5e0e24e44 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -536,9 +536,7 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
 int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
 				unsigned long size, pgprot_t *vma_prot)
 {
-	u64 offset = ((u64) pfn) << PAGE_SHIFT;
-	unsigned long flags = -1;
-	int retval;
+	unsigned long flags = _PAGE_CACHE_WB;
 
 	if (!range_is_allowed(pfn, size))
 		return 0;
@@ -566,65 +564,11 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
 	}
 #endif
 
-	/*
-	 * With O_SYNC, we can only take UC_MINUS mapping. Fail if we cannot.
-	 *
-	 * Without O_SYNC, we want to get
-	 * - WB for WB-able memory and no other conflicting mappings
-	 * - UC_MINUS for non-WB-able memory with no other conflicting mappings
-	 * - Inherit from confliting mappings otherwise
-	 */
-	if (flags != -1) {
-		retval = reserve_memtype(offset, offset + size, flags, NULL);
-	} else {
-		retval = reserve_memtype(offset, offset + size,
-					_PAGE_CACHE_WB, &flags);
-	}
-
-	if (retval < 0)
-		return 0;
-
-	if (((pfn < max_low_pfn_mapped) ||
-	     (pfn >= (1UL<<(32 - PAGE_SHIFT)) && pfn < max_pfn_mapped)) &&
-	    ioremap_change_attr((unsigned long)__va(offset), size, flags) < 0) {
-		free_memtype(offset, offset + size);
-		printk(KERN_INFO
-		"%s:%d /dev/mem ioremap_change_attr failed %s for %Lx-%Lx\n",
-			current->comm, current->pid,
-			cattr_name(flags),
-			offset, (unsigned long long)(offset + size));
-		return 0;
-	}
-
 	*vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) |
 			     flags);
 	return 1;
 }
 
-void map_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
-{
-	unsigned long want_flags = (pgprot_val(vma_prot) & _PAGE_CACHE_MASK);
-	u64 addr = (u64)pfn << PAGE_SHIFT;
-	unsigned long flags;
-
-	reserve_memtype(addr, addr + size, want_flags, &flags);
-	if (flags != want_flags) {
-		printk(KERN_INFO
-		"%s:%d /dev/mem expected mapping type %s for %Lx-%Lx, got %s\n",
-			current->comm, current->pid,
-			cattr_name(want_flags),
-			addr, (unsigned long long)(addr + size),
-			cattr_name(flags));
-	}
-}
-
-void unmap_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
-{
-	u64 addr = (u64)pfn << PAGE_SHIFT;
-
-	free_memtype(addr, addr + size);
-}
-
 /*
  * Change the memory type for the physial address range in kernel identity
  * mapping space if that range is a part of identity map.
@@ -662,8 +606,8 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
 {
 	int is_ram = 0;
 	int ret;
-	unsigned long flags;
 	unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK);
+	unsigned long flags = want_flags;
 
 	is_ram = pat_pagerange_is_ram(paddr, paddr + size);
 
--
cgit v1.2.3
From ff6c6fed3a8ab9b0a7b02574e095e905e89421d9 Mon Sep 17 00:00:00 2001
From: Jaswinder Singh Rajput
Date: Sun, 12 Apr 2009 23:24:21 +0530
Subject: x86: pci-swiotlb.c swiotlb_dma_ops should be static

Impact: reduce kernel size a bit, address sparse warning

Addresses the problem pointed out by this sparse warning:

  arch/x86/kernel/pci-swiotlb.c:53:20: warning: symbol 'swiotlb_dma_ops' was not declared. Should it be static?

For x86, swiotlb_dma_ops can be static because it is not used outside of
pci-swiotlb.c.

Signed-off-by: Jaswinder Singh Rajput
Acked-by: FUJITA Tomonori
LKML-Reference: <1239558861.3938.2.camel@localhost.localdomain>
Signed-off-by: Ingo Molnar
---
 arch/x86/kernel/pci-swiotlb.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch')

diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
index 34f12e9996e..221a3853e26 100644
--- a/arch/x86/kernel/pci-swiotlb.c
+++ b/arch/x86/kernel/pci-swiotlb.c
@@ -50,7 +50,7 @@ static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
 }
 
-struct dma_map_ops swiotlb_dma_ops = {
+static struct dma_map_ops swiotlb_dma_ops = {
 	.mapping_error = swiotlb_dma_mapping_error,
 	.alloc_coherent = x86_swiotlb_alloc_coherent,
 	.free_coherent = swiotlb_free_coherent,
--
cgit v1.2.3

From d45b41ae8da0f54aec0eebcc6f893ba5f22a1e8e Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Wed, 15 Apr 2009 23:15:14 +0200
Subject: x86: disable X86_PTRACE_BTS for now

Oleg Nesterov found a couple of races in the ptrace-bts code, and fixes
are queued up for it, but they were not ready in time for the merge
window. We'll merge them in v2.6.31 - until then, mark the feature as
CONFIG_BROKEN. There's no user-space making use of this yet, so it's not
a big issue.

Cc:
Signed-off-by: Ingo Molnar
---
 arch/x86/Kconfig.cpu | 1 +
 1 file changed, 1 insertion(+)

(limited to 'arch')

diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
index 924e156a85a..8130334329c 100644
--- a/arch/x86/Kconfig.cpu
+++ b/arch/x86/Kconfig.cpu
@@ -506,6 +506,7 @@ config X86_PTRACE_BTS
 	bool "Branch Trace Store"
 	default y
 	depends on X86_DEBUGCTLMSR
+	depends on BROKEN
 	---help---
 	  This adds a ptrace interface to the hardware's branch trace store.
--
cgit v1.2.3
From 4b065046273afa01ec8e3de7da407e8d3599251d Mon Sep 17 00:00:00 2001
From: "Pallipadi, Venkatesh"
Date: Wed, 8 Apr 2009 15:37:16 -0700
Subject: x86, PAT: Remove page granularity tracking for vm_insert_pfn maps

This change resolves the problem of too many single-page entries in
pat_memtype_list and the "freeing invalid memtype" errors with i915,
reported here:

  http://marc.info/?l=linux-kernel&m=123845244713183&w=2

Remove page-level granularity tracking and untracking of vm_insert_pfn.
Memtype tracking at page granularity does not scale; a cleaner approach
would be for the driver to request a type for a bigger IO address range
or PCI IO memory range for that device, either at mmap time or at driver
init time, and just use that type during vm_insert_pfn.

This patch just removes the track/untrack of vm_insert_pfn. That means
we will be in the same state as 2.6.28 with respect to these APIs. Newer
APIs for the drivers to request a memtype for a bigger region are coming
soon.

[ Impact: fix Xorg startup warnings and hangs ]

Reported-by: Arkadiusz Miskiewicz
Tested-by: Arkadiusz Miskiewicz
Signed-off-by: Venkatesh Pallipadi
Signed-off-by: Suresh Siddha
Cc: Jesse Barnes
LKML-Reference: <20090408223716.GC3493@linux-os.sc.intel.com>
Signed-off-by: Ingo Molnar
---
 arch/x86/mm/pat.c | 98 +++++++++++--------------------------------------------
 1 file changed, 19 insertions(+), 79 deletions(-)

(limited to 'arch')

diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index cc5e0e24e44..41c80571815 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -669,29 +669,28 @@ static void free_pfn_range(u64 paddr, unsigned long size)
  *
  * If the vma has a linear pfn mapping for the entire range, we get the prot
  * from pte and reserve the entire vma range with single reserve_pfn_range call.
- * Otherwise, we reserve the entire vma range, my ging through the PTEs page
- * by page to get physical address and protection.
  */
 int track_pfn_vma_copy(struct vm_area_struct *vma)
 {
-	int retval = 0;
-	unsigned long i, j;
 	resource_size_t paddr;
 	unsigned long prot;
-	unsigned long vma_start = vma->vm_start;
-	unsigned long vma_end = vma->vm_end;
-	unsigned long vma_size = vma_end - vma_start;
+	unsigned long vma_size = vma->vm_end - vma->vm_start;
 	pgprot_t pgprot;
 
 	if (!pat_enabled)
 		return 0;
 
+	/*
+	 * For now, only handle remap_pfn_range() vmas where
+	 * is_linear_pfn_mapping() == TRUE. Handling of
+	 * vm_insert_pfn() is TBD.
+	 */
 	if (is_linear_pfn_mapping(vma)) {
 		/*
 		 * reserve the whole chunk covered by vma. We need the
 		 * starting address and protection from pte.
 		 */
-		if (follow_phys(vma, vma_start, 0, &prot, &paddr)) {
+		if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
 			WARN_ON_ONCE(1);
 			return -EINVAL;
 		}
@@ -699,28 +698,7 @@ int track_pfn_vma_copy(struct vm_area_struct *vma)
 		return reserve_pfn_range(paddr, vma_size, &pgprot, 1);
 	}
 
-	/* reserve entire vma page by page, using pfn and prot from pte */
-	for (i = 0; i < vma_size; i += PAGE_SIZE) {
-		if (follow_phys(vma, vma_start + i, 0, &prot, &paddr))
-			continue;
-
-		pgprot = __pgprot(prot);
-		retval = reserve_pfn_range(paddr, PAGE_SIZE, &pgprot, 1);
-		if (retval)
-			goto cleanup_ret;
-	}
 	return 0;
-
-cleanup_ret:
-	/* Reserve error: Cleanup partial reservation and return error */
-	for (j = 0; j < i; j += PAGE_SIZE) {
-		if (follow_phys(vma, vma_start + j, 0, &prot, &paddr))
-			continue;
-
-		free_pfn_range(paddr, PAGE_SIZE);
-	}
-
-	return retval;
 }
 
 /*
@@ -730,50 +708,28 @@ cleanup_ret:
 * prot is passed in as a parameter for the new mapping. If the vma has a
 * linear pfn mapping for the entire range reserve the entire vma range with
 * single reserve_pfn_range call.
- * Otherwise, we look t the pfn and size and reserve only the specified range
- * page by page.
- *
- * Note that this function can be called with caller trying to map only a
- * subrange/page inside the vma.
 */
 int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
			unsigned long pfn, unsigned long size)
 {
-	int retval = 0;
-	unsigned long i, j;
-	resource_size_t base_paddr;
 	resource_size_t paddr;
-	unsigned long vma_start = vma->vm_start;
-	unsigned long vma_end = vma->vm_end;
-	unsigned long vma_size = vma_end - vma_start;
+	unsigned long vma_size = vma->vm_end - vma->vm_start;
 
 	if (!pat_enabled)
 		return 0;
 
+	/*
+	 * For now, only handle remap_pfn_range() vmas where
+	 * is_linear_pfn_mapping() == TRUE. Handling of
+	 * vm_insert_pfn() is TBD.
+	 */
 	if (is_linear_pfn_mapping(vma)) {
 		/* reserve the whole chunk starting from vm_pgoff */
 		paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
 		return reserve_pfn_range(paddr, vma_size, prot, 0);
 	}
 
-	/* reserve page by page using pfn and size */
-	base_paddr = (resource_size_t)pfn << PAGE_SHIFT;
-	for (i = 0; i < size; i += PAGE_SIZE) {
-		paddr = base_paddr + i;
-		retval = reserve_pfn_range(paddr, PAGE_SIZE, prot, 0);
-		if (retval)
-			goto cleanup_ret;
-	}
 	return 0;
-
-cleanup_ret:
-	/* Reserve error: Cleanup partial reservation and return error */
-	for (j = 0; j < i; j += PAGE_SIZE) {
-		paddr = base_paddr + j;
-		free_pfn_range(paddr, PAGE_SIZE);
-	}
-
-	return retval;
 }
 
 /*
@@ -784,39 +740,23 @@ cleanup_ret:
 void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
			unsigned long size)
 {
-	unsigned long i;
 	resource_size_t paddr;
-	unsigned long prot;
-	unsigned long vma_start = vma->vm_start;
-	unsigned long vma_end = vma->vm_end;
-	unsigned long vma_size = vma_end - vma_start;
+	unsigned long vma_size = vma->vm_end - vma->vm_start;
 
 	if (!pat_enabled)
 		return;
 
+	/*
+	 * For now, only handle remap_pfn_range() vmas where
+	 * is_linear_pfn_mapping() == TRUE. Handling of
+	 * vm_insert_pfn() is TBD.
+	 */
 	if (is_linear_pfn_mapping(vma)) {
 		/* free the whole chunk starting from vm_pgoff */
 		paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
 		free_pfn_range(paddr, vma_size);
 		return;
 	}
-
-	if (size != 0 && size != vma_size) {
-		/* free page by page, using pfn and size */
-		paddr = (resource_size_t)pfn << PAGE_SHIFT;
-		for (i = 0; i < size; i += PAGE_SIZE) {
-			paddr = paddr + i;
-			free_pfn_range(paddr, PAGE_SIZE);
-		}
-	} else {
-		/* free entire vma, page by page, using the pfn from pte */
-		for (i = 0; i < vma_size; i += PAGE_SIZE) {
-			if (follow_phys(vma, vma_start + i, 0, &prot, &paddr))
-				continue;
-
-			free_pfn_range(paddr, PAGE_SIZE);
-		}
-	}
 }
 
 pgprot_t pgprot_writecombine(pgprot_t prot)
--
cgit v1.2.3
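[ Note: after this commit, all three PAT vma hooks share one simple
  contract. It can be condensed as below (an illustrative restatement of
  the code above with a hypothetical function name, not additional
  kernel code; pfn and size are deliberately unused, matching the
  removed per-page path): ]

int track_pfn_vma_new_condensed(struct vm_area_struct *vma, pgprot_t *prot,
				unsigned long pfn, unsigned long size)
{
	if (!pat_enabled)
		return 0;

	if (is_linear_pfn_mapping(vma)) {
		/* remap_pfn_range() vma: one reservation for the whole
		 * physically contiguous chunk starting at vm_pgoff */
		resource_size_t paddr =
			(resource_size_t)vma->vm_pgoff << PAGE_SHIFT;

		return reserve_pfn_range(paddr, vma->vm_end - vma->vm_start,
					 prot, 0);
	}

	/* vm_insert_pfn-style mappings: no per-page tracking any more */
	return 0;
}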
From 0917798d82212f884fff650e7e520de3b438f947 Mon Sep 17 00:00:00 2001
From: Dmitry Adamushko
Date: Wed, 15 Apr 2009 16:51:48 +0200
Subject: x86: fix microcode driver newly spewing warnings

Jeff Garzik reported this WARN_ON() noise:

> Kernel: 2.6.30-rc1-00306-g8371f87
> Hardware: ICH10 x86-64
>
> This is a regression from 2.6.29. Microcode spews the following WARNING
> multiple times during boot:
>
> ------------[ cut here ]------------
> WARNING: at fs/sysfs/group.c:138 sysfs_remove_group+0xeb/0xf0()
> Hardware name: sysfs group ffffffffa0209700 not found for
> kobject 'cpu0'

Keep the sysfs files around for CPUs even when we fail to locate
microcode for them at module load time. The appropriate microcode
firmware can become available later on.

Signed-off-by: Ingo Molnar
---
 arch/x86/kernel/microcode_core.c | 2 --
 1 file changed, 2 deletions(-)

(limited to 'arch')

diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
index a0f3851ef31..4d420de9ac6 100644
--- a/arch/x86/kernel/microcode_core.c
+++ b/arch/x86/kernel/microcode_core.c
@@ -391,8 +391,6 @@ static int mc_sysdev_add(struct sys_device *sys_dev)
 		return err;
 
 	err = microcode_init_cpu(cpu);
-	if (err)
-		sysfs_remove_group(&sys_dev->kobj, &mc_attr_group);
 
 	return err;
 }
--
cgit v1.2.3
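[ Note: for clarity, the resulting tail of mc_sysdev_add() can be
  sketched as follows. This is an abridged illustration, not the full
  function; the sysfs_create_group() call is an assumption implied by
  the hunk context above, which shows the group being created earlier
  in the function: ]

static int mc_sysdev_add_sketch(struct sys_device *sys_dev, int cpu)
{
	int err;

	err = sysfs_create_group(&sys_dev->kobj, &mc_attr_group);
	if (err)
		return err;

	/* Even if no microcode is found right now, keep the sysfs group
	 * in place: the firmware may become available after module load,
	 * and removing a group that sysfs no longer knows about was the
	 * source of the WARN_ON() noise reported above. */
	err = microcode_init_cpu(cpu);
	return err;
}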