Diffstat (limited to 'arch/arm/mm')
-rw-r--r--   arch/arm/mm/abort-ev6.S          |  3
-rw-r--r--   arch/arm/mm/copypage-feroceon.c  |  2
-rw-r--r--   arch/arm/mm/copypage-v3.c        |  2
-rw-r--r--   arch/arm/mm/copypage-v4mc.c      |  4
-rw-r--r--   arch/arm/mm/copypage-v4wb.c      |  2
-rw-r--r--   arch/arm/mm/copypage-v4wt.c      |  2
-rw-r--r--   arch/arm/mm/copypage-xsc3.c      |  2
-rw-r--r--   arch/arm/mm/copypage-xscale.c    |  2
-rw-r--r--   arch/arm/mm/dma-mapping.c        | 20
-rw-r--r--   arch/arm/mm/fault-armv.c         |  5
-rw-r--r--   arch/arm/mm/init.c               |  2
-rw-r--r--   arch/arm/mm/ioremap.c            | 11
-rw-r--r--   arch/arm/mm/mmap.c               |  2
-rw-r--r--   arch/arm/mm/mmu.c                |  3
14 files changed, 32 insertions, 30 deletions
diff --git a/arch/arm/mm/abort-ev6.S b/arch/arm/mm/abort-ev6.S
index 8a7f65ba14b..94077fbd96b 100644
--- a/arch/arm/mm/abort-ev6.S
+++ b/arch/arm/mm/abort-ev6.S
@@ -23,7 +23,8 @@ ENTRY(v6_early_abort)
 #ifdef CONFIG_CPU_32v6K
 	clrex
 #else
-	strex	r0, r1, [sp]		@ Clear the exclusive monitor
+	sub	r1, sp, #4		@ Get unused stack location
+	strex	r0, r1, [r1]		@ Clear the exclusive monitor
 #endif
 	mrc	p15, 0, r1, c5, c0, 0	@ get FSR
 	mrc	p15, 0, r0, c6, c0, 0	@ get FAR
diff --git a/arch/arm/mm/copypage-feroceon.c b/arch/arm/mm/copypage-feroceon.c
index c3ba6a94da0..70997d5bee2 100644
--- a/arch/arm/mm/copypage-feroceon.c
+++ b/arch/arm/mm/copypage-feroceon.c
@@ -13,7 +13,7 @@
 #include <linux/init.h>
 #include <linux/highmem.h>
 
-static void __attribute__((naked))
+static void __naked
 feroceon_copy_user_page(void *kto, const void *kfrom)
 {
 	asm("\
diff --git a/arch/arm/mm/copypage-v3.c b/arch/arm/mm/copypage-v3.c
index 70ed96c8af8..de9c06854ad 100644
--- a/arch/arm/mm/copypage-v3.c
+++ b/arch/arm/mm/copypage-v3.c
@@ -15,7 +15,7 @@
  *
  * FIXME: do we need to handle cache stuff...
  */
-static void __attribute__((naked))
+static void __naked
 v3_copy_user_page(void *kto, const void *kfrom)
 {
 	asm("\n\
diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c
index bdb5fd983b1..7370a7142b0 100644
--- a/arch/arm/mm/copypage-v4mc.c
+++ b/arch/arm/mm/copypage-v4mc.c
@@ -44,7 +44,7 @@ static DEFINE_SPINLOCK(minicache_lock);
  * instruction. If your processor does not supply this, you have to write your
  * own copy_user_highpage that does the right thing.
  */
-static void __attribute__((naked))
+static void __naked
 mc_copy_user_page(void *from, void *to)
 {
 	asm volatile(
@@ -68,7 +68,7 @@ mc_copy_user_page(void *from, void *to)
 	: "r" (from), "r" (to), "I" (PAGE_SIZE / 64));
 }
 
-void v4_mc_copy_user_highpage(struct page *from, struct page *to,
+void v4_mc_copy_user_highpage(struct page *to, struct page *from,
 	unsigned long vaddr)
 {
 	void *kto = kmap_atomic(to, KM_USER1);
diff --git a/arch/arm/mm/copypage-v4wb.c b/arch/arm/mm/copypage-v4wb.c
index 3ec93dab765..9ab09841422 100644
--- a/arch/arm/mm/copypage-v4wb.c
+++ b/arch/arm/mm/copypage-v4wb.c
@@ -22,7 +22,7 @@
  * instruction. If your processor does not supply this, you have to write your
  * own copy_user_highpage that does the right thing.
  */
-static void __attribute__((naked))
+static void __naked
 v4wb_copy_user_page(void *kto, const void *kfrom)
 {
 	asm("\
diff --git a/arch/arm/mm/copypage-v4wt.c b/arch/arm/mm/copypage-v4wt.c
index 0f1188efae4..300efafd664 100644
--- a/arch/arm/mm/copypage-v4wt.c
+++ b/arch/arm/mm/copypage-v4wt.c
@@ -20,7 +20,7 @@
  * dirty data in the cache. However, we do have to ensure that
  * subsequent reads are up to date.
  */
-static void __attribute__((naked))
+static void __naked
 v4wt_copy_user_page(void *kto, const void *kfrom)
 {
 	asm("\
diff --git a/arch/arm/mm/copypage-xsc3.c b/arch/arm/mm/copypage-xsc3.c
index 39a994542ca..bc4525f5ab2 100644
--- a/arch/arm/mm/copypage-xsc3.c
+++ b/arch/arm/mm/copypage-xsc3.c
@@ -29,7 +29,7 @@
  * if we eventually end up using our copied page.
  *
 */
-static void __attribute__((naked))
+static void __naked
 xsc3_mc_copy_user_page(void *kto, const void *kfrom)
 {
 	asm("\
diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c
index d18f2397ee2..76824d3e966 100644
--- a/arch/arm/mm/copypage-xscale.c
+++ b/arch/arm/mm/copypage-xscale.c
@@ -42,7 +42,7 @@ static DEFINE_SPINLOCK(minicache_lock);
  * Dcache aliasing issue. The writes will be forwarded to the write buffer,
  * and merged as appropriate.
  */
-static void __attribute__((naked))
+static void __naked
 mc_copy_user_page(void *from, void *to)
 {
 	/*
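Note on the copypage hunks: they all make the same one-line change, replacing the open-coded GCC attribute with the kernel's __naked shorthand (the kernel defines __naked as __attribute__((naked))). The sketch below is illustrative only -- the function name and loop are hypothetical, not taken from the files above -- and shows what the annotation means for code like this: the compiler emits no prologue or epilogue, so the body must be self-contained assembly, including its own return.

/* Minimal sketch, assuming the kernel's definition of __naked and
 * AAPCS argument passing (kto in r0, kfrom in r1). */
#define __naked __attribute__((naked))

static void __naked example_copy_loop(void *kto, const void *kfrom)
{
	asm volatile(
	"	mov	ip, #1024		\n"	/* 4096 bytes / 4 per word */
	"1:	ldr	r3, [r1], #4		\n"	/* load a word from kfrom */
	"	str	r3, [r0], #4		\n"	/* store it to kto */
	"	subs	ip, ip, #1		\n"
	"	bne	1b			\n"
	"	mov	pc, lr			\n");	/* manual return: no epilogue exists */
}

ip (r12) and r3 are caller-saved scratch registers under the AAPCS, so no stack frame is needed -- which is exactly why these page-copy routines want the attribute.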
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 310e479309e..f1ef5613ccd 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -490,26 +490,30 @@ core_initcall(consistent_init);
  */
 void dma_cache_maint(const void *start, size_t size, int direction)
 {
-	const void *end = start + size;
+	void (*inner_op)(const void *, const void *);
+	void (*outer_op)(unsigned long, unsigned long);
 
-	BUG_ON(!virt_addr_valid(start) || !virt_addr_valid(end - 1));
+	BUG_ON(!virt_addr_valid(start) || !virt_addr_valid(start + size - 1));
 
 	switch (direction) {
 	case DMA_FROM_DEVICE:		/* invalidate only */
-		dmac_inv_range(start, end);
-		outer_inv_range(__pa(start), __pa(end));
+		inner_op = dmac_inv_range;
+		outer_op = outer_inv_range;
 		break;
 	case DMA_TO_DEVICE:		/* writeback only */
-		dmac_clean_range(start, end);
-		outer_clean_range(__pa(start), __pa(end));
+		inner_op = dmac_clean_range;
+		outer_op = outer_clean_range;
 		break;
 	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
-		dmac_flush_range(start, end);
-		outer_flush_range(__pa(start), __pa(end));
+		inner_op = dmac_flush_range;
+		outer_op = outer_flush_range;
 		break;
 	default:
 		BUG();
 	}
+
+	inner_op(start, start + size);
+	outer_op(__pa(start), __pa(start) + size);
 }
 EXPORT_SYMBOL(dma_cache_maint);
 
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index 81d0b8772de..bc0099d5ae8 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -66,7 +66,10 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address)
 	 * fault (ie, is old), we can safely ignore any issues.
 	 */
 	if (ret && (pte_val(entry) & L_PTE_MT_MASK) != shared_pte_mask) {
-		flush_cache_page(vma, address, pte_pfn(entry));
+		unsigned long pfn = pte_pfn(entry);
+		flush_cache_page(vma, address, pfn);
+		outer_flush_range((pfn << PAGE_SHIFT),
+				  (pfn << PAGE_SHIFT) + PAGE_SIZE);
 		pte_val(entry) &= ~L_PTE_MT_MASK;
 		pte_val(entry) |= shared_pte_mask;
 		set_pte_at(vma->vm_mm, address, pte, entry);
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 34df4d9d03a..80fd3b69ae1 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -382,7 +382,7 @@ void __init bootmem_init(void)
 	for_each_node(node)
 		bootmem_free_node(node, mi);
 
-	high_memory = __va(memend_pfn << PAGE_SHIFT);
+	high_memory = __va((memend_pfn << PAGE_SHIFT) - 1) + 1;
 
 	/*
 	 * This doesn't seem to be used by the Linux memory manager any
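Note on the dma_cache_maint() hunk above: it replaces three near-identical pairs of range calls with a pair of function pointers selected in the switch, so each range computation appears exactly once. Below is a standalone sketch of the same dispatch pattern in plain C -- all names here are illustrative stand-ins, not kernel APIs.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

enum dma_dir { FROM_DEVICE, TO_DEVICE, BIDIRECTIONAL };

/* Stand-ins for the inner (virtual-address) cache primitives. */
static void inner_inv(const void *s, const void *e)   { printf("L1 inv   %p..%p\n", s, e); }
static void inner_clean(const void *s, const void *e) { printf("L1 clean %p..%p\n", s, e); }
static void inner_flush(const void *s, const void *e) { printf("L1 flush %p..%p\n", s, e); }

/* Stand-ins for the outer-cache (physical-address) primitives. */
static void outer_inv(uintptr_t s, uintptr_t e)   { printf("L2 inv   %#lx..%#lx\n", (unsigned long)s, (unsigned long)e); }
static void outer_clean(uintptr_t s, uintptr_t e) { printf("L2 clean %#lx..%#lx\n", (unsigned long)s, (unsigned long)e); }
static void outer_flush(uintptr_t s, uintptr_t e) { printf("L2 flush %#lx..%#lx\n", (unsigned long)s, (unsigned long)e); }

/* Toy stand-in for __pa(): identity mapping instead of virt-to-phys. */
static uintptr_t pa(const void *v) { return (uintptr_t)v; }

static void cache_maint(const void *start, size_t size, enum dma_dir dir)
{
	void (*inner_op)(const void *, const void *);
	void (*outer_op)(uintptr_t, uintptr_t);

	switch (dir) {
	case FROM_DEVICE:	/* invalidate only */
		inner_op = inner_inv;   outer_op = outer_inv;   break;
	case TO_DEVICE:		/* writeback only */
		inner_op = inner_clean; outer_op = outer_clean; break;
	case BIDIRECTIONAL:	/* writeback and invalidate */
		inner_op = inner_flush; outer_op = outer_flush; break;
	default:
		abort();
	}

	/* Each operation is now issued from exactly one place. */
	inner_op(start, (const char *)start + size);
	outer_op(pa(start), pa(start) + size);
}

int main(void)
{
	char buf[64];
	cache_maint(buf, sizeof(buf), TO_DEVICE);
	return 0;
}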
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 18373f73f2f..9f88dd3be60 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -138,7 +138,7 @@ void __check_kvm_seq(struct mm_struct *mm)
  */
 static void unmap_area_sections(unsigned long virt, unsigned long size)
 {
-	unsigned long addr = virt, end = virt + (size & ~SZ_1M);
+	unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
 	pgd_t *pgd;
 
 	flush_cache_vunmap(addr, end);
@@ -337,10 +337,7 @@ void __iounmap(volatile void __iomem *io_addr)
 	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
 #ifndef CONFIG_SMP
 	struct vm_struct **p, *tmp;
-#endif
-	unsigned int section_mapping = 0;
 
-#ifndef CONFIG_SMP
 	/*
 	 * If this is a section based mapping we need to handle it
 	 * specially as the VM subsystem does not know how to handle
@@ -352,11 +349,8 @@ void __iounmap(volatile void __iomem *io_addr)
 	for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
 		if ((tmp->flags & VM_IOREMAP) && (tmp->addr == addr)) {
 			if (tmp->flags & VM_ARM_SECTION_MAPPING) {
-				*p = tmp->next;
 				unmap_area_sections((unsigned long)tmp->addr,
 						    tmp->size);
-				kfree(tmp);
-				section_mapping = 1;
 			}
 			break;
 		}
@@ -364,7 +358,6 @@ void __iounmap(volatile void __iomem *io_addr)
 	write_unlock(&vmlist_lock);
 #endif
 
-	if (!section_mapping)
-		vunmap(addr);
+	vunmap(addr);
 }
 EXPORT_SYMBOL(__iounmap);
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
index 5358fcc7f61..f7457fea6de 100644
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -124,7 +124,7 @@ int valid_phys_addr_range(unsigned long addr, size_t size)
 {
 	if (addr < PHYS_OFFSET)
 		return 0;
-	if (addr + size > __pa(high_memory))
+	if (addr + size >= __pa(high_memory - 1) + 1)
 		return 0;
 
 	return 1;
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 9b36c5cb5e9..d4d082c5c2d 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -693,7 +693,8 @@ static void __init sanity_check_meminfo(void)
 		 * Check whether this memory bank would entirely overlap
 		 * the vmalloc area.
 		 */
-		if (__va(bank->start) >= VMALLOC_MIN) {
+		if (__va(bank->start) >= VMALLOC_MIN ||
+		    __va(bank->start) < (void *)PAGE_OFFSET) {
 			printk(KERN_NOTICE "Ignoring RAM at %.8lx-%.8lx "
 			       "(vmalloc region overlap).\n",
 			       bank->start, bank->start + bank->size - 1);
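Note on the init.c, mmap.c and mmu.c hunks: they share one theme -- address arithmetic that wraps when RAM ends exactly at the top of the 32-bit physical or virtual space. The fix is the last-valid-byte idiom: translate "end - 1" (which is always representable) instead of the one-past-the-end address. A small standalone demonstration with illustrative values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Illustrative setup: RAM ends exactly at the 4 GiB physical boundary. */
	uint32_t memend_pfn = 0x100000;		/* first page frame past the end */

	/* One-past-the-end address: 0x100000000 does not fit in 32 bits. */
	uint32_t one_past_end = memend_pfn << 12;	/* wraps to 0 */

	/* Last-valid-byte idiom: the wrap cancels out, leaving a usable value. */
	uint32_t last_byte = (memend_pfn << 12) - 1;	/* 0xffffffff */

	printf("one-past-end: %#010x  (wrapped -- breaks comparisons)\n",
	       (unsigned)one_past_end);
	printf("last byte   : %#010x  (always representable)\n",
	       (unsigned)last_byte);
	return 0;
}

This is why init.c now computes high_memory from the last valid byte and adds 1 back, and why mmap.c compares against __pa(high_memory - 1) rather than __pa(high_memory): every intermediate value stays inside the 32-bit range even for the boundary configuration.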