Diffstat (limited to 'arch/sh/mm')
-rw-r--r--   arch/sh/mm/cache-sh4.c      2
-rw-r--r--   arch/sh/mm/cache-sh7705.c   4
-rw-r--r--   arch/sh/mm/ioremap_32.c     2
-rw-r--r--   arch/sh/mm/pmb.c           37
4 files changed, 35 insertions, 10 deletions
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index b2453bbef4c..a98c7d8984f 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -43,7 +43,7 @@ static void (*__flush_dcache_segment_fn)(unsigned long, unsigned long) =
  * Called from kernel/module.c:sys_init_module and routine for a.out format,
  * signal handler code and kprobes code
  */
-static void sh4_flush_icache_range(void *args)
+static void __uses_jump_to_uncached sh4_flush_icache_range(void *args)
 {
 	struct flusher_data *data = args;
 	unsigned long start, end;
diff --git a/arch/sh/mm/cache-sh7705.c b/arch/sh/mm/cache-sh7705.c
index 2cadee2037a..2601935eb58 100644
--- a/arch/sh/mm/cache-sh7705.c
+++ b/arch/sh/mm/cache-sh7705.c
@@ -78,7 +78,7 @@ static void sh7705_flush_icache_range(void *args)
 /*
  * Writeback&Invalidate the D-cache of the page
  */
-static void __flush_dcache_page(unsigned long phys)
+static void __uses_jump_to_uncached __flush_dcache_page(unsigned long phys)
 {
 	unsigned long ways, waysize, addrstart;
 	unsigned long flags;
@@ -144,7 +144,7 @@ static void sh7705_flush_dcache_page(void *arg)
 		__flush_dcache_page(PHYSADDR(page_address(page)));
 }
 
-static void sh7705_flush_cache_all(void *args)
+static void __uses_jump_to_uncached sh7705_flush_cache_all(void *args)
 {
 	unsigned long flags;
 
diff --git a/arch/sh/mm/ioremap_32.c b/arch/sh/mm/ioremap_32.c
index c3250614e3a..a86eaa9d75a 100644
--- a/arch/sh/mm/ioremap_32.c
+++ b/arch/sh/mm/ioremap_32.c
@@ -83,7 +83,7 @@ void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
 	 *
 	 * PMB entries are all pre-faulted.
 	 */
-	if (unlikely(size >= 0x1000000)) {
+	if (unlikely(phys_addr >= P1SEG)) {
 		unsigned long mapped = pmb_remap(addr, phys_addr, size, flags);
 
 		if (likely(mapped)) {
diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index b1a714a92b1..aade3110211 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -33,6 +33,8 @@
 
 #define NR_PMB_ENTRIES	16
 
+static void __pmb_unmap(struct pmb_entry *);
+
 static struct kmem_cache *pmb_cache;
 static unsigned long pmb_map;
 
@@ -218,9 +220,10 @@ static struct {
 long pmb_remap(unsigned long vaddr, unsigned long phys,
 	       unsigned long size, unsigned long flags)
 {
-	struct pmb_entry *pmbp;
+	struct pmb_entry *pmbp, *pmbe;
 	unsigned long wanted;
 	int pmb_flags, i;
+	long err;
 
 	/* Convert typical pgprot value to the PMB equivalent */
 	if (flags & _PAGE_CACHABLE) {
@@ -236,20 +239,22 @@ long pmb_remap(unsigned long vaddr, unsigned long phys,
 
 again:
 	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
-		struct pmb_entry *pmbe;
 		int ret;
 
 		if (size < pmb_sizes[i].size)
 			continue;
 
 		pmbe = pmb_alloc(vaddr, phys, pmb_flags | pmb_sizes[i].flag);
-		if (IS_ERR(pmbe))
-			return PTR_ERR(pmbe);
+		if (IS_ERR(pmbe)) {
+			err = PTR_ERR(pmbe);
+			goto out;
+		}
 
 		ret = set_pmb_entry(pmbe);
 		if (ret != 0) {
 			pmb_free(pmbe);
-			return -EBUSY;
+			err = -EBUSY;
+			goto out;
 		}
 
 		phys += pmb_sizes[i].size;
@@ -264,12 +269,25 @@ again:
 			pmbp->link = pmbe;
 
 		pmbp = pmbe;
+
+		/*
+		 * Instead of trying smaller sizes on every iteration
+		 * (even if we succeed in allocating space), try using
+		 * pmb_sizes[i].size again.
+		 */
+		i--;
 	}
 
 	if (size >= 0x1000000)
 		goto again;
 
 	return wanted - size;
+
+out:
+	if (pmbp)
+		__pmb_unmap(pmbp);
+
+	return err;
 }
 
 void pmb_unmap(unsigned long addr)
@@ -283,12 +301,19 @@ void pmb_unmap(unsigned long addr)
 	if (unlikely(!pmbe))
 		return;
 
+	__pmb_unmap(pmbe);
+}
+
+static void __pmb_unmap(struct pmb_entry *pmbe)
+{
 	WARN_ON(!test_bit(pmbe->entry, &pmb_map));
 
 	do {
 		struct pmb_entry *pmblink = pmbe;
 
-		clear_pmb_entry(pmbe);
+		if (pmbe->entry != PMB_NO_ENTRY)
+			clear_pmb_entry(pmbe);
+
 		pmbe = pmblink->link;
 
 		pmb_free(pmblink);
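
For reference, a minimal userspace sketch of the allocation-loop shape that the pmb.c hunk above introduces, showing why the added i-- keeps retrying the same entry size before falling through to smaller ones. The pmb_sizes values, try_map() and the sample request size here are illustrative stand-ins, not the real PMB API.

#include <stdio.h>

#define ARRAY_SIZE(a)	(sizeof(a) / sizeof((a)[0]))

/* Stand-in for the kernel's pmb_sizes[] table (largest entry first). */
static const struct { unsigned long size; const char *name; } pmb_sizes[] = {
	{ 512 << 20, "512MB" },
	{ 128 << 20, "128MB" },
	{  64 << 20, "64MB"  },
	{  16 << 20, "16MB"  },
};

/* Pretend every mapping attempt succeeds; the real code calls pmb_alloc()/set_pmb_entry(). */
static int try_map(const char *name)
{
	printf("mapped one %s entry\n", name);
	return 0;
}

int main(void)
{
	unsigned long size = (512UL + 512UL + 64UL) << 20;	/* sample request */
	int i;

	for (i = 0; i < (int)ARRAY_SIZE(pmb_sizes); i++) {
		if (size < pmb_sizes[i].size)
			continue;

		if (try_map(pmb_sizes[i].name))
			return 1;

		size -= pmb_sizes[i].size;

		/*
		 * Retry the same index on the next pass; without this the
		 * second 512MB chunk would be split across smaller entries.
		 */
		i--;
	}
	return 0;
}

Run against the 1088MB sample request, this maps two 512MB entries and one 64MB entry instead of stepping down to smaller sizes after the first successful mapping.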