From d7cdc9e8ac82c43fdcd4fde6b5b53d2dcba7f707 Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Wed, 27 Sep 2006 15:16:42 +0900
Subject: sh: ioremap() overhaul.

ioremap() overhaul. Add support for transparent PMB mapping, get rid of
p3_ioremap(), etc. Also drop ioremap() and iounmap() routines from the
machvec, as everyone can use the generic ioremap() API instead. For PCI
memory apertures and other special cases, use the pci_iomap() API, as
boards are already required to get the mapping right there.

Signed-off-by: Paul Mundt
---
 arch/sh/mm/Makefile |   7 +--
 arch/sh/mm/pmb.c    | 145 ++++++++++++++++++++++++++++++++++++++++++++++++++--
 2 files changed, 144 insertions(+), 8 deletions(-)

(limited to 'arch/sh/mm')

diff --git a/arch/sh/mm/Makefile b/arch/sh/mm/Makefile
index d90906367c5..87a7c07265c 100644
--- a/arch/sh/mm/Makefile
+++ b/arch/sh/mm/Makefile
@@ -12,13 +12,14 @@ obj-$(CONFIG_DMA_PAGE_OPS)	+= pg-dma.o
 obj-$(CONFIG_HUGETLB_PAGE)	+= hugetlbpage.o
 
 mmu-y			:= fault-nommu.o tlb-nommu.o pg-nommu.o
-mmu-$(CONFIG_MMU)	:= fault.o clear_page.o copy_page.o tlb-flush.o
+mmu-$(CONFIG_MMU)	:= fault.o clear_page.o copy_page.o tlb-flush.o \
+			   ioremap.o
 
 obj-y			+= $(mmu-y)
 
 ifdef CONFIG_MMU
-obj-$(CONFIG_CPU_SH3)		+= tlb-sh3.o
-obj-$(CONFIG_CPU_SH4)		+= tlb-sh4.o ioremap.o
+obj-$(CONFIG_CPU_SH3)		+= tlb-sh3.o
+obj-$(CONFIG_CPU_SH4)		+= tlb-sh4.o
 obj-$(CONFIG_SH7705_CACHE_32KB) += pg-sh7705.o
 endif

diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index ff5bde74564..819fd0faf02 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -3,7 +3,7 @@
  *
  * Privileged Space Mapping Buffer (PMB) Support.
  *
- * Copyright (C) 2005 Paul Mundt
+ * Copyright (C) 2005, 2006 Paul Mundt
  *
  * P1/P2 Section mapping definitions from map32.h, which was:
  *
@@ -24,6 +24,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
@@ -127,11 +128,15 @@ repeat:
 	return 0;
 }
 
-void set_pmb_entry(struct pmb_entry *pmbe)
+int set_pmb_entry(struct pmb_entry *pmbe)
 {
+	int ret;
+
 	jump_to_P2();
-	__set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, &pmbe->entry);
+	ret = __set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, &pmbe->entry);
 	back_to_P1();
+
+	return ret;
 }
 
 void clear_pmb_entry(struct pmb_entry *pmbe)
@@ -162,11 +167,141 @@ void clear_pmb_entry(struct pmb_entry *pmbe)
 	clear_bit(entry, &pmb_map);
 }
 
+static DEFINE_SPINLOCK(pmb_list_lock);
+static struct pmb_entry *pmb_list;
+
+static inline void pmb_list_add(struct pmb_entry *pmbe)
+{
+	struct pmb_entry **p, *tmp;
+
+	p = &pmb_list;
+	while ((tmp = *p) != NULL)
+		p = &tmp->next;
+
+	pmbe->next = tmp;
+	*p = pmbe;
+}
+
+static inline void pmb_list_del(struct pmb_entry *pmbe)
+{
+	struct pmb_entry **p, *tmp;
+
+	for (p = &pmb_list; (tmp = *p); p = &tmp->next)
+		if (tmp == pmbe) {
+			*p = tmp->next;
+			return;
+		}
+}
+
+static struct {
+	unsigned long size;
+	int flag;
+} pmb_sizes[] = {
+	{ .size = 0x20000000, .flag = PMB_SZ_512M, },
+	{ .size = 0x08000000, .flag = PMB_SZ_128M, },
+	{ .size = 0x04000000, .flag = PMB_SZ_64M,  },
+	{ .size = 0x01000000, .flag = PMB_SZ_16M,  },
+};
+
+long pmb_remap(unsigned long vaddr, unsigned long phys,
+	       unsigned long size, unsigned long flags)
+{
+	struct pmb_entry *pmbp;
+	unsigned long wanted;
+	int pmb_flags, i;
+
+	/* Convert typical pgprot value to the PMB equivalent */
+	if (flags & _PAGE_CACHABLE) {
+		if (flags & _PAGE_WT)
+			pmb_flags = PMB_WT;
+		else
+			pmb_flags = PMB_C;
+	} else
+		pmb_flags = PMB_WT | PMB_UB;
+
+	pmbp = NULL;
+	wanted = size;
+
+again:
+	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
+		struct pmb_entry *pmbe;
+		int ret;
+
+		if (size < pmb_sizes[i].size)
+			continue;
+
+		pmbe = pmb_alloc(vaddr, phys, pmb_flags | pmb_sizes[i].flag);
+		if (IS_ERR(pmbe))
+			return PTR_ERR(pmbe);
+
+		ret = set_pmb_entry(pmbe);
+		if (ret != 0) {
+			pmb_free(pmbe);
+			return -EBUSY;
+		}
+
+		phys	+= pmb_sizes[i].size;
+		vaddr	+= pmb_sizes[i].size;
+		size	-= pmb_sizes[i].size;
+
+		/*
+		 * Link adjacent entries that span multiple PMB entries
+		 * for easier tear-down.
+		 */
+		if (likely(pmbp))
+			pmbp->link = pmbe;
+
+		pmbp = pmbe;
+	}
+
+	if (size >= 0x1000000)
+		goto again;
+
+	return wanted - size;
+}
+
+void pmb_unmap(unsigned long addr)
+{
+	struct pmb_entry **p, *pmbe;
+
+	for (p = &pmb_list; (pmbe = *p); p = &pmbe->next)
+		if (pmbe->vpn == addr)
+			break;
+
+	if (unlikely(!pmbe))
+		return;
+
+	WARN_ON(!test_bit(pmbe->entry, &pmb_map));
+
+	do {
+		struct pmb_entry *pmblink = pmbe;
+
+		clear_pmb_entry(pmbe);
+		pmbe = pmblink->link;
+
+		pmb_free(pmblink);
+	} while (pmbe);
+}
+
 static void pmb_cache_ctor(void *pmb, kmem_cache_t *cachep, unsigned long flags)
 {
+	struct pmb_entry *pmbe = pmb;
+
 	memset(pmb, 0, sizeof(struct pmb_entry));
 
-	((struct pmb_entry *)pmb)->entry = PMB_NO_ENTRY;
+	spin_lock_irq(&pmb_list_lock);
+
+	pmbe->entry = PMB_NO_ENTRY;
+	pmb_list_add(pmbe);
+
+	spin_unlock_irq(&pmb_list_lock);
+}
+
+static void pmb_cache_dtor(void *pmb, kmem_cache_t *cachep, unsigned long flags)
+{
+	spin_lock_irq(&pmb_list_lock);
+	pmb_list_del(pmb);
+	spin_unlock_irq(&pmb_list_lock);
 }
 
 static int __init pmb_init(void)
@@ -177,7 +312,7 @@ static int __init pmb_init(void)
 	BUG_ON(unlikely(nr_entries >= NR_PMB_ENTRIES));
 
 	pmb_cache = kmem_cache_create("pmb", sizeof(struct pmb_entry),
-				      0, 0, pmb_cache_ctor, NULL);
+				      0, 0, pmb_cache_ctor, pmb_cache_dtor);
 	BUG_ON(!pmb_cache);
 
 	jump_to_P2();
-- 
cgit v1.2.3
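
For illustration only (not part of the patch): a minimal userspace sketch of
the greedy size selection that pmb_remap() performs above. It reuses the same
size table as pmb_sizes[] and the same "largest entry that still fits, retry
while at least 16MB remains" loop, but it only prints the resulting split
instead of allocating and programming PMB entries; the function and variable
names below are made up for the example.

#include <stdio.h>

/* Same sizes as pmb_sizes[] in the patch; the PMB_SZ_* flags are omitted. */
static const unsigned long pmb_sizes[] = {
	0x20000000,	/* 512MB */
	0x08000000,	/* 128MB */
	0x04000000,	/*  64MB */
	0x01000000,	/*  16MB */
};
#define NR_SIZES (sizeof(pmb_sizes) / sizeof(pmb_sizes[0]))

/* Mirrors the pmb_remap() loop: returns how many bytes PMB entries cover. */
static unsigned long split_into_pmb_entries(unsigned long size)
{
	unsigned long wanted = size;
	unsigned int i;

again:
	for (i = 0; i < NR_SIZES; i++) {
		if (size < pmb_sizes[i])
			continue;

		printf("  PMB entry of 0x%08lx bytes\n", pmb_sizes[i]);
		size -= pmb_sizes[i];
	}

	/* As in the patch, anything >= 16MB left over gets another pass. */
	if (size >= 0x1000000)
		goto again;

	return wanted - size;
}

int main(void)
{
	unsigned long req = 0x0c000000;	/* 192MB request */

	printf("request: 0x%08lx bytes\n", req);
	printf("covered: 0x%08lx bytes\n", split_into_pmb_entries(req));
	return 0;
}

For a 192MB request this prints one 128MB and one 64MB entry, so the whole
request is covered; a request that is not a sum of PMB sizes yields a smaller
"covered" value, matching pmb_remap()'s return of wanted - size, with any
remainder left to the caller.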