From 850f6ac316cf84bba63fdb775c897834eccbfaa3 Mon Sep 17 00:00:00 2001
From: Benjamin Herrenschmidt
Date: Thu, 18 Jun 2009 19:25:00 +0000
Subject: powerpc/mm: Make k(un)map_atomic out of line

Those functions are way too big to be inline; besides, kmap_atomic()
wants to call debug_kmap_atomic() which isn't exported for modules
and causes module link failures.

Signed-off-by: Benjamin Herrenschmidt
---
 arch/powerpc/mm/highmem.c | 77 +++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 77 insertions(+)
 create mode 100644 arch/powerpc/mm/highmem.c

diff --git a/arch/powerpc/mm/highmem.c b/arch/powerpc/mm/highmem.c
new file mode 100644
index 00000000000..c2186c74c85
--- /dev/null
+++ b/arch/powerpc/mm/highmem.c
@@ -0,0 +1,77 @@
+/*
+ * highmem.c: virtual kernel memory mappings for high memory
+ *
+ * PowerPC version, stolen from the i386 version.
+ *
+ * Used in CONFIG_HIGHMEM systems for memory pages which
+ * are not addressable by direct kernel virtual addresses.
+ *
+ * Copyright (C) 1999 Gerhard Wichert, Siemens AG
+ *		      Gerhard.Wichert@pdb.siemens.de
+ *
+ *
+ * Redesigned the x86 32-bit VM architecture to deal with
+ * up to 16 Terrabyte physical memory. With current x86 CPUs
+ * we now support up to 64 Gigabytes physical RAM.
+ *
+ * Copyright (C) 1999 Ingo Molnar
+ *
+ * Reworked for PowerPC by various contributors. Moved from
+ * highmem.h by Benjamin Herrenschmidt (c) 2009 IBM Corp.
+ */
+
+#include <linux/highmem.h>
+#include <linux/module.h>
+
+/*
+ * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
+ * gives a more generic (and caching) interface. But kmap_atomic can
+ * be used in IRQ contexts, so in some (very limited) cases we need
+ * it.
+ */
+void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
+{
+	unsigned int idx;
+	unsigned long vaddr;
+
+	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
+	pagefault_disable();
+	if (!PageHighMem(page))
+		return page_address(page);
+
+	debug_kmap_atomic(type);
+	idx = type + KM_TYPE_NR*smp_processor_id();
+	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+#ifdef CONFIG_DEBUG_HIGHMEM
+	BUG_ON(!pte_none(*(kmap_pte-idx)));
+#endif
+	__set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot), 1);
+	local_flush_tlb_page(NULL, vaddr);
+
+	return (void*) vaddr;
+}
+EXPORT_SYMBOL(kmap_atomic_prot);
+
+void kunmap_atomic(void *kvaddr, enum km_type type)
+{
+#ifdef CONFIG_DEBUG_HIGHMEM
+	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
+	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
+
+	if (vaddr < __fix_to_virt(FIX_KMAP_END)) {
+		pagefault_enable();
+		return;
+	}
+
+	BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+
+	/*
+	 * force other mappings to Oops if they'll try to access
+	 * this pte without first remap it
+	 */
+	pte_clear(&init_mm, vaddr, kmap_pte-idx);
+	local_flush_tlb_page(NULL, vaddr);
+#endif
+	pagefault_enable();
+}
+EXPORT_SYMBOL(kunmap_atomic);
--
cgit v1.2.3
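
Editor's note: below is a minimal, hypothetical sketch (not part of the patch) of
the kind of module code that motivated this change. The helper name
copy_from_highmem_page() is made up for illustration, the KM_USER0 slot choice is
arbitrary, and the snippet assumes the 2.6.30-era two-argument
kmap_atomic()/kunmap_atomic() API plus a destination buffer of at least PAGE_SIZE
bytes. Before this patch, the inline kmap_atomic() in the powerpc highmem.h pulled
a reference to debug_kmap_atomic() into the module object, and since that symbol is
not exported the module failed to link; with the wrappers out of line and
EXPORT_SYMBOL'd, only exported symbols are referenced and the module links.

/*
 * Illustrative out-of-tree module helper (editor's sketch, not kernel code).
 * Copies one highmem page into a kernel buffer using an atomic mapping,
 * which is the call pattern that previously caused the link failure.
 */
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/string.h>

static void copy_from_highmem_page(struct page *page, void *dst)
{
	void *src;

	/* Atomic mapping: usable in contexts that must not sleep. */
	src = kmap_atomic(page, KM_USER0);
	/* dst is assumed to hold at least PAGE_SIZE bytes. */
	memcpy(dst, src, PAGE_SIZE);
	kunmap_atomic(src, KM_USER0);
}

Keeping debug_kmap_atomic() inside the out-of-line kernel functions means modules
only ever reference the exported kmap_atomic_prot()/kunmap_atomic() entry points,
so the debug hook itself never needs to be exported.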