From 5f0fbf9ecaf354fa4bbf266fffdea2ea3d14a0ed Mon Sep 17 00:00:00 2001
From: Nicolas Pitre
Date: Tue, 16 Sep 2008 13:05:53 -0400
Subject: [ARM] fixmap support

This is the minimum fixmap interface expected to be implemented by
architectures supporting highmem.

We have a second level page table already allocated and covering
0xfff00000-0xffffffff because the exception vector page is located at
0xffff0000, and various cache tricks already use some entries above
0xffff0000.  Therefore the PTEs covering 0xfff00000-0xfffeffff are free
to be used.  However the XScale cache flushing code already uses
virtual addresses between 0xfffe0000 and 0xfffeffff.  So this reserves
the 0xfff00000-0xfffdffff range for fixmap stuff.

The Documentation/arm/memory.txt information is updated accordingly,
including the information about the actual top of DMA memory mapping
region which didn't match the code.

Signed-off-by: Nicolas Pitre
---
 arch/arm/include/asm/fixmap.h | 41 +++++++++++++++++++++++++++++++++++++++++
 1 file changed, 41 insertions(+)
 create mode 100644 arch/arm/include/asm/fixmap.h
(limited to 'arch/arm/include')

diff --git a/arch/arm/include/asm/fixmap.h b/arch/arm/include/asm/fixmap.h
new file mode 100644
index 00000000000..bbae919bceb
--- /dev/null
+++ b/arch/arm/include/asm/fixmap.h
@@ -0,0 +1,41 @@
+#ifndef _ASM_FIXMAP_H
+#define _ASM_FIXMAP_H
+
+/*
+ * Nothing too fancy for now.
+ *
+ * On ARM we already have well known fixed virtual addresses imposed by
+ * the architecture such as the vector page which is located at 0xffff0000,
+ * therefore a second level page table is already allocated covering
+ * 0xfff00000 upwards.
+ *
+ * The cache flushing code in proc-xscale.S uses the virtual area between
+ * 0xfffe0000 and 0xfffeffff.
+ */
+
+#define FIXADDR_START		0xfff00000UL
+#define FIXADDR_TOP		0xfffe0000UL
+#define FIXADDR_SIZE		(FIXADDR_TOP - FIXADDR_START)
+
+#define FIX_KMAP_BEGIN		0
+#define FIX_KMAP_END		(FIXADDR_SIZE >> PAGE_SHIFT)
+
+#define __fix_to_virt(x)	(FIXADDR_START + ((x) << PAGE_SHIFT))
+#define __virt_to_fix(x)	(((x) - FIXADDR_START) >> PAGE_SHIFT)
+
+extern void __this_fixmap_does_not_exist(void);
+
+static inline unsigned long fix_to_virt(const unsigned int idx)
+{
+	if (idx >= FIX_KMAP_END)
+		__this_fixmap_does_not_exist();
+	return __fix_to_virt(idx);
+}
+
+static inline unsigned int virt_to_fix(const unsigned long vaddr)
+{
+	BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
+	return __virt_to_fix(vaddr);
+}
+
+#endif
--
cgit v1.2.3
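
The helpers above are what the atomic kmap code added later in this series
builds on.  Below is a minimal illustrative sketch, not part of the patch, of
how a fixmap slot index is expected to be turned into a virtual address,
following the per-CPU slot layout that kmap_atomic() is expected to use; the
function name is made up for illustration.

#include <linux/bug.h>
#include <linux/smp.h>
#include <asm/fixmap.h>
#include <asm/kmap_types.h>

/* hypothetical example, not kernel code from this series */
static unsigned long example_kmap_slot_vaddr(enum km_type type)
{
	/* one group of KM_TYPE_NR slots per CPU, as kmap_atomic() lays them out */
	unsigned int idx = type + KM_TYPE_NR * smp_processor_id();
	unsigned long vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);

	/* the reverse translation must return the same index */
	BUG_ON(virt_to_fix(vaddr) != FIX_KMAP_BEGIN + idx);
	return vaddr;
}
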
From d73cd42893f4cdc06e6829fea2347bb92cb789d1 Mon Sep 17 00:00:00 2001
From: Nicolas Pitre
Date: Mon, 15 Sep 2008 16:44:55 -0400
Subject: [ARM] kmap support

The kmap virtual area borrows a 2MB range at the top of the 16MB area
below PAGE_OFFSET currently reserved for kernel modules and/or the XIP
kernel.  This 2MB corresponds to the range covered by 2 consecutive
second-level page tables, or a single pmd entry as seen by the Linux
page table abstraction.  Because XIP kernels are unlikely to be seen on
systems needing highmem support, there shouldn't be any shortage of VM
space for modules (14 MB for modules is still way more than twice the
typical usage).

Because the virtual mapping of highmem pages can go away at any moment
after kunmap() is called on them, we need to bypass the delayed cache
flushing provided by flush_dcache_page() in that case.  The atomic kmap
versions are based on fixmaps, and __cpuc_flush_dcache_page() is used
directly in that case.

Signed-off-by: Nicolas Pitre
---
 arch/arm/include/asm/highmem.h | 28 ++++++++++++++++++++++++++++
 arch/arm/include/asm/memory.h  | 13 ++++++++++---
 2 files changed, 38 insertions(+), 3 deletions(-)
 create mode 100644 arch/arm/include/asm/highmem.h
(limited to 'arch/arm/include')

diff --git a/arch/arm/include/asm/highmem.h b/arch/arm/include/asm/highmem.h
new file mode 100644
index 00000000000..023d5b37454
--- /dev/null
+++ b/arch/arm/include/asm/highmem.h
@@ -0,0 +1,28 @@
+#ifndef _ASM_HIGHMEM_H
+#define _ASM_HIGHMEM_H
+
+#include <asm/kmap_types.h>
+
+#define PKMAP_BASE		(PAGE_OFFSET - PMD_SIZE)
+#define LAST_PKMAP		PTRS_PER_PTE
+#define LAST_PKMAP_MASK		(LAST_PKMAP - 1)
+#define PKMAP_NR(virt)		(((virt) - PKMAP_BASE) >> PAGE_SHIFT)
+#define PKMAP_ADDR(nr)		(PKMAP_BASE + ((nr) << PAGE_SHIFT))
+
+#define kmap_prot		PAGE_KERNEL
+
+#define flush_cache_kmaps()	flush_cache_all()
+
+extern pte_t *pkmap_page_table;
+
+extern void *kmap_high(struct page *page);
+extern void kunmap_high(struct page *page);
+
+extern void *kmap(struct page *page);
+extern void kunmap(struct page *page);
+extern void *kmap_atomic(struct page *page, enum km_type type);
+extern void kunmap_atomic(void *kvaddr, enum km_type type);
+extern void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
+extern struct page *kmap_atomic_to_page(const void *ptr);
+
+#endif
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 0202a7c20e6..ae472bc376d 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -44,13 +44,20 @@
  * The module space lives between the addresses given by TASK_SIZE
  * and PAGE_OFFSET - it must be within 32MB of the kernel text.
  */
-#define MODULES_END	(PAGE_OFFSET)
-#define MODULES_VADDR	(MODULES_END - 16*1048576)
-
+#define MODULES_VADDR	(PAGE_OFFSET - 16*1024*1024)
 #if TASK_SIZE > MODULES_VADDR
 #error Top of user space clashes with start of module space
 #endif

+/*
+ * The highmem pkmap virtual space shares the end of the module area.
+ */
+#ifdef CONFIG_HIGHMEM
+#define MODULES_END	(PAGE_OFFSET - PMD_SIZE)
+#else
+#define MODULES_END	(PAGE_OFFSET)
+#endif
+
 /*
  * The XIP kernel gets mapped at the bottom of the module vm area.
  * Since we use sections to map it, this macro replaces the physical address
--
cgit v1.2.3
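
For context, a typical consumer of the interface declared in highmem.h above
looks like the sketch below (illustrative only, not part of the patch).  It
uses the km_type-based kmap_atomic()/kunmap_atomic() signatures of this era;
the helper name is hypothetical and len is assumed to be at most PAGE_SIZE.

#include <linux/highmem.h>
#include <linux/string.h>

/* hypothetical example, not kernel code from this series */
static void example_copy_from_page(struct page *page, void *dst, size_t len)
{
	/* temporary kernel mapping; for a highmem page this may use a fixmap slot */
	void *src = kmap_atomic(page, KM_USER0);

	memcpy(dst, src, len);

	/* the mapping is no longer valid after this call */
	kunmap_atomic(src, KM_USER0);
}
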
From 43377453af83b8ff8c1c731da1508bd6b84ebfea Mon Sep 17 00:00:00 2001
From: Nicolas Pitre
Date: Thu, 12 Mar 2009 22:52:09 -0400
Subject: [ARM] introduce dma_cache_maint_page()

This is a helper to be used by the DMA mapping API to handle cache
maintenance for memory identified by a page structure instead of a
virtual address.  Those pages may or may not be highmem pages, and
when they're highmem pages, they may or may not be virtually mapped.
When they're not mapped then there is no L1 cache to worry about.
But even in that case the L2 cache must be processed since unmapped
highmem pages can still be L2 cached.

Signed-off-by: Nicolas Pitre
---
 arch/arm/include/asm/dma-mapping.h | 4 +++-
 arch/arm/include/asm/highmem.h     | 3 +++
 2 files changed, 6 insertions(+), 1 deletion(-)
(limited to 'arch/arm/include')

diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 22cb14ec343..59fa762e9c6 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -57,6 +57,8 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
  * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
  */
 extern void dma_cache_maint(const void *kaddr, size_t size, int rw);
+extern void dma_cache_maint_page(struct page *page, unsigned long offset,
+				 size_t size, int rw);

 /*
  * Return whether the given device DMA address mask can be supported
@@ -316,7 +318,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
 	BUG_ON(!valid_dma_direction(dir));

 	if (!arch_is_coherent())
-		dma_cache_maint(page_address(page) + offset, size, dir);
+		dma_cache_maint_page(page, offset, size, dir);

 	return page_to_dma(dev, page) + offset;
 }
diff --git a/arch/arm/include/asm/highmem.h b/arch/arm/include/asm/highmem.h
index 023d5b37454..7f36d00600b 100644
--- a/arch/arm/include/asm/highmem.h
+++ b/arch/arm/include/asm/highmem.h
@@ -15,7 +15,10 @@

 extern pte_t *pkmap_page_table;

+#define ARCH_NEEDS_KMAP_HIGH_GET
+
 extern void *kmap_high(struct page *page);
+extern void *kmap_high_get(struct page *page);
 extern void kunmap_high(struct page *page);

 extern void *kmap(struct page *page);
--
cgit v1.2.3
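
The sketch below (illustrative only; the patch's real implementation is in
arch/arm/mm/dma-mapping.c) restates the decision described in the commit
message: L1 maintenance needs a kernel virtual address and is therefore only
done when one exists, while unmapped highmem pages still need L2 (outer)
cache maintenance driven by physical address, which is omitted here.  The
function name is made up; kmap_high_get() is the helper this patch declares.

#include <linux/mm.h>
#include <linux/highmem.h>
#include <asm/dma-mapping.h>

/* hypothetical example, not the code added by this patch */
static void example_dma_cache_maint_page(struct page *page, unsigned long offset,
					 size_t size, int dir)
{
	if (!PageHighMem(page)) {
		/* lowmem pages always have a kernel mapping */
		dma_cache_maint(page_address(page) + offset, size, dir);
	} else {
		/* returns the kmap address if the page is currently kmapped, else NULL */
		void *vaddr = kmap_high_get(page);

		if (vaddr) {
			dma_cache_maint(vaddr + offset, size, dir);
			kunmap_high(page);
		}
		/*
		 * An unmapped highmem page still needs L2 maintenance by
		 * physical address -- omitted in this sketch.
		 */
	}
}
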
From 58edb515724f9e63e569536d01ac8d8f8ddb367a Mon Sep 17 00:00:00 2001
From: Nicolas Pitre
Date: Tue, 9 Sep 2008 15:54:13 -0400
Subject: [ARM] make page_to_dma() highmem aware

If a machine class has a custom __virt_to_bus() implementation then it
must provide a __arch_page_to_dma() implementation as well which is
_not_ based on page_address() to support highmem.

This patch fixes the existing __arch_page_to_dma() implementations and
provides a default implementation otherwise.  The default implementation
for highmem is based on __pfn_to_bus() which is defined only when no
custom __virt_to_bus() is provided by the machine class.

That leaves only ebsa110 and footbridge, which cannot support highmem
until they provide their own __arch_page_to_dma() implementation.  But
highmem support on those legacy platforms with limited memory is
certainly not a priority.

Signed-off-by: Nicolas Pitre
---
 arch/arm/include/asm/dma-mapping.h | 10 ++++++++++
 arch/arm/include/asm/memory.h      |  1 +
 2 files changed, 11 insertions(+)
(limited to 'arch/arm/include')

diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 59fa762e9c6..ff46dfa68a9 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -15,10 +15,20 @@
  * must not be used by drivers.
  */
 #ifndef __arch_page_to_dma
+
+#if !defined(CONFIG_HIGHMEM)
 static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
 {
 	return (dma_addr_t)__virt_to_bus((unsigned long)page_address(page));
 }
+#elif defined(__pfn_to_bus)
+static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
+{
+	return (dma_addr_t)__pfn_to_bus(page_to_pfn(page));
+}
+#else
+#error "this machine class needs to define __arch_page_to_dma to use HIGHMEM"
+#endif

 static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
 {
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index ae472bc376d..85763db8744 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -188,6 +188,7 @@ static inline void *phys_to_virt(unsigned long x)
 #ifndef __virt_to_bus
 #define __virt_to_bus	__virt_to_phys
 #define __bus_to_virt	__phys_to_virt
+#define __pfn_to_bus(x)	((x) << PAGE_SHIFT)
 #endif

 static inline __deprecated unsigned long virt_to_bus(void *x)
--
cgit v1.2.3

From 1bb772679ffb0ba1ff1d40d8c6b855ab029f177d Mon Sep 17 00:00:00 2001
From: Nicolas Pitre
Date: Fri, 12 Sep 2008 16:11:51 -0400
Subject: [ARM] Feroceon: add highmem support to L2 cache handling code

The choice is between looping over the physical range and performing
single cache line operations, or mapping highmem pages somewhere, as
cache range ops are possible only on virtual addresses.

Because L2 range ops are much faster, we go with the latter by
factoring out the physical-to-virtual address conversion and using a
fixmap entry for it in the HIGHMEM case.

Possible future optimizations to avoid the pte setup cost:

 - do the pte setup for highmem pages only

 - determine a threshold for doing line-by-line processing on physical
   addresses when the range is small

Signed-off-by: Nicolas Pitre
---
 arch/arm/include/asm/kmap_types.h | 1 +
 1 file changed, 1 insertion(+)
(limited to 'arch/arm/include')

diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
index 45def13ee17..d16ec97ec9a 100644
--- a/arch/arm/include/asm/kmap_types.h
+++ b/arch/arm/include/asm/kmap_types.h
@@ -18,6 +18,7 @@ enum km_type {
 	KM_IRQ1,
 	KM_SOFTIRQ0,
 	KM_SOFTIRQ1,
+	KM_L2_CACHE,
 	KM_TYPE_NR
 };

--
cgit v1.2.3
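
To close the series, here is an illustrative sketch of the approach the
Feroceon commit message describes: give an unmapped highmem page a temporary
virtual address through the new KM_L2_CACHE fixmap slot so that L2 range
operations, which only work on virtual addresses, can be used.  This is not
the patch's code (that lives in arch/arm/mm/cache-feroceon-l2.c);
set_fixmap_pte() is a hypothetical helper standing in for the actual pte
setup, and the caller is assumed to run with preemption disabled.

#include <linux/smp.h>
#include <linux/highmem.h>
#include <asm/fixmap.h>
#include <asm/kmap_types.h>
#include <asm/memory.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

/* hypothetical helper: installs the given pte in the fixmap page table for vaddr */
extern void set_fixmap_pte(unsigned long vaddr, pte_t pte);

/* hypothetical example, not the code added by this patch */
static unsigned long example_l2_map(unsigned long paddr)
{
#ifdef CONFIG_HIGHMEM
	unsigned int idx = KM_L2_CACHE + KM_TYPE_NR * smp_processor_id();
	unsigned long vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);

	/* point the per-CPU fixmap slot at this physical page */
	set_fixmap_pte(vaddr, pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL));
	local_flush_tlb_kernel_page(vaddr);

	return vaddr + (paddr & ~PAGE_MASK);
#else
	return __phys_to_virt(paddr);	/* lowmem is permanently mapped */
#endif
}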