| author | Benjamin Herrenschmidt <benh@kernel.crashing.org> | 2009-07-28 11:59:34 +1000 |
|---|---|---|
| committer | Benjamin Herrenschmidt <benh@kernel.crashing.org> | 2009-08-20 10:25:06 +1000 |
| commit | 57e2a99f74b0d3720c97a6aadb57ae6aad3c61ea (patch) | |
| tree | 4268a98ad222dbcf790749aed52417eb0a3a2a35 /arch/powerpc/include/asm | |
| parent | 0257c99cdfaca53a881339e1cbca638c61569b05 (diff) | |
powerpc: Add memory management headers for new 64-bit BookE
This adds the PTE and pgtable format definitions, along with changes
to the kernel memory map and other definitions related to implementing
support for 64-bit Book3E. This also shields some asm-offset bits that
are currently only relevant on 32-bit.
We also move the definition of the "linux" page size constants to
the common mmu.h file and add a few sizes that are relevant to
embedded processors.
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
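
One practical consequence of the new memory map is easy to state: on 64-bit Book3E the kernel owns the upper half of the effective address space, so the is_kernel_addr() test added to page.h (see the hunk below) reduces to a top-bit check. A minimal standalone sketch of that test; the helper name and harness are illustrative, not part of the patch:

```c
#include <stdio.h>

/* Mirrors the Book3E is_kernel_addr() definition added in page.h:
 * the kernel claims everything at or above 0x8000000000000000. */
static int is_kernel_addr_book3e(unsigned long long addr)
{
	return addr >= 0x8000000000000000ull;
}

int main(void)
{
	printf("%d\n", is_kernel_addr_book3e(0x8000000000001000ull)); /* 1 */
	printf("%d\n", is_kernel_addr_book3e(0x0000000010000000ull)); /* 0 */
	return 0;
}
```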
Diffstat (limited to 'arch/powerpc/include/asm')
| Mode | File | Lines changed |
|---|---|---|
| -rw-r--r-- | arch/powerpc/include/asm/mmu-book3e.h | 27 |
| -rw-r--r-- | arch/powerpc/include/asm/mmu-hash64.h | 20 |
| -rw-r--r-- | arch/powerpc/include/asm/mmu.h | 37 |
| -rw-r--r-- | arch/powerpc/include/asm/page.h | 4 |
| -rw-r--r-- | arch/powerpc/include/asm/page_64.h | 10 |
| -rw-r--r-- | arch/powerpc/include/asm/pgtable-ppc64.h | 61 |
| -rw-r--r-- | arch/powerpc/include/asm/pte-book3e.h | 70 |
| -rw-r--r-- | arch/powerpc/include/asm/pte-common.h | 3 |
8 files changed, 195 insertions, 37 deletions
diff --git a/arch/powerpc/include/asm/mmu-book3e.h b/arch/powerpc/include/asm/mmu-book3e.h
index 42a39b4aace..6ddbe48d07f 100644
--- a/arch/powerpc/include/asm/mmu-book3e.h
+++ b/arch/powerpc/include/asm/mmu-book3e.h
@@ -170,6 +170,33 @@ typedef struct {
 	unsigned int	active;
 	unsigned long	vdso_base;
 } mm_context_t;
+
+/* Page size definitions, common between 32 and 64-bit
+ *
+ * shift : is the "PAGE_SHIFT" value for that page size
+ * penc  : is the pte encoding mask
+ *
+ */
+struct mmu_psize_def
+{
+	unsigned int	shift;	/* number of bits */
+	unsigned int	enc;	/* PTE encoding */
+};
+extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
+
+/* The page sizes use the same names as 64-bit hash but are
+ * constants
+ */
+#if defined(CONFIG_PPC_4K_PAGES)
+#define mmu_virtual_psize	MMU_PAGE_4K
+#elif defined(CONFIG_PPC_64K_PAGES)
+#define mmu_virtual_psize	MMU_PAGE_64K
+#else
+#error Unsupported page size
+#endif
+
+extern int mmu_linear_psize;
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* _ASM_POWERPC_MMU_BOOK3E_H_ */
diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
index 98c104a0996..b537903b9fc 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -139,26 +139,6 @@ struct mmu_psize_def
 #endif /* __ASSEMBLY__ */
 
 /*
- * The kernel use the constants below to index in the page sizes array.
- * The use of fixed constants for this purpose is better for performances
- * of the low level hash refill handlers.
- *
- * A non supported page size has a "shift" field set to 0
- *
- * Any new page size being implemented can get a new entry in here. Whether
- * the kernel will use it or not is a different matter though. The actual page
- * size used by hugetlbfs is not defined here and may be made variable
- */
-
-#define MMU_PAGE_4K		0	/* 4K */
-#define MMU_PAGE_64K		1	/* 64K */
-#define MMU_PAGE_64K_AP		2	/* 64K Admixed (in a 4K segment) */
-#define MMU_PAGE_1M		3	/* 1M */
-#define MMU_PAGE_16M		4	/* 16M */
-#define MMU_PAGE_16G		5	/* 16G */
-#define MMU_PAGE_COUNT		6
-
-/*
  * Segment sizes.
  * These are the values used by hardware in the B field of
  * SLB entries and the first dword of MMU hashtable entries.
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index fb57ded592f..2fcfefc6089 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -17,6 +17,7 @@
 #define MMU_FTR_TYPE_40x		ASM_CONST(0x00000004)
 #define MMU_FTR_TYPE_44x		ASM_CONST(0x00000008)
 #define MMU_FTR_TYPE_FSL_E		ASM_CONST(0x00000010)
+#define MMU_FTR_TYPE_3E			ASM_CONST(0x00000020)
 
 /*
  * This is individual features
@@ -73,6 +74,41 @@ extern void early_init_mmu_secondary(void);
 
 #endif /* !__ASSEMBLY__ */
 
+/* The kernel use the constants below to index in the page sizes array.
+ * The use of fixed constants for this purpose is better for performances
+ * of the low level hash refill handlers.
+ *
+ * A non supported page size has a "shift" field set to 0
+ *
+ * Any new page size being implemented can get a new entry in here. Whether
+ * the kernel will use it or not is a different matter though. The actual page
+ * size used by hugetlbfs is not defined here and may be made variable
+ *
+ * Note: This array ended up being a false good idea as it's growing to the
+ * point where I wonder if we should replace it with something different,
+ * to think about, feedback welcome. --BenH.
+ */
+
+/* There are #define as they have to be used in assembly
+ *
+ * WARNING: If you change this list, make sure to update the array of
+ * names currently in arch/powerpc/mm/hugetlbpage.c or bad things will
+ * happen
+ */
+#define MMU_PAGE_4K		0
+#define MMU_PAGE_16K		1
+#define MMU_PAGE_64K		2
+#define MMU_PAGE_64K_AP		3	/* "Admixed pages" (hash64 only) */
+#define MMU_PAGE_256K		4
+#define MMU_PAGE_1M		5
+#define MMU_PAGE_8M		6
+#define MMU_PAGE_16M		7
+#define MMU_PAGE_256M		8
+#define MMU_PAGE_1G		9
+#define MMU_PAGE_16G		10
+#define MMU_PAGE_64G		11
+#define MMU_PAGE_COUNT		12
+
 #if defined(CONFIG_PPC_STD_MMU_64)
 /* 64-bit classic hash table MMU */
@@ -94,5 +130,6 @@ extern void early_init_mmu_secondary(void);
 # include <asm/mmu-8xx.h>
 #endif
 
+
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_MMU_H_ */
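The mmu.h hunk above consolidates the MMU_PAGE_* indices for all MMU families and pairs them with the mmu_psize_defs[] array declared in mmu-book3e.h. A minimal sketch of the intended lookup pattern follows; the structure and constants are restated locally so it runs standalone, and page_size_of() plus the example array contents are hypothetical, not part of the patch:

```c
#include <stdio.h>

/* Local mirror of the definitions this patch adds; in the kernel
 * these come from <asm/mmu.h> and <asm/mmu-book3e.h>. */
#define MMU_PAGE_4K	0
#define MMU_PAGE_16M	7
#define MMU_PAGE_COUNT	12

struct mmu_psize_def {
	unsigned int shift;	/* number of bits; 0 = unsupported */
	unsigned int enc;	/* PTE encoding */
};

/* Hypothetical contents: an MMU backend supporting only 4K and 16M */
static struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
	[MMU_PAGE_4K]  = { .shift = 12, .enc = 0x2 },
	[MMU_PAGE_16M] = { .shift = 24, .enc = 0xe },
};

/* Hypothetical helper: page size index -> bytes, 0 if unsupported */
static unsigned long long page_size_of(int psize)
{
	if (psize < 0 || psize >= MMU_PAGE_COUNT)
		return 0;
	if (!mmu_psize_defs[psize].shift)
		return 0;
	return 1ull << mmu_psize_defs[psize].shift;
}

int main(void)
{
	printf("%#llx\n", page_size_of(MMU_PAGE_4K));	/* 0x1000 */
	printf("%#llx\n", page_size_of(MMU_PAGE_16M));	/* 0x1000000 */
	return 0;
}
```

The fixed indices matter because the low-level hash refill handlers index this array directly; a shift of 0 is the agreed marker for "this size is not supported here".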
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index 4940662ee87..ff24254990e 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -139,7 +139,11 @@ extern phys_addr_t kernstart_addr;
  * Don't compare things with KERNELBASE or PAGE_OFFSET to test for
  * "kernelness", use is_kernel_addr() - it should do what you want.
  */
+#ifdef CONFIG_PPC_BOOK3E_64
+#define is_kernel_addr(x)	((x) >= 0x8000000000000000ul)
+#else
 #define is_kernel_addr(x)	((x) >= PAGE_OFFSET)
+#endif
 
 #ifndef __ASSEMBLY__
diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
index 5817a3b747e..3f17b83f55a 100644
--- a/arch/powerpc/include/asm/page_64.h
+++ b/arch/powerpc/include/asm/page_64.h
@@ -135,12 +135,22 @@ extern void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
 #endif /* __ASSEMBLY__ */
 #else
 #define slice_init()
+#ifdef CONFIG_PPC_STD_MMU_64
 #define get_slice_psize(mm, addr)	((mm)->context.user_psize)
 #define slice_set_user_psize(mm, psize)		\
 do {						\
 	(mm)->context.user_psize = (psize);	\
 	(mm)->context.sllp = SLB_VSID_USER | mmu_psize_defs[(psize)].sllp; \
 } while (0)
+#else /* CONFIG_PPC_STD_MMU_64 */
+#ifdef CONFIG_PPC_64K_PAGES
+#define get_slice_psize(mm, addr)	MMU_PAGE_64K
+#else /* CONFIG_PPC_64K_PAGES */
+#define get_slice_psize(mm, addr)	MMU_PAGE_4K
+#endif /* !CONFIG_PPC_64K_PAGES */
+#define slice_set_user_psize(mm, psize)	do { BUG(); } while(0)
+#endif /* !CONFIG_PPC_STD_MMU_64 */
+
 #define slice_set_range_psize(mm, start, len, psize)	\
 	slice_set_user_psize((mm), (psize))
 #define slice_mm_new_context(mm)	1
diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
index 8cd083c6150..7254c5a3187 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64.h
@@ -5,11 +5,6 @@
  * the ppc64 hashed page table.
  */
 
-#ifndef __ASSEMBLY__
-#include <linux/stddef.h>
-#include <asm/tlbflush.h>
-#endif /* __ASSEMBLY__ */
-
 #ifdef CONFIG_PPC_64K_PAGES
 #include <asm/pgtable-ppc64-64k.h>
 #else
@@ -38,26 +33,46 @@
 #endif
 
 /*
- * Define the address range of the vmalloc VM area.
+ * Define the address range of the kernel non-linear virtual area
+ */
+
+#ifdef CONFIG_PPC_BOOK3E
+#define KERN_VIRT_START ASM_CONST(0x8000000000000000)
+#else
+#define KERN_VIRT_START ASM_CONST(0xD000000000000000)
+#endif
+#define KERN_VIRT_SIZE	PGTABLE_RANGE
+
+/*
+ * The vmalloc space starts at the beginning of that region, and
+ * occupies half of it on hash CPUs and a quarter of it on Book3E
  */
-#define VMALLOC_START ASM_CONST(0xD000000000000000)
-#define VMALLOC_SIZE  (PGTABLE_RANGE >> 1)
-#define VMALLOC_END   (VMALLOC_START + VMALLOC_SIZE)
+#define VMALLOC_START	KERN_VIRT_START
+#ifdef CONFIG_PPC_BOOK3E
+#define VMALLOC_SIZE	(KERN_VIRT_SIZE >> 2)
+#else
+#define VMALLOC_SIZE	(KERN_VIRT_SIZE >> 1)
+#endif
+#define VMALLOC_END	(VMALLOC_START + VMALLOC_SIZE)
 
 /*
- * Define the address ranges for MMIO and IO space :
+ * The second half of the kernel virtual space is used for IO mappings,
+ * it's itself carved into the PIO region (ISA and PHB IO space) and
+ * the ioremap space
  *
- *  ISA_IO_BASE = VMALLOC_END, 64K reserved area
+ *  ISA_IO_BASE = KERN_IO_START, 64K reserved area
  *  PHB_IO_BASE = ISA_IO_BASE + 64K to ISA_IO_BASE + 2G, PHB IO spaces
  *  IOREMAP_BASE = ISA_IO_BASE + 2G to VMALLOC_START + PGTABLE_RANGE
  */
+#define KERN_IO_START	(KERN_VIRT_START + (KERN_VIRT_SIZE >> 1))
 #define FULL_IO_SIZE	0x80000000ul
-#define  ISA_IO_BASE	(VMALLOC_END)
-#define  ISA_IO_END	(VMALLOC_END + 0x10000ul)
+#define  ISA_IO_BASE	(KERN_IO_START)
+#define  ISA_IO_END	(KERN_IO_START + 0x10000ul)
 #define  PHB_IO_BASE	(ISA_IO_END)
-#define  PHB_IO_END	(VMALLOC_END + FULL_IO_SIZE)
+#define  PHB_IO_END	(KERN_IO_START + FULL_IO_SIZE)
 #define IOREMAP_BASE	(PHB_IO_END)
-#define IOREMAP_END	(VMALLOC_START + PGTABLE_RANGE)
+#define IOREMAP_END	(KERN_VIRT_START + KERN_VIRT_SIZE)
+
 
 /*
  * Region IDs
@@ -72,19 +87,28 @@
 #define USER_REGION_ID		(0UL)
 
 /*
- * Defines the address of the vmemap area, in its own region
+ * Defines the address of the vmemap area, in its own region on
+ * hash table CPUs and after the vmalloc space on Book3E
  */
+#ifdef CONFIG_PPC_BOOK3E
+#define VMEMMAP_BASE		VMALLOC_END
+#define VMEMMAP_END		KERN_IO_START
+#else
 #define VMEMMAP_BASE		(VMEMMAP_REGION_ID << REGION_SHIFT)
+#endif
 #define vmemmap			((struct page *)VMEMMAP_BASE)
 
 /*
  * Include the PTE bits definitions
  */
+#ifdef CONFIG_PPC_BOOK3S
 #include <asm/pte-hash64.h>
+#else
+#include <asm/pte-book3e.h>
+#endif
 #include <asm/pte-common.h>
 
-
 #ifdef CONFIG_PPC_MM_SLICES
 #define HAVE_ARCH_UNMAPPED_AREA
 #define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
@@ -92,6 +116,9 @@
 
 #ifndef __ASSEMBLY__
 
+#include <linux/stddef.h>
+#include <asm/tlbflush.h>
+
 /*
  * This is the default implementation of various PTE accessors, it's
  * used in all cases except Book3S with 64K pages where we have a
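To make the new split concrete, the following standalone sketch evaluates the Book3E side of the layout macros from the pgtable-ppc64.h hunk above. PGTABLE_RANGE is configuration dependent, so the 1TB value below is an arbitrary stand-in chosen only so the arithmetic can be printed:

```c
#include <stdio.h>

/* Book3E case of the layout macros above; the PGTABLE_RANGE value is
 * illustrative only (the real one depends on the page size config). */
#define PGTABLE_RANGE	0x10000000000ull	/* 1TB, stand-in value */
#define KERN_VIRT_START	0x8000000000000000ull	/* Book3E */
#define KERN_VIRT_SIZE	PGTABLE_RANGE
#define VMALLOC_START	KERN_VIRT_START
#define VMALLOC_SIZE	(KERN_VIRT_SIZE >> 2)	/* a quarter on Book3E */
#define VMALLOC_END	(VMALLOC_START + VMALLOC_SIZE)
#define VMEMMAP_BASE	VMALLOC_END		/* vmemmap follows vmalloc */
#define KERN_IO_START	(KERN_VIRT_START + (KERN_VIRT_SIZE >> 1))

int main(void)
{
	printf("vmalloc: %#llx..%#llx\n", VMALLOC_START, VMALLOC_END);
	printf("vmemmap: %#llx..%#llx\n", VMEMMAP_BASE, KERN_IO_START);
	printf("io:      %#llx..%#llx\n", KERN_IO_START,
	       KERN_VIRT_START + KERN_VIRT_SIZE);
	return 0;
}
```

Note how the IO region always begins at the half-way point of the kernel virtual space, so on Book3E the vmemmap region is exactly the quarter left between VMALLOC_END and KERN_IO_START.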
diff --git a/arch/powerpc/include/asm/pte-book3e.h b/arch/powerpc/include/asm/pte-book3e.h
new file mode 100644
index 00000000000..1d27c77d770
--- /dev/null
+++ b/arch/powerpc/include/asm/pte-book3e.h
@@ -0,0 +1,70 @@
+#ifndef _ASM_POWERPC_PTE_BOOK3E_H
+#define _ASM_POWERPC_PTE_BOOK3E_H
+#ifdef __KERNEL__
+
+/* PTE bit definitions for processors compliant to the Book3E
+ * architecture 2.06 or later. The position of the PTE bits
+ * matches the HW definition of the optional Embedded Page Table
+ * category.
+ */
+
+/* Architected bits */
+#define _PAGE_PRESENT	0x000001 /* software: pte contains a translation */
+#define _PAGE_FILE	0x000002 /* (!present only) software: pte holds file offset */
+#define _PAGE_SW1	0x000002
+#define _PAGE_BAP_SR	0x000004
+#define _PAGE_BAP_UR	0x000008
+#define _PAGE_BAP_SW	0x000010
+#define _PAGE_BAP_UW	0x000020
+#define _PAGE_BAP_SX	0x000040
+#define _PAGE_BAP_UX	0x000080
+#define _PAGE_PSIZE_MSK	0x000f00
+#define _PAGE_PSIZE_4K	0x000200
+#define _PAGE_PSIZE_64K	0x000600
+#define _PAGE_PSIZE_1M	0x000a00
+#define _PAGE_PSIZE_16M	0x000e00
+#define _PAGE_DIRTY	0x001000 /* C: page changed */
+#define _PAGE_SW0	0x002000
+#define _PAGE_U3	0x004000
+#define _PAGE_U2	0x008000
+#define _PAGE_U1	0x010000
+#define _PAGE_U0	0x020000
+#define _PAGE_ACCESSED	0x040000
+#define _PAGE_LENDIAN	0x080000
+#define _PAGE_GUARDED	0x100000
+#define _PAGE_COHERENT	0x200000 /* M: enforce memory coherence */
+#define _PAGE_NO_CACHE	0x400000 /* I: cache inhibit */
+#define _PAGE_WRITETHRU	0x800000 /* W: cache write-through */
+
+/* "Higher level" linux bit combinations */
+#define _PAGE_EXEC	_PAGE_BAP_SX /* Can be executed from potentially */
+#define _PAGE_HWEXEC	_PAGE_BAP_UX /* .. and was cache cleaned */
+#define _PAGE_RW	(_PAGE_BAP_SW | _PAGE_BAP_UW) /* User write permission */
+#define _PAGE_KERNEL_RW	(_PAGE_BAP_SW | _PAGE_BAP_SR | _PAGE_DIRTY)
+#define _PAGE_KERNEL_RO	(_PAGE_BAP_SR)
+#define _PAGE_USER	(_PAGE_BAP_UR | _PAGE_BAP_SR) /* Can be read */
+
+#define _PAGE_HASHPTE	0
+#define _PAGE_BUSY	0
+
+#define _PAGE_SPECIAL	_PAGE_SW0
+
+/* Flags to be preserved on PTE modifications */
+#define _PAGE_HPTEFLAGS	_PAGE_BUSY
+
+/* Base page size */
+#ifdef CONFIG_PPC_64K_PAGES
+#define _PAGE_PSIZE	_PAGE_PSIZE_64K
+#define PTE_RPN_SHIFT	(28)
+#else
+#define _PAGE_PSIZE	_PAGE_PSIZE_4K
+#define PTE_RPN_SHIFT	(24)
+#endif
+
+/* On 32-bit, we never clear the top part of the PTE */
+#ifdef CONFIG_PPC32
+#define _PTE_NONE_MASK	0xffffffff00000000ULL
+#endif
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_PTE_FSL_BOOKE_H */
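As a quick check on how the new Book3E PTE bits compose, here is a standalone sketch that rebuilds _PAGE_KERNEL_RW from the masks in the header above. The bit values are copied from the hunk; the program itself is illustrative:

```c
#include <stdio.h>

/* Permission and size bits copied from the new pte-book3e.h */
#define _PAGE_BAP_SR	0x000004
#define _PAGE_BAP_SW	0x000010
#define _PAGE_PSIZE_4K	0x000200
#define _PAGE_DIRTY	0x001000	/* C: page changed */

/* _PAGE_KERNEL_RW exactly as the header defines it */
#define _PAGE_KERNEL_RW	(_PAGE_BAP_SW | _PAGE_BAP_SR | _PAGE_DIRTY)

int main(void)
{
	/* A kernel RW mapping of a 4K page: supervisor read and write,
	 * pre-dirtied, with the 4K size encoding folded in. */
	unsigned long flags = _PAGE_KERNEL_RW | _PAGE_PSIZE_4K;

	printf("%#lx\n", flags);	/* prints 0x1214 */
	return 0;
}
```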
diff --git a/arch/powerpc/include/asm/pte-common.h b/arch/powerpc/include/asm/pte-common.h
index a7e210b6b48..8bb6464ba61 100644
--- a/arch/powerpc/include/asm/pte-common.h
+++ b/arch/powerpc/include/asm/pte-common.h
@@ -34,6 +34,9 @@
 #ifndef _PAGE_4K_PFN
 #define _PAGE_4K_PFN	0
 #endif
+#ifndef _PAGE_SAO
+#define _PAGE_SAO	0
+#endif
 #ifndef _PAGE_PSIZE
 #define _PAGE_PSIZE	0
 #endif
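The pte-common.h change follows that header's usual fallback pattern: an optional PTE bit a platform did not define defaults to 0, so generic code can OR it in unconditionally. A small illustration of the idiom (standalone, not kernel code):

```c
#include <stdio.h>

/* Fallback idiom from pte-common.h: Book3E has no Strong Access
 * Ordering bit, so _PAGE_SAO becomes a harmless no-op there. */
#ifndef _PAGE_SAO
#define _PAGE_SAO	0
#endif

int main(void)
{
	/* ORing in a zero-valued bit changes nothing on such platforms */
	unsigned long flags = 0x1214ul | _PAGE_SAO;

	printf("%#lx\n", flags);	/* still 0x1214 */
	return 0;
}
```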