From e28f7faf05159f1cfd564596f5e6178edba6bd49 Mon Sep 17 00:00:00 2001
From: David Gibson
Date: Fri, 5 Aug 2005 19:39:06 +1000
Subject: [PATCH] Four level pagetables for ppc64

Implement 4-level pagetables for ppc64

This patch implements full four-level page tables for ppc64, thereby
extending the usable user address range to 44 bits (16T).  (With
64-bit entries, a full 4k page holds 512 entries and thus 9 bits of
index, while a quarter page holds 128 entries and thus 7 bits; the
9 + 7 + 7 + 9 index bits plus the 12-bit page offset make up the
44-bit range.)

The patch uses a full page for the tables at the bottom and top
level, and a quarter page for the intermediate levels.  It uses full
64-bit pointers at every level, thus also increasing the addressable
range of physical memory.  This patch also tweaks the VSID allocation
to provide a matching range for user addresses (this halves the
number of available contexts) and adds some #if and BUILD_BUG sanity
checks.

Signed-off-by: David Gibson
Signed-off-by: Paul Mackerras
---
 include/asm-ppc64/mmu.h | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/include/asm-ppc64/mmu.h b/include/asm-ppc64/mmu.h
index 70348a85131..959a4bfdcd6 100644
--- a/include/asm-ppc64/mmu.h
+++ b/include/asm-ppc64/mmu.h
@@ -259,8 +259,10 @@ extern void stabs_alloc(void);
 #define VSID_BITS 36
 #define VSID_MODULUS ((1UL<<VSID_BITS)-1)

From: David Gibson
Date: Fri, 19 Aug 2005 14:52:31 +1000
Subject: [PATCH] Change address of ppc64 initial segment table

On ppc64 machines with segment tables, CPU0's segment table is at a
fixed address, currently 0x9000.  This patch moves it to the free
space at 0x6000, just below the fwnmi data area.  This saves 8k of
space in vmlinux and the runtime kernel image.

Signed-off-by: David Gibson
Signed-off-by: Paul Mackerras
---
 include/asm-ppc64/mmu.h | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/include/asm-ppc64/mmu.h b/include/asm-ppc64/mmu.h
index 959a4bfdcd6..789c2693483 100644
--- a/include/asm-ppc64/mmu.h
+++ b/include/asm-ppc64/mmu.h
@@ -28,9 +28,12 @@
 #define STE_VSID_SHIFT 12

 /* Location of cpu0's segment table */
-#define STAB0_PAGE 0x9
+#define STAB0_PAGE 0x6
 #define STAB0_PHYS_ADDR (STAB0_PAGE<<PAGE_SHIFT)

From: David Gibson
Date: Thu, 11 Aug 2005 16:55:21 +1000
Subject: [PATCH] Dynamic hugepage addresses for ppc64

Paulus, I think this is now a reasonable candidate for the
post-2.6.13 queue.

Relax address restrictions for hugepages on ppc64

Presently, 64-bit applications on ppc64 may only use hugepages in the
address region from 1-1.5T.  Furthermore, if hugepages are enabled in
the kernel config, they may only use hugepages and never normal pages
in this area.

This patch relaxes this restriction, allowing any address to be used
with hugepages, but with a 1TB granularity.  That is, if you map a
hugepage anywhere in the region 1TB-2TB, that entire area will be
reserved exclusively for hugepages for the remainder of the process's
lifetime.  This works analogously to hugepages in 32-bit
applications, where hugepages can be mapped anywhere, but with 256MB
(mmu segment) granularity.

This patch applies on top of the four level pagetable patch
(http://patchwork.ozlabs.org/linuxppc64/patch?id=1936).
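To make the 1TB granularity concrete, the sketch below walks through
the area arithmetic.  It is a minimal illustration, assuming an area
shift of 40 (log2 of 1TB); the HTLB_* names are illustrative, not
necessarily the identifiers the patch itself introduces, though the
16-bit mask width matches the u16 fields in the diff that follows.

    #include <stdio.h>

    /* Assumed granularity: 1TB hugepage areas (log2 = 40).  With the
     * 44-bit (16TB) user address space from the four-level patch
     * there are at most 16 such areas, so an area mask fits in
     * 16 bits. */
    #define HTLB_AREA_SHIFT 40

    /* Index of the 1TB area containing address x. */
    #define GET_HTLB_AREA(x) ((unsigned)((x) >> HTLB_AREA_SHIFT))

    /* Bitmask of every 1TB area touched by [addr, addr+len). */
    #define HTLB_AREA_MASK(addr, len) \
            (((1U << (GET_HTLB_AREA((addr) + (len) - 1) + 1)) \
              - (1U << GET_HTLB_AREA(addr))) & 0xffffU)

    int main(void)
    {
            unsigned long addr = 1UL << 40; /* 1TB */
            unsigned long len = 3UL << 39;  /* 1.5TB: [1TB, 2.5TB) */

            /* The mapping touches areas 1 and 2, so the mask is
             * 0x0006: both areas become hugepage-only for the rest
             * of the process's lifetime. */
            printf("areas reserved: 0x%04x\n", HTLB_AREA_MASK(addr, len));
            return 0;
    }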
Signed-off-by: David Gibson
Signed-off-by: Paul Mackerras
---
 include/asm-ppc64/mmu.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/include/asm-ppc64/mmu.h b/include/asm-ppc64/mmu.h
index 789c2693483..ad36bb28de2 100644
--- a/include/asm-ppc64/mmu.h
+++ b/include/asm-ppc64/mmu.h
@@ -307,7 +307,7 @@ typedef unsigned long mm_context_id_t;
 typedef struct {
 	mm_context_id_t id;
 #ifdef CONFIG_HUGETLB_PAGE
-	u16 htlb_segs;	/* bitmask */
+	u16 low_htlb_areas, high_htlb_areas;
 #endif
 } mm_context_t;
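Reading the new fields against the commit message: one u16 tracks the
16 x 256MB low segments (the 32-bit-style granularity) and the other
the 16 x 1TB high areas that fit in the 44-bit (16TB) user range.
The helpers below are a hypothetical sketch of how such bitmasks
could be maintained and queried; reserve_high_area() and
high_area_reserved() are invented names for illustration, not
functions from the patch.

    #include <assert.h>

    typedef unsigned short u16;
    typedef unsigned long mm_context_id_t;

    /* Shape of the widened context, mirroring the diff above (the
     * CONFIG_HUGETLB_PAGE conditional is dropped for brevity). */
    typedef struct {
            mm_context_id_t id;
            u16 low_htlb_areas, high_htlb_areas;
    } mm_context_t;

    /* Hypothetical helpers: mark the 1TB area containing addr as
     * hugepage-only, and test whether an address falls in such an
     * area.  One bit per area: the low mask covers 16 x 256MB
     * segments below 4GB, the high mask 16 x 1TB areas. */
    static void reserve_high_area(mm_context_t *ctx, unsigned long addr)
    {
            ctx->high_htlb_areas |= (u16)(1U << (addr >> 40));
    }

    static int high_area_reserved(const mm_context_t *ctx,
                                  unsigned long addr)
    {
            return (ctx->high_htlb_areas >> (addr >> 40)) & 1;
    }

    int main(void)
    {
            mm_context_t ctx = { 1, 0, 0 };

            reserve_high_area(&ctx, 1UL << 40);  /* hugepage at 1TB */
            /* The whole 1TB-2TB area is now hugepage-only... */
            assert(high_area_reserved(&ctx, (1UL << 40) + 4096));
            /* ...but the 2TB-3TB area is untouched. */
            assert(!high_area_reserved(&ctx, 1UL << 41));
            return 0;
    }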