author		Paul Mackerras <paulus@samba.org>	2006-06-15 10:45:18 +1000
committer	Paul Mackerras <paulus@samba.org>	2006-06-15 10:45:18 +1000
commit		bf72aeba2ffef599d1d386425c9e46b82be657cd
tree		ead8e5111dbcfa22e156999d1bb8a96e50f06fef /include/asm-powerpc
parent		31925323b1b51bb65db729e029472a8b1f635b7d
powerpc: Use 64k pages without needing cache-inhibited large pages
Some POWER5+ machines can do 64k hardware pages for normal memory but
not for cache-inhibited pages. This patch lets us use 64k hardware
pages for most user processes on such machines (assuming the kernel
has been configured with CONFIG_PPC_64K_PAGES=y). User processes
start out using 64k pages and get switched to 4k pages if they use any
non-cacheable mappings.
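A hedged sketch of what that demotion step amounts to, written against
the interfaces this patch adds (mm->context.user_psize, mm->context.sllp,
mmu_ci_restrictions, slb_flush_and_rebolt()); the function name, the
SLB_VSID_USER detail, and the exact call site are assumptions, not the
literal kernel code:

	/*
	 * Sketch only: demote a process from 64k to 4k pages when it
	 * creates a cache-inhibited mapping on a CI-restricted CPU.
	 */
	static void demote_segment_to_4k(struct mm_struct *mm)
	{
		if (!mmu_ci_restrictions || mm->context.user_psize == MMU_PAGE_4K)
			return;
		mm->context.user_psize = MMU_PAGE_4K;
		/* SLB encoding for 4k pages, from the page size table */
		mm->context.sllp = SLB_VSID_USER | mmu_psize_defs[MMU_PAGE_4K].sllp;
		/* flush SLB entries that still carry the 64k encoding */
		if (current->mm == mm)
			slb_flush_and_rebolt();
	}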
With this, we use 64k pages for the vmalloc region and 4k pages for
the imalloc region. If anything creates a non-cacheable mapping in
the vmalloc region, the vmalloc region will get switched to 4k pages.
I don't know of any driver other than the DRM that would do this,
though, and these machines don't have AGP.
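Concretely, the page size used for a fault is now chosen per region. A
hedged sketch of the selection logic, assuming the REGION_ID() macro and
region constants from the powerpc headers of this era:

	/* Sketch: pick the MMU page size for a faulting effective address */
	static int ea_psize(struct mm_struct *mm, unsigned long ea)
	{
		switch (REGION_ID(ea)) {
		case USER_REGION_ID:
			return mm->context.user_psize;	/* 64k until demoted */
		case VMALLOC_REGION_ID:
			if (ea < VMALLOC_END)
				return mmu_vmalloc_psize;	/* 64k, demotable to 4k */
			return mmu_io_psize;		/* imalloc: always 4k */
		default:
			return mmu_linear_psize;	/* kernel linear mapping */
		}
	}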
When a region gets switched from 64k pages to 4k pages, we do not have
to clear out all the 64k HPTEs from the hash table immediately. We
use the _PAGE_COMBO bit in the Linux PTE to indicate whether the page
was hashed in as a 64k page or a set of 4k pages. If hash_page is
trying to insert a 4k page for a Linux PTE and it sees that it has
already been inserted as a 64k page, it first invalidates the 64k HPTE
before inserting the 4k HPTE. The hash invalidation routines also use
the _PAGE_COMBO bit, to determine whether to look for a 64k HPTE or a
set of 4k HPTEs to remove. With those two changes, we can tolerate a
mix of 4k and 64k HPTEs in the hash table, and they will all get
removed when the address space is torn down.
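A hedged sketch of that tolerance logic; flush_hash_one() is a
hypothetical stand-in for the real hash invalidation routine, while
_PAGE_HASHPTE, _PAGE_COMBO, and pte_pagesize_index() come from this
patch and the existing headers:

	/*
	 * Sketch: inserting a 4k HPTE for a Linux PTE that was
	 * previously hashed in as a single 64k page.
	 */
	static void hash_insert_4k(pte_t *ptep, unsigned long ea, unsigned long vsid)
	{
		unsigned long pte = pte_val(*ptep);

		if ((pte & _PAGE_HASHPTE) && !(pte & _PAGE_COMBO))
			/* stale 64k HPTE for this PTE: invalidate it first */
			flush_hash_one(ea, vsid, MMU_PAGE_64K);

		/* from now on this PTE is backed by a set of 4k HPTEs */
		*ptep = __pte(pte | _PAGE_COMBO);
		/* ... insert the 4k HPTE for the faulting subpage ... */
	}

The teardown side makes the matching choice: invalidating with
pte_pagesize_index(pte) as the size removes either the one 64k HPTE or
the set of 4k HPTEs, depending on _PAGE_COMBO.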
Signed-off-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'include/asm-powerpc')
-rw-r--r--  include/asm-powerpc/mmu.h         | 13
-rw-r--r--  include/asm-powerpc/paca.h        |  1
-rw-r--r--  include/asm-powerpc/pgtable-4k.h  |  2
-rw-r--r--  include/asm-powerpc/pgtable-64k.h |  2
-rw-r--r--  include/asm-powerpc/pgtable.h     | 10
5 files changed, 20 insertions, 8 deletions
diff --git a/include/asm-powerpc/mmu.h b/include/asm-powerpc/mmu.h
index 88539742010..3a5ebe229af 100644
--- a/include/asm-powerpc/mmu.h
+++ b/include/asm-powerpc/mmu.h
@@ -165,6 +165,16 @@ struct mmu_psize_def
 extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
 extern int mmu_linear_psize;
 extern int mmu_virtual_psize;
+extern int mmu_vmalloc_psize;
+extern int mmu_io_psize;
+
+/*
+ * If the processor supports 64k normal pages but not 64k cache
+ * inhibited pages, we have to be prepared to switch processes
+ * to use 4k pages when they create cache-inhibited mappings.
+ * If this is the case, mmu_ci_restrictions will be set to 1.
+ */
+extern int mmu_ci_restrictions;
 
 #ifdef CONFIG_HUGETLB_PAGE
 /*
@@ -256,6 +266,7 @@ extern long iSeries_hpte_insert(unsigned long hpte_group,
 
 extern void stabs_alloc(void);
 extern void slb_initialize(void);
+extern void slb_flush_and_rebolt(void);
 extern void stab_initialize(unsigned long stab);
 
 #endif /* __ASSEMBLY__ */
@@ -359,6 +370,8 @@ typedef unsigned long mm_context_id_t;
 
 typedef struct {
 	mm_context_id_t id;
+	u16 user_psize;			/* page size index */
+	u16 sllp;			/* SLB entry page size encoding */
 #ifdef CONFIG_HUGETLB_PAGE
 	u16 low_htlb_areas, high_htlb_areas;
 #endif
diff --git a/include/asm-powerpc/paca.h b/include/asm-powerpc/paca.h
index c17fd54d995..17406353e2c 100644
--- a/include/asm-powerpc/paca.h
+++ b/include/asm-powerpc/paca.h
@@ -81,6 +81,7 @@ struct paca_struct {
 					 * on the linear mapping */
 
 	mm_context_t context;
+	u16 vmalloc_sllp;
 	u16 slb_cache[SLB_CACHE_ENTRIES];
 	u16 slb_cache_ptr;
 
diff --git a/include/asm-powerpc/pgtable-4k.h b/include/asm-powerpc/pgtable-4k.h
index b2e18629932..e7036155672 100644
--- a/include/asm-powerpc/pgtable-4k.h
+++ b/include/asm-powerpc/pgtable-4k.h
@@ -78,6 +78,8 @@
 
 #define pte_iterate_hashed_end() } while(0)
 
+#define pte_pagesize_index(pte)	MMU_PAGE_4K
+
 /*
  * 4-level page tables related bits
  */
diff --git a/include/asm-powerpc/pgtable-64k.h b/include/asm-powerpc/pgtable-64k.h
index 653915014dc..4b7126c53f3 100644
--- a/include/asm-powerpc/pgtable-64k.h
+++ b/include/asm-powerpc/pgtable-64k.h
@@ -90,6 +90,8 @@
 
 #define pte_iterate_hashed_end() } while(0); } } while(0)
 
+#define pte_pagesize_index(pte)	\
+	(((pte) & _PAGE_COMBO)? MMU_PAGE_4K: MMU_PAGE_64K)
 
 #endif /* __ASSEMBLY__ */
 #endif /* __KERNEL__ */
diff --git a/include/asm-powerpc/pgtable.h b/include/asm-powerpc/pgtable.h
index e9f1f4627e6..260a0fabe97 100644
--- a/include/asm-powerpc/pgtable.h
+++ b/include/asm-powerpc/pgtable.h
@@ -47,8 +47,8 @@ struct mm_struct;
 /*
  * Define the address range of the vmalloc VM area.
  */
-#define VMALLOC_START     (0xD000000000000000ul)
-#define VMALLOC_SIZE      (0x80000000000UL)
+#define VMALLOC_START	ASM_CONST(0xD000000000000000)
+#define VMALLOC_SIZE	ASM_CONST(0x80000000000)
 #define VMALLOC_END       (VMALLOC_START + VMALLOC_SIZE)
 
 /*
@@ -413,12 +413,6 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 		flush_tlb_pending();
 	}
 	pte = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
-
-#ifdef CONFIG_PPC_64K_PAGES
-	if (mmu_virtual_psize != MMU_PAGE_64K)
-		pte = __pte(pte_val(pte) | _PAGE_COMBO);
-#endif /* CONFIG_PPC_64K_PAGES */
-
 	*ptep = pte;
 }
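A note on the pgtable.h change: swapping the UL-suffixed literals for
ASM_CONST() lets VMALLOC_START and VMALLOC_SIZE be used from assembly,
where a UL suffix would not parse. For reference, ASM_CONST in the
powerpc headers of this era is defined roughly as:

	#ifdef __ASSEMBLY__
	#define ASM_CONST(x)	x
	#else
	#define __ASM_CONST(x)	x##UL
	#define ASM_CONST(x)	__ASM_CONST(x)
	#endif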