Diffstat (limited to 'include/asm-ppc64/pgtable.h')
-rw-r--r--	include/asm-ppc64/pgtable.h	161
1 files changed, 54 insertions, 107 deletions
diff --git a/include/asm-ppc64/pgtable.h b/include/asm-ppc64/pgtable.h
index 8c3f574046b..a9783ba7fe9 100644
--- a/include/asm-ppc64/pgtable.h
+++ b/include/asm-ppc64/pgtable.h
@@ -13,42 +13,14 @@
 #include <asm/mmu.h>
 #include <asm/page.h>
 #include <asm/tlbflush.h>
+struct mm_struct;
 #endif /* __ASSEMBLY__ */
 
-/*
- * Entries per page directory level.  The PTE level must use a 64b record
- * for each page table entry.  The PMD and PGD level use a 32b record for
- * each entry by assuming that each entry is page aligned.
- */
-#define PTE_INDEX_SIZE	9
-#define PMD_INDEX_SIZE	7
-#define PUD_INDEX_SIZE	7
-#define PGD_INDEX_SIZE	9
-
-#define PTE_TABLE_SIZE	(sizeof(pte_t) << PTE_INDEX_SIZE)
-#define PMD_TABLE_SIZE	(sizeof(pmd_t) << PMD_INDEX_SIZE)
-#define PUD_TABLE_SIZE	(sizeof(pud_t) << PUD_INDEX_SIZE)
-#define PGD_TABLE_SIZE	(sizeof(pgd_t) << PGD_INDEX_SIZE)
-
-#define PTRS_PER_PTE	(1 << PTE_INDEX_SIZE)
-#define PTRS_PER_PMD	(1 << PMD_INDEX_SIZE)
-#define PTRS_PER_PUD	(1 << PMD_INDEX_SIZE)
-#define PTRS_PER_PGD	(1 << PGD_INDEX_SIZE)
-
-/* PMD_SHIFT determines what a second-level page table entry can map */
-#define PMD_SHIFT	(PAGE_SHIFT + PTE_INDEX_SIZE)
-#define PMD_SIZE	(1UL << PMD_SHIFT)
-#define PMD_MASK	(~(PMD_SIZE-1))
-
-/* PUD_SHIFT determines what a third-level page table entry can map */
-#define PUD_SHIFT	(PMD_SHIFT + PMD_INDEX_SIZE)
-#define PUD_SIZE	(1UL << PUD_SHIFT)
-#define PUD_MASK	(~(PUD_SIZE-1))
-
-/* PGDIR_SHIFT determines what a fourth-level page table entry can map */
-#define PGDIR_SHIFT	(PUD_SHIFT + PUD_INDEX_SIZE)
-#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
-#define PGDIR_MASK	(~(PGDIR_SIZE-1))
+#ifdef CONFIG_PPC_64K_PAGES
+#include <asm/pgtable-64k.h>
+#else
+#include <asm/pgtable-4k.h>
+#endif
 
 #define FIRST_USER_ADDRESS	0
 
@@ -75,8 +47,9 @@
 #define VMALLOC_END   (VMALLOC_START + VMALLOC_SIZE)
 
 /*
- * Bits in a linux-style PTE.  These match the bits in the
- * (hardware-defined) PowerPC PTE as closely as possible.
+ * Common bits in a linux-style PTE.  These match the bits in the
+ * (hardware-defined) PowerPC PTE as closely as possible. Additional
+ * bits may be defined in pgtable-*.h
  */
 #define _PAGE_PRESENT	0x0001 /* software: pte contains a translation */
 #define _PAGE_USER	0x0002 /* matches one of the PP bits */
@@ -91,15 +64,6 @@
 #define _PAGE_RW	0x0200 /* software: user write access allowed */
 #define _PAGE_HASHPTE	0x0400 /* software: pte has an associated HPTE */
 #define _PAGE_BUSY	0x0800 /* software: PTE & hash are busy */
-#define _PAGE_SECONDARY 0x8000 /* software: HPTE is in secondary group */
-#define _PAGE_GROUP_IX  0x7000 /* software: HPTE index within group */
-#define _PAGE_HUGE	0x10000 /* 16MB page */
-/* Bits 0x7000 identify the index within an HPT Group */
-#define _PAGE_HPTEFLAGS (_PAGE_BUSY | _PAGE_HASHPTE | _PAGE_SECONDARY | _PAGE_GROUP_IX)
-/* PAGE_MASK gives the right answer below, but only by accident */
-/* It should be preserving the high 48 bits and then specifically */
-/* preserving _PAGE_SECONDARY | _PAGE_GROUP_IX */
-#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_HPTEFLAGS)
 
 #define _PAGE_BASE	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_COHERENT)
 
@@ -122,10 +86,10 @@
 #define PAGE_AGP	__pgprot(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_NO_CACHE)
 #define HAVE_PAGE_AGP
 
-/*
- * This bit in a hardware PTE indicates that the page is *not* executable.
- */
-#define HW_NO_EXEC	_PAGE_EXEC
+/* PTEIDX nibble */
+#define _PTEIDX_SECONDARY	0x8
+#define _PTEIDX_GROUP_IX	0x7
+
 
 /*
  * POWER4 and newer have per page execute protection, older chips can only
@@ -164,21 +128,10 @@ extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
 #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
 #endif /* __ASSEMBLY__ */
 
-/* shift to put page number into pte */
-#define PTE_SHIFT (17)
-
 #ifdef CONFIG_HUGETLB_PAGE
 
-#ifndef __ASSEMBLY__
-int hash_huge_page(struct mm_struct *mm, unsigned long access,
-		   unsigned long ea, unsigned long vsid, int local);
-#endif /* __ASSEMBLY__ */
-
 #define HAVE_ARCH_UNMAPPED_AREA
 #define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
-#else
-
-#define hash_huge_page(mm,a,ea,vsid,local)	-1
 
 #endif
 
@@ -197,7 +150,7 @@ static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
 	pte_t pte;
 
-	pte_val(pte) = (pfn << PTE_SHIFT) | pgprot_val(pgprot);
+	pte_val(pte) = (pfn << PTE_RPN_SHIFT) | pgprot_val(pgprot);
 	return pte;
 }
 
@@ -209,30 +162,25 @@ static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
 
 /* pte_clear moved to later in this file */
 
-#define pte_pfn(x)		((unsigned long)((pte_val(x) >> PTE_SHIFT)))
+#define pte_pfn(x)		((unsigned long)((pte_val(x)>>PTE_RPN_SHIFT)))
 #define pte_page(x)		pfn_to_page(pte_pfn(x))
 
-#define pmd_set(pmdp, ptep) 	({BUG_ON((u64)ptep < KERNELBASE); pmd_val(*(pmdp)) = (unsigned long)(ptep);})
+#define pmd_set(pmdp, pmdval) 	(pmd_val(*(pmdp)) = (pmdval))
 #define pmd_none(pmd)		(!pmd_val(pmd))
 #define pmd_bad(pmd)		(pmd_val(pmd) == 0)
 #define pmd_present(pmd)	(pmd_val(pmd) != 0)
 #define pmd_clear(pmdp)		(pmd_val(*(pmdp)) = 0)
-#define pmd_page_kernel(pmd)	(pmd_val(pmd))
+#define pmd_page_kernel(pmd)	(pmd_val(pmd) & ~PMD_MASKED_BITS)
 #define pmd_page(pmd)		virt_to_page(pmd_page_kernel(pmd))
 
-#define pud_set(pudp, pmdp)	(pud_val(*(pudp)) = (unsigned long)(pmdp))
+#define pud_set(pudp, pudval)	(pud_val(*(pudp)) = (pudval))
 #define pud_none(pud)		(!pud_val(pud))
 #define pud_bad(pud)		((pud_val(pud)) == 0)
 #define pud_present(pud)	(pud_val(pud) != 0)
 #define pud_clear(pudp)		(pud_val(*(pudp)) = 0)
-#define pud_page(pud)		(pud_val(pud))
+#define pud_page(pud)		(pud_val(pud) & ~PUD_MASKED_BITS)
 
 #define pgd_set(pgdp, pudp)	({pgd_val(*(pgdp)) = (unsigned long)(pudp);})
-#define pgd_none(pgd)		(!pgd_val(pgd))
-#define pgd_bad(pgd)		(pgd_val(pgd) == 0)
-#define pgd_present(pgd)	(pgd_val(pgd) != 0)
-#define pgd_clear(pgdp)		(pgd_val(*(pgdp)) = 0)
-#define pgd_page(pgd)		(pgd_val(pgd))
 
 /*
  * Find an entry in a page-table-directory.  We combine the address region
@@ -243,9 +191,6 @@ static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
 
 #define pgd_offset(mm, address)	 ((mm)->pgd + pgd_index(address))
 
-#define pud_offset(pgdp, addr)	\
-  (((pud_t *) pgd_page(*(pgdp))) + (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1)))
-
 #define pmd_offset(pudp,addr) \
   (((pmd_t *) pud_page(*(pudp))) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))
 
@@ -271,7 +216,6 @@ static inline int pte_exec(pte_t pte)  { return pte_val(pte) & _PAGE_EXEC;}
 static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY;}
 static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED;}
 static inline int pte_file(pte_t pte)  { return pte_val(pte) & _PAGE_FILE;}
-static inline int pte_huge(pte_t pte)  { return pte_val(pte) & _PAGE_HUGE;}
 
 static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; }
 static inline void pte_cache(pte_t pte)   { pte_val(pte) &= ~_PAGE_NO_CACHE; }
@@ -286,7 +230,6 @@ static inline pte_t pte_mkclean(pte_t pte) {
 	pte_val(pte) &= ~(_PAGE_DIRTY); return pte; }
 static inline pte_t pte_mkold(pte_t pte) {
 	pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
-
 static inline pte_t pte_mkread(pte_t pte) {
 	pte_val(pte) |= _PAGE_USER; return pte; }
 static inline pte_t pte_mkexec(pte_t pte) {
@@ -298,7 +241,7 @@ static inline pte_t pte_mkdirty(pte_t pte) {
 static inline pte_t pte_mkyoung(pte_t pte) {
 	pte_val(pte) |= _PAGE_ACCESSED; return pte; }
 static inline pte_t pte_mkhuge(pte_t pte) {
-	pte_val(pte) |= _PAGE_HUGE; return pte; }
+	return pte; }
 
 /* Atomic PTE updates */
 static inline unsigned long pte_update(pte_t *p, unsigned long clr)
@@ -321,11 +264,13 @@ static inline unsigned long pte_update(pte_t *p, unsigned long clr)
 
 /* PTE updating functions, this function puts the PTE in the
  * batch, doesn't actually triggers the hash flush immediately,
  * you need to call flush_tlb_pending() to do that.
+ * Pass -1 for "normal" size (4K or 64K)
  */
-extern void hpte_update(struct mm_struct *mm, unsigned long addr, unsigned long pte,
-			int wrprot);
+extern void hpte_update(struct mm_struct *mm, unsigned long addr,
+			pte_t *ptep, unsigned long pte, int huge);
 
-static inline int __ptep_test_and_clear_young(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
+					      unsigned long addr, pte_t *ptep)
 {
 	unsigned long old;
 
@@ -333,7 +278,7 @@ static inline int __ptep_test_and_clear_young(struct mm_struct *mm, unsigned lon
 		return 0;
 	old = pte_update(ptep, _PAGE_ACCESSED);
 	if (old & _PAGE_HASHPTE) {
-		hpte_update(mm, addr, old, 0);
+		hpte_update(mm, addr, ptep, old, 0);
 		flush_tlb_pending();
 	}
 	return (old & _PAGE_ACCESSED) != 0;
@@ -351,7 +296,8 @@ static inline int __ptep_test_and_clear_young(struct mm_struct *mm, unsigned lon
  * moment we always flush but we need to fix hpte_update and test if the
  * optimisation is worth it.
  */
-static inline int __ptep_test_and_clear_dirty(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+static inline int __ptep_test_and_clear_dirty(struct mm_struct *mm,
+					      unsigned long addr, pte_t *ptep)
 {
 	unsigned long old;
 
@@ -359,7 +305,7 @@ static inline int __ptep_test_and_clear_dirty(struct mm_struct *mm, unsigned lon
 		return 0;
 	old = pte_update(ptep, _PAGE_DIRTY);
 	if (old & _PAGE_HASHPTE)
-		hpte_update(mm, addr, old, 0);
+		hpte_update(mm, addr, ptep, old, 0);
 	return (old & _PAGE_DIRTY) != 0;
 }
 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
@@ -371,7 +317,8 @@ static inline int __ptep_test_and_clear_dirty(struct mm_struct *mm, unsigned lon
 })
 
 #define __HAVE_ARCH_PTEP_SET_WRPROTECT
-static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
+				      pte_t *ptep)
 {
 	unsigned long old;
 
@@ -379,7 +326,7 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
 		return;
 	old = pte_update(ptep, _PAGE_RW);
 	if (old & _PAGE_HASHPTE)
-		hpte_update(mm, addr, old, 0);
+		hpte_update(mm, addr, ptep, old, 0);
 }
 
 /*
@@ -408,21 +355,23 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
 })
 
 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
-static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
+				       unsigned long addr, pte_t *ptep)
 {
 	unsigned long old = pte_update(ptep, ~0UL);
 
 	if (old & _PAGE_HASHPTE)
-		hpte_update(mm, addr, old, 0);
+		hpte_update(mm, addr, ptep, old, 0);
 	return __pte(old);
 }
 
-static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t * ptep)
+static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
+			     pte_t * ptep)
 {
 	unsigned long old = pte_update(ptep, ~0UL);
 
 	if (old & _PAGE_HASHPTE)
-		hpte_update(mm, addr, old, 0);
+		hpte_update(mm, addr, ptep, old, 0);
 }
 
 /*
@@ -435,7 +384,14 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 		pte_clear(mm, addr, ptep);
 		flush_tlb_pending();
 	}
-	*ptep = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
+	pte = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
+
+#ifdef CONFIG_PPC_64K_PAGES
+	if (mmu_virtual_psize != MMU_PAGE_64K)
+		pte = __pte(pte_val(pte) | _PAGE_COMBO);
+#endif /* CONFIG_PPC_64K_PAGES */
+
+	*ptep = pte;
 }
 
 /* Set the dirty and/or accessed bits atomically in a linux PTE, this
@@ -482,8 +438,6 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 	printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
 #define pmd_ERROR(e) \
 	printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
-#define pud_ERROR(e) \
-	printk("%s:%d: bad pud %08lx.\n", __FILE__, __LINE__, pud_val(e))
 #define pgd_ERROR(e) \
 	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
 
@@ -509,12 +463,12 @@ extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
 /* Encode and de-code a swap entry */
 #define __swp_type(entry)	(((entry).val >> 1) & 0x3f)
 #define __swp_offset(entry)	((entry).val >> 8)
-#define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 1) | ((offset) << 8) })
-#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) >> PTE_SHIFT })
-#define __swp_entry_to_pte(x)	((pte_t) { (x).val << PTE_SHIFT })
-#define pte_to_pgoff(pte)	(pte_val(pte) >> PTE_SHIFT)
-#define pgoff_to_pte(off)	((pte_t) {((off) << PTE_SHIFT)|_PAGE_FILE})
-#define PTE_FILE_MAX_BITS	(BITS_PER_LONG - PTE_SHIFT)
+#define __swp_entry(type, offset) ((swp_entry_t){((type)<< 1)|((offset)<<8)})
+#define __pte_to_swp_entry(pte)	((swp_entry_t){pte_val(pte) >> PTE_RPN_SHIFT})
+#define __swp_entry_to_pte(x)	((pte_t) { (x).val << PTE_RPN_SHIFT })
+#define pte_to_pgoff(pte)	(pte_val(pte) >> PTE_RPN_SHIFT)
+#define pgoff_to_pte(off)	((pte_t) {((off) << PTE_RPN_SHIFT)|_PAGE_FILE})
+#define PTE_FILE_MAX_BITS	(BITS_PER_LONG - PTE_RPN_SHIFT)
 
 /*
  * kern_addr_valid is intended to indicate whether an address is a valid
@@ -532,29 +486,22 @@ void pgtable_cache_init(void);
 /*
  * find_linux_pte returns the address of a linux pte for a given
  * effective address and directory.  If not found, it returns zero.
- */
-static inline pte_t *find_linux_pte(pgd_t *pgdir, unsigned long ea)
+ */static inline pte_t *find_linux_pte(pgd_t *pgdir, unsigned long ea)
 {
 	pgd_t *pg;
 	pud_t *pu;
 	pmd_t *pm;
 	pte_t *pt = NULL;
-	pte_t pte;
 
 	pg = pgdir + pgd_index(ea);
 	if (!pgd_none(*pg)) {
 		pu = pud_offset(pg, ea);
 		if (!pud_none(*pu)) {
 			pm = pmd_offset(pu, ea);
-			if (pmd_present(*pm)) {
+			if (pmd_present(*pm))
 				pt = pte_offset_kernel(pm, ea);
-				pte = *pt;
-				if (!pte_present(pte))
-					pt = NULL;
-			}
 		}
 	}
-
 	return pt;
 }
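
One recurring change above is the replacement of the hard-coded PTE_SHIFT (17) by PTE_RPN_SHIFT, which now comes from pgtable-4k.h or pgtable-64k.h, so pfn_pte()/pte_pfn() and the swap/file macros all pick up a page-size-dependent position for the real page number (RPN). A minimal standalone sketch of the encode/decode round trip follows; the shift value 17 merely mirrors the old 4K PTE_SHIFT and the protection bits are invented for the demo, neither is taken from the headers:

#include <assert.h>

#define DEMO_PTE_RPN_SHIFT 17   /* assumed: the old 4K-page value */
#define DEMO_PGPROT 0x1a5UL     /* assumed protection bits, all below the RPN */

/* models pfn_pte(): place the pfn above the software/hardware bits */
static unsigned long demo_pfn_pte(unsigned long pfn, unsigned long prot)
{
    return (pfn << DEMO_PTE_RPN_SHIFT) | prot;
}

/* models pte_pfn(): shift the low bits out to recover the pfn */
static unsigned long demo_pte_pfn(unsigned long pte)
{
    return pte >> DEMO_PTE_RPN_SHIFT;
}

int main(void)
{
    unsigned long pfn = 0x12345;
    unsigned long pte = demo_pfn_pte(pfn, DEMO_PGPROT);

    /* the protection bits live below the RPN, so the pfn survives intact */
    assert(demo_pte_pfn(pte) == pfn);
    return 0;
}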
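The other systematic change is hpte_update() growing a pte_t *ptep argument (and an int huge flag in place of wrprot), so the batched hash invalidation can recover the mapping's page size from the PTE itself. Every helper in the header follows the same idiom: atomically clear bits in the PTE, then queue a hash invalidate only if the old value had _PAGE_HASHPTE set. Below is a self-contained model of that idiom, not kernel code; the demo_* functions and the printf stand in for the real atomic pte_update() and the flush batch, and the bit values mirror the header:

#include <stdio.h>

#define DEMO_PAGE_ACCESSED 0x0100UL
#define DEMO_PAGE_HASHPTE  0x0400UL

/* stand-in for pte_update(): clear the requested bits, return the old PTE */
static unsigned long demo_pte_update(unsigned long *p, unsigned long clr)
{
    unsigned long old = *p;

    *p = old & ~clr;
    return old;
}

/* stand-in for hpte_update(): note it receives the pte pointer and a huge
 * flag, matching the new signature introduced by this patch */
static void demo_hpte_update(unsigned long *ptep, unsigned long old, int huge)
{
    printf("queue hash invalidate: ptep=%p old=%#lx huge=%d\n",
           (void *)ptep, old, huge);
}

static int demo_test_and_clear_young(unsigned long *ptep)
{
    unsigned long old = demo_pte_update(ptep, DEMO_PAGE_ACCESSED);

    /* only PTEs that ever had a hash entry need an invalidate queued */
    if (old & DEMO_PAGE_HASHPTE)
        demo_hpte_update(ptep, old, 0);
    return (old & DEMO_PAGE_ACCESSED) != 0;
}

int main(void)
{
    unsigned long pte = DEMO_PAGE_ACCESSED | DEMO_PAGE_HASHPTE;

    printf("young: %d\n", demo_test_and_clear_young(&pte)); /* prints 1 */
    printf("young: %d\n", demo_test_and_clear_young(&pte)); /* prints 0 */
    return 0;
}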
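The swap-entry macros keep their packing (type in bits 1 through 6, offset from bit 8 upward, bit 0 left clear so the entry never looks like a present translation) and only switch the PTE conversion from PTE_SHIFT to PTE_RPN_SHIFT. A quick standalone check of the pack/unpack arithmetic used by __swp_entry(), __swp_type() and __swp_offset():

#include <assert.h>

static unsigned long demo_swp_entry(unsigned long type, unsigned long offset)
{
    return (type << 1) | (offset << 8);   /* bit 0 (_PAGE_PRESENT) stays 0 */
}

static unsigned long demo_swp_type(unsigned long val)
{
    return (val >> 1) & 0x3f;             /* six type bits */
}

static unsigned long demo_swp_offset(unsigned long val)
{
    return val >> 8;
}

int main(void)
{
    unsigned long e = demo_swp_entry(5, 12345);

    assert(demo_swp_type(e) == 5);
    assert(demo_swp_offset(e) == 12345);
    assert((e & 1) == 0);                 /* never a valid translation */
    return 0;
}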
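Finally, the simplified find_linux_pte() descends pgd, pud, pmd and pte, bailing out at the first empty level; the patch also drops the old pte_present() check, so callers now get a pointer to the PTE slot and must inspect its contents themselves. A toy model of that walk, using demo structures rather than the kernel's types and offset macros:

#include <stdio.h>
#include <stddef.h>

struct demo_pmd { unsigned long *ptes; };   /* PTE page, or NULL */
struct demo_pud { struct demo_pmd *pmds; }; /* PMD table, or NULL */
struct demo_pgd { struct demo_pud *puds; }; /* PUD table, or NULL */

static unsigned long *demo_find_pte(struct demo_pgd *pgd,
                                    unsigned pgd_i, unsigned pud_i,
                                    unsigned pmd_i, unsigned pte_i)
{
    unsigned long *pt = NULL;

    /* mirror the nested none-checks: stop at the first missing level */
    if (pgd[pgd_i].puds) {
        struct demo_pud *pu = &pgd[pgd_i].puds[pud_i];
        if (pu->pmds) {
            struct demo_pmd *pm = &pu->pmds[pmd_i];
            if (pm->ptes)
                pt = &pm->ptes[pte_i];
        }
    }
    return pt;   /* may point at a not-present PTE; caller must check */
}

int main(void)
{
    static unsigned long ptes[4] = { 0, 0xabcd0001UL, 0, 0 };
    static struct demo_pmd pmds[4];
    static struct demo_pud puds[4];
    static struct demo_pgd pgds[4];
    unsigned long *pt;

    pmds[3].ptes = ptes;    /* build one populated path: 2 -> 1 -> 3 -> 1 */
    puds[1].pmds = pmds;
    pgds[2].puds = puds;

    pt = demo_find_pte(pgds, 2, 1, 3, 1);
    printf("pte = %#lx\n", pt ? *pt : 0UL);   /* prints 0xabcd0001 */
    return 0;
}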