From 584f8b71a2e8abdaeb4b6f4fddaf542b61392453 Mon Sep 17 00:00:00 2001
From: Michael Neuling
Date: Thu, 6 Dec 2007 17:24:48 +1100
Subject: [POWERPC] Use SLB size from the device tree

Currently we hardwire the number of SLBs to 64, but PAPR says we
should use the ibm,slb-size property to obtain the number of SLB
entries.  This uses this property instead of assuming 64.  If no
property is found, we assume 64 entries as before.

This soft patches the SLB handler, so it shouldn't change performance
at all.

Signed-off-by: Michael Neuling
Signed-off-by: Paul Mackerras
---
 include/asm-powerpc/mmu-hash64.h | 1 +
 include/asm-powerpc/reg.h        | 6 ------
 2 files changed, 1 insertion(+), 6 deletions(-)

(limited to 'include')

diff --git a/include/asm-powerpc/mmu-hash64.h b/include/asm-powerpc/mmu-hash64.h
index 82328dec2b5..12e5e773c67 100644
--- a/include/asm-powerpc/mmu-hash64.h
+++ b/include/asm-powerpc/mmu-hash64.h
@@ -180,6 +180,7 @@ extern int mmu_vmalloc_psize;
 extern int mmu_io_psize;
 extern int mmu_kernel_ssize;
 extern int mmu_highuser_ssize;
+extern u16 mmu_slb_size;
 
 /*
  * If the processor supports 64k normal pages but not 64k cache
diff --git a/include/asm-powerpc/reg.h b/include/asm-powerpc/reg.h
index e775ff1ca41..1f685047c6f 100644
--- a/include/asm-powerpc/reg.h
+++ b/include/asm-powerpc/reg.h
@@ -691,12 +691,6 @@
 #define PV_BE		0x0070
 #define PV_PA6T		0x0090
 
-/*
- * Number of entries in the SLB.  If this ever changes we should handle
- * it with a use a cpu feature fixup.
- */
-#define SLB_NUM_ENTRIES	64
-
 /* Macros for setting and retrieving special purpose registers */
 #ifndef __ASSEMBLY__
 #define mfmsr()		({unsigned long rval; \
--
cgit v1.2.3
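
[Editor's sketch] The diff above is limited to include/, so the code that actually
sets and consumes the new mmu_slb_size variable is not shown.  As a rough sketch of
what the consumer side could look like, the snippet below reads the ibm,slb-size
property during early flat-device-tree scanning with the kernel's
of_get_flat_dt_prop() helper and falls back to 64 entries when the property is
absent.  The function name check_cpu_slb_size() and its placement are assumptions
for illustration, not part of this patch.

#include <asm/prom.h>		/* of_get_flat_dt_prop() */
#include <asm/mmu.h>		/* declaration of mmu_slb_size */

/* Default to 64 SLB entries when the device tree has no ibm,slb-size. */
u16 mmu_slb_size = 64;

/*
 * Hypothetical helper, called while scanning a cpu node of the flat
 * device tree: override the default with the ibm,slb-size property.
 */
static void __init check_cpu_slb_size(unsigned long node)
{
	u32 *slb_size_ptr;

	slb_size_ptr = of_get_flat_dt_prop(node, "ibm,slb-size", NULL);
	if (slb_size_ptr != NULL)
		mmu_slb_size = *slb_size_ptr;
}

The SLB miss handler can then be soft-patched to use mmu_slb_size in place of the
old SLB_NUM_ENTRIES constant, which is why the commit message expects no
performance change.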