-rw-r--r--  arch/powerpc/kernel/head_40x.S |  2
-rw-r--r--  arch/powerpc/mm/40x_mmu.c      |  4
-rw-r--r--  include/asm-powerpc/mmu-40x.h  | 65
-rw-r--r--  include/asm-powerpc/mmu.h      |  3
4 files changed, 71 insertions, 3 deletions
diff --git a/arch/powerpc/kernel/head_40x.S b/arch/powerpc/kernel/head_40x.S
index adc7f8097cd..fe8afb60290 100644
--- a/arch/powerpc/kernel/head_40x.S
+++ b/arch/powerpc/kernel/head_40x.S
@@ -772,7 +772,7 @@ finish_tlb_load:
*/
lwz r9, tlb_4xx_index@l(0)
addi r9, r9, 1
- andi. r9, r9, (PPC4XX_TLB_SIZE-1)
+ andi. r9, r9, (PPC40X_TLB_SIZE-1)
stw r9, tlb_4xx_index@l(0)
6:
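
The rename above only touches the constant; the surrounding code is the 40x software TLB replacement policy: a global index is incremented and wrapped with a power-of-two mask to pick the next victim slot. A minimal C sketch of the equivalent logic, assuming PPC40X_TLB_SIZE (64, per the new header below); next_tlb_slot() is an illustrative name, not a kernel function:

#define PPC40X_TLB_SIZE 64

/* Round-robin replacement index, analogous to tlb_4xx_index above. */
static unsigned long tlb_4xx_index;

/* Pick the next TLB slot to overwrite.  Masking with (size - 1) is a
 * cheap modulo because the TLB size is a power of two; this is what
 * the andi. instruction in the hunk implements. */
static unsigned long next_tlb_slot(void)
{
	tlb_4xx_index = (tlb_4xx_index + 1) & (PPC40X_TLB_SIZE - 1);
	return tlb_4xx_index;
}
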
diff --git a/arch/powerpc/mm/40x_mmu.c b/arch/powerpc/mm/40x_mmu.c
index 7ff2609b64d..e067df836be 100644
--- a/arch/powerpc/mm/40x_mmu.c
+++ b/arch/powerpc/mm/40x_mmu.c
@@ -108,7 +108,7 @@ unsigned long __init mmu_mapin_ram(void)
pmd_t *pmdp;
unsigned long val = p | _PMD_SIZE_16M | _PAGE_HWEXEC | _PAGE_HWWRITE;
- pmdp = pmd_offset(pgd_offset_k(v), v);
+ pmdp = pmd_offset(pud_offset(pgd_offset_k(v), v), v);
pmd_val(*pmdp++) = val;
pmd_val(*pmdp++) = val;
pmd_val(*pmdp++) = val;
@@ -123,7 +123,7 @@ unsigned long __init mmu_mapin_ram(void)
pmd_t *pmdp;
unsigned long val = p | _PMD_SIZE_4M | _PAGE_HWEXEC | _PAGE_HWWRITE;
- pmdp = pmd_offset(pgd_offset_k(v), v);
+ pmdp = pmd_offset(pud_offset(pgd_offset_k(v), v), v);
pmd_val(*pmdp) = val;
v += LARGE_PAGE_SIZE_4M;
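
Both hunks in this file make the same fix: with the generic four-level page-table API, a PGD entry has to be passed through pud_offset() before pmd_offset(), even on 32-bit 40x where the PUD level is folded away at compile time. A sketch of the resulting walk, assuming kernel context for the page-table accessors; kernel_pmd_for() is an illustrative name only:

/* Walk the kernel page tables down to the PMD covering address v. */
static pmd_t *kernel_pmd_for(unsigned long v)
{
	pgd_t *pgdp = pgd_offset_k(v);		/* top-level entry for v */
	pud_t *pudp = pud_offset(pgdp, v);	/* folded level: effectively a cast */

	return pmd_offset(pudp, v);		/* PMD written by mmu_mapin_ram() */
}
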
diff --git a/include/asm-powerpc/mmu-40x.h b/include/asm-powerpc/mmu-40x.h
new file mode 100644
index 00000000000..7d37f77043a
--- /dev/null
+++ b/include/asm-powerpc/mmu-40x.h
@@ -0,0 +1,65 @@
+#ifndef _ASM_POWERPC_MMU_40X_H_
+#define _ASM_POWERPC_MMU_40X_H_
+
+/*
+ * PPC40x support
+ */
+
+#define PPC40X_TLB_SIZE 64
+
+/*
+ * TLB entries are defined by a "high" tag portion and a "low" data
+ * portion. On all architectures, the data portion is 32-bits.
+ *
+ * TLB entries are managed entirely under software control by reading,
+ * writing, and searching using the 4xx-specific tlbre, tlbwr, and tlbsx
+ * instructions.
+ */
+
+#define TLB_LO 1
+#define TLB_HI 0
+
+#define TLB_DATA TLB_LO
+#define TLB_TAG TLB_HI
+
+/* Tag portion */
+
+#define TLB_EPN_MASK 0xFFFFFC00 /* Effective Page Number */
+#define TLB_PAGESZ_MASK 0x00000380
+#define TLB_PAGESZ(x) (((x) & 0x7) << 7)
+#define PAGESZ_1K 0
+#define PAGESZ_4K 1
+#define PAGESZ_16K 2
+#define PAGESZ_64K 3
+#define PAGESZ_256K 4
+#define PAGESZ_1M 5
+#define PAGESZ_4M 6
+#define PAGESZ_16M 7
+#define TLB_VALID 0x00000040 /* Entry is valid */
+
+/* Data portion */
+
+#define TLB_RPN_MASK 0xFFFFFC00 /* Real Page Number */
+#define TLB_PERM_MASK 0x00000300
+#define TLB_EX 0x00000200 /* Instruction execution allowed */
+#define TLB_WR 0x00000100 /* Writes permitted */
+#define TLB_ZSEL_MASK 0x000000F0
+#define TLB_ZSEL(x) (((x) & 0xF) << 4)
+#define TLB_ATTR_MASK 0x0000000F
+#define TLB_W 0x00000008 /* Caching is write-through */
+#define TLB_I 0x00000004 /* Caching is inhibited */
+#define TLB_M 0x00000002 /* Memory is coherent */
+#define TLB_G 0x00000001 /* Memory is guarded from prefetch */
+
+#ifndef __ASSEMBLY__
+
+typedef unsigned long phys_addr_t;
+
+typedef struct {
+ unsigned long id;
+ unsigned long vdso_base;
+} mm_context_t;
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_POWERPC_MMU_40X_H_ */
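
For reference, the tag and data words described by the masks in the new header could be composed as below. This is a sketch only, assuming the macros above; mk_tlb_tag() and mk_tlb_data() are illustrative helpers, not kernel functions (the real entries are built in the assembly TLB-miss paths):

/* Illustrative only: compose the two words of a 40x TLB entry. */
static inline unsigned long mk_tlb_tag(unsigned long ea, int pagesz)
{
	/* Effective page number, page-size field and valid bit. */
	return (ea & TLB_EPN_MASK) | TLB_PAGESZ(pagesz) | TLB_VALID;
}

static inline unsigned long mk_tlb_data(unsigned long pa, unsigned long perm)
{
	/* Real page number plus permission/attribute bits,
	 * e.g. TLB_EX | TLB_WR | TLB_I | TLB_G. */
	return (pa & TLB_RPN_MASK) | perm;
}

For example, mk_tlb_tag(va, PAGESZ_4K) paired with mk_tlb_data(pa, TLB_EX | TLB_WR) would describe an executable, writable 4K mapping of pa at va.
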
diff --git a/include/asm-powerpc/mmu.h b/include/asm-powerpc/mmu.h
index d44d211e758..4c0e1b4f975 100644
--- a/include/asm-powerpc/mmu.h
+++ b/include/asm-powerpc/mmu.h
@@ -8,6 +8,9 @@
#elif defined(CONFIG_PPC_STD_MMU)
/* 32-bit classic hash table MMU */
# include <asm/mmu-hash32.h>
+#elif defined(CONFIG_40x)
+/* 40x-style software loaded TLB */
+# include <asm/mmu-40x.h>
#elif defined(CONFIG_44x)
/* 44x-style software loaded TLB */
# include <asm/mmu-44x.h>