Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--  arch/powerpc/mm/Makefile                                             |  10
-rw-r--r--  arch/powerpc/mm/fault.c                                              |  14
-rw-r--r--  arch/powerpc/mm/hash_low_32.S                                        | 111
-rw-r--r--  arch/powerpc/mm/hugetlbpage.c                                        |  22
-rw-r--r--  arch/powerpc/mm/init_32.c                                            |   6
-rw-r--r--  arch/powerpc/mm/mem.c                                                |   6
-rw-r--r--  arch/powerpc/mm/mmu_context_32.c                                     |  84
-rw-r--r--  arch/powerpc/mm/mmu_context_hash32.c                                 | 103
-rw-r--r--  arch/powerpc/mm/mmu_context_hash64.c (renamed from arch/powerpc/mm/mmu_context_64.c) |   8
-rw-r--r--  arch/powerpc/mm/mmu_context_nohash.c                                 | 397
-rw-r--r--  arch/powerpc/mm/mmu_decl.h                                           |  65
-rw-r--r--  arch/powerpc/mm/pgtable.c                                            | 117
-rw-r--r--  arch/powerpc/mm/pgtable_32.c                                         |  56
-rw-r--r--  arch/powerpc/mm/ppc_mmu_32.c                                         |  10
-rw-r--r--  arch/powerpc/mm/tlb_hash32.c (renamed from arch/powerpc/mm/tlb_32.c) |   4
-rw-r--r--  arch/powerpc/mm/tlb_hash64.c (renamed from arch/powerpc/mm/tlb_64.c) |  86
-rw-r--r--  arch/powerpc/mm/tlb_nohash.c                                         | 209
-rw-r--r--  arch/powerpc/mm/tlb_nohash_low.S                                     | 166
18 files changed, 1191 insertions(+), 283 deletions(-)
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
index e7392b45a5e..953cc4a1cde 100644
--- a/arch/powerpc/mm/Makefile
+++ b/arch/powerpc/mm/Makefile
@@ -6,17 +6,19 @@ ifeq ($(CONFIG_PPC64),y)
EXTRA_CFLAGS += -mno-minimal-toc
endif
-obj-y := fault.o mem.o \
+obj-y := fault.o mem.o pgtable.o \
init_$(CONFIG_WORD_SIZE).o \
- pgtable_$(CONFIG_WORD_SIZE).o \
- mmu_context_$(CONFIG_WORD_SIZE).o
+ pgtable_$(CONFIG_WORD_SIZE).o
+obj-$(CONFIG_PPC_MMU_NOHASH) += mmu_context_nohash.o tlb_nohash.o \
+ tlb_nohash_low.o
hash-$(CONFIG_PPC_NATIVE) := hash_native_64.o
obj-$(CONFIG_PPC64) += hash_utils_64.o \
slb_low.o slb.o stab.o \
gup.o mmap.o $(hash-y)
obj-$(CONFIG_PPC_STD_MMU_32) += ppc_mmu_32.o
obj-$(CONFIG_PPC_STD_MMU) += hash_low_$(CONFIG_WORD_SIZE).o \
- tlb_$(CONFIG_WORD_SIZE).o
+ tlb_hash$(CONFIG_WORD_SIZE).o \
+ mmu_context_hash$(CONFIG_WORD_SIZE).o
obj-$(CONFIG_40x) += 40x_mmu.o
obj-$(CONFIG_44x) += 44x_mmu.o
obj-$(CONFIG_FSL_BOOKE) += fsl_booke_mmu.o
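With CONFIG_WORD_SIZE=32, the CONFIG_PPC_STD_MMU rule now expands to tlb_hash32.o and mmu_context_hash32.o (tlb_hash64.o and mmu_context_hash64.o on 64-bit), matching the file renames further down, while the software-loaded-TLB context and flush code is selected by CONFIG_PPC_MMU_NOHASH rather than by word size; the new common pgtable.o is built everywhere.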
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 866098686da..91c7b8636b8 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -30,6 +30,7 @@
#include <linux/kprobes.h>
#include <linux/kdebug.h>
+#include <asm/firmware.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
@@ -283,7 +284,7 @@ good_area:
}
pte_update(ptep, 0, _PAGE_HWEXEC |
_PAGE_ACCESSED);
- _tlbie(address, mm->context.id);
+ local_flush_tlb_page(vma, address);
pte_unmap_unlock(ptep, ptl);
up_read(&mm->mmap_sem);
return 0;
@@ -318,9 +319,16 @@ good_area:
goto do_sigbus;
BUG();
}
- if (ret & VM_FAULT_MAJOR)
+ if (ret & VM_FAULT_MAJOR) {
current->maj_flt++;
- else
+#ifdef CONFIG_PPC_SMLPAR
+ if (firmware_has_feature(FW_FEATURE_CMO)) {
+ preempt_disable();
+ get_lppaca()->page_ins += (1 << PAGE_FACTOR);
+ preempt_enable();
+ }
+#endif
+ } else
current->min_flt++;
up_read(&mm->mmap_sem);
return 0;
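For the CMO accounting added above: PAGE_FACTOR is the kernel page shift minus the 4K hardware page shift, so (1 << PAGE_FACTOR) credits each major fault to the lppaca page_ins counter as the equivalent number of 4K hardware pages; preemption is disabled around get_lppaca() because it refers to a per-CPU structure.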
diff --git a/arch/powerpc/mm/hash_low_32.S b/arch/powerpc/mm/hash_low_32.S
index 7bffb70b9fe..67850ec9feb 100644
--- a/arch/powerpc/mm/hash_low_32.S
+++ b/arch/powerpc/mm/hash_low_32.S
@@ -36,36 +36,6 @@ mmu_hash_lock:
#endif /* CONFIG_SMP */
/*
- * Sync CPUs with hash_page taking & releasing the hash
- * table lock
- */
-#ifdef CONFIG_SMP
- .text
-_GLOBAL(hash_page_sync)
- mfmsr r10
- rlwinm r0,r10,0,17,15 /* clear bit 16 (MSR_EE) */
- mtmsr r0
- lis r8,mmu_hash_lock@h
- ori r8,r8,mmu_hash_lock@l
- lis r0,0x0fff
- b 10f
-11: lwz r6,0(r8)
- cmpwi 0,r6,0
- bne 11b
-10: lwarx r6,0,r8
- cmpwi 0,r6,0
- bne- 11b
- stwcx. r0,0,r8
- bne- 10b
- isync
- eieio
- li r0,0
- stw r0,0(r8)
- mtmsr r10
- blr
-#endif /* CONFIG_SMP */
-
-/*
* Load a PTE into the hash table, if possible.
* The address is in r4, and r3 contains an access flag:
* _PAGE_RW (0x400) if a write.
@@ -353,8 +323,8 @@ _GLOBAL(create_hpte)
ori r8,r8,0xe14 /* clear out reserved bits and M */
andc r8,r5,r8 /* PP = user? (rw&dirty? 2: 3): 0 */
BEGIN_FTR_SECTION
- ori r8,r8,_PAGE_COHERENT /* set M (coherence required) */
-END_FTR_SECTION_IFSET(CPU_FTR_NEED_COHERENT)
+ rlwinm r8,r8,0,~_PAGE_COHERENT /* clear M (coherence not required) */
+END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
#ifdef CONFIG_PTE_64BIT
/* Put the XPN bits into the PTE */
rlwimi r8,r10,8,20,22
@@ -663,3 +633,80 @@ _GLOBAL(flush_hash_patch_B)
SYNC_601
isync
blr
+
+/*
+ * Flush an entry from the TLB
+ */
+_GLOBAL(_tlbie)
+#ifdef CONFIG_SMP
+ rlwinm r8,r1,0,0,(31-THREAD_SHIFT)
+ lwz r8,TI_CPU(r8)
+ oris r8,r8,11
+ mfmsr r10
+ SYNC
+ rlwinm r0,r10,0,17,15 /* clear bit 16 (MSR_EE) */
+ rlwinm r0,r0,0,28,26 /* clear DR */
+ mtmsr r0
+ SYNC_601
+ isync
+ lis r9,mmu_hash_lock@h
+ ori r9,r9,mmu_hash_lock@l
+ tophys(r9,r9)
+10: lwarx r7,0,r9
+ cmpwi 0,r7,0
+ bne- 10b
+ stwcx. r8,0,r9
+ bne- 10b
+ eieio
+ tlbie r3
+ sync
+ TLBSYNC
+ li r0,0
+ stw r0,0(r9) /* clear mmu_hash_lock */
+ mtmsr r10
+ SYNC_601
+ isync
+#else /* CONFIG_SMP */
+ tlbie r3
+ sync
+#endif /* CONFIG_SMP */
+ blr
+
+/*
+ * Flush the entire TLB. 603/603e only
+ */
+_GLOBAL(_tlbia)
+#if defined(CONFIG_SMP)
+ rlwinm r8,r1,0,0,(31-THREAD_SHIFT)
+ lwz r8,TI_CPU(r8)
+ oris r8,r8,10
+ mfmsr r10
+ SYNC
+ rlwinm r0,r10,0,17,15 /* clear bit 16 (MSR_EE) */
+ rlwinm r0,r0,0,28,26 /* clear DR */
+ mtmsr r0
+ SYNC_601
+ isync
+ lis r9,mmu_hash_lock@h
+ ori r9,r9,mmu_hash_lock@l
+ tophys(r9,r9)
+10: lwarx r7,0,r9
+ cmpwi 0,r7,0
+ bne- 10b
+ stwcx. r8,0,r9
+ bne- 10b
+ sync
+ tlbia
+ sync
+ TLBSYNC
+ li r0,0
+ stw r0,0(r9) /* clear mmu_hash_lock */
+ mtmsr r10
+ SYNC_601
+ isync
+#else /* CONFIG_SMP */
+ sync
+ tlbia
+ sync
+#endif /* CONFIG_SMP */
+ blr
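The hash-table variants of _tlbie and _tlbia land here (moved over from misc_32.S, like the no-hash variants that move to the new tlb_nohash_low.S below) because both must take mmu_hash_lock to serialize against hash_page on SMP; with that serialization in place, and with page-table freeing now deferred through the RCU batches in the new common pgtable.c, the separate hash_page_sync() helper removed above is no longer needed.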
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index f0c3b88d50f..201c7a5486c 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -53,8 +53,7 @@ unsigned int mmu_huge_psizes[MMU_PAGE_COUNT] = { }; /* initialize all to 0 */
/* Subtract one from array size because we don't need a cache for 4K since it
 * is not a huge page size */
-#define huge_pgtable_cache(psize) (pgtable_cache[HUGEPTE_CACHE_NUM \
- + psize-1])
+#define HUGE_PGTABLE_INDEX(psize) (HUGEPTE_CACHE_NUM + psize - 1)
#define HUGEPTE_CACHE_NAME(psize) (huge_pgtable_cache_name[psize])
static const char *huge_pgtable_cache_name[MMU_PAGE_COUNT] = {
@@ -113,7 +112,7 @@ static inline pte_t *hugepte_offset(hugepd_t *hpdp, unsigned long addr,
static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
unsigned long address, unsigned int psize)
{
- pte_t *new = kmem_cache_zalloc(huge_pgtable_cache(psize),
+ pte_t *new = kmem_cache_zalloc(pgtable_cache[HUGE_PGTABLE_INDEX(psize)],
GFP_KERNEL|__GFP_REPEAT);
if (! new)
@@ -121,7 +120,7 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
spin_lock(&mm->page_table_lock);
if (!hugepd_none(*hpdp))
- kmem_cache_free(huge_pgtable_cache(psize), new);
+ kmem_cache_free(pgtable_cache[HUGE_PGTABLE_INDEX(psize)], new);
else
hpdp->pd = (unsigned long)new | HUGEPD_OK;
spin_unlock(&mm->page_table_lock);
@@ -763,13 +762,14 @@ static int __init hugetlbpage_init(void)
for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
if (mmu_huge_psizes[psize]) {
- huge_pgtable_cache(psize) = kmem_cache_create(
- HUGEPTE_CACHE_NAME(psize),
- HUGEPTE_TABLE_SIZE(psize),
- HUGEPTE_TABLE_SIZE(psize),
- 0,
- NULL);
- if (!huge_pgtable_cache(psize))
+ pgtable_cache[HUGE_PGTABLE_INDEX(psize)] =
+ kmem_cache_create(
+ HUGEPTE_CACHE_NAME(psize),
+ HUGEPTE_TABLE_SIZE(psize),
+ HUGEPTE_TABLE_SIZE(psize),
+ 0,
+ NULL);
+ if (!pgtable_cache[HUGE_PGTABLE_INDEX(psize)])
panic("hugetlbpage_init(): could not create %s"\
"\n", HUGEPTE_CACHE_NAME(psize));
}
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
index 388ceda632f..666a5e8a5be 100644
--- a/arch/powerpc/mm/init_32.c
+++ b/arch/powerpc/mm/init_32.c
@@ -35,7 +35,6 @@
#include <asm/pgalloc.h>
#include <asm/prom.h>
#include <asm/io.h>
-#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
@@ -49,7 +48,7 @@
#if defined(CONFIG_KERNEL_START_BOOL) || defined(CONFIG_LOWMEM_SIZE_BOOL)
/* The amount of lowmem must be within 0xF0000000 - KERNELBASE. */
-#if (CONFIG_LOWMEM_SIZE > (0xF0000000 - KERNELBASE))
+#if (CONFIG_LOWMEM_SIZE > (0xF0000000 - PAGE_OFFSET))
#error "You must adjust CONFIG_LOWMEM_SIZE or CONFIG_START_KERNEL"
#endif
#endif
@@ -180,9 +179,6 @@ void __init MMU_init(void)
if (ppc_md.progress)
ppc_md.progress("MMU:setio", 0x302);
- /* Initialize the context management stuff */
- mmu_context_init();
-
if (ppc_md.progress)
ppc_md.progress("MMU:exit", 0x211);
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index b9e1a1da6e5..53b06ebb3f2 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -102,8 +102,8 @@ pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);
if (!page_is_ram(pfn))
- vma_prot = __pgprot(pgprot_val(vma_prot)
- | _PAGE_GUARDED | _PAGE_NO_CACHE);
+ vma_prot = pgprot_noncached(vma_prot);
+
return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);
@@ -488,7 +488,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
* we invalidate the TLB here, thus avoiding dcbst
* misbehaviour.
*/
- _tlbie(address, 0 /* 8xx doesn't care about PID */);
+ _tlbil_va(address, 0 /* 8xx doesn't care about PID */);
#endif
/* The _PAGE_USER test should really be _PAGE_EXEC, but
* older glibc versions execute some code from no-exec
diff --git a/arch/powerpc/mm/mmu_context_32.c b/arch/powerpc/mm/mmu_context_32.c
deleted file mode 100644
index cc32ba41d90..00000000000
--- a/arch/powerpc/mm/mmu_context_32.c
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * This file contains the routines for handling the MMU on those
- * PowerPC implementations where the MMU substantially follows the
- * architecture specification. This includes the 6xx, 7xx, 7xxx,
- * 8260, and POWER3 implementations but excludes the 8xx and 4xx.
- * -- paulus
- *
- * Derived from arch/ppc/mm/init.c:
- * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
- *
- * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
- * and Cort Dougan (PReP) (cort@cs.nmt.edu)
- * Copyright (C) 1996 Paul Mackerras
- *
- * Derived from "arch/i386/mm/init.c"
- * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
- */
-
-#include <linux/mm.h>
-#include <linux/init.h>
-
-#include <asm/mmu_context.h>
-#include <asm/tlbflush.h>
-
-unsigned long next_mmu_context;
-unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1];
-#ifdef FEW_CONTEXTS
-atomic_t nr_free_contexts;
-struct mm_struct *context_mm[LAST_CONTEXT+1];
-void steal_context(void);
-#endif /* FEW_CONTEXTS */
-
-/*
- * Initialize the context management stuff.
- */
-void __init
-mmu_context_init(void)
-{
- /*
- * Some processors have too few contexts to reserve one for
- * init_mm, and require using context 0 for a normal task.
- * Other processors reserve the use of context zero for the kernel.
- * This code assumes FIRST_CONTEXT < 32.
- */
- context_map[0] = (1 << FIRST_CONTEXT) - 1;
- next_mmu_context = FIRST_CONTEXT;
-#ifdef FEW_CONTEXTS
- atomic_set(&nr_free_contexts, LAST_CONTEXT - FIRST_CONTEXT + 1);
-#endif /* FEW_CONTEXTS */
-}
-
-#ifdef FEW_CONTEXTS
-/*
- * Steal a context from a task that has one at the moment.
- * This is only used on 8xx and 4xx and we presently assume that
- * they don't do SMP. If they do then this will have to check
- * whether the MM we steal is in use.
- * We also assume that this is only used on systems that don't
- * use an MMU hash table - this is true for 8xx and 4xx.
- * This isn't an LRU system, it just frees up each context in
- * turn (sort-of pseudo-random replacement :). This would be the
- * place to implement an LRU scheme if anyone was motivated to do it.
- * -- paulus
- */
-void
-steal_context(void)
-{
- struct mm_struct *mm;
-
- /* free up context `next_mmu_context' */
- /* if we shouldn't free context 0, don't... */
- if (next_mmu_context < FIRST_CONTEXT)
- next_mmu_context = FIRST_CONTEXT;
- mm = context_mm[next_mmu_context];
- flush_tlb_mm(mm);
- destroy_context(mm);
-}
-#endif /* FEW_CONTEXTS */
diff --git a/arch/powerpc/mm/mmu_context_hash32.c b/arch/powerpc/mm/mmu_context_hash32.c
new file mode 100644
index 00000000000..0dfba2bf7f3
--- /dev/null
+++ b/arch/powerpc/mm/mmu_context_hash32.c
@@ -0,0 +1,103 @@
+/*
+ * This file contains the routines for handling the MMU on those
+ * PowerPC implementations where the MMU substantially follows the
+ * architecture specification. This includes the 6xx, 7xx, 7xxx,
+ * 8260, and POWER3 implementations but excludes the 8xx and 4xx.
+ * -- paulus
+ *
+ * Derived from arch/ppc/mm/init.c:
+ * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
+ * and Cort Dougan (PReP) (cort@cs.nmt.edu)
+ * Copyright (C) 1996 Paul Mackerras
+ *
+ * Derived from "arch/i386/mm/init.c"
+ * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/mm.h>
+#include <linux/init.h>
+
+#include <asm/mmu_context.h>
+#include <asm/tlbflush.h>
+
+/*
+ * On 32-bit PowerPC 6xx/7xx/7xxx CPUs, we use a set of 16 VSIDs
+ * (virtual segment identifiers) for each context. Although the
+ * hardware supports 24-bit VSIDs, and thus >1 million contexts,
+ * we only use 32,768 of them. That is ample, since there can be
+ * at most around 30,000 tasks in the system anyway, and it means
+ * that we can use a bitmap to indicate which contexts are in use.
+ * Using a bitmap means that we entirely avoid all of the problems
+ * that we used to have when the context number overflowed,
+ * particularly on SMP systems.
+ * -- paulus.
+ */
+#define NO_CONTEXT ((unsigned long) -1)
+#define LAST_CONTEXT 32767
+#define FIRST_CONTEXT 1
+
+/*
+ * This function defines the mapping from contexts to VSIDs (virtual
+ * segment IDs). We use a skew on both the context and the high 4 bits
+ * of the 32-bit virtual address (the "effective segment ID") in order
+ * to spread out the entries in the MMU hash table. Note, if this
+ * function is changed then arch/ppc/mm/hashtable.S will have to be
+ * changed to correspond.
+ *
+ *
+ * CTX_TO_VSID(ctx, va) (((ctx) * (897 * 16) + ((va) >> 28) * 0x111) \
+ * & 0xffffff)
+ */
+
+static unsigned long next_mmu_context;
+static unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1];
+
+
+/*
+ * Set up the context for a new address space.
+ */
+int init_new_context(struct task_struct *t, struct mm_struct *mm)
+{
+ unsigned long ctx = next_mmu_context;
+
+ while (test_and_set_bit(ctx, context_map)) {
+ ctx = find_next_zero_bit(context_map, LAST_CONTEXT+1, ctx);
+ if (ctx > LAST_CONTEXT)
+ ctx = 0;
+ }
+ next_mmu_context = (ctx + 1) & LAST_CONTEXT;
+ mm->context.id = ctx;
+
+ return 0;
+}
+
+/*
+ * We're finished using the context for an address space.
+ */
+void destroy_context(struct mm_struct *mm)
+{
+ preempt_disable();
+ if (mm->context.id != NO_CONTEXT) {
+ clear_bit(mm->context.id, context_map);
+ mm->context.id = NO_CONTEXT;
+ }
+ preempt_enable();
+}
+
+/*
+ * Initialize the context management stuff.
+ */
+void __init mmu_context_init(void)
+{
+ /* Reserve context 0 for kernel use */
+ context_map[0] = (1 << FIRST_CONTEXT) - 1;
+ next_mmu_context = FIRST_CONTEXT;
+}
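The CTX_TO_VSID skew quoted in the comment above can be sanity-checked with a short C sketch; ctx_to_vsid() here is an illustrative stand-in built from the quoted constants, not code the patch adds:

	/* Sketch: the context-to-VSID skew from the comment above.
	 * 897 * 16 skews successive contexts, 0x111 skews the 16
	 * effective segments of one context, and the mask keeps the
	 * result within the 24-bit VSID space. */
	static unsigned long ctx_to_vsid(unsigned long ctx, unsigned long va)
	{
		return (ctx * (897 * 16) + (va >> 28) * 0x111) & 0xffffff;
	}

	/* ctx_to_vsid(1, 0x00000000) == 0x003810 while
	 * ctx_to_vsid(1, 0xf0000000) == 0x00480f, so one context's
	 * segments scatter across the hash table instead of clustering. */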
diff --git a/arch/powerpc/mm/mmu_context_64.c b/arch/powerpc/mm/mmu_context_hash64.c
index 1db38ba1f54..dbeb86ac90c 100644
--- a/arch/powerpc/mm/mmu_context_64.c
+++ b/arch/powerpc/mm/mmu_context_hash64.c
@@ -24,6 +24,14 @@
static DEFINE_SPINLOCK(mmu_context_lock);
static DEFINE_IDR(mmu_context_idr);
+/*
+ * The proto-VSID space has 2^35 - 1 segments available for user mappings.
+ * Each segment contains 2^28 bytes. Each context maps 2^44 bytes,
+ * so we can support 2^19-1 contexts (19 == 35 + 28 - 44).
+ */
+#define NO_CONTEXT 0
+#define MAX_CONTEXT ((1UL << 19) - 1)
+
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
int index;
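The MAX_CONTEXT bound follows as a worked equation: 2^35 proto-VSID segments of 2^28 bytes each cover 2^(35+28) bytes, and dividing by the 2^44 bytes mapped per context leaves 2^(35+28-44) = 2^19 context numbers, of which 0 is reserved as NO_CONTEXT, giving the 2^19 - 1 usable contexts noted in the comment.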
diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
new file mode 100644
index 00000000000..52a0cfc38b6
--- /dev/null
+++ b/arch/powerpc/mm/mmu_context_nohash.c
@@ -0,0 +1,397 @@
+/*
+ * This file contains the routines for handling the MMU on those
+ * PowerPC implementations where the MMU is not using the hash
+ * table, such as 8xx, 4xx, BookE's etc...
+ *
+ * Copyright 2008 Ben Herrenschmidt <benh@kernel.crashing.org>
+ * IBM Corp.
+ *
+ * Derived from previous arch/powerpc/mm/mmu_context.c
+ * and arch/powerpc/include/asm/mmu_context.h
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * TODO:
+ *
+ * - The global context lock will not scale very well
+ * - The maps should be dynamically allocated to allow for processors
+ * that support more PID bits at runtime
+ * - Implement flush_tlb_mm() by making the context stale and picking
+ * a new one
+ * - More aggressively clear stale map bits and maybe find some way to
+ * also clear mm->cpu_vm_mask bits when processes are migrated
+ */
+
+#undef DEBUG
+#define DEBUG_STEAL_ONLY
+#undef DEBUG_MAP_CONSISTENCY
+/*#define DEBUG_CLAMP_LAST_CONTEXT 15 */
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/bootmem.h>
+#include <linux/notifier.h>
+#include <linux/cpu.h>
+
+#include <asm/mmu_context.h>
+#include <asm/tlbflush.h>
+
+static unsigned int first_context, last_context;
+static unsigned int next_context, nr_free_contexts;
+static unsigned long *context_map;
+static unsigned long *stale_map[NR_CPUS];
+static struct mm_struct **context_mm;
+static spinlock_t context_lock = SPIN_LOCK_UNLOCKED;
+
+#define CTX_MAP_SIZE \
+ (sizeof(unsigned long) * (last_context / BITS_PER_LONG + 1))
+
+
+/* Steal a context from a task that has one at the moment.
+ *
+ * This is used when we are running out of available PID numbers
+ * on the processors.
+ *
+ * This isn't an LRU system, it just frees up each context in
+ * turn (sort-of pseudo-random replacement :). This would be the
+ * place to implement an LRU scheme if anyone was motivated to do it.
+ * -- paulus
+ *
+ * For context stealing, we use a slightly different approach for
+ * SMP and UP. Basically, the UP one is simpler and doesn't use
+ * the stale map as we can just flush the local CPU
+ * -- benh
+ */
+#ifdef CONFIG_SMP
+static unsigned int steal_context_smp(unsigned int id)
+{
+ struct mm_struct *mm;
+ unsigned int cpu, max;
+
+ again:
+ max = last_context - first_context;
+
+ /* Attempt to free next_context first and then loop until we manage */
+ while (max--) {
+ /* Pick up the victim mm */
+ mm = context_mm[id];
+
+ /* We have a candidate victim; check if it's active, since on SMP
+ * we cannot steal active contexts
+ */
+ if (mm->context.active) {
+ id++;
+ if (id > last_context)
+ id = first_context;
+ continue;
+ }
+ pr_debug("[%d] steal context %d from mm @%p\n",
+ smp_processor_id(), id, mm);
+
+ /* Mark this mm as having no context anymore */
+ mm->context.id = MMU_NO_CONTEXT;
+
+ /* Mark it stale on all CPUs that used this mm */
+ for_each_cpu_mask_nr(cpu, mm->cpu_vm_mask)
+ __set_bit(id, stale_map[cpu]);
+ return id;
+ }
+
+ /* This will happen if you have more CPUs than available contexts;
+ * all we can do here is wait a bit and try again
+ */
+ spin_unlock(&context_lock);
+ cpu_relax();
+ spin_lock(&context_lock);
+ goto again;
+}
+#endif /* CONFIG_SMP */
+
+/* Note that this will also be called on SMP if all other CPUs are
+ * offlined, which means that it may be called for cpu != 0. For
+ * this to work, we somewhat assume that CPUs that are onlined
+ * come up with a fully clean TLB (or are cleaned when offlined)
+ */
+static unsigned int steal_context_up(unsigned int id)
+{
+ struct mm_struct *mm;
+ int cpu = smp_processor_id();
+
+ /* Pick up the victim mm */
+ mm = context_mm[id];
+
+ pr_debug("[%d] steal context %d from mm @%p\n", cpu, id, mm);
+
+ /* Mark this mm as having no context anymore */
+ mm->context.id = MMU_NO_CONTEXT;
+
+ /* Flush the TLB for that context */
+ local_flush_tlb_mm(mm);
+
+ /* XXX This clear should ultimately be part of local_flush_tlb_mm */
+ __clear_bit(id, stale_map[cpu]);
+
+ return id;
+}
+
+#ifdef DEBUG_MAP_CONSISTENCY
+static void context_check_map(void)
+{
+ unsigned int id, nrf, nact;
+
+ nrf = nact = 0;
+ for (id = first_context; id <= last_context; id++) {
+ int used = test_bit(id, context_map);
+ if (!used)
+ nrf++;
+ if (used != (context_mm[id] != NULL))
+ pr_err("MMU: Context %d is %s and MM is %p !\n",
+ id, used ? "used" : "free", context_mm[id]);
+ if (context_mm[id] != NULL)
+ nact += context_mm[id]->context.active;
+ }
+ if (nrf != nr_free_contexts) {
+ pr_err("MMU: Free context count out of sync ! (%d vs %d)\n",
+ nr_free_contexts, nrf);
+ nr_free_contexts = nrf;
+ }
+ if (nact > num_online_cpus())
+ pr_err("MMU: More active contexts than CPUs ! (%d vs %d)\n",
+ nact, num_online_cpus());
+ if (first_context > 0 && !test_bit(0, context_map))
+ pr_err("MMU: Context 0 has been freed !!!\n");
+}
+#else
+static void context_check_map(void) { }
+#endif
+
+void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
+{
+ unsigned int id, cpu = smp_processor_id();
+ unsigned long *map;
+
+ /* No lockless fast path .. yet */
+ spin_lock(&context_lock);
+
+#ifndef DEBUG_STEAL_ONLY
+ pr_debug("[%d] activating context for mm @%p, active=%d, id=%d\n",
+ cpu, next, next->context.active, next->context.id);
+#endif
+
+#ifdef CONFIG_SMP
+ /* Mark us active and mark the previous context no longer active */
+ next->context.active++;
+ if (prev) {
+#ifndef DEBUG_STEAL_ONLY
+ pr_debug(" old context %p active was: %d\n",
+ prev, prev->context.active);
+#endif
+ WARN_ON(prev->context.active < 1);
+ prev->context.active--;
+ }
+#endif /* CONFIG_SMP */
+
+ /* If we already have a valid assigned context, skip all that */
+ id = next->context.id;
+ if (likely(id != MMU_NO_CONTEXT))
+ goto ctxt_ok;
+
+ /* We really don't have a context, let's try to acquire one */
+ id = next_context;
+ if (id > last_context)
+ id = first_context;
+ map = context_map;
+
+ /* No more free contexts, let's try to steal one */
+ if (nr_free_contexts == 0) {
+#ifdef CONFIG_SMP
+ if (num_online_cpus() > 1) {
+ id = steal_context_smp(id);
+ goto stolen;
+ }
+#endif /* CONFIG_SMP */
+ id = steal_context_up(id);
+ goto stolen;
+ }
+ nr_free_contexts--;
+
+ /* We know there's at least one free context, try to find it */
+ while (__test_and_set_bit(id, map)) {
+ id = find_next_zero_bit(map, last_context+1, id);
+ if (id > last_context)
+ id = first_context;
+ }
+ stolen:
+ next_context = id + 1;
+ context_mm[id] = next;
+ next->context.id = id;
+
+#ifndef DEBUG_STEAL_ONLY
+ pr_debug("[%d] picked up new id %d, nrf is now %d\n",
+ cpu, id, nr_free_contexts);
+#endif
+
+ context_check_map();
+ ctxt_ok:
+
+ /* If that context got marked stale on this CPU, then flush the
+ * local TLB for it and unmark it before we use it
+ */
+ if (test_bit(id, stale_map[cpu])) {
+ pr_debug("[%d] flushing stale context %d for mm @%p !\n",
+ cpu, id, next);
+ local_flush_tlb_mm(next);
+
+ /* XXX This clear should ultimately be part of local_flush_tlb_mm */
+ __clear_bit(id, stale_map[cpu]);
+ }
+
+ /* Flick the MMU and release lock */
+ set_context(id, next->pgd);
+ spin_unlock(&context_lock);
+}
+
+/*
+ * Set up the context for a new address space.
+ */
+int init_new_context(struct task_struct *t, struct mm_struct *mm)
+{
+ mm->context.id = MMU_NO_CONTEXT;
+ mm->context.active = 0;
+
+ return 0;
+}
+
+/*
+ * We're finished using the context for an address space.
+ */
+void destroy_context(struct mm_struct *mm)
+{
+ unsigned int id;
+
+ if (mm->context.id == MMU_NO_CONTEXT)
+ return;
+
+ WARN_ON(mm->context.active != 0);
+
+ spin_lock(&context_lock);
+ id = mm->context.id;
+ if (id != MMU_NO_CONTEXT) {
+ __clear_bit(id, context_map);
+ mm->context.id = MMU_NO_CONTEXT;
+#ifdef DEBUG_MAP_CONSISTENCY
+ mm->context.active = 0;
+ context_mm[id] = NULL;
+#endif
+ nr_free_contexts++;
+ }
+ spin_unlock(&context_lock);
+}
+
+#ifdef CONFIG_SMP
+
+static int __cpuinit mmu_context_cpu_notify(struct notifier_block *self,
+ unsigned long action, void *hcpu)
+{
+ unsigned int cpu = (unsigned int)(long)hcpu;
+
+ /* We don't touch the CPU 0 map; it's allocated at boot and kept
+ * around forever
+ */
+ if (cpu == 0)
+ return NOTIFY_OK;
+
+ switch (action) {
+ case CPU_ONLINE:
+ case CPU_ONLINE_FROZEN:
+ pr_debug("MMU: Allocating stale context map for CPU %d\n", cpu);
+ stale_map[cpu] = kzalloc(CTX_MAP_SIZE, GFP_KERNEL);
+ break;
+#ifdef CONFIG_HOTPLUG_CPU
+ case CPU_DEAD:
+ case CPU_DEAD_FROZEN:
+ pr_debug("MMU: Freeing stale context map for CPU %d\n", cpu);
+ kfree(stale_map[cpu]);
+ stale_map[cpu] = NULL;
+ break;
+#endif
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block __cpuinitdata mmu_context_cpu_nb = {
+ .notifier_call = mmu_context_cpu_notify,
+};
+
+#endif /* CONFIG_SMP */
+
+/*
+ * Initialize the context management stuff.
+ */
+void __init mmu_context_init(void)
+{
+ /* Mark init_mm as being active on all possible CPUs since
+ * we'll get called with prev == init_mm the first time
+ * we schedule on a given CPU
+ */
+ init_mm.context.active = NR_CPUS;
+
+ /*
+ * The MPC8xx has only 16 contexts. We rotate through them on each
+ * task switch. A better way would be to keep track of tasks that
+ * own contexts, and implement an LRU usage. That way very active
+ * tasks don't always have to pay the TLB reload overhead. The
+ * kernel pages are mapped shared, so the kernel can run on behalf
+ * of any task that makes a kernel entry. Shared does not mean they
+ * are not protected, just that the ASID comparison is not performed.
+ * -- Dan
+ *
+ * The IBM4xx has 256 contexts, so we can just rotate through these
+ * as a way of "switching" contexts. If the TID of the TLB is zero,
+ * the PID/TID comparison is disabled, so we can use a TID of zero
+ * to represent all kernel pages as shared among all contexts.
+ * -- Dan
+ */
+ if (mmu_has_feature(MMU_FTR_TYPE_8xx)) {
+ first_context = 0;
+ last_context = 15;
+ } else {
+ first_context = 1;
+ last_context = 255;
+ }
+
+#ifdef DEBUG_CLAMP_LAST_CONTEXT
+ last_context = DEBUG_CLAMP_LAST_CONTEXT;
+#endif
+ /*
+ * Allocate the maps used by context management
+ */
+ context_map = alloc_bootmem(CTX_MAP_SIZE);
+ context_mm = alloc_bootmem(sizeof(void *) * (last_context + 1));
+ stale_map[0] = alloc_bootmem(CTX_MAP_SIZE);
+
+#ifdef CONFIG_SMP
+ register_cpu_notifier(&mmu_context_cpu_nb);
+#endif
+
+ printk(KERN_INFO
+ "MMU: Allocated %d bytes of context maps for %d contexts\n",
+ 2 * CTX_MAP_SIZE + (sizeof(void *) * (last_context + 1)),
+ last_context - first_context + 1);
+
+ /*
+ * Some processors have too few contexts to reserve one for
+ * init_mm, and require using context 0 for a normal task.
+ * Other processors reserve the use of context zero for the kernel.
+ * This code assumes first_context < 32.
+ */
+ context_map[0] = (1 << first_context) - 1;
+ next_context = first_context;
+ nr_free_contexts = last_context - first_context + 1;
+}
+
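As a worked example of the boot-time footprint: on a 44x-class part (first_context = 1, last_context = 255) with 32-bit longs, CTX_MAP_SIZE is 4 * (255/32 + 1) = 32 bytes and the context_mm pointer array is 4 * 256 = 1024 bytes, so the printk above reports 2 * 32 + 1024 = 1088 bytes of context maps for 255 contexts (one context_map, the CPU 0 stale_map, and the pointer array).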
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index fab3cfad409..4314b39b6fa 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -22,10 +22,58 @@
#include <asm/tlbflush.h>
#include <asm/mmu.h>
+#ifdef CONFIG_PPC_MMU_NOHASH
+
+/*
+ * On 40x and 8xx, we directly inline tlbia and tlbivax
+ */
+#if defined(CONFIG_40x) || defined(CONFIG_8xx)
+static inline void _tlbil_all(void)
+{
+ asm volatile ("sync; tlbia; isync" : : : "memory")
+}
+static inline void _tlbil_pid(unsigned int pid)
+{
+ asm volatile ("sync; tlbia; isync" : : : "memory")
+}
+#else /* CONFIG_40x || CONFIG_8xx */
+extern void _tlbil_all(void);
+extern void _tlbil_pid(unsigned int pid);
+#endif /* !(CONFIG_40x || CONFIG_8xx) */
+
+/*
+ * On 8xx, we directly inline tlbie, on others, it's extern
+ */
+#ifdef CONFIG_8xx
+static inline void _tlbil_va(unsigned long address, unsigned int pid)
+{
+ asm volatile ("tlbie %0; sync" : : "r" (address) : "memory")
+}
+#else /* CONFIG_8xx */
+extern void _tlbil_va(unsigned long address, unsigned int pid);
+#endif /* CONFIG_8xx */
+
+/*
+ * As of today, we don't support tlbivax broadcast on any
+ * implementation. When that becomes the case, this will be
+ * an extern.
+ */
+static inline void _tlbivax_bcast(unsigned long address, unsigned int pid)
+{
+ BUG();
+}
+
+#else /* CONFIG_PPC_MMU_NOHASH */
+
extern void hash_preload(struct mm_struct *mm, unsigned long ea,
unsigned long access, unsigned long trap);
+extern void _tlbie(unsigned long address);
+extern void _tlbia(void);
+
+#endif /* CONFIG_PPC_MMU_NOHASH */
+
#ifdef CONFIG_PPC32
extern void mapin_ram(void);
extern int map_page(unsigned long va, phys_addr_t pa, int flags);
@@ -58,17 +106,14 @@ extern phys_addr_t lowmem_end_addr;
* architectures. -- Dan
*/
#if defined(CONFIG_8xx)
-#define flush_HPTE(X, va, pg) _tlbie(va, 0 /* 8xx doesn't care about PID */)
#define MMU_init_hw() do { } while(0)
#define mmu_mapin_ram() (0UL)
#elif defined(CONFIG_4xx)
-#define flush_HPTE(pid, va, pg) _tlbie(va, pid)
extern void MMU_init_hw(void);
extern unsigned long mmu_mapin_ram(void);
#elif defined(CONFIG_FSL_BOOKE)
-#define flush_HPTE(pid, va, pg) _tlbie(va, pid)
extern void MMU_init_hw(void);
extern unsigned long mmu_mapin_ram(void);
extern void adjust_total_lowmem(void);
@@ -77,18 +122,4 @@ extern void adjust_total_lowmem(void);
/* anything 32-bit except 4xx or 8xx */
extern void MMU_init_hw(void);
extern unsigned long mmu_mapin_ram(void);
-
-/* Be careful....this needs to be updated if we ever encounter 603 SMPs,
- * which includes all new 82xx processors. We need tlbie/tlbsync here
- * in that case (I think). -- Dan.
- */
-static inline void flush_HPTE(unsigned context, unsigned long va,
- unsigned long pdval)
-{
- if ((Hash != 0) &&
- cpu_has_feature(CPU_FTR_HPTE_TABLE))
- flush_hash_pages(0, va, pdval, 1);
- else
- _tlbie(va);
-}
#endif
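Taken together, the _tlbil_* entry points above are the local-only primitives the new tlb_nohash.c builds on: _tlbil_va drops a single (address, PID) translation, _tlbil_pid flushes everything tagged with a PID (PID 0 is used for kernel flushes, and the 40x/8xx inlines degrade to a full tlbia since those parts cannot filter by PID), and _tlbivax_bcast is a placeholder until broadcast invalidation is supported. The old _tlbie/_tlbia pair remains declared only for the hash-MMU side.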
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
new file mode 100644
index 00000000000..6d94116fdea
--- /dev/null
+++ b/arch/powerpc/mm/pgtable.c
@@ -0,0 +1,117 @@
+/*
+ * This file contains common routines for freeing page tables
+ *
+ * Derived from arch/powerpc/mm/tlb_64.c:
+ * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
+ * and Cort Dougan (PReP) (cort@cs.nmt.edu)
+ * Copyright (C) 1996 Paul Mackerras
+ *
+ * Derived from "arch/i386/mm/init.c"
+ * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
+ *
+ * Dave Engebretsen <engebret@us.ibm.com>
+ * Rework for PPC64 port.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/percpu.h>
+#include <linux/hardirq.h>
+#include <asm/pgalloc.h>
+#include <asm/tlbflush.h>
+#include <asm/tlb.h>
+
+static DEFINE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur);
+static unsigned long pte_freelist_forced_free;
+
+struct pte_freelist_batch
+{
+ struct rcu_head rcu;
+ unsigned int index;
+ pgtable_free_t tables[0];
+};
+
+#define PTE_FREELIST_SIZE \
+ ((PAGE_SIZE - sizeof(struct pte_freelist_batch)) \
+ / sizeof(pgtable_free_t))
+
+static void pte_free_smp_sync(void *arg)
+{
+ /* Do nothing, just ensure we sync with all CPUs */
+}
+
+/* This is only called when we are critically out of memory
+ * (and fail to get a page in pte_free_tlb).
+ */
+static void pgtable_free_now(pgtable_free_t pgf)
+{
+ pte_freelist_forced_free++;
+
+ smp_call_function(pte_free_smp_sync, NULL, 1);
+
+ pgtable_free(pgf);
+}
+
+static void pte_free_rcu_callback(struct rcu_head *head)
+{
+ struct pte_freelist_batch *batch =
+ container_of(head, struct pte_freelist_batch, rcu);
+ unsigned int i;
+
+ for (i = 0; i < batch->index; i++)
+ pgtable_free(batch->tables[i]);
+
+ free_page((unsigned long)batch);
+}
+
+static void pte_free_submit(struct pte_freelist_batch *batch)
+{
+ INIT_RCU_HEAD(&batch->rcu);
+ call_rcu(&batch->rcu, pte_free_rcu_callback);
+}
+
+void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf)
+{
+ /* This is safe since tlb_gather_mmu has disabled preemption */
+ cpumask_t local_cpumask = cpumask_of_cpu(smp_processor_id());
+ struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);
+
+ if (atomic_read(&tlb->mm->mm_users) < 2 ||
+ cpus_equal(tlb->mm->cpu_vm_mask, local_cpumask)) {
+ pgtable_free(pgf);
+ return;
+ }
+
+ if (*batchp == NULL) {
+ *batchp = (struct pte_freelist_batch *)__get_free_page(GFP_ATOMIC);
+ if (*batchp == NULL) {
+ pgtable_free_now(pgf);
+ return;
+ }
+ (*batchp)->index = 0;
+ }
+ (*batchp)->tables[(*batchp)->index++] = pgf;
+ if ((*batchp)->index == PTE_FREELIST_SIZE) {
+ pte_free_submit(*batchp);
+ *batchp = NULL;
+ }
+}
+
+void pte_free_finish(void)
+{
+ /* This is safe since tlb_gather_mmu has disabled preemption */
+ struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);
+
+ if (*batchp == NULL)
+ return;
+ pte_free_submit(*batchp);
+ *batchp = NULL;
+}
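For the batch sizing above, as a worked example: with 4K pages on a 64-bit build, struct pte_freelist_batch occupies 24 bytes (a 16-byte rcu_head, a 4-byte index, and padding) and pgtable_free_t is one machine word, so PTE_FREELIST_SIZE comes to (4096 - 24) / 8 = 509 page-table references queued per RCU batch; the exact figure varies with page size and word width.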
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index c31d6d26f0b..38ff35f2142 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -48,10 +48,6 @@ EXPORT_SYMBOL(ioremap_bot); /* aka VMALLOC_END */
extern char etext[], _stext[];
-#ifdef CONFIG_SMP
-extern void hash_page_sync(void);
-#endif
-
#ifdef HAVE_BATS
extern phys_addr_t v_mapped_by_bats(unsigned long va);
extern unsigned long p_mapped_by_bats(phys_addr_t pa);
@@ -72,24 +68,29 @@ extern unsigned long p_mapped_by_tlbcam(unsigned long pa);
#define p_mapped_by_tlbcam(x) (0UL)
#endif /* HAVE_TLBCAM */
-#ifdef CONFIG_PTE_64BIT
-/* Some processors use an 8kB pgdir because they have 8-byte Linux PTEs. */
-#define PGDIR_ORDER 1
-#else
-#define PGDIR_ORDER 0
-#endif
+#define PGDIR_ORDER (32 + PGD_T_LOG2 - PGDIR_SHIFT)
pgd_t *pgd_alloc(struct mm_struct *mm)
{
pgd_t *ret;
- ret = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_ZERO, PGDIR_ORDER);
+ /* a pgdir takes one or two pages with 4K pages, a fraction of a page otherwise */
+#ifndef CONFIG_PPC_4K_PAGES
+ ret = (pgd_t *)kzalloc(1 << PGDIR_ORDER, GFP_KERNEL);
+#else
+ ret = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
+ PGDIR_ORDER - PAGE_SHIFT);
+#endif
return ret;
}
void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
- free_pages((unsigned long)pgd, PGDIR_ORDER);
+#ifndef CONFIG_PPC_4K_PAGES
+ kfree((void *)pgd);
+#else
+ free_pages((unsigned long)pgd, PGDIR_ORDER - PAGE_SHIFT);
+#endif
}
__init_refok pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
@@ -125,23 +126,6 @@ pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
return ptepage;
}
-void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
-{
-#ifdef CONFIG_SMP
- hash_page_sync();
-#endif
- free_page((unsigned long)pte);
-}
-
-void pte_free(struct mm_struct *mm, pgtable_t ptepage)
-{
-#ifdef CONFIG_SMP
- hash_page_sync();
-#endif
- pgtable_page_dtor(ptepage);
- __free_page(ptepage);
-}
-
void __iomem *
ioremap(phys_addr_t addr, unsigned long size)
{
@@ -194,6 +178,7 @@ __ioremap(phys_addr_t addr, unsigned long size, unsigned long flags)
if (p < 16*1024*1024)
p += _ISA_MEM_BASE;
+#ifndef CONFIG_CRASH_DUMP
/*
* Don't allow anybody to remap normal RAM that we're using.
* mem_init() sets high_memory so only do the check after that.
@@ -203,6 +188,7 @@ __ioremap(phys_addr_t addr, unsigned long size, unsigned long flags)
(unsigned long long)p, __builtin_return_address(0));
return NULL;
}
+#endif
if (size == 0)
return NULL;
@@ -288,7 +274,7 @@ int map_page(unsigned long va, phys_addr_t pa, int flags)
}
/*
- * Map in a big chunk of physical memory starting at KERNELBASE.
+ * Map in a big chunk of physical memory starting at PAGE_OFFSET.
*/
void __init mapin_ram(void)
{
@@ -297,7 +283,7 @@ void __init mapin_ram(void)
int ktext;
s = mmu_mapin_ram();
- v = KERNELBASE + s;
+ v = PAGE_OFFSET + s;
p = memstart_addr + s;
for (; s < total_lowmem; s += PAGE_SIZE) {
ktext = ((char *) v >= _stext && (char *) v < etext);
@@ -363,7 +349,11 @@ static int __change_page_attr(struct page *page, pgprot_t prot)
return -EINVAL;
set_pte_at(&init_mm, address, kpte, mk_pte(page, prot));
wmb();
- flush_HPTE(0, address, pmd_val(*kpmd));
+#ifdef CONFIG_PPC_STD_MMU
+ flush_hash_pages(0, address, pmd_val(*kpmd), 1);
+#else
+ flush_tlb_page(NULL, address);
+#endif
pte_unmap(kpte);
return 0;
@@ -400,7 +390,7 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
#endif /* CONFIG_DEBUG_PAGEALLOC */
static int fixmaps;
-unsigned long FIXADDR_TOP = 0xfffff000;
+unsigned long FIXADDR_TOP = (-PAGE_SIZE);
EXPORT_SYMBOL(FIXADDR_TOP);
void __set_fixmap (enum fixed_addresses idx, phys_addr_t phys, pgprot_t flags)
diff --git a/arch/powerpc/mm/ppc_mmu_32.c b/arch/powerpc/mm/ppc_mmu_32.c
index 6aa12081377..45d925360b8 100644
--- a/arch/powerpc/mm/ppc_mmu_32.c
+++ b/arch/powerpc/mm/ppc_mmu_32.c
@@ -95,16 +95,16 @@ unsigned long __init mmu_mapin_ram(void)
break;
}
- setbat(2, KERNELBASE, 0, bl, _PAGE_RAM);
- done = (unsigned long)bat_addrs[2].limit - KERNELBASE + 1;
+ setbat(2, PAGE_OFFSET, 0, bl, _PAGE_RAM);
+ done = (unsigned long)bat_addrs[2].limit - PAGE_OFFSET + 1;
if ((done < tot) && !bat_addrs[3].limit) {
/* use BAT3 to cover a bit more */
tot -= done;
for (bl = 128<<10; bl < max_size; bl <<= 1)
if (bl * 2 > tot)
break;
- setbat(3, KERNELBASE+done, done, bl, _PAGE_RAM);
- done = (unsigned long)bat_addrs[3].limit - KERNELBASE + 1;
+ setbat(3, PAGE_OFFSET+done, done, bl, _PAGE_RAM);
+ done = (unsigned long)bat_addrs[3].limit - PAGE_OFFSET + 1;
}
return done;
@@ -192,7 +192,7 @@ void __init MMU_init_hw(void)
extern unsigned int hash_page[];
extern unsigned int flush_hash_patch_A[], flush_hash_patch_B[];
- if (!cpu_has_feature(CPU_FTR_HPTE_TABLE)) {
+ if (!mmu_has_feature(MMU_FTR_HPTE_TABLE)) {
/*
* Put a blr (procedure return) instruction at the
* start of hash_page, since we can still get DSI
diff --git a/arch/powerpc/mm/tlb_32.c b/arch/powerpc/mm/tlb_hash32.c
index f9a47fee392..65190587a36 100644
--- a/arch/powerpc/mm/tlb_32.c
+++ b/arch/powerpc/mm/tlb_hash32.c
@@ -137,6 +137,7 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
flush_range(&init_mm, start, end);
FINISH_FLUSH;
}
+EXPORT_SYMBOL(flush_tlb_kernel_range);
/*
* Flush all the (user) entries for the address space described by mm.
@@ -160,6 +161,7 @@ void flush_tlb_mm(struct mm_struct *mm)
flush_range(mp->vm_mm, mp->vm_start, mp->vm_end);
FINISH_FLUSH;
}
+EXPORT_SYMBOL(flush_tlb_mm);
void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
@@ -176,6 +178,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
flush_hash_pages(mm->context.id, vmaddr, pmd_val(*pmd), 1);
FINISH_FLUSH;
}
+EXPORT_SYMBOL(flush_tlb_page);
/*
* For each address in the range, find the pte for the address
@@ -188,3 +191,4 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
flush_range(vma->vm_mm, start, end);
FINISH_FLUSH;
}
+EXPORT_SYMBOL(flush_tlb_range);
diff --git a/arch/powerpc/mm/tlb_64.c b/arch/powerpc/mm/tlb_hash64.c
index be7dd422c0f..c931bc7d107 100644
--- a/arch/powerpc/mm/tlb_64.c
+++ b/arch/powerpc/mm/tlb_hash64.c
@@ -37,81 +37,6 @@ DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
* arch/powerpc/include/asm/tlb.h file -- tgall
*/
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
-static DEFINE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur);
-static unsigned long pte_freelist_forced_free;
-
-struct pte_freelist_batch
-{
- struct rcu_head rcu;
- unsigned int index;
- pgtable_free_t tables[0];
-};
-
-#define PTE_FREELIST_SIZE \
- ((PAGE_SIZE - sizeof(struct pte_freelist_batch)) \
- / sizeof(pgtable_free_t))
-
-static void pte_free_smp_sync(void *arg)
-{
- /* Do nothing, just ensure we sync with all CPUs */
-}
-
-/* This is only called when we are critically out of memory
- * (and fail to get a page in pte_free_tlb).
- */
-static void pgtable_free_now(pgtable_free_t pgf)
-{
- pte_freelist_forced_free++;
-
- smp_call_function(pte_free_smp_sync, NULL, 1);
-
- pgtable_free(pgf);
-}
-
-static void pte_free_rcu_callback(struct rcu_head *head)
-{
- struct pte_freelist_batch *batch =
- container_of(head, struct pte_freelist_batch, rcu);
- unsigned int i;
-
- for (i = 0; i < batch->index; i++)
- pgtable_free(batch->tables[i]);
-
- free_page((unsigned long)batch);
-}
-
-static void pte_free_submit(struct pte_freelist_batch *batch)
-{
- INIT_RCU_HEAD(&batch->rcu);
- call_rcu(&batch->rcu, pte_free_rcu_callback);
-}
-
-void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf)
-{
- /* This is safe since tlb_gather_mmu has disabled preemption */
- cpumask_t local_cpumask = cpumask_of_cpu(smp_processor_id());
- struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);
-
- if (atomic_read(&tlb->mm->mm_users) < 2 ||
- cpus_equal(tlb->mm->cpu_vm_mask, local_cpumask)) {
- pgtable_free(pgf);
- return;
- }
-
- if (*batchp == NULL) {
- *batchp = (struct pte_freelist_batch *)__get_free_page(GFP_ATOMIC);
- if (*batchp == NULL) {
- pgtable_free_now(pgf);
- return;
- }
- (*batchp)->index = 0;
- }
- (*batchp)->tables[(*batchp)->index++] = pgf;
- if ((*batchp)->index == PTE_FREELIST_SIZE) {
- pte_free_submit(*batchp);
- *batchp = NULL;
- }
-}
/*
* A linux PTE was changed and the corresponding hash table entry
@@ -229,17 +154,6 @@ void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
batch->index = 0;
}
-void pte_free_finish(void)
-{
- /* This is safe since tlb_gather_mmu has disabled preemption */
- struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);
-
- if (*batchp == NULL)
- return;
- pte_free_submit(*batchp);
- *batchp = NULL;
-}
-
/**
* __flush_hash_table_range - Flush all HPTEs for a given address range
* from the hash table (and the TLB). But keeps
diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c
new file mode 100644
index 00000000000..803a64c02b0
--- /dev/null
+++ b/arch/powerpc/mm/tlb_nohash.c
@@ -0,0 +1,209 @@
+/*
+ * This file contains the routines for TLB flushing.
+ * On machines where the MMU does not use a hash table to store virtual to
+ * physical translations (ie, SW loaded TLBs or Book3E compliant processors;
+ * this does -not- include the 603, however, which shares the implementation
+ * with hash based processors)
+ *
+ * -- BenH
+ *
+ * Copyright 2008 Ben Herrenschmidt <benh@kernel.crashing.org>
+ * IBM Corp.
+ *
+ * Derived from arch/ppc/mm/init.c:
+ * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
+ * and Cort Dougan (PReP) (cort@cs.nmt.edu)
+ * Copyright (C) 1996 Paul Mackerras
+ *
+ * Derived from "arch/i386/mm/init.c"
+ * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/highmem.h>
+#include <linux/pagemap.h>
+#include <linux/preempt.h>
+#include <linux/spinlock.h>
+
+#include <asm/tlbflush.h>
+#include <asm/tlb.h>
+
+#include "mmu_decl.h"
+
+/*
+ * Base TLB flushing operations:
+ *
+ * - flush_tlb_mm(mm) flushes the specified mm context TLB's
+ * - flush_tlb_page(vma, vmaddr) flushes one page
+ * - flush_tlb_range(vma, start, end) flushes a range of pages
+ * - flush_tlb_kernel_range(start, end) flushes kernel pages
+ *
+ * - local_* variants of page and mm only apply to the current
+ * processor
+ */
+
+/*
+ * These are the base non-SMP variants of page and mm flushing
+ */
+void local_flush_tlb_mm(struct mm_struct *mm)
+{
+ unsigned int pid;
+
+ preempt_disable();
+ pid = mm->context.id;
+ if (pid != MMU_NO_CONTEXT)
+ _tlbil_pid(pid);
+ preempt_enable();
+}
+EXPORT_SYMBOL(local_flush_tlb_mm);
+
+void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
+{
+ unsigned int pid;
+
+ preempt_disable();
+ pid = vma ? vma->vm_mm->context.id : 0;
+ if (pid != MMU_NO_CONTEXT)
+ _tlbil_va(vmaddr, pid);
+ preempt_enable();
+}
+EXPORT_SYMBOL(local_flush_tlb_page);
+
+
+/*
+ * And here are the SMP non-local implementations
+ */
+#ifdef CONFIG_SMP
+
+static DEFINE_SPINLOCK(tlbivax_lock);
+
+struct tlb_flush_param {
+ unsigned long addr;
+ unsigned int pid;
+};
+
+static void do_flush_tlb_mm_ipi(void *param)
+{
+ struct tlb_flush_param *p = param;
+
+ _tlbil_pid(p ? p->pid : 0);
+}
+
+static void do_flush_tlb_page_ipi(void *param)
+{
+ struct tlb_flush_param *p = param;
+
+ _tlbil_va(p->addr, p->pid);
+}
+
+
+/* Note on invalidations and PID:
+ *
+ * We snapshot the PID with preempt disabled. At this point, it can still
+ * change either because:
+ * - our context is being stolen (PID -> NO_CONTEXT) on another CPU
+ * - we are invalidating some target that isn't currently running here
+ * and is concurrently acquiring a new PID on another CPU
+ * - some other CPU is re-acquiring a lost PID for this mm
+ * etc...
+ *
+ * However, this shouldn't be a problem as we only guarantee
+ * invalidation of TLB entries present prior to this call, so we
+ * don't care about the PID changing, and invalidating a stale PID
+ * is generally harmless.
+ */
+
+void flush_tlb_mm(struct mm_struct *mm)
+{
+ cpumask_t cpu_mask;
+ unsigned int pid;
+
+ preempt_disable();
+ pid = mm->context.id;
+ if (unlikely(pid == MMU_NO_CONTEXT))
+ goto no_context;
+ cpu_mask = mm->cpu_vm_mask;
+ cpu_clear(smp_processor_id(), cpu_mask);
+ if (!cpus_empty(cpu_mask)) {
+ struct tlb_flush_param p = { .pid = pid };
+ smp_call_function_mask(cpu_mask, do_flush_tlb_mm_ipi, &p, 1);
+ }
+ _tlbil_pid(pid);
+ no_context:
+ preempt_enable();
+}
+EXPORT_SYMBOL(flush_tlb_mm);
+
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
+{
+ cpumask_t cpu_mask;
+ unsigned int pid;
+
+ preempt_disable();
+ pid = vma ? vma->vm_mm->context.id : 0;
+ if (unlikely(pid == MMU_NO_CONTEXT))
+ goto bail;
+ cpu_mask = vma->vm_mm->cpu_vm_mask;
+ cpu_clear(smp_processor_id(), cpu_mask);
+ if (!cpus_empty(cpu_mask)) {
+ /* If broadcast tlbivax is supported, use it */
+ if (mmu_has_feature(MMU_FTR_USE_TLBIVAX_BCAST)) {
+ int lock = mmu_has_feature(MMU_FTR_LOCK_BCAST_INVAL);
+ if (lock)
+ spin_lock(&tlbivax_lock);
+ _tlbivax_bcast(vmaddr, pid);
+ if (lock)
+ spin_unlock(&tlbivax_lock);
+ goto bail;
+ } else {
+ struct tlb_flush_param p = { .pid = pid, .addr = vmaddr };
+ smp_call_function_mask(cpu_mask,
+ do_flush_tlb_page_ipi, &p, 1);
+ }
+ }
+ _tlbil_va(vmaddr, pid);
+ bail:
+ preempt_enable();
+}
+EXPORT_SYMBOL(flush_tlb_page);
+
+#endif /* CONFIG_SMP */
+
+/*
+ * Flush kernel TLB entries in the given range
+ */
+void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+#ifdef CONFIG_SMP
+ preempt_disable();
+ smp_call_function(do_flush_tlb_mm_ipi, NULL, 1);
+ _tlbil_pid(0);
+ preempt_enable();
+#else
+ _tlbil_pid(0);
+#endif
+}
+EXPORT_SYMBOL(flush_tlb_kernel_range);
+
+/*
+ * Currently, for range flushing, we just do a full mm flush. This should
+ * be optimized based on a threshold on the size of the range, since
+ * some implementations can stack multiple tlbivax before a tlbsync, but
+ * for now we keep it that way
+ */
+void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end)
+{
+ flush_tlb_mm(vma->vm_mm);
+}
+EXPORT_SYMBOL(flush_tlb_range);
diff --git a/arch/powerpc/mm/tlb_nohash_low.S b/arch/powerpc/mm/tlb_nohash_low.S
new file mode 100644
index 00000000000..f900a39e6ec
--- /dev/null
+++ b/arch/powerpc/mm/tlb_nohash_low.S
@@ -0,0 +1,166 @@
+/*
+ * This file contains low-level functions for performing various
+ * types of TLB invalidations on various processors with no hash
+ * table.
+ *
+ * This file implements the following functions for all no-hash
+ * processors. Some aren't implemented for some variants. Some
+ * are inline in tlbflush.h
+ *
+ * - tlbil_va
+ * - tlbil_pid
+ * - tlbil_all
+ * - tlbivax_bcast (not yet)
+ *
+ * Code mostly moved over from misc_32.S
+ *
+ * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ * Partially rewritten by Cort Dougan (cort@cs.nmt.edu)
+ * Paul Mackerras, Kumar Gala and Benjamin Herrenschmidt.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <asm/reg.h>
+#include <asm/page.h>
+#include <asm/cputable.h>
+#include <asm/mmu.h>
+#include <asm/ppc_asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/processor.h>
+
+#if defined(CONFIG_40x)
+
+/*
+ * 40x implementation needs only tlbil_va
+ */
+_GLOBAL(_tlbil_va)
+ /* We run the search with interrupts disabled because we have to change
+ * the PID and I don't want to preempt when that happens.
+ */
+ mfmsr r5
+ mfspr r6,SPRN_PID
+ wrteei 0
+ mtspr SPRN_PID,r4
+ tlbsx. r3, 0, r3
+ mtspr SPRN_PID,r6
+ wrtee r5
+ bne 1f
+ sync
+ /* There are only 64 TLB entries, so r3 < 64, which means bit 25 is
+ * clear. Since 25 is the V bit in the TLB_TAG, loading this value
+ * will invalidate the TLB entry. */
+ tlbwe r3, r3, TLB_TAG
+ isync
+1: blr
+
+#elif defined(CONFIG_8xx)
+
+/*
+ * Nothing to do for 8xx, everything is inline
+ */
+
+#elif defined(CONFIG_44x)
+
+/*
+ * 440 implementation uses tlbsx/we for tlbil_va and a full sweep
+ * of the TLB for everything else.
+ */
+_GLOBAL(_tlbil_va)
+ mfspr r5,SPRN_MMUCR
+ rlwimi r5,r4,0,24,31 /* Set TID */
+
+ /* We have to run the search with interrupts disabled, otherwise
+ * an interrupt which causes a TLB miss can clobber the MMUCR
+ * between the mtspr and the tlbsx.
+ *
+ * Critical and Machine Check interrupts take care of saving
+ * and restoring MMUCR, so only normal interrupts have to be
+ * taken care of.
+ */
+ mfmsr r4
+ wrteei 0
+ mtspr SPRN_MMUCR,r5
+ tlbsx. r3, 0, r3
+ wrtee r4
+ bne 1f
+ sync
+ /* There are only 64 TLB entries, so r3 < 64,
+ * which means bit 22 is clear. Since 22 is
+ * the V bit in the TLB_PAGEID, loading this
+ * value will invalidate the TLB entry.
+ */
+ tlbwe r3, r3, PPC44x_TLB_PAGEID
+ isync
+1: blr
+
+_GLOBAL(_tlbil_all)
+_GLOBAL(_tlbil_pid)
+ li r3,0
+ sync
+
+ /* Load high watermark */
+ lis r4,tlb_44x_hwater@ha
+ lwz r5,tlb_44x_hwater@l(r4)
+
+1: tlbwe r3,r3,PPC44x_TLB_PAGEID
+ addi r3,r3,1
+ cmpw 0,r3,r5
+ ble 1b
+
+ isync
+ blr
+
+#elif defined(CONFIG_FSL_BOOKE)
+/*
+ * FSL BookE implementations. Currently _pid and _all are the
+ * same. This will change when tlbilx is actually supported and
+ * performs invalidate-by-PID. This change will be driven by
+ * mmu_features conditional
+ */
+
+/*
+ * Flush MMU TLB on the local processor
+ */
+_GLOBAL(_tlbil_pid)
+_GLOBAL(_tlbil_all)
+#define MMUCSR0_TLBFI (MMUCSR0_TLB0FI | MMUCSR0_TLB1FI | \
+ MMUCSR0_TLB2FI | MMUCSR0_TLB3FI)
+ li r3,(MMUCSR0_TLBFI)@l
+ mtspr SPRN_MMUCSR0, r3
+1:
+ mfspr r3,SPRN_MMUCSR0
+ andi. r3,r3,MMUCSR0_TLBFI@l
+ bne 1b
+ msync
+ isync
+ blr
+
+/*
+ * Flush MMU TLB for a particular address, but only on the local processor
+ * (no broadcast)
+ */
+_GLOBAL(_tlbil_va)
+ mfmsr r10
+ wrteei 0
+ slwi r4,r4,16
+ mtspr SPRN_MAS6,r4 /* assume AS=0 for now */
+ tlbsx 0,r3
+ mfspr r4,SPRN_MAS1 /* check valid */
+ andis. r3,r4,MAS1_VALID@h
+ beq 1f
+ rlwinm r4,r4,0,1,31
+ mtspr SPRN_MAS1,r4
+ tlbwe
+ msync
+ isync
+1: wrtee r10
+ blr
+#else
+#error Unsupported processor type !
+#endif