author     Benjamin Herrenschmidt <benh@kernel.crashing.org>    2008-12-18 19:13:38 +0000
committer  Paul Mackerras <paulus@samba.org>                    2008-12-21 14:21:16 +1100
commit     f048aace29e007f2b642097e2da8231e0e9cce2d
tree       5e99b1d1d37817703132e97388994386a7bee8da             /arch/powerpc/mm
parent     7c03d653cd257793dc40520c94e229b5fd0578e7
powerpc/mm: Add SMP support to no-hash TLB handling
This commit moves the whole no-hash TLB handling out of line into a
new tlb_nohash.c file, and implements some basic SMP support using
IPIs and/or broadcast tlbivax instructions.
Note that I'm using local invalidations for D->I cache coherency.
At worst, if another processor is trying to execute the same page and
has the old entry in its TLB, it will just take a fault and re-do
the TLB flush locally (it won't re-do the cache flush in any case).
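
To make that pattern concrete, here is a minimal sketch (not part of the
patch); make_page_executable() is a hypothetical caller, while
flush_dcache_icache_page() and local_flush_tlb_page() are the real helpers
involved:

#include <linux/mm.h>
#include <asm/tlbflush.h>

/* Hypothetical caller, for illustration only: publish a freshly
 * written page as executable on the current CPU.
 */
static void make_page_executable(struct vm_area_struct *vma,
				 unsigned long address, struct page *page)
{
	/* Expensive part: write back dirty data, invalidate icache. */
	flush_dcache_icache_page(page);
	/* Cheap part, done locally only: a remote CPU that still holds
	 * the stale TLB entry just takes a fault and repeats this TLB
	 * flush on its own; the cache flush above is never redone.
	 */
	local_flush_tlb_page(vma, address);
}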
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Acked-by: Kumar Gala <galak@kernel.crashing.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--	arch/powerpc/mm/Makefile	2
-rw-r--r--	arch/powerpc/mm/fault.c	2
-rw-r--r--	arch/powerpc/mm/mem.c	2
-rw-r--r--	arch/powerpc/mm/tlb_hash32.c	4
-rw-r--r--	arch/powerpc/mm/tlb_nohash.c	209
5 files changed, 216 insertions, 3 deletions
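
Before the diff itself, a hedged usage sketch of the resulting API (the
callers below are hypothetical; the flush functions are the ones added or
exported by this patch). The local_* variants only invalidate the current
CPU's TLB, while the plain variants reach every other CPU in the mm's
cpumask, via IPI or, for flush_tlb_page(), a broadcast tlbivax where the
MMU supports one:

#include <linux/mm.h>
#include <asm/tlbflush.h>

/* Hypothetical caller: the mapping may be cached by any CPU in
 * mm->cpu_vm_mask, so the global variant (IPI or broadcast tlbivax)
 * is required.
 */
static void teardown_shared_mapping(struct vm_area_struct *vma,
				    unsigned long va)
{
	flush_tlb_page(vma, va);
}

/* Hypothetical caller: on a path that only concerns the current CPU,
 * e.g. a purely local PTE fixup, the cheap local variant suffices.
 */
static void fixup_current_cpu(struct mm_struct *mm)
{
	local_flush_tlb_mm(mm);
}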
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
index 923bd3fa7d6..af987df8d5a 100644
--- a/arch/powerpc/mm/Makefile
+++ b/arch/powerpc/mm/Makefile
@@ -9,7 +9,7 @@ endif
 obj-y				:= fault.o mem.o pgtable.o \
 				   init_$(CONFIG_WORD_SIZE).o \
 				   pgtable_$(CONFIG_WORD_SIZE).o
-obj-$(CONFIG_PPC_MMU_NOHASH)	+= mmu_context_nohash.o
+obj-$(CONFIG_PPC_MMU_NOHASH)	+= mmu_context_nohash.o tlb_nohash.o
 hash-$(CONFIG_PPC_NATIVE)	:= hash_native_64.o
 obj-$(CONFIG_PPC64)		+= hash_utils_64.o \
 				   slb_low.o slb.o stab.o \
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 7df0409107a..87f1f955dea 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -284,7 +284,7 @@ good_area:
 		}
 		pte_update(ptep, 0, _PAGE_HWEXEC | _PAGE_ACCESSED);
-		_tlbie(address, mm->context.id);
+		local_flush_tlb_page(vma, address);
 		pte_unmap_unlock(ptep, ptl);
 		up_read(&mm->mmap_sem);
 		return 0;
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index b9e1a1da6e5..8fee696fb79 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -488,7 +488,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
 		 * we invalidate the TLB here, thus avoiding dcbst
 		 * misbehaviour.
 		 */
-		_tlbie(address, 0 /* 8xx doesn't care about PID */);
+		_tlbil_va(address, 0 /* 8xx doesn't care about PID */);
 #endif
 	/* The _PAGE_USER test should really be _PAGE_EXEC, but
 	 * older glibc versions execute some code from no-exec
diff --git a/arch/powerpc/mm/tlb_hash32.c b/arch/powerpc/mm/tlb_hash32.c
index f9a47fee392..65190587a36 100644
--- a/arch/powerpc/mm/tlb_hash32.c
+++ b/arch/powerpc/mm/tlb_hash32.c
@@ -137,6 +137,7 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 	flush_range(&init_mm, start, end);
 	FINISH_FLUSH;
 }
+EXPORT_SYMBOL(flush_tlb_kernel_range);
 
 /*
  * Flush all the (user) entries for the address space described by mm.
@@ -160,6 +161,7 @@ void flush_tlb_mm(struct mm_struct *mm)
 		flush_range(mp->vm_mm, mp->vm_start, mp->vm_end);
 	FINISH_FLUSH;
 }
+EXPORT_SYMBOL(flush_tlb_mm);
 
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
 {
@@ -176,6 +178,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
 	flush_hash_pages(mm->context.id, vmaddr, pmd_val(*pmd), 1);
 	FINISH_FLUSH;
 }
+EXPORT_SYMBOL(flush_tlb_page);
 
 /*
  * For each address in the range, find the pte for the address
@@ -188,3 +191,4 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 	flush_range(vma->vm_mm, start, end);
 	FINISH_FLUSH;
 }
+EXPORT_SYMBOL(flush_tlb_range);
diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c
new file mode 100644
index 00000000000..803a64c02b0
--- /dev/null
+++ b/arch/powerpc/mm/tlb_nohash.c
@@ -0,0 +1,209 @@
+/*
+ * This file contains the routines for TLB flushing.
+ * On machines where the MMU does not use a hash table to store virtual to
+ * physical translations (ie, SW loaded TLBs or Book3E compliant processors,
+ * this does -not- include 603 however which shares the implementation with
+ * hash based processors)
+ *
+ *  -- BenH
+ *
+ * Copyright 2008 Ben Herrenschmidt <benh@kernel.crashing.org>
+ *                IBM Corp.
+ *
+ * Derived from arch/ppc/mm/init.c:
+ *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
+ * and Cort Dougan (PReP) (cort@cs.nmt.edu)
+ * Copyright (C) 1996 Paul Mackerras
+ *
+ * Derived from "arch/i386/mm/init.c"
+ *   Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/highmem.h>
+#include <linux/pagemap.h>
+#include <linux/preempt.h>
+#include <linux/spinlock.h>
+
+#include <asm/tlbflush.h>
+#include <asm/tlb.h>
+
+#include "mmu_decl.h"
+
+/*
+ * Base TLB flushing operations:
+ *
+ *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
+ *  - flush_tlb_page(vma, vmaddr) flushes one page
+ *  - flush_tlb_range(vma, start, end) flushes a range of pages
+ *  - flush_tlb_kernel_range(start, end) flushes kernel pages
+ *
+ *  - local_* variants of page and mm only apply to the current
+ *    processor
+ */
+
+/*
+ * These are the base non-SMP variants of page and mm flushing
+ */
+void local_flush_tlb_mm(struct mm_struct *mm)
+{
+	unsigned int pid;
+
+	preempt_disable();
+	pid = mm->context.id;
+	if (pid != MMU_NO_CONTEXT)
+		_tlbil_pid(pid);
+	preempt_enable();
+}
+EXPORT_SYMBOL(local_flush_tlb_mm);
+
+void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
+{
+	unsigned int pid;
+
+	preempt_disable();
+	pid = vma ? vma->vm_mm->context.id : 0;
+	if (pid != MMU_NO_CONTEXT)
+		_tlbil_va(vmaddr, pid);
+	preempt_enable();
+}
+EXPORT_SYMBOL(local_flush_tlb_page);
+
+
+/*
+ * And here are the SMP non-local implementations
+ */
+#ifdef CONFIG_SMP
+
+static DEFINE_SPINLOCK(tlbivax_lock);
+
+struct tlb_flush_param {
+	unsigned long addr;
+	unsigned int pid;
+};
+
+static void do_flush_tlb_mm_ipi(void *param)
+{
+	struct tlb_flush_param *p = param;
+
+	_tlbil_pid(p ? p->pid : 0);
+}
+
+static void do_flush_tlb_page_ipi(void *param)
+{
+	struct tlb_flush_param *p = param;
+
+	_tlbil_va(p->addr, p->pid);
+}
+
+
+/* Note on invalidations and PID:
+ *
+ * We snapshot the PID with preempt disabled. At this point, it can still
+ * change either because:
+ * - our context is being stolen (PID -> NO_CONTEXT) on another CPU
+ * - we are invalidating some target that isn't currently running here
+ *   and is concurrently acquiring a new PID on another CPU
+ * - some other CPU is re-acquiring a lost PID for this mm
+ * etc...
+ *
+ * However, this shouldn't be a problem as we only guarantee
+ * invalidation of TLB entries present prior to this call, so we
+ * don't care about the PID changing, and invalidating a stale PID
+ * is generally harmless.
+ */
+
+void flush_tlb_mm(struct mm_struct *mm)
+{
+	cpumask_t cpu_mask;
+	unsigned int pid;
+
+	preempt_disable();
+	pid = mm->context.id;
+	if (unlikely(pid == MMU_NO_CONTEXT))
+		goto no_context;
+	cpu_mask = mm->cpu_vm_mask;
+	cpu_clear(smp_processor_id(), cpu_mask);
+	if (!cpus_empty(cpu_mask)) {
+		struct tlb_flush_param p = { .pid = pid };
+		smp_call_function_mask(cpu_mask, do_flush_tlb_mm_ipi, &p, 1);
+	}
+	_tlbil_pid(pid);
+ no_context:
+	preempt_enable();
+}
+EXPORT_SYMBOL(flush_tlb_mm);
+
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
+{
+	cpumask_t cpu_mask;
+	unsigned int pid;
+
+	preempt_disable();
+	pid = vma ? vma->vm_mm->context.id : 0;
+	if (unlikely(pid == MMU_NO_CONTEXT))
+		goto bail;
+	cpu_mask = vma->vm_mm->cpu_vm_mask;
+	cpu_clear(smp_processor_id(), cpu_mask);
+	if (!cpus_empty(cpu_mask)) {
+		/* If broadcast tlbivax is supported, use it */
+		if (mmu_has_feature(MMU_FTR_USE_TLBIVAX_BCAST)) {
+			int lock = mmu_has_feature(MMU_FTR_LOCK_BCAST_INVAL);
+			if (lock)
+				spin_lock(&tlbivax_lock);
+			_tlbivax_bcast(vmaddr, pid);
+			if (lock)
+				spin_unlock(&tlbivax_lock);
+			goto bail;
+		} else {
+			struct tlb_flush_param p = { .pid = pid, .addr = vmaddr };
+			smp_call_function_mask(cpu_mask,
+					       do_flush_tlb_page_ipi, &p, 1);
+		}
+	}
+	_tlbil_va(vmaddr, pid);
+ bail:
+	preempt_enable();
+}
+EXPORT_SYMBOL(flush_tlb_page);
+
+#endif /* CONFIG_SMP */
+
+/*
+ * Flush kernel TLB entries in the given range
+ */
+void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+#ifdef CONFIG_SMP
+	preempt_disable();
+	smp_call_function(do_flush_tlb_mm_ipi, NULL, 1);
+	_tlbil_pid(0);
+	preempt_enable();
+#else
+	_tlbil_pid(0);
+#endif
+}
+EXPORT_SYMBOL(flush_tlb_kernel_range);
+
+/*
+ * Currently, for range flushing, we just do a full mm flush. This should
+ * be optimized based on a threshold on the size of the range, since
+ * some implementations can stack multiple tlbivax before a tlbsync but
+ * for now, we keep it that way
+ */
+void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+		     unsigned long end)
+{
+	flush_tlb_mm(vma->vm_mm);
+}
+EXPORT_SYMBOL(flush_tlb_range);
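
Finally, a hedged sketch of when flush_tlb_kernel_range() above gets used;
the caller below is hypothetical:

#include <asm/tlbflush.h>

/* Hypothetical caller, for illustration only: after rewriting kernel
 * PTEs covering [start, end), every CPU must drop stale translations.
 */
static void remap_kernel_window(unsigned long start, unsigned long end)
{
	/* ... modify the kernel page tables for the range here ... */

	/* Kernel mappings live under PID 0 on these MMUs, so the
	 * implementation above simply flushes PID 0 everywhere: by IPI
	 * on the other CPUs under CONFIG_SMP, plus a local _tlbil_pid(0).
	 */
	flush_tlb_kernel_range(start, end);
}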