author		Paul Mundt <lethal@linux-sh.org>	2006-12-25 09:51:47 +0900
committer	Paul Mundt <lethal@linux-sh.org>	2007-02-13 10:54:45 +0900
commit		aec5e0e1c179fac4bbca4007a3f0d3107275a73c (patch)
tree		3b251e52a89445a5546f398fb16a002435b6c2b6 /arch/sh/mm
parent		506b85f4114b912d2e91fab8da9849289e43857f (diff)
sh: Use a per-cpu ASID cache.
Previously this was implemented using a global cache; cache it
per-CPU instead and bump the number of context IDs up to match
NR_CPUS.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
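
[Editor's note: the cpu_context()/cpu_asid() accessors used throughout the
diff below are defined in include/asm-sh/mmu_context.h, which falls outside
the 'arch/sh/mm' diffstat filter. A minimal sketch of the per-CPU scheme they
imply, assuming mm_context_t carries one context ID per CPU; names and layout
here are an approximation, not copied from the commit:]

	/*
	 * Sketch only: assumed shape of the per-CPU context accessors
	 * from include/asm-sh/mmu_context.h (not part of this diffstat).
	 */
	typedef struct {
		/* One context ID (version | ASID) per CPU; NO_CONTEXT when unset. */
		unsigned long id[NR_CPUS];
	} mm_context_t;

	#define cpu_context(cpu, mm)	((mm)->context.id[cpu])

	/* The low bits of the context ID are the hardware ASID. */
	#define cpu_asid(cpu, mm) \
		(cpu_context((cpu), (mm)) & MMU_CONTEXT_ASID_MASK)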
Diffstat (limited to 'arch/sh/mm')
-rw-r--r--	arch/sh/mm/init.c	 5
-rw-r--r--	arch/sh/mm/tlb-flush.c	26
2 files changed, 16 insertions(+), 15 deletions(-)
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index bf0c263cb6f..d172065182f 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -39,11 +39,6 @@ DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 
 pgd_t swapper_pg_dir[PTRS_PER_PGD];
 
-/*
- * Cache of MMU context last used.
- */
-unsigned long mmu_context_cache = NO_CONTEXT;
-
 #ifdef CONFIG_MMU
 /* It'd be good if these lines were in the standard header file. */
 #define START_PFN	(NODE_DATA(0)->bdata->node_boot_start >> PAGE_SHIFT)
diff --git a/arch/sh/mm/tlb-flush.c b/arch/sh/mm/tlb-flush.c
index ef3e4d47786..b829c17c1d1 100644
--- a/arch/sh/mm/tlb-flush.c
+++ b/arch/sh/mm/tlb-flush.c
@@ -16,12 +16,14 @@
 
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 {
-	if (vma->vm_mm && vma->vm_mm->context.id != NO_CONTEXT) {
+	unsigned int cpu = smp_processor_id();
+
+	if (vma->vm_mm && cpu_context(cpu, vma->vm_mm) != NO_CONTEXT) {
 		unsigned long flags;
 		unsigned long asid;
 		unsigned long saved_asid = MMU_NO_ASID;
 
-		asid = vma->vm_mm->context.id & MMU_CONTEXT_ASID_MASK;
+		asid = cpu_asid(cpu, vma->vm_mm);
 		page &= PAGE_MASK;
 
 		local_irq_save(flags);
@@ -40,22 +42,23 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 		     unsigned long end)
 {
 	struct mm_struct *mm = vma->vm_mm;
+	unsigned int cpu = smp_processor_id();
 
-	if (mm->context.id != NO_CONTEXT) {
+	if (cpu_context(cpu, mm) != NO_CONTEXT) {
 		unsigned long flags;
 		int size;
 
 		local_irq_save(flags);
 		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
 		if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB to flush */
-			mm->context.id = NO_CONTEXT;
+			cpu_context(cpu, mm) = NO_CONTEXT;
 			if (mm == current->mm)
-				activate_context(mm);
+				activate_context(mm, cpu);
 		} else {
 			unsigned long asid;
 			unsigned long saved_asid = MMU_NO_ASID;
 
-			asid = mm->context.id & MMU_CONTEXT_ASID_MASK;
+			asid = cpu_asid(cpu, mm);
 			start &= PAGE_MASK;
 			end += (PAGE_SIZE - 1);
 			end &= PAGE_MASK;
@@ -76,6 +79,7 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 
 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 {
+	unsigned int cpu = smp_processor_id();
 	unsigned long flags;
 	int size;
 
@@ -87,7 +91,7 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 		unsigned long asid;
 		unsigned long saved_asid = get_asid();
 
-		asid = init_mm.context.id & MMU_CONTEXT_ASID_MASK;
+		asid = cpu_asid(cpu, &init_mm);
 		start &= PAGE_MASK;
 		end += (PAGE_SIZE - 1);
 		end &= PAGE_MASK;
@@ -103,15 +107,17 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 
 void flush_tlb_mm(struct mm_struct *mm)
 {
+	unsigned int cpu = smp_processor_id();
+
 	/* Invalidate all TLB of this process. */
 	/* Instead of invalidating each TLB, we get new MMU context. */
-	if (mm->context.id != NO_CONTEXT) {
+	if (cpu_context(cpu, mm) != NO_CONTEXT) {
 		unsigned long flags;
 
 		local_irq_save(flags);
-		mm->context.id = NO_CONTEXT;
+		cpu_context(cpu, mm) = NO_CONTEXT;
 		if (mm == current->mm)
-			activate_context(mm);
+			activate_context(mm, cpu);
 		local_irq_restore(flags);
 	}
 }
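
[Editor's note: when the diff above resets cpu_context(cpu, mm) to NO_CONTEXT
and calls activate_context(mm, cpu), a fresh context is drawn from the per-CPU
ASID cache named in the commit message. A hedged sketch of that allocation
path, modelled on the SH get_mmu_context() pattern rather than taken from this
commit; asid_cache() and MMU_CONTEXT_* names are assumptions:]

	/*
	 * Sketch (assumption): allocate a context ID for @mm from this
	 * CPU's cache, flushing the TLB and starting a new version once
	 * the ASID space of the current version is exhausted.
	 */
	static inline void get_mmu_context(struct mm_struct *mm, unsigned int cpu)
	{
		unsigned long asid = asid_cache(cpu);

		/* mm already holds a context of the current version: done. */
		if (((cpu_context(cpu, mm) ^ asid) & MMU_CONTEXT_VERSION_MASK) == 0)
			return;

		if (!(++asid & MMU_CONTEXT_ASID_MASK)) {
			/* ASIDs of this version exhausted: flush, bump version. */
			flush_tlb_all();
			if (!asid)
				asid = MMU_CONTEXT_FIRST_VERSION;
		}

		cpu_context(cpu, mm) = asid_cache(cpu) = asid;
	}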