author    Tejun Heo <tj@kernel.org>    2009-01-21 17:26:06 +0900
committer Tejun Heo <tj@kernel.org>    2009-01-21 17:26:06 +0900
commit    6dd01bedee6c3191643db303a1dc530bad56ec55 (patch)
tree      55d86313c7973e7fe626ceffa7d38925f9997dfb
parent    bdbcdd48883940bbd8d17eb01172d58a261a413a (diff)
x86: prepare for tlb merge
Impact: clean up, ipi vector number reordering for x86_32

Make the following changes to prepare for tlb merge.

* reorder x86_32 ipi vectors

* adjust tlb_32.c and tlb_64.c such that their logics coincide exactly
  - on spurious invalidate ipi, tlb_32 acks the irq
  - tlb_64 now has proper memory barriers around clearing
    flush_cpumask (no change in generated code)

* unexport flush_tlb_page from tlb_32.c, there's no user

* use unsigned int for cpu id

* drop unnecessary includes from tlb_64.c

Signed-off-by: Tejun Heo <tj@kernel.org>
-rw-r--r--  arch/x86/include/asm/irq_vectors.h  33
-rw-r--r--  arch/x86/kernel/tlb_32.c            10
-rw-r--r--  arch/x86/kernel/tlb_64.c            18
3 files changed, 28 insertions(+), 33 deletions(-)
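The barrier pairing called out above matters because the sender of the flush
IPI spins until every target CPU has cleared its bit in flush_cpumask: the
bit-clear is the acknowledgement, so it must not become visible before the
flush work it acknowledges. The sketch below is a hypothetical user-space
analogue of that handshake (C11 atomics standing in for cpumask_clear_cpu()
and the smp_mb__before_clear_bit()/smp_mb__after_clear_bit() pair); it is not
kernel code, only the ordering pattern.

/* Hypothetical analogue of the flush_cpumask handshake: the "sender" sets
 * one bit per worker and spins until all bits drop; each "worker" does its
 * flush work, then clears its bit.  The release/acquire pairing plays the
 * role of the smp_mb__*_clear_bit() barriers around cpumask_clear_cpu(). */
#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define NWORKERS 4

static atomic_uint flush_mask;		/* one bit per worker         */
static int flushed[NWORKERS];		/* stand-in for the TLB flush */

static void *worker(void *arg)
{
	unsigned int id = (unsigned int)(uintptr_t)arg;

	/* Wait until the sender asks this worker to flush. */
	while (!(atomic_load_explicit(&flush_mask, memory_order_acquire) &
		 (1u << id)))
		;

	flushed[id] = 1;		/* the "TLB flush" itself */

	/* Clear our bit with release ordering so the sender cannot observe
	 * the acknowledgement before the flush work above is visible. */
	atomic_fetch_and_explicit(&flush_mask, ~(1u << id),
				  memory_order_release);
	return NULL;
}

int main(void)
{
	pthread_t tid[NWORKERS];
	unsigned int i;

	for (i = 0; i < NWORKERS; i++)
		pthread_create(&tid[i], NULL, worker, (void *)(uintptr_t)i);

	/* "Send the IPI": publish the mask of workers that must flush. */
	atomic_store_explicit(&flush_mask, (1u << NWORKERS) - 1,
			      memory_order_release);

	/* Spin until every worker has acknowledged by clearing its bit. */
	while (atomic_load_explicit(&flush_mask, memory_order_acquire))
		;

	for (i = 0; i < NWORKERS; i++)
		pthread_join(tid[i], NULL);
	for (i = 0; i < NWORKERS; i++)
		printf("worker %u flushed: %d\n", i, flushed[i]);
	return 0;
}

In the kernel the same effect comes from wrapping cpumask_clear_cpu() in
smp_mb__before_clear_bit()/smp_mb__after_clear_bit(); on x86 the locked bit
operation already provides the ordering, which is consistent with the commit
message's note that the generated code does not change.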
diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h
index a16a2ab2b42..4ee8f800504 100644
--- a/arch/x86/include/asm/irq_vectors.h
+++ b/arch/x86/include/asm/irq_vectors.h
@@ -49,31 +49,30 @@
* some of the following vectors are 'rare', they are merged
* into a single vector (CALL_FUNCTION_VECTOR) to save vector space.
* TLB, reschedule and local APIC vectors are performance-critical.
- *
- * Vectors 0xf0-0xfa are free (reserved for future Linux use).
*/
#ifdef CONFIG_X86_32
# define SPURIOUS_APIC_VECTOR 0xff
# define ERROR_APIC_VECTOR 0xfe
-# define INVALIDATE_TLB_VECTOR 0xfd
-# define RESCHEDULE_VECTOR 0xfc
-# define CALL_FUNCTION_VECTOR 0xfb
-# define CALL_FUNCTION_SINGLE_VECTOR 0xfa
-# define THERMAL_APIC_VECTOR 0xf0
+# define RESCHEDULE_VECTOR 0xfd
+# define CALL_FUNCTION_VECTOR 0xfc
+# define CALL_FUNCTION_SINGLE_VECTOR 0xfb
+# define THERMAL_APIC_VECTOR 0xfa
+/* 0xf1 - 0xf9 : free */
+# define INVALIDATE_TLB_VECTOR 0xf0
#else
-#define SPURIOUS_APIC_VECTOR 0xff
-#define ERROR_APIC_VECTOR 0xfe
-#define RESCHEDULE_VECTOR 0xfd
-#define CALL_FUNCTION_VECTOR 0xfc
-#define CALL_FUNCTION_SINGLE_VECTOR 0xfb
-#define THERMAL_APIC_VECTOR 0xfa
-#define THRESHOLD_APIC_VECTOR 0xf9
-#define UV_BAU_MESSAGE 0xf8
-#define INVALIDATE_TLB_VECTOR_END 0xf7
-#define INVALIDATE_TLB_VECTOR_START 0xf0 /* f0-f7 used for TLB flush */
+# define SPURIOUS_APIC_VECTOR 0xff
+# define ERROR_APIC_VECTOR 0xfe
+# define RESCHEDULE_VECTOR 0xfd
+# define CALL_FUNCTION_VECTOR 0xfc
+# define CALL_FUNCTION_SINGLE_VECTOR 0xfb
+# define THERMAL_APIC_VECTOR 0xfa
+# define THRESHOLD_APIC_VECTOR 0xf9
+# define UV_BAU_MESSAGE 0xf8
+# define INVALIDATE_TLB_VECTOR_END 0xf7
+# define INVALIDATE_TLB_VECTOR_START 0xf0 /* f0-f7 used for TLB flush */
#define NUM_INVALIDATE_TLB_VECTORS 8
diff --git a/arch/x86/kernel/tlb_32.c b/arch/x86/kernel/tlb_32.c
index abf0808d6fc..93fcb05c7d4 100644
--- a/arch/x86/kernel/tlb_32.c
+++ b/arch/x86/kernel/tlb_32.c
@@ -84,13 +84,15 @@ EXPORT_SYMBOL_GPL(leave_mm);
*
* 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
* 2) Leave the mm if we are in the lazy tlb mode.
+ *
+ * Interrupts are disabled.
*/
void smp_invalidate_interrupt(struct pt_regs *regs)
{
- unsigned long cpu;
+ unsigned int cpu;
- cpu = get_cpu();
+ cpu = smp_processor_id();
if (!cpumask_test_cpu(cpu, flush_cpumask))
goto out;
@@ -112,12 +114,11 @@ void smp_invalidate_interrupt(struct pt_regs *regs)
} else
leave_mm(cpu);
}
+out:
ack_APIC_irq();
smp_mb__before_clear_bit();
cpumask_clear_cpu(cpu, flush_cpumask);
smp_mb__after_clear_bit();
-out:
- put_cpu_no_resched();
inc_irq_stat(irq_tlb_count);
}
@@ -215,7 +216,6 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
flush_tlb_others(&mm->cpu_vm_mask, mm, va);
preempt_enable();
}
-EXPORT_SYMBOL(flush_tlb_page);
static void do_flush_tlb_all(void *info)
{
diff --git a/arch/x86/kernel/tlb_64.c b/arch/x86/kernel/tlb_64.c
index b8bed841ad6..19ac661422f 100644
--- a/arch/x86/kernel/tlb_64.c
+++ b/arch/x86/kernel/tlb_64.c
@@ -1,20 +1,14 @@
#include <linux/init.h>
#include <linux/mm.h>
-#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
-#include <linux/kernel_stat.h>
-#include <linux/mc146818rtc.h>
#include <linux/interrupt.h>
+#include <linux/module.h>
-#include <asm/mtrr.h>
-#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
-#include <asm/proto.h>
-#include <asm/apicdef.h>
-#include <asm/idle.h>
+#include <asm/apic.h>
#include <asm/uv/uv.h>
DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate)
@@ -121,8 +115,8 @@ EXPORT_SYMBOL_GPL(leave_mm);
asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)
{
- int cpu;
- int sender;
+ unsigned int cpu;
+ unsigned int sender;
union smp_flush_state *f;
cpu = smp_processor_id();
@@ -155,14 +149,16 @@ asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)
}
out:
ack_APIC_irq();
+ smp_mb__before_clear_bit();
cpumask_clear_cpu(cpu, to_cpumask(f->flush_cpumask));
+ smp_mb__after_clear_bit();
inc_irq_stat(irq_tlb_count);
}
static void flush_tlb_others_ipi(const struct cpumask *cpumask,
struct mm_struct *mm, unsigned long va)
{
- int sender;
+ unsigned int sender;
union smp_flush_state *f;
/* Caller has disabled preemption */