path: root/arch/ia64/kernel/smp.c
author	Jack Steiner <steiner@sgi.com>	2007-05-08 14:50:43 -0700
committer	Tony Luck <tony.luck@intel.com>	2007-05-08 14:50:43 -0700
commit	3be44b9cc33d26930cb3bb014f35f582c6522481 (patch)
tree	09225c5f0fb4c6caa81bbdff216ec83a093e4d12 /arch/ia64/kernel/smp.c
parent	8737d59579c5e61ea3d5da4bd63303159fd1cf7e (diff)
[IA64] Optional method to purge the TLB on SN systems
This patch adds an optional method for purging the TLB on SN IA64 systems. The change should not affect any non-SN system.

Signed-off-by: Jack Steiner <steiner@sgi.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
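The handshake the patch introduces is easy to model outside the kernel: the initiating CPU snapshots each target's flush counter, sends the new IA64_IPI_LOCAL_TLB_FLUSH IPI, and then spins with a short backoff until every counter has advanced, which proves each target ran its local flush. The user-space sketch below is only an illustration of that protocol, not part of the patch; it substitutes pthreads and C11 atomics for IPIs and ia64_fetchadd(), and its names (target_cpu, flush_requested, NTARGETS) are invented rather than taken from the kernel.

/*
 * Stand-alone model of the flush-count handshake (illustration only).
 * Build with: gcc -O2 -pthread model.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

#define NTARGETS 4                            /* stand-in for the CPUs in the cpumask */

static atomic_uint flush_count[NTARGETS];     /* analogue of local_tlb_flush_counts[] */
static atomic_int  flush_requested[NTARGETS]; /* stands in for the TLB-flush IPI */

/* Each thread plays the part of a target CPU's IPI handler. */
static void *target_cpu(void *arg)
{
	int cpu = (int)(long)arg;

	while (!atomic_load(&flush_requested[cpu]))
		;                               /* wait for the "IPI" */
	/* local_flush_tlb_all() would run here on real hardware ... */
	atomic_fetch_add(&flush_count[cpu], 1); /* ... then the counter is bumped */
	return NULL;
}

int main(void)
{
	pthread_t tid[NTARGETS];
	unsigned int snap[NTARGETS];
	int cpu;

	for (cpu = 0; cpu < NTARGETS; cpu++)
		pthread_create(&tid[cpu], NULL, target_cpu, (void *)(long)cpu);

	/* Initiator: snapshot, signal, then wait for every counter to move. */
	for (cpu = 0; cpu < NTARGETS; cpu++)
		snap[cpu] = atomic_load(&flush_count[cpu]);
	for (cpu = 0; cpu < NTARGETS; cpu++)
		atomic_store(&flush_requested[cpu], 1);
	for (cpu = 0; cpu < NTARGETS; cpu++)
		while (atomic_load(&flush_count[cpu]) == snap[cpu])
			usleep(5);              /* FLUSH_DELAY-style backoff */

	for (cpu = 0; cpu < NTARGETS; cpu++)
		pthread_join(tid[cpu], NULL);
	printf("all %d targets completed their local flush\n", NTARGETS);
	return 0;
}

As the comment in smp_local_flush_tlb() below explains, the real code bumps each counter with ia64_fetchadd() rather than "++" so the cacheline cannot be stolen between the load and the store while many CPUs are flushing at once.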
Diffstat (limited to 'arch/ia64/kernel/smp.c')
-rw-r--r--	arch/ia64/kernel/smp.c	68
1 file changed, 68 insertions(+), 0 deletions(-)
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index 55ddd809b02..221de380456 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
@@ -50,6 +50,18 @@
#include <asm/mca.h>

/*
+ * Note: alignment of 4 entries/cacheline was empirically determined
+ * to be a good tradeoff between hot cachelines & spreading the array
+ * across too many cachelines.
+ */
+static struct local_tlb_flush_counts {
+	unsigned int count;
+} __attribute__((__aligned__(32))) local_tlb_flush_counts[NR_CPUS];
+
+static DEFINE_PER_CPU(unsigned int, shadow_flush_counts[NR_CPUS]) ____cacheline_aligned;
+
+
+/*
 * Structure and data for smp_call_function().  This is designed to minimise static memory
 * requirements. It also looks cleaner.
 */
@@ -248,6 +260,62 @@ smp_send_reschedule (int cpu)
	platform_send_ipi(cpu, IA64_IPI_RESCHEDULE, IA64_IPI_DM_INT, 0);
}

+/*
+ * Called with preemption disabled.
+ */
+static void
+smp_send_local_flush_tlb (int cpu)
+{
+	platform_send_ipi(cpu, IA64_IPI_LOCAL_TLB_FLUSH, IA64_IPI_DM_INT, 0);
+}
+
+void
+smp_local_flush_tlb(void)
+{
+	/*
+	 * Use atomic ops. Otherwise, the load/increment/store sequence from
+	 * a "++" operation can have the line stolen between the load & store.
+	 * The overhead of the atomic op is negligible in this case & offers
+	 * significant benefit for the brief periods where lots of cpus
+	 * are simultaneously flushing TLBs.
+	 */
+	ia64_fetchadd(1, &local_tlb_flush_counts[smp_processor_id()].count, acq);
+	local_flush_tlb_all();
+}
+
+#define FLUSH_DELAY 5 /* Usec backoff to eliminate excessive cacheline bouncing */
+
+void
+smp_flush_tlb_cpumask(cpumask_t xcpumask)
+{
+	unsigned int *counts = __ia64_per_cpu_var(shadow_flush_counts);
+	cpumask_t cpumask = xcpumask;
+	int mycpu, cpu, flush_mycpu = 0;
+
+	preempt_disable();
+	mycpu = smp_processor_id();
+
+	for_each_cpu_mask(cpu, cpumask)
+		counts[cpu] = local_tlb_flush_counts[cpu].count;
+
+	mb();
+	for_each_cpu_mask(cpu, cpumask) {
+		if (cpu == mycpu)
+			flush_mycpu = 1;
+		else
+			smp_send_local_flush_tlb(cpu);
+	}
+
+	if (flush_mycpu)
+		smp_local_flush_tlb();
+
+	for_each_cpu_mask(cpu, cpumask)
+		while(counts[cpu] == local_tlb_flush_counts[cpu].count)
+			udelay(FLUSH_DELAY);
+
+	preempt_enable();
+}
+
void
smp_flush_tlb_all (void)
{