Diffstat (limited to 'arch/mips/kernel/smtc.c')
-rw-r--r--  arch/mips/kernel/smtc.c  146
1 file changed, 103 insertions(+), 43 deletions(-)
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index f09404377ef..a8c1a698d58 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -1,5 +1,6 @@
/* Copyright (C) 2004 Mips Technologies, Inc */
+#include <linux/clockchips.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
@@ -62,7 +63,7 @@ asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS];
* Clock interrupt "latch" buffers, per "CPU"
*/
-unsigned int ipi_timer_latch[NR_CPUS];
+static atomic_t ipi_timer_latch[NR_CPUS];
/*
* Number of InterProcessor Interrupt (IPI) message buffers to allocate
@@ -179,7 +180,7 @@ void __init sanitize_tlb_entries(void)
static void smtc_configure_tlb(void)
{
- int i,tlbsiz,vpes;
+ int i, tlbsiz, vpes;
unsigned long mvpconf0;
unsigned long config1val;
@@ -296,8 +297,10 @@ int __init mipsmt_build_cpu_map(int start_cpu_slot)
__cpu_number_map[i] = i;
__cpu_logical_map[i] = i;
}
+#ifdef CONFIG_MIPS_MT_FPAFF
/* Initialize map of CPUs with FPUs */
cpus_clear(mt_fpu_cpumask);
+#endif
/* One of those TCs is the one booting, and not a secondary... */
printk("%i available secondary CPU TC(s)\n", i - 1);
@@ -359,7 +362,7 @@ void mipsmt_prepare_cpus(void)
IPIQ[i].head = IPIQ[i].tail = NULL;
spin_lock_init(&IPIQ[i].lock);
IPIQ[i].depth = 0;
- ipi_timer_latch[i] = 0;
+ atomic_set(&ipi_timer_latch[i], 0);
}
/* cpu_data index starts at zero */
@@ -369,7 +372,7 @@ void mipsmt_prepare_cpus(void)
cpu++;
/* Report on boot-time options */
- mips_mt_set_cpuoptions ();
+ mips_mt_set_cpuoptions();
if (vpelimit > 0)
printk("Limit of %d VPEs set\n", vpelimit);
if (tclimit > 0)
@@ -420,7 +423,7 @@ void mipsmt_prepare_cpus(void)
* code. Leave it alone!
*/
if (tc != 0) {
- smtc_tc_setup(vpe,tc, cpu);
+ smtc_tc_setup(vpe, tc, cpu);
cpu++;
}
printk(" %d", tc);
@@ -428,7 +431,7 @@ void mipsmt_prepare_cpus(void)
}
if (slop) {
if (tc != 0) {
- smtc_tc_setup(vpe,tc, cpu);
+ smtc_tc_setup(vpe, tc, cpu);
cpu++;
}
printk(" %d", tc);
@@ -482,10 +485,12 @@ void mipsmt_prepare_cpus(void)
/* Set up coprocessor affinity CPU mask(s) */
+#ifdef CONFIG_MIPS_MT_FPAFF
for (tc = 0; tc < ntc; tc++) {
if (cpu_data[tc].options & MIPS_CPU_FPU)
cpu_set(tc, mt_fpu_cpumask);
}
+#endif
/* set up ipi interrupts... */
@@ -567,7 +572,7 @@ void smtc_init_secondary(void)
if (((read_c0_tcbind() & TCBIND_CURTC) != 0) &&
((read_c0_tcbind() & TCBIND_CURVPE)
!= cpu_data[smp_processor_id() - 1].vpe_id)){
- write_c0_compare (read_c0_count() + mips_hpt_frequency/HZ);
+ write_c0_compare(read_c0_count() + mips_hpt_frequency/HZ);
}
local_irq_enable();
@@ -606,6 +611,60 @@ int setup_irq_smtc(unsigned int irq, struct irqaction * new,
return setup_irq(irq, new);
}
+#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
+/*
+ * Support for IRQ affinity to TCs
+ */
+
+void smtc_set_irq_affinity(unsigned int irq, cpumask_t affinity)
+{
+ /*
+ * If a "fast path" cache of quickly decodable affinity state
+ * is maintained, this is where it gets done, on a call up
+ * from the platform affinity code.
+ */
+}
+
+void smtc_forward_irq(unsigned int irq)
+{
+ int target;
+
+ /*
+ * OK wise guy, now figure out how to get the IRQ
+ * to be serviced on an authorized "CPU".
+ *
+ * Ideally, to handle the situation where an IRQ has multiple
+ * eligible CPUs, we would maintain state per IRQ that would
+ * allow a fair distribution of service requests. Since the
+ * expected use model is any-or-only-one, for simplicity
+ * and efficiency, we just pick the easiest one to find.
+ */
+
+ target = first_cpu(irq_desc[irq].affinity);
+
+ /*
+ * We depend on the platform code to have correctly processed
+ * IRQ affinity change requests to ensure that the IRQ affinity
+ * mask has been purged of bits corresponding to nonexistent and
+ * offline "CPUs", and to TCs bound to VPEs other than the VPE
+ * connected to the physical interrupt input for the interrupt
+ * in question. Otherwise we have a nasty problem with interrupt
+ * mask management. This is best handled in non-performance-critical
+ * platform IRQ affinity setting code, to minimize interrupt-time
+ * checks.
+ */
+
+ /* If no one is eligible, service locally */
+ if (target >= NR_CPUS) {
+ do_IRQ_no_affinity(irq);
+ return;
+ }
+
+ smtc_send_ipi(target, IRQ_AFFINITY_IPI, irq);
+}
+
+#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
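The comment in smtc_forward_irq() above leans on the platform's affinity-setting code to sanitize the mask ahead of time, in the slow path. A minimal sketch of such a hook, assuming a hypothetical plat_smtc_set_affinity() and the cpumask API of this kernel era (not part of this patch):

static void plat_smtc_set_affinity(unsigned int irq, cpumask_t mask)
{
	cpumask_t tmask;

	/*
	 * Slow path: purge offline CPUs so interrupt-time code never
	 * finds an ineligible target in irq_desc[irq].affinity.
	 * (Filtering out TCs bound to other VPEs, which the comment
	 * above also requires, is omitted here for brevity.)
	 */
	cpus_and(tmask, mask, cpu_online_map);
	irq_desc[irq].affinity = tmask;

	/* Let SMTC refresh any fast-path decode state it keeps. */
	smtc_set_irq_affinity(irq, tmask);
}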
+
/*
* IPI model for SMTC is tricky, because interrupts aren't TC-specific.
* Within a VPE one TC can interrupt another by different approaches.
@@ -648,7 +707,7 @@ static void smtc_ipi_qdump(void)
* be done with the atomic.h primitives). And since this is
* MIPS MT, we can assume that we have LL/SC.
*/
-static __inline__ int atomic_postincrement(unsigned int *pv)
+static inline int atomic_postincrement(atomic_t *v)
{
unsigned long result;
@@ -659,9 +718,9 @@ static __inline__ int atomic_postincrement(unsigned int *pv)
" addu %1, %0, 1 \n"
" sc %1, %2 \n"
" beqz %1, 1b \n"
- " sync \n"
- : "=&r" (result), "=&r" (temp), "=m" (*pv)
- : "m" (*pv)
+ __WEAK_LLSC_MB
+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
+ : "m" (v->counter)
: "memory");
return result;
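For comparison, the same post-increment semantics are available from the generic atomic API; a sketch (not in this patch), relying on atomic_add_return() returning the *new* value so the old one is recovered by subtracting one:

static inline int atomic_postincrement_generic(atomic_t *v)
{
	/* atomic_add_return() returns the incremented value */
	return atomic_add_return(1, v) - 1;
}

The hand-rolled LL/SC version above keeps the barrier choice (__WEAK_LLSC_MB rather than a full sync) explicit, which is the point of open-coding it here.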
@@ -689,6 +748,8 @@ void smtc_send_ipi(int cpu, int type, unsigned int action)
pipi->arg = (void *)action;
pipi->dest = cpu;
if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
+ if (type == SMTC_CLOCK_TICK)
+ atomic_inc(&ipi_timer_latch[cpu]);
/* If not on same VPE, enqueue and send cross-VPE interrupt */
smtc_ipi_nq(&IPIQ[cpu], pipi);
LOCK_CORE_PRA();
@@ -730,6 +791,8 @@ void smtc_send_ipi(int cpu, int type, unsigned int action)
}
smtc_ipi_nq(&IPIQ[cpu], pipi);
} else {
+ if (type == SMTC_CLOCK_TICK)
+ atomic_inc(&ipi_timer_latch[cpu]);
post_direct_ipi(cpu, pipi);
write_tc_c0_tchalt(0);
UNLOCK_CORE_PRA();
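With both branches of smtc_send_ipi() now bumping ipi_timer_latch[cpu] for SMTC_CLOCK_TICK, a timer broadcast reduces to one IPI per peer TC on the sender's VPE. A sketch of such a caller, mirroring the smtc_timer_broadcast() this patch removes further down (the name is hypothetical; where the replacement actually lives is outside this diff):

static void smtc_clock_tick_broadcast(void)
{
	int me = smp_processor_id();
	int cpu;

	/* Tick every other online TC on the sender's VPE */
	for_each_online_cpu(cpu) {
		if (cpu_data[cpu].vpe_id == cpu_data[me].vpe_id &&
		    cpu_data[cpu].tc_id != cpu_data[me].tc_id)
			smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0);
	}
}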
@@ -747,6 +810,7 @@ static void post_direct_ipi(int cpu, struct smtc_ipi *pipi)
unsigned long tcrestart;
extern u32 kernelsp[NR_CPUS];
extern void __smtc_ipi_vector(void);
+//printk("%s: on %d for %d\n", __func__, smp_processor_id(), cpu);
/* Extract Status, EPC from halted TC */
tcstatus = read_tc_c0_tcstatus();
@@ -797,25 +861,31 @@ static void ipi_call_interrupt(void)
smp_call_function_interrupt();
}
+DECLARE_PER_CPU(struct clock_event_device, smtc_dummy_clockevent_device);
+
void ipi_decode(struct smtc_ipi *pipi)
{
+ unsigned int cpu = smp_processor_id();
+ struct clock_event_device *cd;
void *arg_copy = pipi->arg;
int type_copy = pipi->type;
- int dest_copy = pipi->dest;
+ int ticks;
smtc_ipi_nq(&freeIPIq, pipi);
switch (type_copy) {
case SMTC_CLOCK_TICK:
irq_enter();
- kstat_this_cpu.irqs[MIPS_CPU_IRQ_BASE + cp0_compare_irq]++;
- /* Invoke Clock "Interrupt" */
- ipi_timer_latch[dest_copy] = 0;
-#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
- clock_hang_reported[dest_copy] = 0;
-#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */
- local_timer_interrupt(0, NULL);
+ kstat_this_cpu.irqs[MIPS_CPU_IRQ_BASE + 1]++;
+ cd = &per_cpu(smtc_dummy_clockevent_device, cpu);
+ ticks = atomic_read(&ipi_timer_latch[cpu]);
+ atomic_sub(ticks, &ipi_timer_latch[cpu]);
+ while (ticks) {
+ cd->event_handler(cd);
+ ticks--;
+ }
irq_exit();
break;
+
case LINUX_SMP_IPI:
switch ((int)arg_copy) {
case SMP_RESCHEDULE_YOURSELF:
@@ -830,6 +900,15 @@ void ipi_decode(struct smtc_ipi *pipi)
break;
}
break;
+#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
+ case IRQ_AFFINITY_IPI:
+ /*
+ * Accept a "forwarded" interrupt that was initially
+ * taken by a TC who doesn't have affinity for the IRQ.
+ */
+ do_IRQ_no_affinity((int)arg_copy);
+ break;
+#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
default:
printk("Impossible SMTC IPI Type 0x%x\n", type_copy);
break;
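The SMTC_CLOCK_TICK arm above drains the latch with a snapshot-and-subtract so that ticks posted concurrently by another TC are never lost; isolated as a sketch (names are illustrative, not from the patch):

static void drain_timer_latch(atomic_t *latch, struct clock_event_device *cd)
{
	int ticks = atomic_read(latch);

	/*
	 * Subtract only what was read: increments that race in after
	 * the atomic_read() survive in the latch for the next drain.
	 */
	atomic_sub(ticks, latch);
	while (ticks--)
		cd->event_handler(cd);
}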
@@ -858,25 +937,6 @@ void deferred_smtc_ipi(void)
}
/*
- * Send clock tick to all TCs except the one executing the funtion
- */
-
-void smtc_timer_broadcast(void)
-{
- int cpu;
- int myTC = cpu_data[smp_processor_id()].tc_id;
- int myVPE = cpu_data[smp_processor_id()].vpe_id;
-
- smtc_cpu_stats[smp_processor_id()].timerints++;
-
- for_each_online_cpu(cpu) {
- if (cpu_data[cpu].vpe_id == myVPE &&
- cpu_data[cpu].tc_id != myTC)
- smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0);
- }
-}
-
-/*
* Cross-VPE interrupts in the SMTC prototype use "software interrupts"
* set via cross-VPE MTTR manipulation of the Cause register. It would be
* in some regards preferable to have external logic for "doorbell" hardware
@@ -1117,11 +1177,11 @@ void smtc_idle_loop_hook(void)
for (tc = 0; tc < NR_CPUS; tc++) {
/* Don't check ourself - we'll dequeue IPIs just below */
if ((tc != smp_processor_id()) &&
- ipi_timer_latch[tc] > timerq_limit) {
+ atomic_read(&ipi_timer_latch[tc]) > timerq_limit) {
if (clock_hang_reported[tc] == 0) {
pdb_msg += sprintf(pdb_msg,
"TC %d looks hung with timer latch at %d\n",
- tc, ipi_timer_latch[tc]);
+ tc, atomic_read(&ipi_timer_latch[tc]));
clock_hang_reported[tc]++;
}
}
@@ -1162,7 +1222,7 @@ void smtc_soft_dump(void)
smtc_ipi_qdump();
printk("Timer IPI Backlogs:\n");
for (i=0; i < NR_CPUS; i++) {
- printk("%d: %d\n", i, ipi_timer_latch[i]);
+ printk("%d: %d\n", i, atomic_read(&ipi_timer_latch[i]));
}
printk("%d Recoveries of \"stolen\" FPU\n",
atomic_read(&smtc_fpu_recoveries));
@@ -1204,7 +1264,7 @@ void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
if (cpu_has_vtag_icache)
flush_icache_all();
/* Traverse all online CPUs (hack requires contiguous range) */
- for (i = 0; i < num_online_cpus(); i++) {
+ for_each_online_cpu(i) {
/*
* We don't need to worry about our own CPU, nor those of
* CPUs who don't share our TLB.
@@ -1233,7 +1293,7 @@ void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
/*
* SMTC shares the TLB within VPEs and possibly across all VPEs.
*/
- for (i = 0; i < num_online_cpus(); i++) {
+ for_each_online_cpu(i) {
if ((smtc_status & SMTC_TLB_SHARED) ||
(cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))
cpu_context(i, mm) = asid_cache(i) = asid;
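The two loop conversions in these hunks matter once online CPU numbering has holes: with CPUs 0, 1 and 4 online, num_online_cpus() is 3, so the old index loop visits offline CPU 2 and never reaches CPU 4. for_each_online_cpu() walks cpu_online_map directly; a trivial sketch:

static void show_online_cpus(void)
{
	int i;

	/*
	 * Visits exactly the set bits of cpu_online_map, so gaps in
	 * the numbering (e.g. 0, 1, 4) are handled correctly.
	 */
	for_each_online_cpu(i)
		printk("CPU %d is online\n", i);
}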