Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/Kconfig                      |    5
-rw-r--r--  arch/powerpc/kernel/Makefile              |   14
-rw-r--r--  arch/powerpc/kernel/entry_32.S            |  127
-rw-r--r--  arch/powerpc/kernel/entry_64.S            |   65
-rw-r--r--  arch/powerpc/kernel/ftrace.c              |  154
-rw-r--r--  arch/powerpc/kernel/io.c                  |    3
-rw-r--r--  arch/powerpc/kernel/irq.c                 |    6
-rw-r--r--  arch/powerpc/kernel/legacy_serial.c       |    5
-rw-r--r--  arch/powerpc/kernel/machine_kexec_64.c    |    2
-rw-r--r--  arch/powerpc/kernel/of_platform.c         |    2
-rw-r--r--  arch/powerpc/kernel/ppc_ksyms.c           |    5
-rw-r--r--  arch/powerpc/kernel/rtas.c                |    2
-rw-r--r--  arch/powerpc/kernel/setup_32.c            |    6
-rw-r--r--  arch/powerpc/kernel/smp.c                 |  234
-rw-r--r--  arch/powerpc/kernel/stacktrace.c          |    2
-rw-r--r--  arch/powerpc/kernel/tau_6xx.c             |    4
-rw-r--r--  arch/powerpc/kernel/time.c                |    2
-rw-r--r--  arch/powerpc/kernel/vmlinux.lds.S         |    2
-rw-r--r--  arch/powerpc/mm/slice.c                   |    2
-rw-r--r--  arch/powerpc/mm/tlb_64.c                  |    2
-rw-r--r--  arch/powerpc/oprofile/common.c            |    6
-rw-r--r--  arch/powerpc/platforms/cell/interrupt.c   |    1
-rw-r--r--  arch/powerpc/platforms/powermac/Makefile  |    5
-rw-r--r--  arch/powerpc/platforms/ps3/smp.c          |    7
-rw-r--r--  arch/powerpc/platforms/pseries/xics.c     |    6
-rw-r--r--  arch/powerpc/sysdev/mpic.c                |    2
26 files changed, 427 insertions(+), 244 deletions(-)
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 3934e265940..20eacf2a842 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -105,11 +105,14 @@ config ARCH_NO_VIRT_TO_BUS
config PPC
bool
default y
+ select HAVE_DYNAMIC_FTRACE
+ select HAVE_FTRACE
select HAVE_IDE
- select HAVE_OPROFILE
select HAVE_KPROBES
select HAVE_KRETPROBES
select HAVE_LMB
+ select USE_GENERIC_SMP_HELPERS if SMP
+ select HAVE_OPROFILE
config EARLY_PRINTK
bool
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 2346d271fbf..f3f5e264143 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -12,6 +12,18 @@ CFLAGS_prom_init.o += -fPIC
CFLAGS_btext.o += -fPIC
endif
+ifdef CONFIG_FTRACE
+# Do not trace early boot code
+CFLAGS_REMOVE_cputable.o = -pg
+CFLAGS_REMOVE_prom_init.o = -pg
+
+ifdef CONFIG_DYNAMIC_FTRACE
+# dynamic ftrace setup.
+CFLAGS_REMOVE_ftrace.o = -pg
+endif
+
+endif
+
obj-y := cputable.o ptrace.o syscalls.o \
irq.o align.o signal_32.o pmc.o vdso.o \
init_task.o process.o systbl.o idle.o \
@@ -78,6 +90,8 @@ obj-$(CONFIG_KEXEC) += machine_kexec.o crash.o \
obj-$(CONFIG_AUDIT) += audit.o
obj64-$(CONFIG_AUDIT) += compat_audit.o
+obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
+
obj-$(CONFIG_8XX_MINIMAL_FPEMU) += softemu8xx.o
ifneq ($(CONFIG_PPC_INDIRECT_IO),y)
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 0c8614d9875..7231a708af0 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -30,6 +30,7 @@
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
+#include <asm/ftrace.h>
#undef SHOW_SYSCALLS
#undef SHOW_SYSCALLS_TASK
@@ -1035,3 +1036,129 @@ machine_check_in_rtas:
/* XXX load up BATs and panic */
#endif /* CONFIG_PPC_RTAS */
+
+#ifdef CONFIG_FTRACE
+#ifdef CONFIG_DYNAMIC_FTRACE
+_GLOBAL(mcount)
+_GLOBAL(_mcount)
+ stwu r1,-48(r1)
+ stw r3, 12(r1)
+ stw r4, 16(r1)
+ stw r5, 20(r1)
+ stw r6, 24(r1)
+ mflr r3
+ stw r7, 28(r1)
+ mfcr r5
+ stw r8, 32(r1)
+ stw r9, 36(r1)
+ stw r10,40(r1)
+ stw r3, 44(r1)
+ stw r5, 8(r1)
+ subi r3, r3, MCOUNT_INSN_SIZE
+ .globl mcount_call
+mcount_call:
+ bl ftrace_stub
+ nop
+ lwz r6, 8(r1)
+ lwz r0, 44(r1)
+ lwz r3, 12(r1)
+ mtctr r0
+ lwz r4, 16(r1)
+ mtcr r6
+ lwz r5, 20(r1)
+ lwz r6, 24(r1)
+ lwz r0, 52(r1)
+ lwz r7, 28(r1)
+ lwz r8, 32(r1)
+ mtlr r0
+ lwz r9, 36(r1)
+ lwz r10,40(r1)
+ addi r1, r1, 48
+ bctr
+
+_GLOBAL(ftrace_caller)
+	/* Based on objdump output from glibc */
+ stwu r1,-48(r1)
+ stw r3, 12(r1)
+ stw r4, 16(r1)
+ stw r5, 20(r1)
+ stw r6, 24(r1)
+ mflr r3
+ lwz r4, 52(r1)
+ mfcr r5
+ stw r7, 28(r1)
+ stw r8, 32(r1)
+ stw r9, 36(r1)
+ stw r10,40(r1)
+ stw r3, 44(r1)
+ stw r5, 8(r1)
+ subi r3, r3, MCOUNT_INSN_SIZE
+.globl ftrace_call
+ftrace_call:
+ bl ftrace_stub
+ nop
+ lwz r6, 8(r1)
+ lwz r0, 44(r1)
+ lwz r3, 12(r1)
+ mtctr r0
+ lwz r4, 16(r1)
+ mtcr r6
+ lwz r5, 20(r1)
+ lwz r6, 24(r1)
+ lwz r0, 52(r1)
+ lwz r7, 28(r1)
+ lwz r8, 32(r1)
+ mtlr r0
+ lwz r9, 36(r1)
+ lwz r10,40(r1)
+ addi r1, r1, 48
+ bctr
+#else
+_GLOBAL(mcount)
+_GLOBAL(_mcount)
+ stwu r1,-48(r1)
+ stw r3, 12(r1)
+ stw r4, 16(r1)
+ stw r5, 20(r1)
+ stw r6, 24(r1)
+ mflr r3
+ lwz r4, 52(r1)
+ mfcr r5
+ stw r7, 28(r1)
+ stw r8, 32(r1)
+ stw r9, 36(r1)
+ stw r10,40(r1)
+ stw r3, 44(r1)
+ stw r5, 8(r1)
+
+ subi r3, r3, MCOUNT_INSN_SIZE
+ LOAD_REG_ADDR(r5, ftrace_trace_function)
+ lwz r5,0(r5)
+
+ mtctr r5
+ bctrl
+
+ nop
+
+ lwz r6, 8(r1)
+ lwz r0, 44(r1)
+ lwz r3, 12(r1)
+ mtctr r0
+ lwz r4, 16(r1)
+ mtcr r6
+ lwz r5, 20(r1)
+ lwz r6, 24(r1)
+ lwz r0, 52(r1)
+ lwz r7, 28(r1)
+ lwz r8, 32(r1)
+ mtlr r0
+ lwz r9, 36(r1)
+ lwz r10,40(r1)
+ addi r1, r1, 48
+ bctr
+#endif
+
+_GLOBAL(ftrace_stub)
+ blr
+
+#endif /* CONFIG_FTRACE */
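
As a reader's aid (not part of the patch): the 48-byte frame built by the
32-bit mcount/_mcount/ftrace_caller entries above, expressed as a C struct.
The field names are invented; the offsets follow the stw instructions in the
listing, and the caller's own LR save slot ends up at 52(r1), which is why
the epilogues load the return address from there.

struct mcount_frame32 {			/* offsets relative to the new r1 */
	unsigned int back_chain;	/*  0(r1): written by stwu, links to caller frame */
	unsigned int lr_save;		/*  4(r1): ABI LR save slot, unused here */
	unsigned int cr;		/*  8(r1): CR, saved via mfcr/stw */
	unsigned int gpr[8];		/* 12(r1)..40(r1): argument regs r3-r10 */
	unsigned int lr;		/* 44(r1): LR, i.e. the mcount call site + 4 */
};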
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index c0db5b769e5..2f511a969d2 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -31,6 +31,7 @@
#include <asm/bug.h>
#include <asm/ptrace.h>
#include <asm/irqflags.h>
+#include <asm/ftrace.h>
/*
* System calls.
@@ -870,3 +871,67 @@ _GLOBAL(enter_prom)
ld r0,16(r1)
mtlr r0
blr
+
+#ifdef CONFIG_FTRACE
+#ifdef CONFIG_DYNAMIC_FTRACE
+_GLOBAL(mcount)
+_GLOBAL(_mcount)
+ /* Taken from output of objdump from lib64/glibc */
+ mflr r3
+ stdu r1, -112(r1)
+ std r3, 128(r1)
+ subi r3, r3, MCOUNT_INSN_SIZE
+ .globl mcount_call
+mcount_call:
+ bl ftrace_stub
+ nop
+ ld r0, 128(r1)
+ mtlr r0
+ addi r1, r1, 112
+ blr
+
+_GLOBAL(ftrace_caller)
+ /* Taken from output of objdump from lib64/glibc */
+ mflr r3
+ ld r11, 0(r1)
+ stdu r1, -112(r1)
+ std r3, 128(r1)
+ ld r4, 16(r11)
+ subi r3, r3, MCOUNT_INSN_SIZE
+.globl ftrace_call
+ftrace_call:
+ bl ftrace_stub
+ nop
+ ld r0, 128(r1)
+ mtlr r0
+ addi r1, r1, 112
+_GLOBAL(ftrace_stub)
+ blr
+#else
+_GLOBAL(mcount)
+ blr
+
+_GLOBAL(_mcount)
+ /* Taken from output of objdump from lib64/glibc */
+ mflr r3
+ ld r11, 0(r1)
+ stdu r1, -112(r1)
+ std r3, 128(r1)
+ ld r4, 16(r11)
+
+ subi r3, r3, MCOUNT_INSN_SIZE
+ LOAD_REG_ADDR(r5,ftrace_trace_function)
+ ld r5,0(r5)
+ ld r5,0(r5)
+ mtctr r5
+ bctrl
+
+ nop
+ ld r0, 128(r1)
+ mtlr r0
+ addi r1, r1, 112
+_GLOBAL(ftrace_stub)
+ blr
+
+#endif
+#endif
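
The double "ld r5, 0(r5)" in the non-dynamic 64-bit _mcount above is
deliberate: on 64-bit PowerPC (ELFv1 ABI), a function pointer refers to a
function descriptor rather than to code, so reaching the entry point takes
one extra dereference. A sketch of the layout (struct and field names
invented for illustration):

struct ppc64_func_desc {
	unsigned long entry;	/* address of the first instruction */
	unsigned long toc;	/* TOC (r2) value to use in the callee */
	unsigned long env;	/* environment pointer, unused by C */
};

/*
 * LOAD_REG_ADDR(r5, ftrace_trace_function)   r5 = &ftrace_trace_function
 * ld r5, 0(r5)    r5 = the stored function pointer, i.e. a descriptor address
 * ld r5, 0(r5)    r5 = desc->entry, the code address handed to mtctr
 *
 * The same descriptor indirection is what GET_ADDR in ftrace.c (below)
 * performs on PPC64.
 */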
diff --git a/arch/powerpc/kernel/ftrace.c b/arch/powerpc/kernel/ftrace.c
new file mode 100644
index 00000000000..3855ceb937b
--- /dev/null
+++ b/arch/powerpc/kernel/ftrace.c
@@ -0,0 +1,154 @@
+/*
+ * Code for replacing ftrace calls with jumps.
+ *
+ * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
+ *
+ * Thanks go out to P.A. Semi, Inc. for supplying me with a PPC64 box.
+ *
+ */
+
+#include <linux/spinlock.h>
+#include <linux/hardirq.h>
+#include <linux/ftrace.h>
+#include <linux/percpu.h>
+#include <linux/init.h>
+#include <linux/list.h>
+
+#include <asm/cacheflush.h>
+#include <asm/ftrace.h>
+
+
+static unsigned int ftrace_nop = 0x60000000;
+
+#ifdef CONFIG_PPC32
+# define GET_ADDR(addr) addr
+#else
+/* On PowerPC64, a function pointer points to a function descriptor, not to the code itself */
+# define GET_ADDR(addr) *(unsigned long *)addr
+#endif
+
+
+static unsigned int notrace ftrace_calc_offset(long ip, long addr)
+{
+ return (int)(addr - ip);
+}
+
+notrace unsigned char *ftrace_nop_replace(void)
+{
+ return (char *)&ftrace_nop;
+}
+
+notrace unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
+{
+ static unsigned int op;
+
+ /*
+ * It would be nice to just use create_function_call, but that will
+ * update the code itself. Here we need to just return the
+ * instruction that is going to be modified, without modifying the
+ * code.
+ */
+ addr = GET_ADDR(addr);
+
+ /* Set to "bl addr" */
+ op = 0x48000001 | (ftrace_calc_offset(ip, addr) & 0x03fffffc);
+
+ /*
+ * No locking needed, this must be called via kstop_machine
+ * which in essence is like running on a uniprocessor machine.
+ */
+ return (unsigned char *)&op;
+}
+
+#ifdef CONFIG_PPC64
+# define _ASM_ALIGN " .align 3 "
+# define _ASM_PTR " .llong "
+#else
+# define _ASM_ALIGN " .align 2 "
+# define _ASM_PTR " .long "
+#endif
+
+notrace int
+ftrace_modify_code(unsigned long ip, unsigned char *old_code,
+ unsigned char *new_code)
+{
+ unsigned replaced;
+ unsigned old = *(unsigned *)old_code;
+ unsigned new = *(unsigned *)new_code;
+ int faulted = 0;
+
+ /*
+ * Note: Due to modules and __init, code can
+ * disappear and change, so we need to protect against faulting
+ * as well as code changing.
+ *
+ * No real locking needed, this code is run through
+ * kstop_machine.
+ */
+ asm volatile (
+ "1: lwz %1, 0(%2)\n"
+ " cmpw %1, %5\n"
+ " bne 2f\n"
+ " stwu %3, 0(%2)\n"
+ "2:\n"
+ ".section .fixup, \"ax\"\n"
+ "3: li %0, 1\n"
+ " b 2b\n"
+ ".previous\n"
+ ".section __ex_table,\"a\"\n"
+ _ASM_ALIGN "\n"
+ _ASM_PTR "1b, 3b\n"
+ ".previous"
+ : "=r"(faulted), "=r"(replaced)
+ : "r"(ip), "r"(new),
+ "0"(faulted), "r"(old)
+ : "memory");
+
+ if (replaced != old && replaced != new)
+ faulted = 2;
+
+ if (!faulted)
+ flush_icache_range(ip, ip + 8);
+
+ return faulted;
+}
+
+notrace int ftrace_update_ftrace_func(ftrace_func_t func)
+{
+ unsigned long ip = (unsigned long)(&ftrace_call);
+ unsigned char old[MCOUNT_INSN_SIZE], *new;
+ int ret;
+
+ memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
+ new = ftrace_call_replace(ip, (unsigned long)func);
+ ret = ftrace_modify_code(ip, old, new);
+
+ return ret;
+}
+
+notrace int ftrace_mcount_set(unsigned long *data)
+{
+ unsigned long ip = (long)(&mcount_call);
+ unsigned long *addr = data;
+ unsigned char old[MCOUNT_INSN_SIZE], *new;
+
+ /*
+ * Replace the mcount stub with a pointer to the
+ * ip recorder function.
+ */
+ memcpy(old, &mcount_call, MCOUNT_INSN_SIZE);
+ new = ftrace_call_replace(ip, *addr);
+ *addr = ftrace_modify_code(ip, old, new);
+
+ return 0;
+}
+
+int __init ftrace_dyn_arch_init(void *data)
+{
+ /* This is running in kstop_machine */
+
+ ftrace_mcount_set(data);
+
+ return 0;
+}
+
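
ftrace_call_replace() above assembles a PowerPC "bl" instruction by hand:
major opcode 18 in the top six bits plus LK=1 gives the 0x48000001 base, and
0x03fffffc masks the 24-bit signed word displacement into bits 6-29. A
stand-alone sketch (not part of the patch; make_bl is an invented helper)
that builds such an instruction, with the range check the patch itself
leaves implicit:

#include <stdint.h>
#include <stdio.h>

/* Build a "bl addr" for a call site at ip.  The displacement is
 * ip-relative, must be word aligned, and must fit in a signed 26-bit
 * field: the target has to lie within +/-32 MB of the call site. */
static uint32_t make_bl(unsigned long ip, unsigned long addr)
{
	long offset = (long)addr - (long)ip;

	if (offset < -0x2000000 || offset > 0x1fffffc || (offset & 3))
		return 0;	/* out of range: would need a trampoline */

	return 0x48000001 | ((uint32_t)offset & 0x03fffffc);
}

int main(void)
{
	/* a call from 0x10000 forward to 0x10400 encodes as 0x48000401 */
	printf("0x%08x\n", make_bl(0x10000, 0x10400));
	return 0;
}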
diff --git a/arch/powerpc/kernel/io.c b/arch/powerpc/kernel/io.c
index e31aca9208e..1882bf419fa 100644
--- a/arch/powerpc/kernel/io.c
+++ b/arch/powerpc/kernel/io.c
@@ -120,7 +120,8 @@ EXPORT_SYMBOL(_outsl_ns);
#define IO_CHECK_ALIGN(v,a) ((((unsigned long)(v)) & ((a) - 1)) == 0)
-void _memset_io(volatile void __iomem *addr, int c, unsigned long n)
+notrace void
+_memset_io(volatile void __iomem *addr, int c, unsigned long n)
{
void *p = (void __force *)addr;
u32 lc = c;
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index bcc249d90c4..dcc946e6709 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -98,7 +98,7 @@ EXPORT_SYMBOL(irq_desc);
int distribute_irqs = 1;
-static inline unsigned long get_hard_enabled(void)
+static inline notrace unsigned long get_hard_enabled(void)
{
unsigned long enabled;
@@ -108,13 +108,13 @@ static inline unsigned long get_hard_enabled(void)
return enabled;
}
-static inline void set_soft_enabled(unsigned long enable)
+static inline notrace void set_soft_enabled(unsigned long enable)
{
__asm__ __volatile__("stb %0,%1(13)"
: : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled)));
}
-void raw_local_irq_restore(unsigned long en)
+notrace void raw_local_irq_restore(unsigned long en)
{
/*
* get_paca()->soft_enabled = en;
diff --git a/arch/powerpc/kernel/legacy_serial.c b/arch/powerpc/kernel/legacy_serial.c
index cf37f5ca4b7..4d96e1db55e 100644
--- a/arch/powerpc/kernel/legacy_serial.c
+++ b/arch/powerpc/kernel/legacy_serial.c
@@ -33,13 +33,14 @@ static struct legacy_serial_info {
phys_addr_t taddr;
} legacy_serial_infos[MAX_LEGACY_SERIAL_PORTS];
-static struct __initdata of_device_id parents[] = {
+static struct __initdata of_device_id legacy_serial_parents[] = {
{.type = "soc",},
{.type = "tsi-bridge",},
{.type = "opb", },
{.compatible = "ibm,opb",},
{.compatible = "simple-bus",},
{.compatible = "wrs,epld-localbus",},
+ {},
};
static unsigned int legacy_serial_count;
@@ -327,7 +328,7 @@ void __init find_legacy_serial_ports(void)
struct device_node *parent = of_get_parent(np);
if (!parent)
continue;
- if (of_match_node(parents, parent) != NULL) {
+ if (of_match_node(legacy_serial_parents, parent) != NULL) {
index = add_legacy_soc_port(np, np);
if (index >= 0 && np == stdout)
legacy_serial_console = index;
diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c
index 704375bda73..b732b5f8e35 100644
--- a/arch/powerpc/kernel/machine_kexec_64.c
+++ b/arch/powerpc/kernel/machine_kexec_64.c
@@ -172,7 +172,7 @@ static void kexec_prepare_cpus(void)
{
int my_cpu, i, notified=-1;
- smp_call_function(kexec_smp_down, NULL, 0, /* wait */0);
+ smp_call_function(kexec_smp_down, NULL, /* wait */0);
my_cpu = get_cpu();
/* check the others cpus are now down (via paca hw cpu id == -1) */
diff --git a/arch/powerpc/kernel/of_platform.c b/arch/powerpc/kernel/of_platform.c
index e79ad8afda0..3f37a6e6277 100644
--- a/arch/powerpc/kernel/of_platform.c
+++ b/arch/powerpc/kernel/of_platform.c
@@ -76,6 +76,8 @@ struct of_device* of_platform_device_create(struct device_node *np,
return NULL;
dev->dma_mask = 0xffffffffUL;
+ dev->dev.coherent_dma_mask = DMA_32BIT_MASK;
+
dev->dev.bus = &of_platform_bus_type;
/* We do not fill the DMA ops for platform devices by default.
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index d3ac631cbd2..a8d02506468 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -42,6 +42,7 @@
#include <asm/div64.h>
#include <asm/signal.h>
#include <asm/dcr.h>
+#include <asm/ftrace.h>
#ifdef CONFIG_PPC32
extern void transfer_to_handler(void);
@@ -67,6 +68,10 @@ EXPORT_SYMBOL(single_step_exception);
EXPORT_SYMBOL(sys_sigreturn);
#endif
+#ifdef CONFIG_FTRACE
+EXPORT_SYMBOL(_mcount);
+#endif
+
EXPORT_SYMBOL(strcpy);
EXPORT_SYMBOL(strncpy);
EXPORT_SYMBOL(strcat);
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 34843c31841..647f3e8677d 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -747,7 +747,7 @@ static int rtas_ibm_suspend_me(struct rtas_args *args)
/* Call function on all CPUs. One of us will make the
* rtas call
*/
- if (on_each_cpu(rtas_percpu_suspend_me, &data, 1, 0))
+ if (on_each_cpu(rtas_percpu_suspend_me, &data, 0))
data.error = -EINVAL;
wait_for_completion(&done);
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 5112a4aa801..19e8fcb9cea 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -81,7 +81,7 @@ int ucache_bsize;
* from the address that it was linked at, so we must use RELOC/PTRRELOC
* to access static data (including strings). -- paulus
*/
-unsigned long __init early_init(unsigned long dt_ptr)
+notrace unsigned long __init early_init(unsigned long dt_ptr)
{
unsigned long offset = reloc_offset();
struct cpu_spec *spec;
@@ -111,7 +111,7 @@ unsigned long __init early_init(unsigned long dt_ptr)
* This is called very early on the boot process, after a minimal
* MMU environment has been set up but before MMU_init is called.
*/
-void __init machine_init(unsigned long dt_ptr, unsigned long phys)
+notrace void __init machine_init(unsigned long dt_ptr, unsigned long phys)
{
/* Enable early debugging if any specified (see udbg.h) */
udbg_early_init();
@@ -133,7 +133,7 @@ void __init machine_init(unsigned long dt_ptr, unsigned long phys)
#ifdef CONFIG_BOOKE_WDT
/* Checks wdt=x and wdt_period=xx command-line option */
-int __init early_parse_wdt(char *p)
+notrace int __init early_parse_wdt(char *p)
{
if (p && strncmp(p, "0", 1) != 0)
booke_wdt_enabled = 1;
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 1457aa0a08f..5191b46a611 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -72,12 +72,8 @@ struct smp_ops_t *smp_ops;
static volatile unsigned int cpu_callin_map[NR_CPUS];
-void smp_call_function_interrupt(void);
-
int smt_enabled_at_boot = 1;
-static int ipi_fail_ok;
-
static void (*crash_ipi_function_ptr)(struct pt_regs *) = NULL;
#ifdef CONFIG_PPC64
@@ -99,12 +95,15 @@ void smp_message_recv(int msg)
{
switch(msg) {
case PPC_MSG_CALL_FUNCTION:
- smp_call_function_interrupt();
+ generic_smp_call_function_interrupt();
break;
case PPC_MSG_RESCHEDULE:
/* XXX Do we have to do this? */
set_need_resched();
break;
+ case PPC_MSG_CALL_FUNC_SINGLE:
+ generic_smp_call_function_single_interrupt();
+ break;
case PPC_MSG_DEBUGGER_BREAK:
if (crash_ipi_function_ptr) {
crash_ipi_function_ptr(get_irq_regs());
@@ -128,6 +127,19 @@ void smp_send_reschedule(int cpu)
smp_ops->message_pass(cpu, PPC_MSG_RESCHEDULE);
}
+void arch_send_call_function_single_ipi(int cpu)
+{
+ smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNC_SINGLE);
+}
+
+void arch_send_call_function_ipi(cpumask_t mask)
+{
+ unsigned int cpu;
+
+ for_each_cpu_mask(cpu, mask)
+ smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNCTION);
+}
+
#ifdef CONFIG_DEBUGGER
void smp_send_debugger_break(int cpu)
{
@@ -154,215 +166,9 @@ static void stop_this_cpu(void *dummy)
;
}
-/*
- * Structure and data for smp_call_function(). This is designed to minimise
- * static memory requirements. It also looks cleaner.
- * Stolen from the i386 version.
- */
-static __cacheline_aligned_in_smp DEFINE_SPINLOCK(call_lock);
-
-static struct call_data_struct {
- void (*func) (void *info);
- void *info;
- atomic_t started;
- atomic_t finished;
- int wait;
-} *call_data;
-
-/* delay of at least 8 seconds */
-#define SMP_CALL_TIMEOUT 8
-
-/*
- * These functions send a 'generic call function' IPI to other online
- * CPUS in the system.
- *
- * [SUMMARY] Run a function on other CPUs.
- * <func> The function to run. This must be fast and non-blocking.
- * <info> An arbitrary pointer to pass to the function.
- * <nonatomic> currently unused.
- * <wait> If true, wait (atomically) until function has completed on other CPUs.
- * [RETURNS] 0 on success, else a negative status code. Does not return until
- * remote CPUs are nearly ready to execute <<func>> or are or have executed.
- * <map> is a cpu map of the cpus to send IPI to.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
- */
-static int __smp_call_function_map(void (*func) (void *info), void *info,
- int nonatomic, int wait, cpumask_t map)
-{
- struct call_data_struct data;
- int ret = -1, num_cpus;
- int cpu;
- u64 timeout;
-
- if (unlikely(smp_ops == NULL))
- return ret;
-
- data.func = func;
- data.info = info;
- atomic_set(&data.started, 0);
- data.wait = wait;
- if (wait)
- atomic_set(&data.finished, 0);
-
- /* remove 'self' from the map */
- if (cpu_isset(smp_processor_id(), map))
- cpu_clear(smp_processor_id(), map);
-
- /* sanity check the map, remove any non-online processors. */
- cpus_and(map, map, cpu_online_map);
-
- num_cpus = cpus_weight(map);
- if (!num_cpus)
- goto done;
-
- call_data = &data;
- smp_wmb();
- /* Send a message to all CPUs in the map */
- for_each_cpu_mask(cpu, map)
- smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNCTION);
-
- timeout = get_tb() + (u64) SMP_CALL_TIMEOUT * tb_ticks_per_sec;
-
- /* Wait for indication that they have received the message */
- while (atomic_read(&data.started) != num_cpus) {
- HMT_low();
- if (get_tb() >= timeout) {
- printk("smp_call_function on cpu %d: other cpus not "
- "responding (%d)\n", smp_processor_id(),
- atomic_read(&data.started));
- if (!ipi_fail_ok)
- debugger(NULL);
- goto out;
- }
- }
-
- /* optionally wait for the CPUs to complete */
- if (wait) {
- while (atomic_read(&data.finished) != num_cpus) {
- HMT_low();
- if (get_tb() >= timeout) {
- printk("smp_call_function on cpu %d: other "
- "cpus not finishing (%d/%d)\n",
- smp_processor_id(),
- atomic_read(&data.finished),
- atomic_read(&data.started));
- debugger(NULL);
- goto out;
- }
- }
- }
-
- done:
- ret = 0;
-
- out:
- call_data = NULL;
- HMT_medium();
- return ret;
-}
-
-static int __smp_call_function(void (*func)(void *info), void *info,
- int nonatomic, int wait)
-{
- int ret;
- spin_lock(&call_lock);
- ret =__smp_call_function_map(func, info, nonatomic, wait,
- cpu_online_map);
- spin_unlock(&call_lock);
- return ret;
-}
-
-int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
- int wait)
-{
- /* Can deadlock when called with interrupts disabled */
- WARN_ON(irqs_disabled());
-
- return __smp_call_function(func, info, nonatomic, wait);
-}
-EXPORT_SYMBOL(smp_call_function);
-
-int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
- int nonatomic, int wait)
-{
- cpumask_t map = CPU_MASK_NONE;
- int ret = 0;
-
- /* Can deadlock when called with interrupts disabled */
- WARN_ON(irqs_disabled());
-
- if (!cpu_online(cpu))
- return -EINVAL;
-
- cpu_set(cpu, map);
- if (cpu != get_cpu()) {
- spin_lock(&call_lock);
- ret = __smp_call_function_map(func, info, nonatomic, wait, map);
- spin_unlock(&call_lock);
- } else {
- local_irq_disable();
- func(info);
- local_irq_enable();
- }
- put_cpu();
- return ret;
-}
-EXPORT_SYMBOL(smp_call_function_single);
-
void smp_send_stop(void)
{
- int nolock;
-
- /* It's OK to fail sending the IPI, since the alternative is to
- * be stuck forever waiting on the other CPU to take the interrupt.
- *
- * It's better to at least continue and go through reboot, since this
- * function is usually called at panic or reboot time in the first
- * place.
- */
- ipi_fail_ok = 1;
-
- /* Don't deadlock in case we got called through panic */
- nolock = !spin_trylock(&call_lock);
- __smp_call_function_map(stop_this_cpu, NULL, 1, 0, cpu_online_map);
- if (!nolock)
- spin_unlock(&call_lock);
-}
-
-void smp_call_function_interrupt(void)
-{
- void (*func) (void *info);
- void *info;
- int wait;
-
- /* call_data will be NULL if the sender timed out while
- * waiting on us to receive the call.
- */
- if (!call_data)
- return;
-
- func = call_data->func;
- info = call_data->info;
- wait = call_data->wait;
-
- if (!wait)
- smp_mb__before_atomic_inc();
-
- /*
- * Notify initiating CPU that I've grabbed the data and am
- * about to execute the function
- */
- atomic_inc(&call_data->started);
- /*
- * At this point the info structure may be out of scope unless wait==1
- */
- (*func)(info);
- if (wait) {
- smp_mb__before_atomic_inc();
- atomic_inc(&call_data->finished);
- }
+ smp_call_function(stop_this_cpu, NULL, 0);
}
extern struct gettimeofday_struct do_gtod;
@@ -596,9 +402,9 @@ int __devinit start_secondary(void *unused)
secondary_cpu_time_init();
- spin_lock(&call_lock);
+ ipi_call_lock();
cpu_set(cpu, cpu_online_map);
- spin_unlock(&call_lock);
+ ipi_call_unlock();
local_irq_enable();
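
The smp.c hunks above drop the powerpc-private call_data machinery in favor
of the kernel's generic SMP helpers, which also removes the unused
'nonatomic' argument from the calling convention. A sketch of a caller
after the change (drain_local_queue and drain_all_queues are invented
names):

#include <linux/smp.h>

static void drain_local_queue(void *info)
{
	/* runs on every other online CPU, in interrupt context */
}

static void drain_all_queues(void)
{
	/*
	 * Old powerpc-private form: smp_call_function(func, info, nonatomic, wait)
	 * Generic-helper form:      smp_call_function(func, info, wait)
	 * wait=1 blocks until every target CPU has run the function.
	 */
	smp_call_function(drain_local_queue, NULL, 1);
}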
diff --git a/arch/powerpc/kernel/stacktrace.c b/arch/powerpc/kernel/stacktrace.c
index 96294403843..3cf0d94ba34 100644
--- a/arch/powerpc/kernel/stacktrace.c
+++ b/arch/powerpc/kernel/stacktrace.c
@@ -12,6 +12,7 @@
#include <linux/sched.h>
#include <linux/stacktrace.h>
+#include <linux/module.h>
#include <asm/ptrace.h>
/*
@@ -44,3 +45,4 @@ void save_stack_trace(struct stack_trace *trace)
sp = newsp;
}
}
+EXPORT_SYMBOL_GPL(save_stack_trace);
diff --git a/arch/powerpc/kernel/tau_6xx.c b/arch/powerpc/kernel/tau_6xx.c
index 368a4934f7e..c3a56d65c5a 100644
--- a/arch/powerpc/kernel/tau_6xx.c
+++ b/arch/powerpc/kernel/tau_6xx.c
@@ -192,7 +192,7 @@ static void tau_timeout_smp(unsigned long unused)
/* schedule ourselves to be run again */
mod_timer(&tau_timer, jiffies + shrink_timer) ;
- on_each_cpu(tau_timeout, NULL, 1, 0);
+ on_each_cpu(tau_timeout, NULL, 0);
}
/*
@@ -234,7 +234,7 @@ int __init TAU_init(void)
tau_timer.expires = jiffies + shrink_timer;
add_timer(&tau_timer);
- on_each_cpu(TAU_init_smp, NULL, 1, 0);
+ on_each_cpu(TAU_init_smp, NULL, 0);
printk("Thermal assist unit ");
#ifdef CONFIG_TAU_INT
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 73401e83739..f1a38a6c1e2 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -322,7 +322,7 @@ void snapshot_timebases(void)
{
if (!cpu_has_feature(CPU_FTR_PURR))
return;
- on_each_cpu(snapshot_tb_and_purr, NULL, 0, 1);
+ on_each_cpu(snapshot_tb_and_purr, NULL, 1);
}
/*
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 0c3000bf8d7..53d57d17a89 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -64,8 +64,6 @@ SECTIONS
NOTES
- BUG_TABLE
-
/*
* Init sections discarded at runtime
*/
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
index ad928edafb0..2bd12d965db 100644
--- a/arch/powerpc/mm/slice.c
+++ b/arch/powerpc/mm/slice.c
@@ -218,7 +218,7 @@ static void slice_convert(struct mm_struct *mm, struct slice_mask mask, int psiz
mb();
/* XXX this is sub-optimal but will do for now */
- on_each_cpu(slice_flush_segments, mm, 0, 1);
+ on_each_cpu(slice_flush_segments, mm, 1);
#ifdef CONFIG_SPU_BASE
spu_flush_all_slbs(mm);
#endif
diff --git a/arch/powerpc/mm/tlb_64.c b/arch/powerpc/mm/tlb_64.c
index e2d867ce1c7..69ad829a7fa 100644
--- a/arch/powerpc/mm/tlb_64.c
+++ b/arch/powerpc/mm/tlb_64.c
@@ -66,7 +66,7 @@ static void pgtable_free_now(pgtable_free_t pgf)
{
pte_freelist_forced_free++;
- smp_call_function(pte_free_smp_sync, NULL, 0, 1);
+ smp_call_function(pte_free_smp_sync, NULL, 1);
pgtable_free(pgf);
}
diff --git a/arch/powerpc/oprofile/common.c b/arch/powerpc/oprofile/common.c
index 4908dc98f9c..17807acb05d 100644
--- a/arch/powerpc/oprofile/common.c
+++ b/arch/powerpc/oprofile/common.c
@@ -65,7 +65,7 @@ static int op_powerpc_setup(void)
/* Configure the registers on all cpus. If an error occurs on one
* of the cpus, op_per_cpu_rc will be set to the error */
- on_each_cpu(op_powerpc_cpu_setup, NULL, 0, 1);
+ on_each_cpu(op_powerpc_cpu_setup, NULL, 1);
out: if (op_per_cpu_rc) {
/* error on setup release the performance counter hardware */
@@ -100,7 +100,7 @@ static int op_powerpc_start(void)
if (model->global_start)
return model->global_start(ctr);
if (model->start) {
- on_each_cpu(op_powerpc_cpu_start, NULL, 0, 1);
+ on_each_cpu(op_powerpc_cpu_start, NULL, 1);
return op_per_cpu_rc;
}
return -EIO; /* No start function is defined for this
@@ -115,7 +115,7 @@ static inline void op_powerpc_cpu_stop(void *dummy)
static void op_powerpc_stop(void)
{
if (model->stop)
- on_each_cpu(op_powerpc_cpu_stop, NULL, 0, 1);
+ on_each_cpu(op_powerpc_cpu_stop, NULL, 1);
if (model->global_stop)
model->global_stop();
}
diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c
index 5bf7df14602..2d5bb22d6c0 100644
--- a/arch/powerpc/platforms/cell/interrupt.c
+++ b/arch/powerpc/platforms/cell/interrupt.c
@@ -218,6 +218,7 @@ void iic_request_IPIs(void)
{
iic_request_ipi(PPC_MSG_CALL_FUNCTION, "IPI-call");
iic_request_ipi(PPC_MSG_RESCHEDULE, "IPI-resched");
+ iic_request_ipi(PPC_MSG_CALL_FUNC_SINGLE, "IPI-call-single");
#ifdef CONFIG_DEBUGGER
iic_request_ipi(PPC_MSG_DEBUGGER_BREAK, "IPI-debug");
#endif /* CONFIG_DEBUGGER */
diff --git a/arch/powerpc/platforms/powermac/Makefile b/arch/powerpc/platforms/powermac/Makefile
index 4d72c8f7215..89774177b20 100644
--- a/arch/powerpc/platforms/powermac/Makefile
+++ b/arch/powerpc/platforms/powermac/Makefile
@@ -1,5 +1,10 @@
CFLAGS_bootx_init.o += -fPIC
+ifdef CONFIG_FTRACE
+# Do not trace early boot code
+CFLAGS_REMOVE_bootx_init.o = -pg
+endif
+
obj-y += pic.o setup.o time.o feature.o pci.o \
sleep.o low_i2c.o cache.o pfunc_core.o \
pfunc_base.o
diff --git a/arch/powerpc/platforms/ps3/smp.c b/arch/powerpc/platforms/ps3/smp.c
index f0b12f21236..a0927a3bacb 100644
--- a/arch/powerpc/platforms/ps3/smp.c
+++ b/arch/powerpc/platforms/ps3/smp.c
@@ -105,9 +105,10 @@ static void __init ps3_smp_setup_cpu(int cpu)
* to index needs to be setup.
*/
- BUILD_BUG_ON(PPC_MSG_CALL_FUNCTION != 0);
- BUILD_BUG_ON(PPC_MSG_RESCHEDULE != 1);
- BUILD_BUG_ON(PPC_MSG_DEBUGGER_BREAK != 3);
+ BUILD_BUG_ON(PPC_MSG_CALL_FUNCTION != 0);
+ BUILD_BUG_ON(PPC_MSG_RESCHEDULE != 1);
+ BUILD_BUG_ON(PPC_MSG_CALL_FUNC_SINGLE != 2);
+ BUILD_BUG_ON(PPC_MSG_DEBUGGER_BREAK != 3);
for (i = 0; i < MSG_COUNT; i++) {
result = ps3_event_receive_port_setup(cpu, &virqs[i]);
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c
index ebebc28fe89..0fc830f576f 100644
--- a/arch/powerpc/platforms/pseries/xics.c
+++ b/arch/powerpc/platforms/pseries/xics.c
@@ -383,13 +383,11 @@ static irqreturn_t xics_ipi_dispatch(int cpu)
mb();
smp_message_recv(PPC_MSG_RESCHEDULE);
}
-#if 0
- if (test_and_clear_bit(PPC_MSG_MIGRATE_TASK,
+ if (test_and_clear_bit(PPC_MSG_CALL_FUNC_SINGLE,
&xics_ipi_message[cpu].value)) {
mb();
- smp_message_recv(PPC_MSG_MIGRATE_TASK);
+ smp_message_recv(PPC_MSG_CALL_FUNC_SINGLE);
}
-#endif
#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
if (test_and_clear_bit(PPC_MSG_DEBUGGER_BREAK,
&xics_ipi_message[cpu].value)) {
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index 7680001676a..6c90c95b454 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -1494,7 +1494,7 @@ void mpic_request_ipis(void)
static char *ipi_names[] = {
"IPI0 (call function)",
"IPI1 (reschedule)",
- "IPI2 (unused)",
+ "IPI2 (call function single)",
"IPI3 (debugger break)",
};
BUG_ON(mpic == NULL);
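
Taken together, the cell, ps3, xics and mpic hunks wire IPI message slot 2,
previously "migrate task" or "unused", to the new single-CPU function call.
The resulting numbering, as confirmed by the BUILD_BUG_ON checks in
platforms/ps3/smp.c above:

enum {
	PPC_MSG_CALL_FUNCTION    = 0,	/* generic_smp_call_function_interrupt() */
	PPC_MSG_RESCHEDULE       = 1,
	PPC_MSG_CALL_FUNC_SINGLE = 2,	/* generic_smp_call_function_single_interrupt() */
	PPC_MSG_DEBUGGER_BREAK   = 3,
};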