Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--  arch/powerpc/kernel/Makefile         |   1
-rw-r--r--  arch/powerpc/kernel/cputable.c       |  20
-rw-r--r--  arch/powerpc/kernel/head_fsl_booke.S | 187
-rw-r--r--  arch/powerpc/kernel/idle.c           |   2
-rw-r--r--  arch/powerpc/kernel/iommu.c          |  13
-rw-r--r--  arch/powerpc/kernel/kgdb.c           | 410
-rw-r--r--  arch/powerpc/kernel/pci-common.c     |   1
-rw-r--r--  arch/powerpc/kernel/prom_parse.c     |  44
-rw-r--r--  arch/powerpc/kernel/setup_32.c       |  16
-rw-r--r--  arch/powerpc/kernel/stacktrace.c     |   2
-rw-r--r--  arch/powerpc/kernel/suspend.c        |   1
-rw-r--r--  arch/powerpc/kernel/sysfs.c          |  15
-rw-r--r--  arch/powerpc/kernel/vmlinux.lds.S    |  31
13 files changed, 554 insertions, 189 deletions
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index bf0b1fd0ec3..1a4094704b1 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -74,6 +74,7 @@ obj-y += time.o prom.o traps.o setup-common.o \
misc_$(CONFIG_WORD_SIZE).o
obj-$(CONFIG_PPC32) += entry_32.o setup_32.o
obj-$(CONFIG_PPC64) += dma_64.o iommu.o
+obj-$(CONFIG_KGDB) += kgdb.o
obj-$(CONFIG_PPC_MULTIPLATFORM) += prom_init.o
obj-$(CONFIG_MODULES) += ppc_ksyms.o
obj-$(CONFIG_BOOTX_TEXT) += btext.o
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index f7f3c215d06..b936a1dd0a5 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -355,6 +355,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.icache_bsize = 128,
.dcache_bsize = 128,
.machine_check = machine_check_generic,
+ .oprofile_cpu_type = "ppc64/compat-power5+",
.platform = "power5+",
},
{ /* Power6 */
@@ -386,6 +387,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.icache_bsize = 128,
.dcache_bsize = 128,
.machine_check = machine_check_generic,
+ .oprofile_cpu_type = "ppc64/compat-power6",
.platform = "power6",
},
{ /* 2.06-compliant processor, i.e. Power7 "architected" mode */
@@ -397,6 +399,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.icache_bsize = 128,
.dcache_bsize = 128,
.machine_check = machine_check_generic,
+ .oprofile_cpu_type = "ppc64/compat-power7",
.platform = "power7",
},
{ /* Power7 */
@@ -1629,6 +1632,23 @@ struct cpu_spec * __init identify_cpu(unsigned long offset, unsigned int pvr)
t->cpu_setup = s->cpu_setup;
t->cpu_restore = s->cpu_restore;
t->platform = s->platform;
+ /*
+ * If we have passed through this logic once
+ * before and have pulled the default case
+ * because the real PVR was not found inside
+ * cpu_specs[], then we are possibly running in
+ * compatibility mode. In that case, let
+ * oprofile know which set of compatibility
+ * counters to pull from by making sure the
+ * oprofile_cpu_type string is set to that of
+ * compatibility mode. If oprofile_cpu_type
+ * already has a value, then we are possibly
+ * overriding a real PVR with a logical one,
+ * and, in that case, keep the current value
+ * for oprofile_cpu_type. (A stand-alone sketch
+ * of this fallback follows the hunk.)
+ */
+ if (t->oprofile_cpu_type == NULL)
+ t->oprofile_cpu_type = s->oprofile_cpu_type;
} else
*t = *s;
*PTRRELOC(&cur_cpu_spec) = &the_cpu_spec;
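The guard added at the end of this cputable.c hunk is order-dependent: identify_cpu() can run once with the real PVR and again with a logical ("architected") PVR. A stand-alone mock of the fallback semantics, using only the two fields involved (an illustrative sketch, not kernel code):

#include <stdio.h>

/* Hypothetical stand-in for the relevant cpu_spec fields. */
struct spec {
	const char *oprofile_cpu_type;
	const char *platform;
};

static void merge_logical(struct spec *t, const struct spec *s)
{
	t->platform = s->platform;
	/* Only inherit the "compat-*" string when no real PVR set one. */
	if (t->oprofile_cpu_type == NULL)
		t->oprofile_cpu_type = s->oprofile_cpu_type;
}

int main(void)
{
	struct spec cur = { NULL, NULL };
	struct spec compat = { "ppc64/compat-power6", "power6" };

	merge_logical(&cur, &compat);	/* no real PVR match: adopt compat string */
	printf("%s / %s\n", cur.oprofile_cpu_type, cur.platform);

	cur.oprofile_cpu_type = "ppc64/power6";	/* real PVR already identified */
	merge_logical(&cur, &compat);		/* compat string must not override it */
	printf("%s / %s\n", cur.oprofile_cpu_type, cur.platform);
	return 0;
}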
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index c4268500e85..3cb52fa0eda 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -151,16 +151,11 @@ skpinv: addi r6,r6,1 /* Increment */
/* Invalidate TLB0 */
li r6,0x04
tlbivax 0,r6
-#ifdef CONFIG_SMP
- tlbsync
-#endif
+ TLBSYNC
/* Invalidate TLB1 */
li r6,0x0c
tlbivax 0,r6
-#ifdef CONFIG_SMP
- tlbsync
-#endif
- msync
+ TLBSYNC
/* 3. Setup a temp mapping and jump to it */
andi. r5, r3, 0x1 /* Find an entry not used and is non-zero */
@@ -238,10 +233,7 @@ skpinv: addi r6,r6,1 /* Increment */
/* Invalidate TLB1 */
li r9,0x0c
tlbivax 0,r9
-#ifdef CONFIG_SMP
- tlbsync
-#endif
- msync
+ TLBSYNC
/* 6. Setup KERNELBASE mapping in TLB1[0] */
lis r6,0x1000 /* Set MAS0(TLBSEL) = TLB1(1), ESEL = 0 */
@@ -283,10 +275,7 @@ skpinv: addi r6,r6,1 /* Increment */
/* Invalidate TLB1 */
li r9,0x0c
tlbivax 0,r9
-#ifdef CONFIG_SMP
- tlbsync
-#endif
- msync
+ TLBSYNC
/* Establish the interrupt vector offsets */
SET_IVOR(0, CriticalInput);
@@ -483,90 +472,16 @@ interrupt_base:
/* Data Storage Interrupt */
START_EXCEPTION(DataStorage)
- mtspr SPRN_SPRG0, r10 /* Save some working registers */
- mtspr SPRN_SPRG1, r11
- mtspr SPRN_SPRG4W, r12
- mtspr SPRN_SPRG5W, r13
- mfcr r11
- mtspr SPRN_SPRG7W, r11
-
- /*
- * Check if it was a store fault, if not then bail
- * because a user tried to access a kernel or
- * read-protected page. Otherwise, get the
- * offending address and handle it.
- */
- mfspr r10, SPRN_ESR
- andis. r10, r10, ESR_ST@h
- beq 2f
-
- mfspr r10, SPRN_DEAR /* Get faulting address */
-
- /* If we are faulting a kernel address, we have to use the
- * kernel page tables.
- */
- lis r11, PAGE_OFFSET@h
- cmplw 0, r10, r11
- bge 2f
-
- /* Get the PGD for the current thread */
-3:
- mfspr r11,SPRN_SPRG3
- lwz r11,PGDIR(r11)
-4:
- FIND_PTE
-
- /* Are _PAGE_USER & _PAGE_RW set & _PAGE_HWWRITE not? */
- andi. r13, r11, _PAGE_RW|_PAGE_USER|_PAGE_HWWRITE
- cmpwi 0, r13, _PAGE_RW|_PAGE_USER
- bne 2f /* Bail if not */
-
- /* Update 'changed'. */
- ori r11, r11, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE
- stw r11, PTE_FLAGS_OFFSET(r12) /* Update Linux page table */
-
- /* MAS2 not updated as the entry does exist in the tlb, this
- fault taken to detect state transition (eg: COW -> DIRTY)
- */
- andi. r11, r11, _PAGE_HWEXEC
- rlwimi r11, r11, 31, 27, 27 /* SX <- _PAGE_HWEXEC */
- ori r11, r11, (MAS3_UW|MAS3_SW|MAS3_UR|MAS3_SR)@l /* set static perms */
-
- /* update search PID in MAS6, AS = 0 */
- mfspr r12, SPRN_PID0
- slwi r12, r12, 16
- mtspr SPRN_MAS6, r12
-
- /* find the TLB index that caused the fault. It has to be here. */
- tlbsx 0, r10
-
- /* only update the perm bits, assume the RPN is fine */
- mfspr r12, SPRN_MAS3
- rlwimi r12, r11, 0, 20, 31
- mtspr SPRN_MAS3,r12
- tlbwe
-
- /* Done...restore registers and get out of here. */
- mfspr r11, SPRN_SPRG7R
- mtcr r11
- mfspr r13, SPRN_SPRG5R
- mfspr r12, SPRN_SPRG4R
- mfspr r11, SPRN_SPRG1
- mfspr r10, SPRN_SPRG0
- rfi /* Force context change */
-
-2:
- /*
- * The bailout. Restore registers to pre-exception conditions
- * and call the heavyweights to help us out.
- */
- mfspr r11, SPRN_SPRG7R
- mtcr r11
- mfspr r13, SPRN_SPRG5R
- mfspr r12, SPRN_SPRG4R
- mfspr r11, SPRN_SPRG1
- mfspr r10, SPRN_SPRG0
- b data_access
+ NORMAL_EXCEPTION_PROLOG
+ mfspr r5,SPRN_ESR /* Grab the ESR, save it, pass arg3 */
+ stw r5,_ESR(r11)
+ mfspr r4,SPRN_DEAR /* Grab the DEAR, save it, pass arg2 */
+ andis. r10,r5,(ESR_ILK|ESR_DLK)@h
+ bne 1f
+ EXC_XFER_EE_LITE(0x0300, handle_page_fault)
+1:
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ EXC_XFER_EE_LITE(0x0300, CacheLockingException)
/* Instruction Storage Interrupt */
INSTRUCTION_STORAGE_EXCEPTION
@@ -645,15 +560,30 @@ interrupt_base:
lwz r11,PGDIR(r11)
4:
+ /* Mask of required permission bits. Note that while we
+ * do copy ESR:ST to the _PAGE_RW position (trying to write
+ * to an RO page is pretty common), we don't do the same
+ * with _PAGE_DIRTY. We could, but it's a fairly rare
+ * event so I'd rather take the overhead when it happens
+ * than add an instruction here. We should measure whether
+ * the whole thing is worth it in the first place, as we
+ * could avoid loading SPRN_ESR completely...
+ *
+ * TODO: Is it worth doing that mfspr & rlwimi in the first
+ * place, or can we save a couple of instructions here?
+ * (A worked check of the ESR:ST bit trick follows this
+ * file's diff.)
+ */
+ mfspr r12,SPRN_ESR
+ li r13,_PAGE_PRESENT|_PAGE_ACCESSED
+ rlwimi r13,r12,11,29,29
+
FIND_PTE
- andi. r13, r11, _PAGE_PRESENT /* Is the page present? */
- beq 2f /* Bail if not present */
+ andc. r13,r13,r11 /* Check permission */
+ bne 2f /* Bail if permission mismatch */
#ifdef CONFIG_PTE_64BIT
lwz r13, 0(r12)
#endif
- ori r11, r11, _PAGE_ACCESSED
- stw r11, PTE_FLAGS_OFFSET(r12)
/* Jump to common tlb load */
b finish_tlb_load
@@ -667,7 +597,7 @@ interrupt_base:
mfspr r12, SPRN_SPRG4R
mfspr r11, SPRN_SPRG1
mfspr r10, SPRN_SPRG0
- b data_access
+ b DataStorage
/* Instruction TLB Error Interrupt */
/*
@@ -705,15 +635,16 @@ interrupt_base:
lwz r11,PGDIR(r11)
4:
+ /* Make up the required permissions */
+ li r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_HWEXEC
+
FIND_PTE
- andi. r13, r11, _PAGE_PRESENT /* Is the page present? */
- beq 2f /* Bail if not present */
+ andc. r13,r13,r11 /* Check permission */
+ bne 2f /* Bail if permission mismatch */
#ifdef CONFIG_PTE_64BIT
lwz r13, 0(r12)
#endif
- ori r11, r11, _PAGE_ACCESSED
- stw r11, PTE_FLAGS_OFFSET(r12)
/* Jump to common TLB load point */
b finish_tlb_load
@@ -768,29 +699,13 @@ interrupt_base:
* Local functions
*/
- /*
- * Data TLB exceptions will bail out to this point
- * if they can't resolve the lightweight TLB fault.
- */
-data_access:
- NORMAL_EXCEPTION_PROLOG
- mfspr r5,SPRN_ESR /* Grab the ESR, save it, pass arg3 */
- stw r5,_ESR(r11)
- mfspr r4,SPRN_DEAR /* Grab the DEAR, save it, pass arg2 */
- andis. r10,r5,(ESR_ILK|ESR_DLK)@h
- bne 1f
- EXC_XFER_EE_LITE(0x0300, handle_page_fault)
-1:
- addi r3,r1,STACK_FRAME_OVERHEAD
- EXC_XFER_EE_LITE(0x0300, CacheLockingException)
-
/*
-
* Both the instruction and data TLB miss get to this
* point to load the TLB.
* r10 - EA of fault
* r11 - TLB (info from Linux PTE)
- * r12, r13 - available to use
+ * r12 - available to use
+ * r13 - upper bits of PTE (if PTE_64BIT) or available to use
* CR5 - results of addr >= PAGE_OFFSET
* MAS0, MAS1 - loaded with proper value when we get here
* MAS2, MAS3 - will need additional info from Linux PTE
@@ -812,20 +727,14 @@ finish_tlb_load:
#endif
mtspr SPRN_MAS2, r12
- bge 5, 1f
-
- /* is user addr */
- andi. r12, r11, (_PAGE_USER | _PAGE_HWWRITE | _PAGE_HWEXEC)
+ li r10, (_PAGE_HWEXEC | _PAGE_PRESENT)
+ rlwimi r10, r11, 31, 29, 29 /* extract _PAGE_DIRTY into SW */
+ and r12, r11, r10
andi. r10, r11, _PAGE_USER /* Test for _PAGE_USER */
- srwi r10, r12, 1
- or r12, r12, r10 /* Copy user perms into supervisor */
- iseleq r12, 0, r12
- b 2f
-
- /* is kernel addr */
-1: rlwinm r12, r11, 31, 29, 29 /* Extract _PAGE_HWWRITE into SW */
- ori r12, r12, (MAS3_SX | MAS3_SR)
-
+ slwi r10, r12, 1
+ or r10, r10, r12
+ iseleq r12, r12, r10
+
#ifdef CONFIG_PTE_64BIT
2: rlwimi r12, r13, 24, 0, 7 /* grab RPN[32:39] */
rlwimi r12, r11, 24, 8, 19 /* grab RPN[40:51] */
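The ESR:ST trick in the DataTLBError hunk above hinges on one rotate: rlwimi r13,r12,11,29,29 rotates the ESR left by 11 bits, which lands ESR[ST] (0x00800000) exactly on _PAGE_RW (0x00000004) in the required-permissions mask, so a store fault demands the RW bit with no extra instruction. A quick stand-alone check of that bit math (constants as in the Book-E headers; a sketch, not kernel code):

#include <assert.h>
#include <stdint.h>

#define ESR_ST   0x00800000u	/* store-operation bit in the ESR */
#define PAGE_RW  0x00000004u	/* _PAGE_RW in the Book-E PTE layout */

/* 32-bit rotate-left, the core of rlwimi (n must be 1..31 here). */
static uint32_t rotl32(uint32_t v, unsigned int n)
{
	return (v << n) | (v >> (32 - n));
}

int main(void)
{
	/* rlwimi r13,r12,11,29,29 == r13 |= rotl(r12, 11) & PAGE_RW */
	assert((rotl32(ESR_ST, 11) & PAGE_RW) == PAGE_RW);
	assert((rotl32(0, 11) & PAGE_RW) == 0);	/* loads don't demand RW */
	return 0;
}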
diff --git a/arch/powerpc/kernel/idle.c b/arch/powerpc/kernel/idle.c
index c3cf0e8f3ac..d308a9f70f1 100644
--- a/arch/powerpc/kernel/idle.c
+++ b/arch/powerpc/kernel/idle.c
@@ -60,7 +60,7 @@ void cpu_idle(void)
set_thread_flag(TIF_POLLING_NRFLAG);
while (1) {
- tick_nohz_stop_sched_tick();
+ tick_nohz_stop_sched_tick(1);
while (!need_resched() && !cpu_should_die()) {
ppc64_runlatch_off();
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index 8c68ee9e5d1..2385f68c175 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -186,7 +186,8 @@ static unsigned long iommu_range_alloc(struct device *dev,
static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
void *page, unsigned int npages,
enum dma_data_direction direction,
- unsigned long mask, unsigned int align_order)
+ unsigned long mask, unsigned int align_order,
+ struct dma_attrs *attrs)
{
unsigned long entry, flags;
dma_addr_t ret = DMA_ERROR_CODE;
@@ -205,7 +206,7 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
/* Put the TCEs in the HW table */
ppc_md.tce_build(tbl, entry, npages, (unsigned long)page & IOMMU_PAGE_MASK,
- direction);
+ direction, attrs);
/* Flush/invalidate TLB caches if necessary */
@@ -336,7 +337,8 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
npages, entry, dma_addr);
/* Insert into HW table */
- ppc_md.tce_build(tbl, entry, npages, vaddr & IOMMU_PAGE_MASK, direction);
+ ppc_md.tce_build(tbl, entry, npages, vaddr & IOMMU_PAGE_MASK,
+ direction, attrs);
/* If we are in an open segment, try merging */
if (segstart != s) {
@@ -573,7 +575,8 @@ dma_addr_t iommu_map_single(struct device *dev, struct iommu_table *tbl,
align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;
dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
- mask >> IOMMU_PAGE_SHIFT, align);
+ mask >> IOMMU_PAGE_SHIFT, align,
+ attrs);
if (dma_handle == DMA_ERROR_CODE) {
if (printk_ratelimit()) {
printk(KERN_INFO "iommu_alloc failed, "
@@ -642,7 +645,7 @@ void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
nio_pages = size >> IOMMU_PAGE_SHIFT;
io_order = get_iommu_order(size);
mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
- mask >> IOMMU_PAGE_SHIFT, io_order);
+ mask >> IOMMU_PAGE_SHIFT, io_order, NULL);
if (mapping == DMA_ERROR_CODE) {
free_pages((unsigned long)ret, order);
return NULL;
diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
new file mode 100644
index 00000000000..b4fdf2f2743
--- /dev/null
+++ b/arch/powerpc/kernel/kgdb.c
@@ -0,0 +1,410 @@
+/*
+ * PowerPC backend to the KGDB stub.
+ *
+ * 1998 (c) Michael AK Tesch (tesch@cs.wisc.edu)
+ * Copyright (C) 2003 Timesys Corporation.
+ * Copyright (C) 2004-2006 MontaVista Software, Inc.
+ * PPC64 Mods (C) 2005 Frank Rowand (frowand@mvista.com)
+ * PPC32 support restored by Vitaly Wool <vwool@ru.mvista.com> and
+ * Sergei Shtylyov <sshtylyov@ru.mvista.com>
+ * Copyright (C) 2007-2008 Wind River Systems, Inc.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/kgdb.h>
+#include <linux/smp.h>
+#include <linux/signal.h>
+#include <linux/ptrace.h>
+#include <asm/current.h>
+#include <asm/processor.h>
+#include <asm/machdep.h>
+
+/*
+ * This table contains the mapping between PowerPC hardware trap types, and
+ * signals, which are primarily what GDB understands. GDB and the kernel
+ * don't always agree on values, so we use constants taken from gdb-6.2.
+ */
+static struct hard_trap_info
+{
+ unsigned int tt; /* Trap type code for powerpc */
+ unsigned char signo; /* Signal that we map this trap into */
+} hard_trap_info[] = {
+ { 0x0100, 0x02 /* SIGINT */ }, /* system reset */
+ { 0x0200, 0x0b /* SIGSEGV */ }, /* machine check */
+ { 0x0300, 0x0b /* SIGSEGV */ }, /* data access */
+ { 0x0400, 0x0b /* SIGSEGV */ }, /* instruction access */
+ { 0x0500, 0x02 /* SIGINT */ }, /* external interrupt */
+ { 0x0600, 0x0a /* SIGBUS */ }, /* alignment */
+ { 0x0700, 0x05 /* SIGTRAP */ }, /* program check */
+ { 0x0800, 0x08 /* SIGFPE */ }, /* fp unavailable */
+ { 0x0900, 0x0e /* SIGALRM */ }, /* decrementer */
+ { 0x0c00, 0x14 /* SIGCHLD */ }, /* system call */
+#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
+ { 0x2002, 0x05 /* SIGTRAP */ }, /* debug */
+#if defined(CONFIG_FSL_BOOKE)
+ { 0x2010, 0x08 /* SIGFPE */ }, /* spe unavailable */
+ { 0x2020, 0x08 /* SIGFPE */ }, /* spe unavailable */
+ { 0x2030, 0x08 /* SIGFPE */ }, /* spe fp data */
+ { 0x2040, 0x08 /* SIGFPE */ }, /* spe fp data */
+ { 0x2050, 0x08 /* SIGFPE */ }, /* spe fp round */
+ { 0x2060, 0x0e /* SIGILL */ }, /* performance monitor */
+ { 0x2900, 0x08 /* SIGFPE */ }, /* apu unavailable */
+ { 0x3100, 0x0e /* SIGALRM */ }, /* fixed interval timer */
+ { 0x3200, 0x02 /* SIGINT */ }, /* watchdog */
+#else /* ! CONFIG_FSL_BOOKE */
+ { 0x1000, 0x0e /* SIGALRM */ }, /* prog interval timer */
+ { 0x1010, 0x0e /* SIGALRM */ }, /* fixed interval timer */
+ { 0x1020, 0x02 /* SIGINT */ }, /* watchdog */
+ { 0x2010, 0x08 /* SIGFPE */ }, /* fp unavailable */
+ { 0x2020, 0x08 /* SIGFPE */ }, /* ap unavailable */
+#endif
+#else /* ! (defined(CONFIG_40x) || defined(CONFIG_BOOKE)) */
+ { 0x0d00, 0x05 /* SIGTRAP */ }, /* single-step */
+#if defined(CONFIG_8xx)
+ { 0x1000, 0x04 /* SIGILL */ }, /* software emulation */
+#else /* ! CONFIG_8xx */
+ { 0x0f00, 0x04 /* SIGILL */ }, /* performance monitor */
+ { 0x0f20, 0x08 /* SIGFPE */ }, /* altivec unavailable */
+ { 0x1300, 0x05 /* SIGTRAP */ }, /* instruction address break */
+#if defined(CONFIG_PPC64)
+ { 0x1200, 0x05 /* SIGILL */ }, /* system error */
+ { 0x1500, 0x04 /* SIGILL */ }, /* soft patch */
+ { 0x1600, 0x04 /* SIGILL */ }, /* maintenance */
+ { 0x1700, 0x08 /* SIGFPE */ }, /* altivec assist */
+ { 0x1800, 0x04 /* SIGILL */ }, /* thermal */
+#else /* ! CONFIG_PPC64 */
+ { 0x1400, 0x02 /* SIGINT */ }, /* SMI */
+ { 0x1600, 0x08 /* SIGFPE */ }, /* altivec assist */
+ { 0x1700, 0x04 /* SIGILL */ }, /* TAU */
+ { 0x2000, 0x05 /* SIGTRAP */ }, /* run mode */
+#endif
+#endif
+#endif
+ { 0x0000, 0x00 } /* Must be last */
+};
+
+static int computeSignal(unsigned int tt)
+{
+ struct hard_trap_info *ht;
+
+ for (ht = hard_trap_info; ht->tt && ht->signo; ht++)
+ if (ht->tt == tt)
+ return ht->signo;
+
+ return SIGHUP; /* default for things we don't know about */
+}
+
+static int kgdb_call_nmi_hook(struct pt_regs *regs)
+{
+ kgdb_nmicallback(raw_smp_processor_id(), regs);
+ return 0;
+}
+
+#ifdef CONFIG_SMP
+void kgdb_roundup_cpus(unsigned long flags)
+{
+ smp_send_debugger_break(MSG_ALL_BUT_SELF);
+}
+#endif
+
+/* KGDB functions to use existing PowerPC64 hooks. */
+static int kgdb_debugger(struct pt_regs *regs)
+{
+ return kgdb_handle_exception(0, computeSignal(TRAP(regs)), 0, regs);
+}
+
+static int kgdb_handle_breakpoint(struct pt_regs *regs)
+{
+ if (user_mode(regs))
+ return 0;
+
+ if (kgdb_handle_exception(0, SIGTRAP, 0, regs) != 0)
+ return 0;
+
+ if (*(u32 *) (regs->nip) == *(u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
+ regs->nip += 4;
+
+ return 1;
+}
+
+static int kgdb_singlestep(struct pt_regs *regs)
+{
+ struct thread_info *thread_info, *exception_thread_info;
+
+ if (user_mode(regs))
+ return 0;
+
+ /*
+ * On Book E and perhaps other processors, singlestep is handled on
+ * the critical exception stack. This causes current_thread_info()
+ * to fail, since it locates the thread_info by masking off
+ * the low bits of the current stack pointer. We work around
+ * this issue by copying the thread_info from the kernel stack
+ * before calling kgdb_handle_exception, and copying it back
+ * afterwards. On most processors the copy is avoided since
+ * exception_thread_info == thread_info. (A small sketch of the
+ * masking follows this file's diff.)
+ */
+ thread_info = (struct thread_info *)(regs->gpr[1] & ~(THREAD_SIZE-1));
+ exception_thread_info = current_thread_info();
+
+ if (thread_info != exception_thread_info)
+ memcpy(exception_thread_info, thread_info, sizeof *thread_info);
+
+ kgdb_handle_exception(0, SIGTRAP, 0, regs);
+
+ if (thread_info != exception_thread_info)
+ memcpy(thread_info, exception_thread_info, sizeof *thread_info);
+
+ return 1;
+}
+
+static int kgdb_iabr_match(struct pt_regs *regs)
+{
+ if (user_mode(regs))
+ return 0;
+
+ if (kgdb_handle_exception(0, computeSignal(TRAP(regs)), 0, regs) != 0)
+ return 0;
+ return 1;
+}
+
+static int kgdb_dabr_match(struct pt_regs *regs)
+{
+ if (user_mode(regs))
+ return 0;
+
+ if (kgdb_handle_exception(0, computeSignal(TRAP(regs)), 0, regs) != 0)
+ return 0;
+ return 1;
+}
+
+#define PACK64(ptr, src) do { *(ptr++) = (src); } while (0)
+
+#define PACK32(ptr, src) do { \
+ u32 *ptr32; \
+ ptr32 = (u32 *)ptr; \
+ *(ptr32++) = (src); \
+ ptr = (unsigned long *)ptr32; \
+ } while (0)
+
+
+void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
+{
+ unsigned long *ptr = gdb_regs;
+ int reg;
+
+ memset(gdb_regs, 0, NUMREGBYTES);
+
+ for (reg = 0; reg < 32; reg++)
+ PACK64(ptr, regs->gpr[reg]);
+
+#ifdef CONFIG_FSL_BOOKE
+#ifdef CONFIG_SPE
+ for (reg = 0; reg < 32; reg++)
+ PACK64(ptr, current->thread.evr[reg]);
+#else
+ ptr += 32;
+#endif
+#else
+ /* fp registers not used by kernel, leave zero */
+ ptr += 32 * 8 / sizeof(long);
+#endif
+
+ PACK64(ptr, regs->nip);
+ PACK64(ptr, regs->msr);
+ PACK32(ptr, regs->ccr);
+ PACK64(ptr, regs->link);
+ PACK64(ptr, regs->ctr);
+ PACK32(ptr, regs->xer);
+
+ BUG_ON((unsigned long)ptr >
+ (unsigned long)(((void *)gdb_regs) + NUMREGBYTES));
+}
+
+void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
+{
+ struct pt_regs *regs = (struct pt_regs *)(p->thread.ksp +
+ STACK_FRAME_OVERHEAD);
+ unsigned long *ptr = gdb_regs;
+ int reg;
+
+ memset(gdb_regs, 0, NUMREGBYTES);
+
+ /* Regs GPR0-2 */
+ for (reg = 0; reg < 3; reg++)
+ PACK64(ptr, regs->gpr[reg]);
+
+ /* Regs GPR3-13 are caller saved, not in regs->gpr[] */
+ ptr += 11;
+
+ /* Regs GPR14-31 */
+ for (reg = 14; reg < 32; reg++)
+ PACK64(ptr, regs->gpr[reg]);
+
+#ifdef CONFIG_FSL_BOOKE
+#ifdef CONFIG_SPE
+ for (reg = 0; reg < 32; reg++)
+ PACK64(ptr, p->thread.evr[reg]);
+#else
+ ptr += 32;
+#endif
+#else
+ /* fp registers not used by kernel, leave zero */
+ ptr += 32 * 8 / sizeof(long);
+#endif
+
+ PACK64(ptr, regs->nip);
+ PACK64(ptr, regs->msr);
+ PACK32(ptr, regs->ccr);
+ PACK64(ptr, regs->link);
+ PACK64(ptr, regs->ctr);
+ PACK32(ptr, regs->xer);
+
+ BUG_ON((unsigned long)ptr >
+ (unsigned long)(((void *)gdb_regs) + NUMREGBYTES));
+}
+
+#define UNPACK64(dest, ptr) do { dest = *(ptr++); } while (0)
+
+#define UNPACK32(dest, ptr) do { \
+ u32 *ptr32; \
+ ptr32 = (u32 *)ptr; \
+ dest = *(ptr32++); \
+ ptr = (unsigned long *)ptr32; \
+ } while (0)
+
+void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs)
+{
+ unsigned long *ptr = gdb_regs;
+ int reg;
+#ifdef CONFIG_SPE
+ union {
+ u32 v32[2];
+ u64 v64;
+ } acc;
+#endif
+
+ for (reg = 0; reg < 32; reg++)
+ UNPACK64(regs->gpr[reg], ptr);
+
+#ifdef CONFIG_FSL_BOOKE
+#ifdef CONFIG_SPE
+ for (reg = 0; reg < 32; reg++)
+ UNPACK64(current->thread.evr[reg], ptr);
+#else
+ ptr += 32;
+#endif
+#else
+ /* fp registers not used by kernel, leave zero */
+ ptr += 32 * 8 / sizeof(long);
+#endif
+
+ UNPACK64(regs->nip, ptr);
+ UNPACK64(regs->msr, ptr);
+ UNPACK32(regs->ccr, ptr);
+ UNPACK64(regs->link, ptr);
+ UNPACK64(regs->ctr, ptr);
+ UNPACK32(regs->xer, ptr);
+
+ BUG_ON((unsigned long)ptr >
+ (unsigned long)(((void *)gdb_regs) + NUMREGBYTES));
+}
+
+/*
+ * This function does PowerPC-specific processing for interfacing to gdb.
+ */
+int kgdb_arch_handle_exception(int vector, int signo, int err_code,
+ char *remcom_in_buffer, char *remcom_out_buffer,
+ struct pt_regs *linux_regs)
+{
+ char *ptr = &remcom_in_buffer[1];
+ unsigned long addr;
+
+ switch (remcom_in_buffer[0]) {
+ /*
+ * sAA..AA Step one instruction from AA..AA
+ * This will return an error to gdb.
+ */
+ case 's':
+ case 'c':
+ /* handle the optional parameter */
+ if (kgdb_hex2long(&ptr, &addr))
+ linux_regs->nip = addr;
+
+ atomic_set(&kgdb_cpu_doing_single_step, -1);
+ /* set the trace bit if we're stepping */
+ if (remcom_in_buffer[0] == 's') {
+#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
+ mtspr(SPRN_DBCR0,
+ mfspr(SPRN_DBCR0) | DBCR0_IC | DBCR0_IDM);
+ linux_regs->msr |= MSR_DE;
+#else
+ linux_regs->msr |= MSR_SE;
+#endif
+ kgdb_single_step = 1;
+ if (kgdb_contthread)
+ atomic_set(&kgdb_cpu_doing_single_step,
+ raw_smp_processor_id());
+ }
+ return 0;
+ }
+
+ return -1;
+}
+
+/*
+ * Global data
+ */
+struct kgdb_arch arch_kgdb_ops = {
+ .gdb_bpt_instr = {0x7d, 0x82, 0x10, 0x08},
+};
+
+static int kgdb_not_implemented(struct pt_regs *regs)
+{
+ return 0;
+}
+
+static void *old__debugger_ipi;
+static void *old__debugger;
+static void *old__debugger_bpt;
+static void *old__debugger_sstep;
+static void *old__debugger_iabr_match;
+static void *old__debugger_dabr_match;
+static void *old__debugger_fault_handler;
+
+int kgdb_arch_init(void)
+{
+ old__debugger_ipi = __debugger_ipi;
+ old__debugger = __debugger;
+ old__debugger_bpt = __debugger_bpt;
+ old__debugger_sstep = __debugger_sstep;
+ old__debugger_iabr_match = __debugger_iabr_match;
+ old__debugger_dabr_match = __debugger_dabr_match;
+ old__debugger_fault_handler = __debugger_fault_handler;
+
+ __debugger_ipi = kgdb_call_nmi_hook;
+ __debugger = kgdb_debugger;
+ __debugger_bpt = kgdb_handle_breakpoint;
+ __debugger_sstep = kgdb_singlestep;
+ __debugger_iabr_match = kgdb_iabr_match;
+ __debugger_dabr_match = kgdb_dabr_match;
+ __debugger_fault_handler = kgdb_not_implemented;
+
+ return 0;
+}
+
+void kgdb_arch_exit(void)
+{
+ __debugger_ipi = old__debugger_ipi;
+ __debugger = old__debugger;
+ __debugger_bpt = old__debugger_bpt;
+ __debugger_sstep = old__debugger_sstep;
+ __debugger_iabr_match = old__debugger_iabr_match;
+ __debugger_dabr_match = old__debugger_dabr_match;
+ __debugger_fault_handler = old__debugger_fault_handler;
+}
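The thread_info juggling in kgdb_singlestep() comes down to how current_thread_info() works on 32-bit powerpc: thread_info sits at the base of the kernel stack and is found by masking the stack pointer, so code running on a separate critical-exception stack masks to the wrong base. A stand-alone sketch of that masking (THREAD_SIZE and the addresses are illustrative assumptions, not kernel values):

#include <stdint.h>
#include <stdio.h>

#define THREAD_SIZE 8192UL	/* assumed ppc32 value, for illustration */

/* Mirror of the masking done in kgdb_singlestep() above. */
static uintptr_t stack_base(uintptr_t sp)
{
	return sp & ~(THREAD_SIZE - 1);
}

int main(void)
{
	uintptr_t kernel_sp   = 0xc0a02f40UL;	/* hypothetical kernel stack SP */
	uintptr_t critical_sp = 0xc0ffe2a0UL;	/* hypothetical critical stack SP */

	/* Different stacks mask to different bases, hence the memcpy() of
	 * thread_info before and after kgdb_handle_exception(). */
	printf("kernel stack base:   %#lx\n", (unsigned long)stack_base(kernel_sp));
	printf("critical stack base: %#lx\n", (unsigned long)stack_base(critical_sp));
	return 0;
}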
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index 063cdd41304..224e9a11765 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -598,6 +598,7 @@ void __devinit pci_process_bridge_OF_ranges(struct pci_controller *hose,
res->start = pci_addr;
break;
case 2: /* PCI Memory space */
+ case 3: /* PCI 64-bit memory space */
printk(KERN_INFO
" MEM 0x%016llx..0x%016llx -> 0x%016llx %s\n",
cpu_addr, cpu_addr + size - 1, pci_addr,
diff --git a/arch/powerpc/kernel/prom_parse.c b/arch/powerpc/kernel/prom_parse.c
index 90eb3a3e383..bc1fb27368a 100644
--- a/arch/powerpc/kernel/prom_parse.c
+++ b/arch/powerpc/kernel/prom_parse.c
@@ -128,12 +128,35 @@ static void of_bus_pci_count_cells(struct device_node *np,
*sizec = 2;
}
+static unsigned int of_bus_pci_get_flags(const u32 *addr)
+{
+ unsigned int flags = 0;
+ u32 w = addr[0];
+
+ switch((w >> 24) & 0x03) {
+ case 0x01:
+ flags |= IORESOURCE_IO;
+ break;
+ case 0x02: /* 32 bits */
+ case 0x03: /* 64 bits */
+ flags |= IORESOURCE_MEM;
+ break;
+ }
+ if (w & 0x40000000)
+ flags |= IORESOURCE_PREFETCH;
+ return flags;
+}
+
static u64 of_bus_pci_map(u32 *addr, const u32 *range, int na, int ns, int pna)
{
u64 cp, s, da;
+ unsigned int af, rf;
+
+ af = of_bus_pci_get_flags(addr);
+ rf = of_bus_pci_get_flags(range);
/* Check address type match */
- if ((addr[0] ^ range[0]) & 0x03000000)
+ if ((af ^ rf) & (IORESOURCE_MEM | IORESOURCE_IO))
return OF_BAD_ADDR;
/* Read address values, skipping high cell */
@@ -153,25 +176,6 @@ static int of_bus_pci_translate(u32 *addr, u64 offset, int na)
return of_bus_default_translate(addr + 1, offset, na - 1);
}
-static unsigned int of_bus_pci_get_flags(const u32 *addr)
-{
- unsigned int flags = 0;
- u32 w = addr[0];
-
- switch((w >> 24) & 0x03) {
- case 0x01:
- flags |= IORESOURCE_IO;
- break;
- case 0x02: /* 32 bits */
- case 0x03: /* 64 bits */
- flags |= IORESOURCE_MEM;
- break;
- }
- if (w & 0x40000000)
- flags |= IORESOURCE_PREFETCH;
- return flags;
-}
-
const u32 *of_get_pci_address(struct device_node *dev, int bar_no, u64 *size,
unsigned int *flags)
{
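Moving of_bus_pci_get_flags() above of_bus_pci_map() lets the type check compare decoded IORESOURCE flags instead of raw phys.hi bits: 32-bit (ss=10) and 64-bit (ss=11) memory both decode to IORESOURCE_MEM, so a 64-bit address can now match a 32-bit range, which the old (addr[0] ^ range[0]) & 0x03000000 test rejected. A stand-alone decode of a few phys.hi words (a sketch; the IORESOURCE_* values are illustrative, as in the ioport.h of that era):

#include <assert.h>
#include <stdint.h>

#define IORESOURCE_IO        0x00000100u	/* illustrative values */
#define IORESOURCE_MEM       0x00000200u
#define IORESOURCE_PREFETCH  0x00001000u

/* Same logic as of_bus_pci_get_flags() above, on the phys.hi cell. */
static unsigned int pci_flags(uint32_t w)
{
	unsigned int flags = 0;

	switch ((w >> 24) & 0x03) {	/* the "ss" space-code bits */
	case 0x01:
		flags |= IORESOURCE_IO;
		break;
	case 0x02:	/* 32-bit memory */
	case 0x03:	/* 64-bit memory */
		flags |= IORESOURCE_MEM;
		break;
	}
	if (w & 0x40000000)		/* the "p" (prefetchable) bit */
		flags |= IORESOURCE_PREFETCH;
	return flags;
}

int main(void)
{
	assert(pci_flags(0x81000000) == IORESOURCE_IO);
	assert(pci_flags(0x82000000) == IORESOURCE_MEM);
	assert(pci_flags(0xc3000000) == (IORESOURCE_MEM | IORESOURCE_PREFETCH));
	/* 32-bit and 64-bit memory now compare equal as resource types: */
	assert(pci_flags(0x82000000) == pci_flags(0x83000000));
	return 0;
}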
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 4efebe88e64..066e65c59b5 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -43,10 +43,6 @@
#define DBG(fmt...)
-#if defined CONFIG_KGDB
-#include <asm/kgdb.h>
-#endif
-
extern void bootx_init(unsigned long r4, unsigned long phys);
int boot_cpuid;
@@ -302,18 +298,6 @@ void __init setup_arch(char **cmdline_p)
xmon_setup();
-#if defined(CONFIG_KGDB)
- if (ppc_md.kgdb_map_scc)
- ppc_md.kgdb_map_scc();
- set_debug_traps();
- if (strstr(cmd_line, "gdb")) {
- if (ppc_md.progress)
- ppc_md.progress("setup_arch: kgdb breakpoint", 0x4000);
- printk("kgdb breakpoint activated\n");
- breakpoint();
- }
-#endif
-
/*
* Set cache line size based on type of cpu as a default.
* Systems with OF can look in the properties on the cpu node(s)
diff --git a/arch/powerpc/kernel/stacktrace.c b/arch/powerpc/kernel/stacktrace.c
index 071bee3ec74..f2589645870 100644
--- a/arch/powerpc/kernel/stacktrace.c
+++ b/arch/powerpc/kernel/stacktrace.c
@@ -59,6 +59,6 @@ EXPORT_SYMBOL_GPL(save_stack_trace);
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
- save_context_stack(trace, tsk->thread.regs->gpr[1], tsk, 0);
+ save_context_stack(trace, tsk->thread.ksp, tsk, 0);
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
diff --git a/arch/powerpc/kernel/suspend.c b/arch/powerpc/kernel/suspend.c
index 8cee5710754..6fc6328dc62 100644
--- a/arch/powerpc/kernel/suspend.c
+++ b/arch/powerpc/kernel/suspend.c
@@ -7,6 +7,7 @@
* Copyright (c) 2001 Patrick Mochel <mochel@osdl.org>
*/
+#include <linux/mm.h>
#include <asm/page.h>
/* References to section boundaries */
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index c8127f832df..aba0ba95f06 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -28,7 +28,9 @@ static DEFINE_PER_CPU(struct cpu, cpu_devices);
/* Time in microseconds we delay before sleeping in the idle loop */
DEFINE_PER_CPU(unsigned long, smt_snooze_delay) = { 100 };
-static ssize_t store_smt_snooze_delay(struct sys_device *dev, const char *buf,
+static ssize_t store_smt_snooze_delay(struct sys_device *dev,
+ struct sysdev_attribute *attr,
+ const char *buf,
size_t count)
{
struct cpu *cpu = container_of(dev, struct cpu, sysdev);
@@ -44,7 +46,9 @@ static ssize_t store_smt_snooze_delay(struct sys_device *dev, const char *buf,
return count;
}
-static ssize_t show_smt_snooze_delay(struct sys_device *dev, char *buf)
+static ssize_t show_smt_snooze_delay(struct sys_device *dev,
+ struct sysdev_attribute *attr,
+ char *buf)
{
struct cpu *cpu = container_of(dev, struct cpu, sysdev);
@@ -152,14 +156,17 @@ static unsigned long write_##NAME(unsigned long val) \
mtspr(ADDRESS, val); \
return 0; \
} \
-static ssize_t show_##NAME(struct sys_device *dev, char *buf) \
+static ssize_t show_##NAME(struct sys_device *dev, \
+ struct sysdev_attribute *attr, \
+ char *buf) \
{ \
struct cpu *cpu = container_of(dev, struct cpu, sysdev); \
unsigned long val = run_on_cpu(cpu->sysdev.id, read_##NAME, 0); \
return sprintf(buf, "%lx\n", val); \
} \
static ssize_t __used \
- store_##NAME(struct sys_device *dev, const char *buf, size_t count) \
+ store_##NAME(struct sys_device *dev, struct sysdev_attribute *attr, \
+ const char *buf, size_t count) \
{ \
struct cpu *cpu = container_of(dev, struct cpu, sysdev); \
unsigned long val; \
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 87a72c66ce2..a914411bced 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -9,6 +9,25 @@
ENTRY(_stext)
+PHDRS {
+ kernel PT_LOAD FLAGS(7); /* RWX */
+ notes PT_NOTE FLAGS(0);
+ dummy PT_NOTE FLAGS(0);
+
+ /* binutils < 2.18 has a bug that makes it misbehave when taking an
+ ELF file with all segments at load address 0 as input. This
+ happens when running "strip" on vmlinux, because of the AT() magic
+ in this linker script. People using GCC >= 4.2 won't run into
+ this problem, because the "build-id" support will put some data
+ into the "notes" segment (at a non-zero load address).
+
+ To work around this, we force some data into both the "dummy"
+ segment and the kernel segment, so the dummy segment will get a
+ non-zero load address. It's not enough to always create the
+ "notes" segment, since if nothing gets assigned to it, its load
+ address will be zero. */
+}
+
#ifdef CONFIG_PPC64
OUTPUT_ARCH(powerpc:common64)
jiffies = jiffies_64;
@@ -50,7 +69,7 @@ SECTIONS
. = ALIGN(PAGE_SIZE);
_etext = .;
PROVIDE32 (etext = .);
- }
+ } :kernel
/* Read-only data */
RODATA
@@ -62,7 +81,13 @@ SECTIONS
__stop___ex_table = .;
}
- NOTES
+ NOTES :kernel :notes
+
+ /* The dummy segment contents for the bug workaround mentioned above
+ near PHDRS. */
+ .dummy : {
+ LONG(0xf177)
+ } :kernel :dummy
/*
* Init sections discarded at runtime
@@ -74,7 +99,7 @@ SECTIONS
_sinittext = .;
INIT_TEXT
_einittext = .;
- }
+ } :kernel
/* .exit.text is discarded at runtime, not link time,
* to deal with references from __bug_table