From 441cbd8dace80545db2ac43175ac1c097d96f75c Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Thu, 26 Oct 2006 15:38:10 +1000 Subject: [POWERPC] Fix various offb issues This patch fixes a few issues in offb: - A test was inverted causing the palette hack to never work (no device node was passed down to the init function) - Some cards seem to have their assigned-addresses property in a random order, thus we need to try using of_get_pci_address() first, which will fail if it's not a PCI device, and fall back to of_get_address() in that case. of_get_pci_address() properly parses assigned-addresses to test the BAR number and thus will get it right whatever the order is. - Some cards (like GXT4500) provide a linebytes of 0xffffffff in the device-tree which does no good. This patch handles that by using the screen width when that happens. (Also fixes btext.c while at it). - Add detection of the GXT4500 in addition to the GXT2000 for the palette hacks (we use the same hack, palette is linear in register space at offset 0x6000). Signed-off-by: Benjamin Herrenschmidt Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/btext.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/powerpc/kernel/btext.c b/arch/powerpc/kernel/btext.c index 995fcef156f..93f21aaf7c8 100644 --- a/arch/powerpc/kernel/btext.c +++ b/arch/powerpc/kernel/btext.c @@ -182,7 +182,7 @@ int btext_initialize(struct device_node *np) prop = get_property(np, "linux,bootx-linebytes", NULL); if (prop == NULL) prop = get_property(np, "linebytes", NULL); - if (prop) + if (prop && *prop != 0xffffffffu) pitch = *prop; if (pitch == 1) pitch = 0x1000; -- cgit v1.2.3 From dd6c89f686bdb2a5de72fab636fc839e5a0add6d Mon Sep 17 00:00:00 2001 From: Andy Fleming Date: Fri, 27 Oct 2006 15:06:32 -0500 Subject: [POWERPC] Fix oprofile support for e500 in arch/powerpc Fixed a compile error in building the 85xx support with oprofile, and in the process cleaned up some issues with the fsl_booke performance monitor code. * Reorganized FSL Book-E performance monitoring code so that the 7450 wouldn't be built if the e500 was, and cleaned it up so it was more self-contained. * Added a cpu_setup function for FSL Book-E. The original cpu_setup function prototype had no arguments, assuming that the reg_setup function would copy the required information into variables which represented the registers. This was silly for e500, since it has 1 register per counter (rather than 3 for all counters), so the code has been restructured to have cpu_setup take the current counter config array as an argument, with op_powerpc_setup() invoking op_powerpc_cpu_setup() through on_each_cpu(), and op_powerpc_cpu_setup() invoking the model-specific cpu_setup function with an argument. The argument is ignored on all other platforms at present.
* Fixed a confusing line where a ternary operator only had two arguments Signed-off-by: Andrew Fleming Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/Makefile | 1 - arch/powerpc/kernel/perfmon_fsl_booke.c | 221 ----------------------------- arch/powerpc/kernel/pmc.c | 2 +- arch/powerpc/oprofile/Makefile | 2 +- arch/powerpc/oprofile/common.c | 10 +- arch/powerpc/oprofile/op_model_7450.c | 2 +- arch/powerpc/oprofile/op_model_fsl_booke.c | 170 +++++++++++++++++----- arch/powerpc/oprofile/op_model_power4.c | 2 +- arch/powerpc/oprofile/op_model_rs64.c | 2 +- 9 files changed, 149 insertions(+), 263 deletions(-) delete mode 100644 arch/powerpc/kernel/perfmon_fsl_booke.c (limited to 'arch') diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index 8b133afbdc2..7af23c43fd4 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -38,7 +38,6 @@ obj-$(CONFIG_6xx) += idle_6xx.o l2cr_6xx.o cpu_setup_6xx.o obj-$(CONFIG_TAU) += tau_6xx.o obj32-$(CONFIG_SOFTWARE_SUSPEND) += swsusp_32.o obj32-$(CONFIG_MODULES) += module_32.o -obj-$(CONFIG_E500) += perfmon_fsl_booke.o ifeq ($(CONFIG_PPC_MERGE),y) diff --git a/arch/powerpc/kernel/perfmon_fsl_booke.c b/arch/powerpc/kernel/perfmon_fsl_booke.c deleted file mode 100644 index e0dcf2b41fb..00000000000 --- a/arch/powerpc/kernel/perfmon_fsl_booke.c +++ /dev/null @@ -1,221 +0,0 @@ -/* arch/powerpc/kernel/perfmon_fsl_booke.c - * Freescale Book-E Performance Monitor code - * - * Author: Andy Fleming - * Copyright (c) 2004 Freescale Semiconductor, Inc - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version.
- */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include - -static inline u32 get_pmlca(int ctr); -static inline void set_pmlca(int ctr, u32 pmlca); - -static inline u32 get_pmlca(int ctr) -{ - u32 pmlca; - - switch (ctr) { - case 0: - pmlca = mfpmr(PMRN_PMLCA0); - break; - case 1: - pmlca = mfpmr(PMRN_PMLCA1); - break; - case 2: - pmlca = mfpmr(PMRN_PMLCA2); - break; - case 3: - pmlca = mfpmr(PMRN_PMLCA3); - break; - default: - panic("Bad ctr number\n"); - } - - return pmlca; -} - -static inline void set_pmlca(int ctr, u32 pmlca) -{ - switch (ctr) { - case 0: - mtpmr(PMRN_PMLCA0, pmlca); - break; - case 1: - mtpmr(PMRN_PMLCA1, pmlca); - break; - case 2: - mtpmr(PMRN_PMLCA2, pmlca); - break; - case 3: - mtpmr(PMRN_PMLCA3, pmlca); - break; - default: - panic("Bad ctr number\n"); - } -} - -void init_pmc_stop(int ctr) -{ - u32 pmlca = (PMLCA_FC | PMLCA_FCS | PMLCA_FCU | - PMLCA_FCM1 | PMLCA_FCM0); - u32 pmlcb = 0; - - switch (ctr) { - case 0: - mtpmr(PMRN_PMLCA0, pmlca); - mtpmr(PMRN_PMLCB0, pmlcb); - break; - case 1: - mtpmr(PMRN_PMLCA1, pmlca); - mtpmr(PMRN_PMLCB1, pmlcb); - break; - case 2: - mtpmr(PMRN_PMLCA2, pmlca); - mtpmr(PMRN_PMLCB2, pmlcb); - break; - case 3: - mtpmr(PMRN_PMLCA3, pmlca); - mtpmr(PMRN_PMLCB3, pmlcb); - break; - default: - panic("Bad ctr number!\n"); - } -} - -void set_pmc_event(int ctr, int event) -{ - u32 pmlca; - - pmlca = get_pmlca(ctr); - - pmlca = (pmlca & ~PMLCA_EVENT_MASK) | - ((event << PMLCA_EVENT_SHIFT) & - PMLCA_EVENT_MASK); - - set_pmlca(ctr, pmlca); -} - -void set_pmc_user_kernel(int ctr, int user, int kernel) -{ - u32 pmlca; - - pmlca = get_pmlca(ctr); - - if(user) - pmlca &= ~PMLCA_FCU; - else - pmlca |= PMLCA_FCU; - - if(kernel) - pmlca &= ~PMLCA_FCS; - else - pmlca |= PMLCA_FCS; - - set_pmlca(ctr, pmlca); -} - -void set_pmc_marked(int ctr, int mark0, int mark1) -{ - u32 pmlca = get_pmlca(ctr); - - if(mark0) - pmlca &= ~PMLCA_FCM0; - else - pmlca |= PMLCA_FCM0; - - if(mark1) - pmlca &= ~PMLCA_FCM1; - else - pmlca |= PMLCA_FCM1; - - set_pmlca(ctr, pmlca); -} - -void pmc_start_ctr(int ctr, int enable) -{ - u32 pmlca = get_pmlca(ctr); - - pmlca &= ~PMLCA_FC; - - if (enable) - pmlca |= PMLCA_CE; - else - pmlca &= ~PMLCA_CE; - - set_pmlca(ctr, pmlca); -} - -void pmc_start_ctrs(int enable) -{ - u32 pmgc0 = mfpmr(PMRN_PMGC0); - - pmgc0 &= ~PMGC0_FAC; - pmgc0 |= PMGC0_FCECE; - - if (enable) - pmgc0 |= PMGC0_PMIE; - else - pmgc0 &= ~PMGC0_PMIE; - - mtpmr(PMRN_PMGC0, pmgc0); -} - -void pmc_stop_ctrs(void) -{ - u32 pmgc0 = mfpmr(PMRN_PMGC0); - - pmgc0 |= PMGC0_FAC; - - pmgc0 &= ~(PMGC0_PMIE | PMGC0_FCECE); - - mtpmr(PMRN_PMGC0, pmgc0); -} - -void dump_pmcs(void) -{ - printk("pmgc0: %x\n", mfpmr(PMRN_PMGC0)); - printk("pmc\t\tpmlca\t\tpmlcb\n"); - printk("%8x\t%8x\t%8x\n", mfpmr(PMRN_PMC0), - mfpmr(PMRN_PMLCA0), mfpmr(PMRN_PMLCB0)); - printk("%8x\t%8x\t%8x\n", mfpmr(PMRN_PMC1), - mfpmr(PMRN_PMLCA1), mfpmr(PMRN_PMLCB1)); - printk("%8x\t%8x\t%8x\n", mfpmr(PMRN_PMC2), - mfpmr(PMRN_PMLCA2), mfpmr(PMRN_PMLCB2)); - printk("%8x\t%8x\t%8x\n", mfpmr(PMRN_PMC3), - mfpmr(PMRN_PMLCA3), mfpmr(PMRN_PMLCB3)); -} - -EXPORT_SYMBOL(init_pmc_stop); -EXPORT_SYMBOL(set_pmc_event); -EXPORT_SYMBOL(set_pmc_user_kernel); -EXPORT_SYMBOL(set_pmc_marked); -EXPORT_SYMBOL(pmc_start_ctr); -EXPORT_SYMBOL(pmc_start_ctrs); -EXPORT_SYMBOL(pmc_stop_ctrs); -EXPORT_SYMBOL(dump_pmcs); diff --git a/arch/powerpc/kernel/pmc.c 
b/arch/powerpc/kernel/pmc.c index a0a2efadeab..3d8f6f44641 100644 --- a/arch/powerpc/kernel/pmc.c +++ b/arch/powerpc/kernel/pmc.c @@ -71,7 +71,7 @@ int reserve_pmc_hardware(perf_irq_t new_perf_irq) } pmc_owner_caller = __builtin_return_address(0); - perf_irq = new_perf_irq ? : dummy_perf; + perf_irq = new_perf_irq ? new_perf_irq : dummy_perf; out: spin_unlock(&pmc_owner_lock); diff --git a/arch/powerpc/oprofile/Makefile b/arch/powerpc/oprofile/Makefile index 3145d610b5b..0b5df9c96ae 100644 --- a/arch/powerpc/oprofile/Makefile +++ b/arch/powerpc/oprofile/Makefile @@ -13,4 +13,4 @@ DRIVER_OBJS := $(addprefix ../../../drivers/oprofile/, \ oprofile-y := $(DRIVER_OBJS) common.o backtrace.o oprofile-$(CONFIG_PPC64) += op_model_rs64.o op_model_power4.o oprofile-$(CONFIG_FSL_BOOKE) += op_model_fsl_booke.o -oprofile-$(CONFIG_PPC32) += op_model_7450.o +oprofile-$(CONFIG_6xx) += op_model_7450.o diff --git a/arch/powerpc/oprofile/common.c b/arch/powerpc/oprofile/common.c index fd0bbbe7a4d..63bbef3b63f 100644 --- a/arch/powerpc/oprofile/common.c +++ b/arch/powerpc/oprofile/common.c @@ -34,6 +34,11 @@ static void op_handle_interrupt(struct pt_regs *regs) model->handle_interrupt(regs, ctr); } +static void op_powerpc_cpu_setup(void *dummy) +{ + model->cpu_setup(ctr); +} + static int op_powerpc_setup(void) { int err; @@ -47,7 +52,7 @@ static int op_powerpc_setup(void) model->reg_setup(ctr, &sys, model->num_counters); /* Configure the registers on all cpus. */ - on_each_cpu(model->cpu_setup, NULL, 0, 1); + on_each_cpu(op_powerpc_cpu_setup, NULL, 0, 1); return 0; } @@ -142,7 +147,8 @@ int __init oprofile_arch_init(struct oprofile_operations *ops) case PPC_OPROFILE_POWER4: model = &op_model_power4; break; -#else +#endif +#ifdef CONFIG_6xx case PPC_OPROFILE_G4: model = &op_model_7450; break; diff --git a/arch/powerpc/oprofile/op_model_7450.c b/arch/powerpc/oprofile/op_model_7450.c index d8ee3aea83f..f481c0ed5e6 100644 --- a/arch/powerpc/oprofile/op_model_7450.c +++ b/arch/powerpc/oprofile/op_model_7450.c @@ -81,7 +81,7 @@ static void pmc_stop_ctrs(void) /* Configures the counters on this CPU based on the global * settings */ -static void fsl7450_cpu_setup(void *unused) +static void fsl7450_cpu_setup(struct op_counter_config *ctr) { /* freeze all counters */ pmc_stop_ctrs(); diff --git a/arch/powerpc/oprofile/op_model_fsl_booke.c b/arch/powerpc/oprofile/op_model_fsl_booke.c index e29dede3142..0b3c31f5209 100644 --- a/arch/powerpc/oprofile/op_model_fsl_booke.c +++ b/arch/powerpc/oprofile/op_model_fsl_booke.c @@ -32,42 +32,152 @@ static unsigned long reset_value[OP_MAX_COUNTER]; static int num_counters; static int oprofile_running; -static inline unsigned int ctr_read(unsigned int i) +static void init_pmc_stop(int ctr) { - switch(i) { - case 0: - return mfpmr(PMRN_PMC0); - case 1: - return mfpmr(PMRN_PMC1); - case 2: - return mfpmr(PMRN_PMC2); - case 3: - return mfpmr(PMRN_PMC3); - default: - return 0; - } -} + u32 pmlca = (PMLCA_FC | PMLCA_FCS | PMLCA_FCU | + PMLCA_FCM1 | PMLCA_FCM0); + u32 pmlcb = 0; -static inline void ctr_write(unsigned int i, unsigned int val) -{ - switch(i) { + switch (ctr) { case 0: - mtpmr(PMRN_PMC0, val); + mtpmr(PMRN_PMLCA0, pmlca); + mtpmr(PMRN_PMLCB0, pmlcb); break; case 1: - mtpmr(PMRN_PMC1, val); + mtpmr(PMRN_PMLCA1, pmlca); + mtpmr(PMRN_PMLCB1, pmlcb); break; case 2: - mtpmr(PMRN_PMC2, val); + mtpmr(PMRN_PMLCA2, pmlca); + mtpmr(PMRN_PMLCB2, pmlcb); break; case 3: - mtpmr(PMRN_PMC3, val); + mtpmr(PMRN_PMLCA3, pmlca); + mtpmr(PMRN_PMLCB3, pmlcb); break; default: - break; + panic("Bad 
ctr number!\n"); } } +static void set_pmc_event(int ctr, int event) +{ + u32 pmlca; + + pmlca = get_pmlca(ctr); + + pmlca = (pmlca & ~PMLCA_EVENT_MASK) | + ((event << PMLCA_EVENT_SHIFT) & + PMLCA_EVENT_MASK); + + set_pmlca(ctr, pmlca); +} + +static void set_pmc_user_kernel(int ctr, int user, int kernel) +{ + u32 pmlca; + + pmlca = get_pmlca(ctr); + + if(user) + pmlca &= ~PMLCA_FCU; + else + pmlca |= PMLCA_FCU; + + if(kernel) + pmlca &= ~PMLCA_FCS; + else + pmlca |= PMLCA_FCS; + + set_pmlca(ctr, pmlca); +} + +static void set_pmc_marked(int ctr, int mark0, int mark1) +{ + u32 pmlca = get_pmlca(ctr); + + if(mark0) + pmlca &= ~PMLCA_FCM0; + else + pmlca |= PMLCA_FCM0; + + if(mark1) + pmlca &= ~PMLCA_FCM1; + else + pmlca |= PMLCA_FCM1; + + set_pmlca(ctr, pmlca); +} + +static void pmc_start_ctr(int ctr, int enable) +{ + u32 pmlca = get_pmlca(ctr); + + pmlca &= ~PMLCA_FC; + + if (enable) + pmlca |= PMLCA_CE; + else + pmlca &= ~PMLCA_CE; + + set_pmlca(ctr, pmlca); +} + +static void pmc_start_ctrs(int enable) +{ + u32 pmgc0 = mfpmr(PMRN_PMGC0); + + pmgc0 &= ~PMGC0_FAC; + pmgc0 |= PMGC0_FCECE; + + if (enable) + pmgc0 |= PMGC0_PMIE; + else + pmgc0 &= ~PMGC0_PMIE; + + mtpmr(PMRN_PMGC0, pmgc0); +} + +static void pmc_stop_ctrs(void) +{ + u32 pmgc0 = mfpmr(PMRN_PMGC0); + + pmgc0 |= PMGC0_FAC; + + pmgc0 &= ~(PMGC0_PMIE | PMGC0_FCECE); + + mtpmr(PMRN_PMGC0, pmgc0); +} + +static void dump_pmcs(void) +{ + printk("pmgc0: %x\n", mfpmr(PMRN_PMGC0)); + printk("pmc\t\tpmlca\t\tpmlcb\n"); + printk("%8x\t%8x\t%8x\n", mfpmr(PMRN_PMC0), + mfpmr(PMRN_PMLCA0), mfpmr(PMRN_PMLCB0)); + printk("%8x\t%8x\t%8x\n", mfpmr(PMRN_PMC1), + mfpmr(PMRN_PMLCA1), mfpmr(PMRN_PMLCB1)); + printk("%8x\t%8x\t%8x\n", mfpmr(PMRN_PMC2), + mfpmr(PMRN_PMLCA2), mfpmr(PMRN_PMLCB2)); + printk("%8x\t%8x\t%8x\n", mfpmr(PMRN_PMC3), + mfpmr(PMRN_PMLCA3), mfpmr(PMRN_PMLCB3)); +} + +static void fsl_booke_cpu_setup(struct op_counter_config *ctr) +{ + int i; + + /* freeze all counters */ + pmc_stop_ctrs(); + + for (i = 0;i < num_counters;i++) { + init_pmc_stop(i); + + set_pmc_event(i, ctr[i].event); + + set_pmc_user_kernel(i, ctr[i].user, ctr[i].kernel); + } +} static void fsl_booke_reg_setup(struct op_counter_config *ctr, struct op_system_config *sys, @@ -77,23 +187,14 @@ static void fsl_booke_reg_setup(struct op_counter_config *ctr, num_counters = num_ctrs; - /* freeze all counters */ - pmc_stop_ctrs(); - /* Our counters count up, and "count" refers to * how much before the next interrupt, and we interrupt * on overflow. So we calculate the starting value * which will give us "count" until overflow. 
* Then we set the events on the enabled counters */ - for (i = 0; i < num_counters; ++i) { + for (i = 0; i < num_counters; ++i) reset_value[i] = 0x80000000UL - ctr[i].count; - init_pmc_stop(i); - - set_pmc_event(i, ctr[i].event); - - set_pmc_user_kernel(i, ctr[i].user, ctr[i].kernel); - } } static void fsl_booke_start(struct op_counter_config *ctr) @@ -105,8 +206,8 @@ static void fsl_booke_start(struct op_counter_config *ctr) for (i = 0; i < num_counters; ++i) { if (ctr[i].enabled) { ctr_write(i, reset_value[i]); - /* Set Each enabled counterd to only - * count when the Mark bit is not set */ + /* Set each enabled counter to only + * count when the Mark bit is *not* set */ set_pmc_marked(i, 1, 0); pmc_start_ctr(i, 1); } else { @@ -177,6 +278,7 @@ static void fsl_booke_handle_interrupt(struct pt_regs *regs, struct op_powerpc_model op_model_fsl_booke = { .reg_setup = fsl_booke_reg_setup, + .cpu_setup = fsl_booke_cpu_setup, .start = fsl_booke_start, .stop = fsl_booke_stop, .handle_interrupt = fsl_booke_handle_interrupt, diff --git a/arch/powerpc/oprofile/op_model_power4.c b/arch/powerpc/oprofile/op_model_power4.c index 6a927effcc7..356709d515b 100644 --- a/arch/powerpc/oprofile/op_model_power4.c +++ b/arch/powerpc/oprofile/op_model_power4.c @@ -82,7 +82,7 @@ static inline int mmcra_must_set_sample(void) return 0; } -static void power4_cpu_setup(void *unused) +static void power4_cpu_setup(struct op_counter_config *ctr) { unsigned int mmcr0 = mmcr0_val; unsigned long mmcra = mmcra_val; diff --git a/arch/powerpc/oprofile/op_model_rs64.c b/arch/powerpc/oprofile/op_model_rs64.c index 042f8f4867a..19c5ee089bc 100644 --- a/arch/powerpc/oprofile/op_model_rs64.c +++ b/arch/powerpc/oprofile/op_model_rs64.c @@ -102,7 +102,7 @@ static void rs64_reg_setup(struct op_counter_config *ctr, /* XXX setup user and kernel profiling */ } -static void rs64_cpu_setup(void *unused) +static void rs64_cpu_setup(struct op_counter_config *ctr) { unsigned int mmcr0; -- cgit v1.2.3 From 5d2efba64b231a1733c4048d1708d77e07f26426 Mon Sep 17 00:00:00 2001 From: Linas Vepstas Date: Mon, 30 Oct 2006 16:15:59 +1100 Subject: [POWERPC] Use 4kB iommu pages even on 64kB-page systems The 10Gigabit ethernet device drivers appear to be able to chew up all 256MB of TCE mappings on pSeries systems, as evidenced by numerous error messages: iommu_alloc failed, tbl c0000000010d5c48 vaddr c0000000d875eff0 npages 1 Some experimentation indicates that this is essentially because one 1500-byte ethernet MTU gets mapped as a 64K DMA region when the large 64K pages are enabled. Thus, it doesn't take much to exhaust all of the available DMA mappings for a high-speed card. This patch changes the iommu allocator to work with its own unique, distinct page size. Although the patch is long, it's actually quite simple: it just #defines a distinct IOMMU_PAGE_SIZE and then uses this in all the places that matter. As a side effect, it also dramatically improves network performance on platforms with H-calls on iommu translation inserts/removes (since we no longer call it 16 times for a 1500-byte packet when the iommu HW is still 4k). In the future, we might want to make the IOMMU_PAGE_SIZE a variable in the iommu_table instance, thus allowing support for different HW page sizes in the iommu itself.
Signed-off-by: Linas Vepstas Signed-off-by: Benjamin Herrenschmidt Acked-by: Olof Johansson Acked-by: Stephen Rothwell Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/iommu.c | 77 ++++++++++++++++++++-------------- arch/powerpc/kernel/vio.c | 4 +- arch/powerpc/platforms/iseries/iommu.c | 11 +---- arch/powerpc/platforms/pseries/iommu.c | 35 ++++------------ arch/powerpc/sysdev/dart.h | 1 - arch/powerpc/sysdev/dart_iommu.c | 8 +--- 6 files changed, 58 insertions(+), 78 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c index f88a2a675d9..ba6b7256084 100644 --- a/arch/powerpc/kernel/iommu.c +++ b/arch/powerpc/kernel/iommu.c @@ -47,6 +47,17 @@ static int novmerge = 0; static int novmerge = 1; #endif +static inline unsigned long iommu_num_pages(unsigned long vaddr, + unsigned long slen) +{ + unsigned long npages; + + npages = IOMMU_PAGE_ALIGN(vaddr + slen) - (vaddr & IOMMU_PAGE_MASK); + npages >>= IOMMU_PAGE_SHIFT; + + return npages; +} + static int __init setup_iommu(char *str) { if (!strcmp(str, "novmerge")) @@ -178,10 +189,10 @@ static dma_addr_t iommu_alloc(struct iommu_table *tbl, void *page, } entry += tbl->it_offset; /* Offset into real TCE table */ - ret = entry << PAGE_SHIFT; /* Set the return dma address */ + ret = entry << IOMMU_PAGE_SHIFT; /* Set the return dma address */ /* Put the TCEs in the HW table */ - ppc_md.tce_build(tbl, entry, npages, (unsigned long)page & PAGE_MASK, + ppc_md.tce_build(tbl, entry, npages, (unsigned long)page & IOMMU_PAGE_MASK, direction); @@ -203,7 +214,7 @@ static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr, unsigned long entry, free_entry; unsigned long i; - entry = dma_addr >> PAGE_SHIFT; + entry = dma_addr >> IOMMU_PAGE_SHIFT; free_entry = entry - tbl->it_offset; if (((free_entry + npages) > tbl->it_size) || @@ -270,7 +281,7 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl, /* Init first segment length for backout at failure */ outs->dma_length = 0; - DBG("mapping %d elements:\n", nelems); + DBG("sg mapping %d elements:\n", nelems); spin_lock_irqsave(&(tbl->it_lock), flags); @@ -285,9 +296,8 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl, } /* Allocate iommu entries for that segment */ vaddr = (unsigned long)page_address(s->page) + s->offset; - npages = PAGE_ALIGN(vaddr + slen) - (vaddr & PAGE_MASK); - npages >>= PAGE_SHIFT; - entry = iommu_range_alloc(tbl, npages, &handle, mask >> PAGE_SHIFT, 0); + npages = iommu_num_pages(vaddr, slen); + entry = iommu_range_alloc(tbl, npages, &handle, mask >> IOMMU_PAGE_SHIFT, 0); DBG(" - vaddr: %lx, size: %lx\n", vaddr, slen); @@ -301,14 +311,14 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl, /* Convert entry to a dma_addr_t */ entry += tbl->it_offset; - dma_addr = entry << PAGE_SHIFT; - dma_addr |= s->offset; + dma_addr = entry << IOMMU_PAGE_SHIFT; + dma_addr |= (s->offset & ~IOMMU_PAGE_MASK); - DBG(" - %lx pages, entry: %lx, dma_addr: %lx\n", + DBG(" - %lu pages, entry: %lx, dma_addr: %lx\n", npages, entry, dma_addr); /* Insert into HW table */ - ppc_md.tce_build(tbl, entry, npages, vaddr & PAGE_MASK, direction); + ppc_md.tce_build(tbl, entry, npages, vaddr & IOMMU_PAGE_MASK, direction); /* If we are in an open segment, try merging */ if (segstart != s) { @@ -323,7 +333,7 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl, DBG(" can't merge, new segment.\n"); } else { outs->dma_length += s->length; - DBG(" merged, new len: %lx\n", outs->dma_length); + DBG(" merged, new len: 
%ux\n", outs->dma_length); } } @@ -367,9 +377,8 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl, if (s->dma_length != 0) { unsigned long vaddr, npages; - vaddr = s->dma_address & PAGE_MASK; - npages = (PAGE_ALIGN(s->dma_address + s->dma_length) - vaddr) - >> PAGE_SHIFT; + vaddr = s->dma_address & IOMMU_PAGE_MASK; + npages = iommu_num_pages(s->dma_address, s->dma_length); __iommu_free(tbl, vaddr, npages); s->dma_address = DMA_ERROR_CODE; s->dma_length = 0; @@ -398,8 +407,7 @@ void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist, if (sglist->dma_length == 0) break; - npages = (PAGE_ALIGN(dma_handle + sglist->dma_length) - - (dma_handle & PAGE_MASK)) >> PAGE_SHIFT; + npages = iommu_num_pages(dma_handle,sglist->dma_length); __iommu_free(tbl, dma_handle, npages); sglist++; } @@ -532,12 +540,11 @@ dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr, BUG_ON(direction == DMA_NONE); uaddr = (unsigned long)vaddr; - npages = PAGE_ALIGN(uaddr + size) - (uaddr & PAGE_MASK); - npages >>= PAGE_SHIFT; + npages = iommu_num_pages(uaddr, size); if (tbl) { dma_handle = iommu_alloc(tbl, vaddr, npages, direction, - mask >> PAGE_SHIFT, 0); + mask >> IOMMU_PAGE_SHIFT, 0); if (dma_handle == DMA_ERROR_CODE) { if (printk_ratelimit()) { printk(KERN_INFO "iommu_alloc failed, " @@ -545,7 +552,7 @@ dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr, tbl, vaddr, npages); } } else - dma_handle |= (uaddr & ~PAGE_MASK); + dma_handle |= (uaddr & ~IOMMU_PAGE_MASK); } return dma_handle; @@ -554,11 +561,14 @@ dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr, void iommu_unmap_single(struct iommu_table *tbl, dma_addr_t dma_handle, size_t size, enum dma_data_direction direction) { + unsigned int npages; + BUG_ON(direction == DMA_NONE); - if (tbl) - iommu_free(tbl, dma_handle, (PAGE_ALIGN(dma_handle + size) - - (dma_handle & PAGE_MASK)) >> PAGE_SHIFT); + if (tbl) { + npages = iommu_num_pages(dma_handle, size); + iommu_free(tbl, dma_handle, npages); + } } /* Allocates a contiguous real buffer and creates mappings over it. 
@@ -570,11 +580,11 @@ void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size, { void *ret = NULL; dma_addr_t mapping; - unsigned int npages, order; + unsigned int order; + unsigned int nio_pages, io_order; struct page *page; size = PAGE_ALIGN(size); - npages = size >> PAGE_SHIFT; order = get_order(size); /* @@ -598,8 +608,10 @@ void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size, memset(ret, 0, size); /* Set up tces to cover the allocated range */ - mapping = iommu_alloc(tbl, ret, npages, DMA_BIDIRECTIONAL, - mask >> PAGE_SHIFT, order); + nio_pages = size >> IOMMU_PAGE_SHIFT; + io_order = get_iommu_order(size); + mapping = iommu_alloc(tbl, ret, nio_pages, DMA_BIDIRECTIONAL, + mask >> IOMMU_PAGE_SHIFT, io_order); if (mapping == DMA_ERROR_CODE) { free_pages((unsigned long)ret, order); return NULL; @@ -611,12 +623,13 @@ void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size, void iommu_free_coherent(struct iommu_table *tbl, size_t size, void *vaddr, dma_addr_t dma_handle) { - unsigned int npages; - if (tbl) { + unsigned int nio_pages; + + size = PAGE_ALIGN(size); + nio_pages = size >> IOMMU_PAGE_SHIFT; + iommu_free(tbl, dma_handle, nio_pages); size = PAGE_ALIGN(size); - npages = size >> PAGE_SHIFT; - iommu_free(tbl, dma_handle, npages); free_pages((unsigned long)vaddr, get_order(size)); } } diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c index cb87e71eec6..ed007878d1b 100644 --- a/arch/powerpc/kernel/vio.c +++ b/arch/powerpc/kernel/vio.c @@ -92,9 +92,9 @@ static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev) &tbl->it_index, &offset, &size); /* TCE table size - measured in tce entries */ - tbl->it_size = size >> PAGE_SHIFT; + tbl->it_size = size >> IOMMU_PAGE_SHIFT; /* offset for VIO should always be 0 */ - tbl->it_offset = offset >> PAGE_SHIFT; + tbl->it_offset = offset >> IOMMU_PAGE_SHIFT; tbl->it_busno = 0; tbl->it_type = TCE_VB; diff --git a/arch/powerpc/platforms/iseries/iommu.c b/arch/powerpc/platforms/iseries/iommu.c index f4cbbcf8773..218817d13c5 100644 --- a/arch/powerpc/platforms/iseries/iommu.c +++ b/arch/powerpc/platforms/iseries/iommu.c @@ -43,9 +43,6 @@ static void tce_build_iSeries(struct iommu_table *tbl, long index, long npages, u64 rc; u64 tce, rpn; - index <<= TCE_PAGE_FACTOR; - npages <<= TCE_PAGE_FACTOR; - while (npages--) { rpn = virt_to_abs(uaddr) >> TCE_SHIFT; tce = (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT; @@ -75,9 +72,6 @@ static void tce_free_iSeries(struct iommu_table *tbl, long index, long npages) { u64 rc; - npages <<= TCE_PAGE_FACTOR; - index <<= TCE_PAGE_FACTOR; - while (npages--) { rc = HvCallXm_setTce((u64)tbl->it_index, (u64)index, 0); if (rc) @@ -136,10 +130,9 @@ void iommu_table_getparms_iSeries(unsigned long busno, panic("PCI_DMA: parms->size is zero, parms is 0x%p", parms); /* itc_size is in pages worth of table, it_size is in # of entries */ - tbl->it_size = ((parms->itc_size * TCE_PAGE_SIZE) / - TCE_ENTRY_SIZE) >> TCE_PAGE_FACTOR; + tbl->it_size = (parms->itc_size * TCE_PAGE_SIZE) / TCE_ENTRY_SIZE; tbl->it_busno = parms->itc_busno; - tbl->it_offset = parms->itc_offset >> TCE_PAGE_FACTOR; + tbl->it_offset = parms->itc_offset; tbl->it_index = parms->itc_index; tbl->it_blocksize = 1; tbl->it_type = virtbus ? 
TCE_VB : TCE_PCI; diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c index d24ba547e53..556c279a789 100644 --- a/arch/powerpc/platforms/pseries/iommu.c +++ b/arch/powerpc/platforms/pseries/iommu.c @@ -57,9 +57,6 @@ static void tce_build_pSeries(struct iommu_table *tbl, long index, u64 *tcep; u64 rpn; - index <<= TCE_PAGE_FACTOR; - npages <<= TCE_PAGE_FACTOR; - proto_tce = TCE_PCI_READ; // Read allowed if (direction != DMA_TO_DEVICE) @@ -82,9 +79,6 @@ static void tce_free_pSeries(struct iommu_table *tbl, long index, long npages) { u64 *tcep; - npages <<= TCE_PAGE_FACTOR; - index <<= TCE_PAGE_FACTOR; - tcep = ((u64 *)tbl->it_base) + index; while (npages--) @@ -95,7 +89,6 @@ static unsigned long tce_get_pseries(struct iommu_table *tbl, long index) { u64 *tcep; - index <<= TCE_PAGE_FACTOR; tcep = ((u64 *)tbl->it_base) + index; return *tcep; @@ -109,9 +102,6 @@ static void tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum, u64 proto_tce, tce; u64 rpn; - tcenum <<= TCE_PAGE_FACTOR; - npages <<= TCE_PAGE_FACTOR; - rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT; proto_tce = TCE_PCI_READ; if (direction != DMA_TO_DEVICE) @@ -146,7 +136,7 @@ static void tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum, u64 rpn; long l, limit; - if (TCE_PAGE_FACTOR == 0 && npages == 1) + if (npages == 1) return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr, direction); @@ -164,9 +154,6 @@ static void tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum, __get_cpu_var(tce_page) = tcep; } - tcenum <<= TCE_PAGE_FACTOR; - npages <<= TCE_PAGE_FACTOR; - rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT; proto_tce = TCE_PCI_READ; if (direction != DMA_TO_DEVICE) @@ -207,9 +194,6 @@ static void tce_free_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages { u64 rc; - tcenum <<= TCE_PAGE_FACTOR; - npages <<= TCE_PAGE_FACTOR; - while (npages--) { rc = plpar_tce_put((u64)tbl->it_index, (u64)tcenum << 12, 0); @@ -229,9 +213,6 @@ static void tce_freemulti_pSeriesLP(struct iommu_table *tbl, long tcenum, long n { u64 rc; - tcenum <<= TCE_PAGE_FACTOR; - npages <<= TCE_PAGE_FACTOR; - rc = plpar_tce_stuff((u64)tbl->it_index, (u64)tcenum << 12, 0, npages); if (rc && printk_ratelimit()) { @@ -248,7 +229,6 @@ static unsigned long tce_get_pSeriesLP(struct iommu_table *tbl, long tcenum) u64 rc; unsigned long tce_ret; - tcenum <<= TCE_PAGE_FACTOR; rc = plpar_tce_get((u64)tbl->it_index, (u64)tcenum << 12, &tce_ret); if (rc && printk_ratelimit()) { @@ -289,7 +269,7 @@ static void iommu_table_setparms(struct pci_controller *phb, tbl->it_busno = phb->bus->number; /* Units of tce entries */ - tbl->it_offset = phb->dma_window_base_cur >> PAGE_SHIFT; + tbl->it_offset = phb->dma_window_base_cur >> IOMMU_PAGE_SHIFT; /* Test if we are going over 2GB of DMA space */ if (phb->dma_window_base_cur + phb->dma_window_size > 0x80000000ul) { @@ -300,7 +280,7 @@ static void iommu_table_setparms(struct pci_controller *phb, phb->dma_window_base_cur += phb->dma_window_size; /* Set the tce table size - measured in entries */ - tbl->it_size = phb->dma_window_size >> PAGE_SHIFT; + tbl->it_size = phb->dma_window_size >> IOMMU_PAGE_SHIFT; tbl->it_index = 0; tbl->it_blocksize = 16; @@ -325,8 +305,8 @@ static void iommu_table_setparms_lpar(struct pci_controller *phb, tbl->it_base = 0; tbl->it_blocksize = 16; tbl->it_type = TCE_PCI; - tbl->it_offset = offset >> PAGE_SHIFT; - tbl->it_size = size >> PAGE_SHIFT; + tbl->it_offset = offset >> IOMMU_PAGE_SHIFT; + tbl->it_size = size >> IOMMU_PAGE_SHIFT; } static void 
iommu_bus_setup_pSeries(struct pci_bus *bus) @@ -522,8 +502,6 @@ static void iommu_dev_setup_pSeriesLP(struct pci_dev *dev) const void *dma_window = NULL; struct pci_dn *pci; - DBG("iommu_dev_setup_pSeriesLP, dev %p (%s)\n", dev, pci_name(dev)); - /* dev setup for LPAR is a little tricky, since the device tree might * contain the dma-window properties per-device and not neccesarily * for the bus. So we need to search upwards in the tree until we @@ -532,6 +510,9 @@ static void iommu_dev_setup_pSeriesLP(struct pci_dev *dev) */ dn = pci_device_to_OF_node(dev); + DBG("iommu_dev_setup_pSeriesLP, dev %p (%s) %s\n", + dev, pci_name(dev), dn->full_name); + for (pdn = dn; pdn && PCI_DN(pdn) && !PCI_DN(pdn)->iommu_table; pdn = pdn->parent) { dma_window = get_property(pdn, "ibm,dma-window", NULL); diff --git a/arch/powerpc/sysdev/dart.h b/arch/powerpc/sysdev/dart.h index 1c8817c4835..ff202edb059 100644 --- a/arch/powerpc/sysdev/dart.h +++ b/arch/powerpc/sysdev/dart.h @@ -72,7 +72,6 @@ #define DART_PAGE_SHIFT 12 #define DART_PAGE_SIZE (1 << DART_PAGE_SHIFT) -#define DART_PAGE_FACTOR (PAGE_SHIFT - DART_PAGE_SHIFT) #endif /* _POWERPC_SYSDEV_DART_H */ diff --git a/arch/powerpc/sysdev/dart_iommu.c b/arch/powerpc/sysdev/dart_iommu.c index 03b4477dd7f..572b7846cc7 100644 --- a/arch/powerpc/sysdev/dart_iommu.c +++ b/arch/powerpc/sysdev/dart_iommu.c @@ -156,9 +156,6 @@ static void dart_build(struct iommu_table *tbl, long index, DBG("dart: build at: %lx, %lx, addr: %x\n", index, npages, uaddr); - index <<= DART_PAGE_FACTOR; - npages <<= DART_PAGE_FACTOR; - dp = ((unsigned int*)tbl->it_base) + index; /* On U3, all memory is contigous, so we can move this @@ -199,9 +196,6 @@ static void dart_free(struct iommu_table *tbl, long index, long npages) DBG("dart: free at: %lx, %lx\n", index, npages); - index <<= DART_PAGE_FACTOR; - npages <<= DART_PAGE_FACTOR; - dp = ((unsigned int *)tbl->it_base) + index; while (npages--) @@ -281,7 +275,7 @@ static void iommu_table_dart_setup(void) iommu_table_dart.it_busno = 0; iommu_table_dart.it_offset = 0; /* it_size is in number of entries */ - iommu_table_dart.it_size = (dart_tablesize / sizeof(u32)) >> DART_PAGE_FACTOR; + iommu_table_dart.it_size = dart_tablesize / sizeof(u32); /* Initialize the common IOMMU code */ iommu_table_dart.it_base = (unsigned long)dart_vbase; -- cgit v1.2.3 From 302439d2167e0f1e01a6480ac40c06063f4e16a1 Mon Sep 17 00:00:00 2001 From: Timur Tabi Date: Tue, 31 Oct 2006 17:53:42 +0800 Subject: [POWERPC] qe_lib: qe_issue_cmd writes wrong value to CECDR Changed qe_issue_cmd() to write cmd_input to the CECDR unmodified. It was treating cmd_input as a virtual address and tried to convert it to a physical address. 
Signed-off-by: Timur Tabi Signed-off-by: Paul Mackerras --- arch/powerpc/sysdev/qe_lib/qe.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/sysdev/qe_lib/qe.c b/arch/powerpc/sysdev/qe_lib/qe.c index 2bae632d3ad..e4223226a7a 100644 --- a/arch/powerpc/sysdev/qe_lib/qe.c +++ b/arch/powerpc/sysdev/qe_lib/qe.c @@ -122,8 +122,7 @@ int qe_issue_cmd(u32 cmd, u32 device, u8 mcn_protocol, u32 cmd_input) mcn_shift = QE_CR_MCN_NORMAL_SHIFT; } - out_be32(&qe_immr->cp.cecdr, - immrbar_virt_to_phys((void *)cmd_input)); + out_be32(&qe_immr->cp.cecdr, cmd_input); out_be32(&qe_immr->cp.cecr, (cmd | QE_CR_FLG | ((u32) device << dev_shift) | (u32) mcn_protocol << mcn_shift)); -- cgit v1.2.3 From 96268889ee369b36203b7a06e8aabb197270216e Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Tue, 31 Oct 2006 18:40:39 +0000 Subject: [POWERPC] Make high hugepage areas preempt safe Checking source for other get_paca()->field preemption dangers found that open_high_hpage_areas does a structure copy into its paca while preemption is enabled: which is unsafe however gcc accomplishes it. Just remove that copy: it's done safely afterwards by on_each_cpu, as in open_low_hpage_areas. Signed-off-by: Hugh Dickins Acked-by: David Gibson Signed-off-by: Paul Mackerras --- arch/powerpc/mm/hugetlbpage.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c index 5615acc2952..fd68b74c07c 100644 --- a/arch/powerpc/mm/hugetlbpage.c +++ b/arch/powerpc/mm/hugetlbpage.c @@ -480,9 +480,6 @@ static int open_high_hpage_areas(struct mm_struct *mm, u16 newareas) mm->context.high_htlb_areas |= newareas; - /* update the paca copy of the context struct */ - get_paca()->context = mm->context; - /* the context change must make it to memory before the flush, * so that further SLB misses do the right thing. */ mb(); -- cgit v1.2.3 From 0d69a052d4d7c4085706b9ac0d1bd28ff90c9fca Mon Sep 17 00:00:00 2001 From: "Gui,Jian" Date: Wed, 1 Nov 2006 10:50:15 +0800 Subject: [POWERPC] Disallow kprobes on emulate_step and branch_taken On powerpc, probing the emulate_step() function will crash 2.6.18.1 when the probe is triggered. When the kprobe is triggered, emulate_step() is on its kernel path and will cause a recursive kprobe fault. Since branch_taken() is called from emulate_step(), this patch disallows kprobes on both of them. Signed-off-by: Paul Mackerras --- arch/powerpc/lib/sstep.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c index 9590ba780b9..7e8ded051b5 100644 --- a/arch/powerpc/lib/sstep.c +++ b/arch/powerpc/lib/sstep.c @@ -9,6 +9,7 @@ * 2 of the License, or (at your option) any later version. */ #include +#include #include #include #include @@ -25,7 +26,7 @@ extern char system_call_common[]; /* * Determine whether a conditional branch instruction would branch. */ -static int branch_taken(unsigned int instr, struct pt_regs *regs) +static int __kprobes branch_taken(unsigned int instr, struct pt_regs *regs) { unsigned int bo = (instr >> 21) & 0x1f; unsigned int bi; @@ -51,7 +52,7 @@ static int branch_taken(unsigned int instr, struct pt_regs *regs) * or -1 if the instruction is one that should not be stepped, * such as an rfid, or a mtmsrd that would clear MSR_RI.
*/ -int emulate_step(struct pt_regs *regs, unsigned int instr) +int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr) { unsigned int opcode, rd; unsigned long int imm; -- cgit v1.2.3 From 4393c4f6788cee65095dd838cfeca6edefbfeb52 Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Wed, 1 Nov 2006 15:11:39 +1100 Subject: [POWERPC] Make alignment exception always check exception table The alignment exception used to only check the exception table for -EFAULT, not for other errors. That opens an oops window if we can coerce the kernel into getting an alignment exception for other reasons in what would normally be a user-protected accessor, which can be done via some of the futex ops. This fixes it by always checking the exception tables. Signed-off-by: Benjamin Herrenschmidt Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/traps.c | 18 ++++++++++-------- arch/ppc/kernel/traps.c | 18 ++++++++++-------- 2 files changed, 20 insertions(+), 16 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index 5ed4c2ceb5c..c66b4771ef4 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -843,7 +843,7 @@ void __kprobes program_check_exception(struct pt_regs *regs) void alignment_exception(struct pt_regs *regs) { - int fixed = 0; + int sig, code, fixed = 0; /* we don't implement logging of alignment exceptions */ if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS)) @@ -857,14 +857,16 @@ void alignment_exception(struct pt_regs *regs) /* Operand address was bad */ if (fixed == -EFAULT) { - if (user_mode(regs)) - _exception(SIGSEGV, regs, SEGV_ACCERR, regs->dar); - else - /* Search exception table */ - bad_page_fault(regs, regs->dar, SIGSEGV); - return; + sig = SIGSEGV; + code = SEGV_ACCERR; + } else { + sig = SIGBUS; + code = BUS_ADRALN; } - _exception(SIGBUS, regs, BUS_ADRALN, regs->dar); + if (user_mode(regs)) + _exception(sig, regs, code, regs->dar); + else + bad_page_fault(regs, regs->dar, sig); } void StackOverflow(struct pt_regs *regs) diff --git a/arch/ppc/kernel/traps.c b/arch/ppc/kernel/traps.c index aafc8e8893d..9661a91183b 100644 --- a/arch/ppc/kernel/traps.c +++ b/arch/ppc/kernel/traps.c @@ -708,7 +708,7 @@ void single_step_exception(struct pt_regs *regs) void alignment_exception(struct pt_regs *regs) { - int fixed; + int sig, code, fixed = 0; fixed = fix_alignment(regs); if (fixed == 1) { @@ -717,14 +717,16 @@ void alignment_exception(struct pt_regs *regs) return; } if (fixed == -EFAULT) { - /* fixed == -EFAULT means the operand address was bad */ - if (user_mode(regs)) - _exception(SIGSEGV, regs, SEGV_ACCERR, regs->dar); - else - bad_page_fault(regs, regs->dar, SIGSEGV); - return; + sig = SIGSEGV; + code = SEGV_ACCERR; + } else { + sig = SIGBUS; + code = BUS_ADRALN; } - _exception(SIGBUS, regs, BUS_ADRALN, regs->dar); + if (user_mode(regs)) + _exception(sig, regs, code, regs->dar); + else + bad_page_fault(regs, regs->dar, sig); } void StackOverflow(struct pt_regs *regs) -- cgit v1.2.3
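For readers following the last patch above, here is a minimal, compilable userspace sketch (not kernel code) of the control flow it introduces in alignment_exception(): the signal is chosen first, then a single user/kernel branch decides between delivering it and falling through to the exception-table path, so kernel-mode SIGBUS cases now reach bad_page_fault() just like the -EFAULT case. The struct, the fix_alignment() stub and the printouts below are simplified stand-ins for illustration only.

/*
 * Standalone sketch of the patched alignment_exception() flow.
 * Stand-in types and helpers; only the branching structure mirrors the patch.
 */
#include <stdio.h>
#include <signal.h>
#include <errno.h>

struct fake_regs {
	int user_mode;          /* stand-in for user_mode(regs) */
	unsigned long dar;      /* faulting address, as in regs->dar */
};

/* stand-in for fix_alignment(): 1 = emulated OK, -EFAULT = bad operand address */
static int fix_alignment_stub(struct fake_regs *regs)
{
	(void)regs;
	return -EFAULT;
}

static void alignment_exception_sketch(struct fake_regs *regs)
{
	int sig;
	int fixed = fix_alignment_stub(regs);

	if (fixed == 1)
		return;                 /* instruction emulated, nothing to signal */

	if (fixed == -EFAULT)
		sig = SIGSEGV;          /* operand address was bad (SEGV_ACCERR) */
	else
		sig = SIGBUS;           /* genuine alignment error (BUS_ADRALN) */

	if (regs->user_mode)
		printf("user fault at 0x%lx: deliver signal %d\n", regs->dar, sig);
	else
		printf("kernel fault at 0x%lx: search exception table, then signal %d\n",
		       regs->dar, sig);
}

int main(void)
{
	struct fake_regs user = { .user_mode = 1, .dar = 0x1003 };
	struct fake_regs kern = { .user_mode = 0, .dar = 0xc003 };

	alignment_exception_sketch(&user);
	alignment_exception_sketch(&kern);
	return 0;
}

With the stub always returning -EFAULT, the user-mode call reports a SIGSEGV delivery and the kernel-mode call reports an exception-table search, mirroring the two paths in the patched traps.c.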