Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/Kconfig | 4
-rw-r--r--  arch/arm/kernel/entry-armv.S | 15
-rw-r--r--  arch/arm/kernel/ptrace.c | 49
-rw-r--r--  arch/arm/kernel/setup.c | 10
-rw-r--r--  arch/arm/mach-aaec2000/clock.c | 1
-rw-r--r--  arch/arm/mach-epxa10db/mm.c | 1
-rw-r--r--  arch/arm/mach-integrator/impd1.c | 3
-rw-r--r--  arch/arm/mach-ixp2000/core.c | 35
-rw-r--r--  arch/arm/mach-ixp2000/uengine.c | 11
-rw-r--r--  arch/arm/mach-ixp4xx/common-pci.c | 2
-rw-r--r--  arch/arm/mach-pxa/Kconfig | 9
-rw-r--r--  arch/arm/mach-pxa/Makefile | 1
-rw-r--r--  arch/arm/mach-pxa/corgi_lcd.c | 1
-rw-r--r--  arch/arm/mach-pxa/pm.c | 16
-rw-r--r--  arch/arm/mach-pxa/tosa.c | 162
-rw-r--r--  arch/arm/mach-realview/Kconfig | 9
-rw-r--r--  arch/arm/mach-realview/Makefile | 1
-rw-r--r--  arch/arm/mach-realview/core.h | 1
-rw-r--r--  arch/arm/mach-realview/headsmp.S | 39
-rw-r--r--  arch/arm/mach-realview/platsmp.c | 195
-rw-r--r--  arch/arm/mach-realview/realview_eb.c | 5
-rw-r--r--  arch/arm/mm/mm-armv.c | 48
-rw-r--r--  arch/arm/mm/proc-v6.S | 26
-rw-r--r--  arch/arm/nwfpe/fpa11.h | 2
-rw-r--r--  arch/arm/nwfpe/fpa11_cpdt.c | 10
-rw-r--r--  arch/arm/nwfpe/fpopcode.c | 16
-rw-r--r--  arch/arm/nwfpe/softfloat-specialize | 1
-rw-r--r--  arch/arm/nwfpe/softfloat.c | 6
-rw-r--r--  arch/arm/nwfpe/softfloat.h | 14
-rw-r--r--  arch/arm26/kernel/ptrace.c | 49
-rw-r--r--  arch/cris/arch-v10/README.mm | 6
-rw-r--r--  arch/cris/arch-v10/kernel/ptrace.c | 51
-rw-r--r--  arch/cris/arch-v10/kernel/signal.c | 2
-rw-r--r--  arch/cris/arch-v32/drivers/cryptocop.c | 14
-rw-r--r--  arch/cris/arch-v32/kernel/ptrace.c | 51
-rw-r--r--  arch/cris/arch-v32/kernel/signal.c | 2
-rw-r--r--  arch/cris/mm/ioremap.c | 2
-rw-r--r--  arch/frv/kernel/ptrace.c | 43
-rw-r--r--  arch/h8300/kernel/ptrace.c | 39
-rw-r--r--  arch/i386/Kconfig | 13
-rw-r--r--  arch/i386/Kconfig.debug | 10
-rw-r--r--  arch/i386/kernel/apic.c | 10
-rw-r--r--  arch/i386/kernel/apm.c | 5
-rw-r--r--  arch/i386/kernel/cpu/common.c | 5
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c | 3
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/powernow-k7.c | 12
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/powernow-k8.c | 19
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c | 5
-rw-r--r--  arch/i386/kernel/cpu/mcheck/k7.c | 2
-rw-r--r--  arch/i386/kernel/cpu/mcheck/mce.c | 4
-rw-r--r--  arch/i386/kernel/cpu/mcheck/p4.c | 4
-rw-r--r--  arch/i386/kernel/cpu/mcheck/p5.c | 2
-rw-r--r--  arch/i386/kernel/cpu/mcheck/p6.c | 2
-rw-r--r--  arch/i386/kernel/cpu/mcheck/winchip.c | 2
-rw-r--r--  arch/i386/kernel/kprobes.c | 180
-rw-r--r--  arch/i386/kernel/ldt.c | 1
-rw-r--r--  arch/i386/kernel/mca.c | 2
-rw-r--r--  arch/i386/kernel/ptrace.c | 44
-rw-r--r--  arch/i386/kernel/reboot_fixups.c | 3
-rw-r--r--  arch/i386/kernel/scx200.c | 1
-rw-r--r--  arch/i386/kernel/smpboot.c | 4
-rw-r--r--  arch/i386/oprofile/Kconfig | 6
-rw-r--r--  arch/i386/power/cpu.c | 1
-rw-r--r--  arch/ia64/Kconfig | 14
-rw-r--r--  arch/ia64/Kconfig.debug | 11
-rw-r--r--  arch/ia64/hp/sim/simserial.c | 6
-rw-r--r--  arch/ia64/kernel/kprobes.c | 126
-rw-r--r--  arch/ia64/kernel/perfmon.c | 2
-rw-r--r--  arch/ia64/kernel/setup.c | 7
-rw-r--r--  arch/ia64/oprofile/Kconfig | 6
-rw-r--r--  arch/m68k/atari/time.c | 6
-rw-r--r--  arch/m68k/kernel/ptrace.c | 47
-rw-r--r--  arch/m68knommu/Kconfig | 17
-rw-r--r--  arch/m68knommu/Makefile | 7
-rw-r--r--  arch/m68knommu/kernel/asm-offsets.c | 1
-rw-r--r--  arch/m68knommu/kernel/ptrace.c | 39
-rw-r--r--  arch/m68knommu/kernel/setup.c | 5
-rw-r--r--  arch/m68knommu/kernel/vmlinux.lds.S | 9
-rw-r--r--  arch/m68knommu/platform/520x/Makefile | 19
-rw-r--r--  arch/m68knommu/platform/520x/config.c | 65
-rw-r--r--  arch/m68knommu/platform/5307/Makefile | 1
-rw-r--r--  arch/m68knommu/platform/5307/head.S | 3
-rw-r--r--  arch/m68knommu/platform/5307/ints.c | 1
-rw-r--r--  arch/m68knommu/platform/5307/pit.c | 12
-rw-r--r--  arch/mips/Kconfig | 2
-rw-r--r--  arch/mips/boot/.gitignore | 4
-rw-r--r--  arch/mips/configs/pnx8550-jbs_defconfig | 11
-rw-r--r--  arch/mips/configs/pnx8550-v2pci_defconfig | 10
-rw-r--r--  arch/mips/ddb5xxx/common/rtc_ds1386.c | 6
-rw-r--r--  arch/mips/dec/time.c | 24
-rw-r--r--  arch/mips/jmr3927/common/rtc_ds1742.c | 6
-rw-r--r--  arch/mips/kernel/irixsig.c | 3
-rw-r--r--  arch/mips/kernel/ptrace.c | 55
-rw-r--r--  arch/mips/kernel/rtlx.c | 197
-rw-r--r--  arch/mips/kernel/signal.c | 3
-rw-r--r--  arch/mips/kernel/signal32.c | 13
-rw-r--r--  arch/mips/kernel/vpe.c | 100
-rw-r--r--  arch/mips/lasat/ds1603.c | 9
-rw-r--r--  arch/mips/momentum/jaguar_atx/setup.c | 6
-rw-r--r--  arch/mips/momentum/ocelot_3/setup.c | 6
-rw-r--r--  arch/mips/momentum/ocelot_c/setup.c | 6
-rw-r--r--  arch/mips/pmc-sierra/yosemite/setup.c | 6
-rw-r--r--  arch/mips/sgi-ip22/ip22-time.c | 6
-rw-r--r--  arch/mips/sibyte/swarm/rtc_m41t81.c | 7
-rw-r--r--  arch/mips/sibyte/swarm/rtc_xicor1241.c | 6
-rw-r--r--  arch/parisc/kernel/ptrace.c | 50
-rw-r--r--  arch/powerpc/Kconfig | 32
-rw-r--r--  arch/powerpc/Kconfig.debug | 10
-rw-r--r--  arch/powerpc/configs/g5_defconfig | 261
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c | 3
-rw-r--r--  arch/powerpc/kernel/cputable.c | 14
-rw-r--r--  arch/powerpc/kernel/head_64.S | 300
-rw-r--r--  arch/powerpc/kernel/lparmap.c | 2
-rw-r--r--  arch/powerpc/kernel/misc_64.S | 70
-rw-r--r--  arch/powerpc/kernel/process.c | 10
-rw-r--r--  arch/powerpc/kernel/prom.c | 97
-rw-r--r--  arch/powerpc/kernel/prom_init.c | 11
-rw-r--r--  arch/powerpc/kernel/ptrace.c | 43
-rw-r--r--  arch/powerpc/kernel/rtas.c | 5
-rw-r--r--  arch/powerpc/kernel/setup-common.c | 40
-rw-r--r--  arch/powerpc/kernel/setup_32.c | 1
-rw-r--r--  arch/powerpc/kernel/setup_64.c | 77
-rw-r--r--  arch/powerpc/kernel/signal_32.c | 1
-rw-r--r--  arch/powerpc/kernel/signal_64.c | 1
-rw-r--r--  arch/powerpc/kernel/smp.c | 1
-rw-r--r--  arch/powerpc/kernel/time.c | 6
-rw-r--r--  arch/powerpc/kernel/traps.c | 11
-rw-r--r--  arch/powerpc/lib/copypage_64.S | 2
-rw-r--r--  arch/powerpc/lib/copyuser_64.S | 4
-rw-r--r--  arch/powerpc/lib/locks.c | 1
-rw-r--r--  arch/powerpc/mm/fault.c | 17
-rw-r--r--  arch/powerpc/mm/hash_low_64.S | 613
-rw-r--r--  arch/powerpc/mm/hash_native_64.c | 377
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c | 538
-rw-r--r--  arch/powerpc/mm/hugetlbpage.c | 140
-rw-r--r--  arch/powerpc/mm/init_64.c | 19
-rw-r--r--  arch/powerpc/mm/mem.c | 58
-rw-r--r--  arch/powerpc/mm/numa.c | 1
-rw-r--r--  arch/powerpc/mm/pgtable_64.c | 23
-rw-r--r--  arch/powerpc/mm/ppc_mmu_32.c | 15
-rw-r--r--  arch/powerpc/mm/slb.c | 102
-rw-r--r--  arch/powerpc/mm/slb_low.S | 229
-rw-r--r--  arch/powerpc/mm/stab.c | 30
-rw-r--r--  arch/powerpc/mm/tlb_64.c | 32
-rw-r--r--  arch/powerpc/oprofile/Kconfig | 6
-rw-r--r--  arch/powerpc/oprofile/op_model_power4.c | 24
-rw-r--r--  arch/powerpc/platforms/iseries/htab.c | 65
-rw-r--r--  arch/powerpc/platforms/iseries/hvlog.c | 4
-rw-r--r--  arch/powerpc/platforms/iseries/iommu.c | 74
-rw-r--r--  arch/powerpc/platforms/iseries/irq.c | 5
-rw-r--r--  arch/powerpc/platforms/iseries/pci.c | 37
-rw-r--r--  arch/powerpc/platforms/iseries/setup.c | 17
-rw-r--r--  arch/powerpc/platforms/iseries/smp.c | 1
-rw-r--r--  arch/powerpc/platforms/iseries/vio.c | 39
-rw-r--r--  arch/powerpc/platforms/iseries/viopath.c | 16
-rw-r--r--  arch/powerpc/platforms/powermac/Makefile | 3
-rw-r--r--  arch/powerpc/platforms/powermac/cpufreq_32.c (renamed from arch/powerpc/platforms/powermac/cpufreq.c) | 15
-rw-r--r--  arch/powerpc/platforms/powermac/cpufreq_64.c | 323
-rw-r--r--  arch/powerpc/platforms/powermac/setup.c | 13
-rw-r--r--  arch/powerpc/platforms/pseries/iommu.c | 2
-rw-r--r--  arch/powerpc/platforms/pseries/lpar.c | 118
-rw-r--r--  arch/powerpc/platforms/pseries/plpar_wrappers.h | 10
-rw-r--r--  arch/powerpc/platforms/pseries/ras.c | 2
-rw-r--r--  arch/powerpc/platforms/pseries/reconfig.c | 6
-rw-r--r--  arch/powerpc/platforms/pseries/setup.c | 18
-rw-r--r--  arch/powerpc/sysdev/i8259.c | 5
-rw-r--r--  arch/powerpc/sysdev/u3_iommu.c | 1
-rw-r--r--  arch/ppc/4xx_io/serial_sicc.c | 17
-rw-r--r--  arch/ppc/8260_io/fcc_enet.c | 3
-rw-r--r--  arch/ppc/8xx_io/cs4218_tdm.c | 3
-rw-r--r--  arch/ppc/Kconfig | 18
-rw-r--r--  arch/ppc/boot/simple/Makefile | 27
-rw-r--r--  arch/ppc/boot/simple/misc.c | 16
-rw-r--r--  arch/ppc/boot/simple/openbios.c | 106
-rw-r--r--  arch/ppc/configs/ev64360_defconfig | 73
-rw-r--r--  arch/ppc/configs/stx_gp3_defconfig | 86
-rw-r--r--  arch/ppc/kernel/Makefile | 1
-rw-r--r--  arch/ppc/kernel/head_44x.S | 4
-rw-r--r--  arch/ppc/kernel/misc.S | 145
-rw-r--r--  arch/ppc/kernel/rio.c | 52
-rw-r--r--  arch/ppc/kernel/traps.c | 12
-rw-r--r--  arch/ppc/platforms/4xx/Kconfig | 19
-rw-r--r--  arch/ppc/platforms/4xx/Makefile | 2
-rw-r--r--  arch/ppc/platforms/4xx/bubinga.c | 2
-rw-r--r--  arch/ppc/platforms/4xx/bubinga.h | 64
-rw-r--r--  arch/ppc/platforms/4xx/ebony.h | 4
-rw-r--r--  arch/ppc/platforms/4xx/ppc440spe.c | 148
-rw-r--r--  arch/ppc/platforms/4xx/ppc440spe.h | 66
-rw-r--r--  arch/ppc/platforms/4xx/sycamore.c | 7
-rw-r--r--  arch/ppc/platforms/4xx/sycamore.h | 67
-rw-r--r--  arch/ppc/platforms/4xx/walnut.c | 2
-rw-r--r--  arch/ppc/platforms/4xx/walnut.h | 67
-rw-r--r--  arch/ppc/platforms/4xx/yucca.c | 395
-rw-r--r--  arch/ppc/platforms/4xx/yucca.h | 111
-rw-r--r--  arch/ppc/platforms/85xx/mpc85xx_ads_common.c | 10
-rw-r--r--  arch/ppc/platforms/85xx/stx_gp3.c | 14
-rw-r--r--  arch/ppc/platforms/ev64360.c | 12
-rw-r--r--  arch/ppc/syslib/Makefile | 4
-rw-r--r--  arch/ppc/syslib/ibm440sp_common.c | 4
-rw-r--r--  arch/ppc/syslib/ibm44x_common.c | 12
-rw-r--r--  arch/ppc/syslib/m8xx_wdt.c | 13
-rw-r--r--  arch/ppc/syslib/ppc405_pci.c | 7
-rw-r--r--  arch/ppc/syslib/ppc440spe_pcie.c | 442
-rw-r--r--  arch/ppc/syslib/ppc440spe_pcie.h | 149
-rw-r--r--  arch/ppc/syslib/ppc4xx_pic.c | 37
-rw-r--r--  arch/ppc/syslib/ppc85xx_rio.c | 938
-rw-r--r--  arch/ppc/syslib/ppc85xx_rio.h | 21
-rw-r--r--  arch/ppc/syslib/ppc_sys.c | 1
-rw-r--r--  arch/ppc/syslib/prom.c | 10
-rw-r--r--  arch/ppc/xmon/xmon.c | 5
-rw-r--r--  arch/ppc64/Kconfig | 24
-rw-r--r--  arch/ppc64/Kconfig.debug | 4
-rw-r--r--  arch/ppc64/boot/main.c | 11
-rw-r--r--  arch/ppc64/kernel/asm-offsets.c | 3
-rw-r--r--  arch/ppc64/kernel/head.S | 300
-rw-r--r--  arch/ppc64/kernel/idle.c | 1
-rw-r--r--  arch/ppc64/kernel/kprobes.c | 138
-rw-r--r--  arch/ppc64/kernel/lparcfg.c | 4
-rw-r--r--  arch/ppc64/kernel/machine_kexec.c | 1
-rw-r--r--  arch/ppc64/kernel/misc.S | 72
-rw-r--r--  arch/ppc64/kernel/pacaData.c | 2
-rw-r--r--  arch/ppc64/kernel/pci.c | 17
-rw-r--r--  arch/ppc64/kernel/prom.c | 119
-rw-r--r--  arch/ppc64/kernel/prom_init.c | 3
-rw-r--r--  arch/ppc64/kernel/rtas_pci.c | 6
-rw-r--r--  arch/ppc64/kernel/scanlog.c | 3
-rw-r--r--  arch/ppc64/kernel/sysfs.c | 1
-rw-r--r--  arch/ppc64/kernel/udbg.c | 55
-rw-r--r--  arch/s390/Makefile | 4
-rw-r--r--  arch/s390/kernel/Makefile | 4
-rw-r--r--  arch/s390/kernel/entry.S | 4
-rw-r--r--  arch/s390/kernel/entry64.S | 4
-rw-r--r--  arch/s390/kernel/head.S | 383
-rw-r--r--  arch/s390/kernel/head31.S | 336
-rw-r--r--  arch/s390/kernel/head64.S | 543
-rw-r--r--  arch/s390/kernel/time.c | 8
-rw-r--r--  arch/s390/kernel/traps.c | 29
-rw-r--r--  arch/s390/mm/extmem.c | 8
-rw-r--r--  arch/s390/mm/fault.c | 113
-rw-r--r--  arch/sh/Kconfig | 28
-rw-r--r--  arch/sh/Makefile | 8
-rw-r--r--  arch/sh/drivers/Makefile | 5
-rw-r--r--  arch/sh/drivers/superhyway/Makefile | 6
-rw-r--r--  arch/sh/drivers/superhyway/ops-sh4-202.c | 171
-rw-r--r--  arch/sh/kernel/ptrace.c | 44
-rw-r--r--  arch/sh/kernel/setup.c | 26
-rw-r--r--  arch/sh/mm/init.c | 21
-rw-r--r--  arch/sh/mm/tlb-sh3.c | 19
-rw-r--r--  arch/sh/ramdisk/Makefile | 20
-rw-r--r--  arch/sh/ramdisk/ld.script | 9
-rw-r--r--  arch/sh64/kernel/ptrace.c | 83
-rw-r--r--  arch/sh64/kernel/syscalls.S | 2
-rw-r--r--  arch/sparc/Kconfig | 8
-rw-r--r--  arch/sparc/kernel/Makefile | 1
-rw-r--r--  arch/sparc/kernel/led.c | 139
-rw-r--r--  arch/sparc/kernel/sunos_ioctl.c | 1
-rw-r--r--  arch/sparc64/Kconfig | 13
-rw-r--r--  arch/sparc64/Kconfig.debug | 10
-rw-r--r--  arch/sparc64/kernel/ioctl32.c | 459
-rw-r--r--  arch/sparc64/kernel/kprobes.c | 165
-rw-r--r--  arch/sparc64/kernel/setup.c | 12
-rw-r--r--  arch/sparc64/kernel/signal32.c | 6
-rw-r--r--  arch/sparc64/kernel/smp.c | 79
-rw-r--r--  arch/sparc64/kernel/sunos_ioctl32.c | 1
-rw-r--r--  arch/sparc64/kernel/time.c | 13
-rw-r--r--  arch/sparc64/kernel/us2e_cpufreq.c | 7
-rw-r--r--  arch/sparc64/kernel/us3_cpufreq.c | 7
-rw-r--r--  arch/sparc64/oprofile/Kconfig | 6
-rw-r--r--  arch/um/Kconfig | 46
-rw-r--r--  arch/um/Makefile | 2
-rw-r--r--  arch/um/Makefile-i386 | 2
-rw-r--r--  arch/um/drivers/chan_user.c | 1
-rw-r--r--  arch/um/drivers/harddog_kern.c | 1
-rw-r--r--  arch/um/drivers/harddog_user.c | 1
-rw-r--r--  arch/um/drivers/net_kern.c | 38
-rw-r--r--  arch/um/drivers/net_user.c | 1
-rw-r--r--  arch/um/drivers/port_user.c | 1
-rw-r--r--  arch/um/drivers/random.c | 6
-rw-r--r--  arch/um/drivers/slip_user.c | 1
-rw-r--r--  arch/um/drivers/slirp_user.c | 1
-rw-r--r--  arch/um/drivers/xterm.c | 1
-rw-r--r--  arch/um/include/helper.h | 27
-rw-r--r--  arch/um/include/mem_user.h | 2
-rw-r--r--  arch/um/include/net_user.h | 2
-rw-r--r--  arch/um/include/os.h | 16
-rw-r--r--  arch/um/include/sysdep-i386/stub.h | 64
-rw-r--r--  arch/um/include/sysdep-x86_64/stub.h | 61
-rw-r--r--  arch/um/include/uml_uaccess.h | 4
-rw-r--r--  arch/um/kernel/Makefile | 7
-rw-r--r--  arch/um/kernel/ksyms.c | 1
-rw-r--r--  arch/um/kernel/mem.c | 4
-rw-r--r--  arch/um/kernel/physmem.c | 4
-rw-r--r--  arch/um/kernel/ptrace.c | 50
-rw-r--r--  arch/um/kernel/sigio_user.c | 3
-rw-r--r--  arch/um/kernel/skas/include/mmu-skas.h | 2
-rw-r--r--  arch/um/kernel/skas/include/skas.h | 3
-rw-r--r--  arch/um/kernel/skas/mem.c | 2
-rw-r--r--  arch/um/kernel/skas/mmu.c | 44
-rw-r--r--  arch/um/kernel/skas/process.c | 17
-rw-r--r--  arch/um/kernel/skas/process_kern.c | 2
-rw-r--r--  arch/um/kernel/tt/uaccess_user.c | 1
-rw-r--r--  arch/um/kernel/uaccess.c | 30
-rw-r--r--  arch/um/kernel/uaccess_user.c | 64
-rw-r--r--  arch/um/kernel/um_arch.c | 10
-rw-r--r--  arch/um/kernel/user_util.c | 1
-rw-r--r--  arch/um/os-Linux/Makefile | 9
-rw-r--r--  arch/um/os-Linux/aio.c | 1
-rw-r--r--  arch/um/os-Linux/drivers/ethertap_user.c | 1
-rw-r--r--  arch/um/os-Linux/drivers/tuntap_user.c | 1
-rw-r--r--  arch/um/os-Linux/helper.c (renamed from arch/um/kernel/helper.c) | 18
-rw-r--r--  arch/um/os-Linux/main.c (renamed from arch/um/kernel/main.c) | 49
-rw-r--r--  arch/um/os-Linux/mem.c | 6
-rw-r--r--  arch/um/os-Linux/start_up.c | 77
-rw-r--r--  arch/um/os-Linux/uaccess.c | 32
-rw-r--r--  arch/um/scripts/Makefile.rules | 7
-rw-r--r--  arch/um/sys-i386/ldt.c | 506
-rw-r--r--  arch/um/sys-x86_64/Makefile | 5
-rw-r--r--  arch/um/sys-x86_64/syscalls.c | 75
-rw-r--r--  arch/v850/kernel/ptrace.c | 43
-rw-r--r--  arch/x86_64/Kconfig | 13
-rw-r--r--  arch/x86_64/Kconfig.debug | 10
-rw-r--r--  arch/x86_64/kernel/kprobes.c | 183
-rw-r--r--  arch/x86_64/kernel/ptrace.c | 43
-rw-r--r--  arch/x86_64/kernel/smpboot.c | 2
-rw-r--r--  arch/x86_64/oprofile/Kconfig | 6
-rw-r--r--  arch/xtensa/kernel/ptrace.c | 55
326 files changed, 9399 insertions, 5264 deletions
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 296bc03d1cf..91d5ef3397b 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -324,7 +324,7 @@ menu "Kernel Features"
config SMP
bool "Symmetric Multi-Processing (EXPERIMENTAL)"
- depends on EXPERIMENTAL && BROKEN #&& n
+ depends on EXPERIMENTAL && REALVIEW_MPCORE
help
This enables support for systems with more than one CPU. If you have
a system with only one CPU, like most personal computers, say N. If
@@ -585,7 +585,7 @@ config FPE_NWFPE
config FPE_NWFPE_XP
bool "Support extended precision"
- depends on FPE_NWFPE && !CPU_BIG_ENDIAN
+ depends on FPE_NWFPE
help
Say Y to include 80-bit support in the kernel floating-point
emulator. Otherwise, only 32 and 64-bit support is compiled in.
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index be439cab92c..a511ec5b11a 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -785,7 +785,7 @@ __kuser_helper_end:
* SP points to a minimal amount of processor-private memory, the address
* of which is copied into r0 for the mode specific abort handler.
*/
- .macro vector_stub, name, correction=0
+ .macro vector_stub, name, mode, correction=0
.align 5
vector_\name:
@@ -805,15 +805,14 @@ vector_\name:
@ Prepare for SVC32 mode. IRQs remain disabled.
@
mrs r0, cpsr
- bic r0, r0, #MODE_MASK
- orr r0, r0, #SVC_MODE
+ eor r0, r0, #(\mode ^ SVC_MODE)
msr spsr_cxsf, r0
@
@ the branch table must immediately follow this code
@
- mov r0, sp
and lr, lr, #0x0f
+ mov r0, sp
ldr lr, [pc, lr, lsl #2]
movs pc, lr @ branch to handler in SVC mode
.endm
@@ -823,7 +822,7 @@ __stubs_start:
/*
* Interrupt dispatcher
*/
- vector_stub irq, 4
+ vector_stub irq, IRQ_MODE, 4
.long __irq_usr @ 0 (USR_26 / USR_32)
.long __irq_invalid @ 1 (FIQ_26 / FIQ_32)
@@ -846,7 +845,7 @@ __stubs_start:
* Data abort dispatcher
* Enter in ABT mode, spsr = USR CPSR, lr = USR PC
*/
- vector_stub dabt, 8
+ vector_stub dabt, ABT_MODE, 8
.long __dabt_usr @ 0 (USR_26 / USR_32)
.long __dabt_invalid @ 1 (FIQ_26 / FIQ_32)
@@ -869,7 +868,7 @@ __stubs_start:
* Prefetch abort dispatcher
* Enter in ABT mode, spsr = USR CPSR, lr = USR PC
*/
- vector_stub pabt, 4
+ vector_stub pabt, ABT_MODE, 4
.long __pabt_usr @ 0 (USR_26 / USR_32)
.long __pabt_invalid @ 1 (FIQ_26 / FIQ_32)
@@ -892,7 +891,7 @@ __stubs_start:
* Undef instr entry dispatcher
* Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
*/
- vector_stub und
+ vector_stub und, UND_MODE
.long __und_usr @ 0 (USR_26 / USR_32)
.long __und_invalid @ 1 (FIQ_26 / FIQ_32)
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index 9bd8609a292..9a340e790da 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -648,7 +648,7 @@ static int ptrace_setwmmxregs(struct task_struct *tsk, void __user *ufp)
#endif
-static int do_ptrace(int request, struct task_struct *child, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
unsigned long tmp;
int ret;
@@ -782,53 +782,6 @@ static int do_ptrace(int request, struct task_struct *child, long addr, long dat
return ret;
}
-asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
-{
- struct task_struct *child;
- int ret;
-
- lock_kernel();
- ret = -EPERM;
- if (request == PTRACE_TRACEME) {
- /* are we already being traced? */
- if (current->ptrace & PT_PTRACED)
- goto out;
- ret = security_ptrace(current->parent, current);
- if (ret)
- goto out;
- /* set the ptrace bit in the process flags. */
- current->ptrace |= PT_PTRACED;
- ret = 0;
- goto out;
- }
- ret = -ESRCH;
- read_lock(&tasklist_lock);
- child = find_task_by_pid(pid);
- if (child)
- get_task_struct(child);
- read_unlock(&tasklist_lock);
- if (!child)
- goto out;
-
- ret = -EPERM;
- if (pid == 1) /* you may not mess with init */
- goto out_tsk;
-
- if (request == PTRACE_ATTACH) {
- ret = ptrace_attach(child);
- goto out_tsk;
- }
- ret = ptrace_check_attach(child, request == PTRACE_KILL);
- if (ret == 0)
- ret = do_ptrace(request, child, addr, data);
-
-out_tsk:
- put_task_struct(child);
-out:
- unlock_kernel();
- return ret;
-}
-
asmlinkage void syscall_trace(int why, struct pt_regs *regs)
{
unsigned long ip;
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index c9b69771f92..85774165e9f 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -338,7 +338,8 @@ void cpu_init(void)
BUG();
}
- dump_cpu_info(cpu);
+ if (system_state == SYSTEM_BOOTING)
+ dump_cpu_info(cpu);
/*
* setup stacks for re-entrant exception handlers
@@ -838,7 +839,12 @@ static int c_show(struct seq_file *m, void *v)
#if defined(CONFIG_SMP)
for_each_online_cpu(i) {
- seq_printf(m, "Processor\t: %d\n", i);
+ /*
+ * glibc reads /proc/cpuinfo to determine the number of
+ * online processors, looking for lines beginning with
+ * "processor". Give glibc what it expects.
+ */
+ seq_printf(m, "processor\t: %d\n", i);
seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n",
per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
(per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
diff --git a/arch/arm/mach-aaec2000/clock.c b/arch/arm/mach-aaec2000/clock.c
index 99e019169dd..0340ddc4824 100644
--- a/arch/arm/mach-aaec2000/clock.c
+++ b/arch/arm/mach-aaec2000/clock.c
@@ -14,6 +14,7 @@
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/err.h>
+#include <linux/string.h>
#include <asm/semaphore.h>
#include <asm/hardware/clock.h>
diff --git a/arch/arm/mach-epxa10db/mm.c b/arch/arm/mach-epxa10db/mm.c
index e8832d0910e..cfd0d2182d4 100644
--- a/arch/arm/mach-epxa10db/mm.c
+++ b/arch/arm/mach-epxa10db/mm.c
@@ -25,6 +25,7 @@
#include <asm/hardware.h>
#include <asm/io.h>
#include <asm/sizes.h>
+#include <asm/page.h>
#include <asm/mach/map.h>
diff --git a/arch/arm/mach-integrator/impd1.c b/arch/arm/mach-integrator/impd1.c
index a1b153d1626..a4bafee77a0 100644
--- a/arch/arm/mach-integrator/impd1.c
+++ b/arch/arm/mach-integrator/impd1.c
@@ -420,8 +420,7 @@ static int impd1_probe(struct lm_device *dev)
free_impd1:
if (impd1 && impd1->base)
iounmap(impd1->base);
- if (impd1)
- kfree(impd1);
+ kfree(impd1);
release_lm:
release_mem_region(dev->resource.start, SZ_4K);
return ret;
diff --git a/arch/arm/mach-ixp2000/core.c b/arch/arm/mach-ixp2000/core.c
index df140962bb0..6851abaf552 100644
--- a/arch/arm/mach-ixp2000/core.c
+++ b/arch/arm/mach-ixp2000/core.c
@@ -84,63 +84,54 @@ static struct map_desc ixp2000_io_desc[] __initdata = {
.virtual = IXP2000_CAP_VIRT_BASE,
.pfn = __phys_to_pfn(IXP2000_CAP_PHYS_BASE),
.length = IXP2000_CAP_SIZE,
- .type = MT_DEVICE
+ .type = MT_IXP2000_DEVICE,
}, {
.virtual = IXP2000_INTCTL_VIRT_BASE,
.pfn = __phys_to_pfn(IXP2000_INTCTL_PHYS_BASE),
.length = IXP2000_INTCTL_SIZE,
- .type = MT_DEVICE
+ .type = MT_IXP2000_DEVICE,
}, {
.virtual = IXP2000_PCI_CREG_VIRT_BASE,
.pfn = __phys_to_pfn(IXP2000_PCI_CREG_PHYS_BASE),
.length = IXP2000_PCI_CREG_SIZE,
- .type = MT_DEVICE
+ .type = MT_IXP2000_DEVICE,
}, {
.virtual = IXP2000_PCI_CSR_VIRT_BASE,
.pfn = __phys_to_pfn(IXP2000_PCI_CSR_PHYS_BASE),
.length = IXP2000_PCI_CSR_SIZE,
- .type = MT_DEVICE
+ .type = MT_IXP2000_DEVICE,
}, {
.virtual = IXP2000_MSF_VIRT_BASE,
.pfn = __phys_to_pfn(IXP2000_MSF_PHYS_BASE),
.length = IXP2000_MSF_SIZE,
- .type = MT_DEVICE
+ .type = MT_IXP2000_DEVICE,
}, {
.virtual = IXP2000_PCI_IO_VIRT_BASE,
.pfn = __phys_to_pfn(IXP2000_PCI_IO_PHYS_BASE),
.length = IXP2000_PCI_IO_SIZE,
- .type = MT_DEVICE
+ .type = MT_IXP2000_DEVICE,
}, {
.virtual = IXP2000_PCI_CFG0_VIRT_BASE,
.pfn = __phys_to_pfn(IXP2000_PCI_CFG0_PHYS_BASE),
.length = IXP2000_PCI_CFG0_SIZE,
- .type = MT_DEVICE
+ .type = MT_IXP2000_DEVICE,
}, {
.virtual = IXP2000_PCI_CFG1_VIRT_BASE,
.pfn = __phys_to_pfn(IXP2000_PCI_CFG1_PHYS_BASE),
.length = IXP2000_PCI_CFG1_SIZE,
- .type = MT_DEVICE
+ .type = MT_IXP2000_DEVICE,
}
};
void __init ixp2000_map_io(void)
{
- extern unsigned int processor_id;
-
/*
- * On IXP2400 CPUs we need to use MT_IXP2000_DEVICE for
- * tweaking the PMDs so XCB=101. On IXP2800s we use the normal
- * PMD flags.
+ * On IXP2400 CPUs we need to use MT_IXP2000_DEVICE so that
+ * XCB=101 (to avoid triggering erratum #66), and given that
+ * this mode speeds up I/O accesses and we have write buffer
+ * flushes in the right places anyway, it doesn't hurt to use
+ * XCB=101 for all IXP2000s.
*/
- if ((processor_id & 0xfffffff0) == 0x69054190) {
- int i;
-
- printk(KERN_INFO "Enabling IXP2400 erratum #66 workaround\n");
-
- for(i=0;i<ARRAY_SIZE(ixp2000_io_desc);i++)
- ixp2000_io_desc[i].type = MT_IXP2000_DEVICE;
- }
-
iotable_init(ixp2000_io_desc, ARRAY_SIZE(ixp2000_io_desc));
/* Set slowport to 8-bit mode. */
diff --git a/arch/arm/mach-ixp2000/uengine.c b/arch/arm/mach-ixp2000/uengine.c
index 43e234349d4..ec4e007a22e 100644
--- a/arch/arm/mach-ixp2000/uengine.c
+++ b/arch/arm/mach-ixp2000/uengine.c
@@ -91,8 +91,8 @@ EXPORT_SYMBOL(ixp2000_uengine_csr_write);
void ixp2000_uengine_reset(u32 uengine_mask)
{
- ixp2000_reg_write(IXP2000_RESET1, uengine_mask & ixp2000_uengine_mask);
- ixp2000_reg_write(IXP2000_RESET1, 0);
+ ixp2000_reg_wrb(IXP2000_RESET1, uengine_mask & ixp2000_uengine_mask);
+ ixp2000_reg_wrb(IXP2000_RESET1, 0);
}
EXPORT_SYMBOL(ixp2000_uengine_reset);
@@ -452,21 +452,20 @@ static int __init ixp2000_uengine_init(void)
/*
* Reset microengines.
*/
- ixp2000_reg_write(IXP2000_RESET1, ixp2000_uengine_mask);
- ixp2000_reg_write(IXP2000_RESET1, 0);
+ ixp2000_uengine_reset(ixp2000_uengine_mask);
/*
* Synchronise timestamp counters across all microengines.
*/
value = ixp2000_reg_read(IXP2000_MISC_CONTROL);
- ixp2000_reg_write(IXP2000_MISC_CONTROL, value & ~0x80);
+ ixp2000_reg_wrb(IXP2000_MISC_CONTROL, value & ~0x80);
for (uengine = 0; uengine < 32; uengine++) {
if (ixp2000_uengine_mask & (1 << uengine)) {
ixp2000_uengine_csr_write(uengine, TIMESTAMP_LOW, 0);
ixp2000_uengine_csr_write(uengine, TIMESTAMP_HIGH, 0);
}
}
- ixp2000_reg_write(IXP2000_MISC_CONTROL, value | 0x80);
+ ixp2000_reg_wrb(IXP2000_MISC_CONTROL, value | 0x80);
return 0;
}
diff --git a/arch/arm/mach-ixp4xx/common-pci.c b/arch/arm/mach-ixp4xx/common-pci.c
index 2b544363c07..9795da270e3 100644
--- a/arch/arm/mach-ixp4xx/common-pci.c
+++ b/arch/arm/mach-ixp4xx/common-pci.c
@@ -427,7 +427,7 @@ void __init ixp4xx_pci_preinit(void)
#ifdef __ARMEB__
*PCI_CSR = PCI_CSR_IC | PCI_CSR_ABE | PCI_CSR_PDS | PCI_CSR_ADS;
#else
- *PCI_CSR = PCI_CSR_IC;
+ *PCI_CSR = PCI_CSR_IC | PCI_CSR_ABE;
#endif
pr_debug("DONE\n");
diff --git a/arch/arm/mach-pxa/Kconfig b/arch/arm/mach-pxa/Kconfig
index 3e5f69bb5ac..b380a438e68 100644
--- a/arch/arm/mach-pxa/Kconfig
+++ b/arch/arm/mach-pxa/Kconfig
@@ -27,7 +27,8 @@ config PXA_SHARPSL
Say Y here if you intend to run this kernel on a
Sharp Zaurus SL-5600 (Poodle), SL-C700 (Corgi),
SL-C750 (Shepherd), SL-C760 (Husky), SL-C1000 (Akita),
- SL-C3000 (Spitz) or SL-C3100 (Borzoi) handheld computer.
+ SL-C3000 (Spitz), SL-C3100 (Borzoi) or SL-C6000x (Tosa)
+ handheld computer.
endchoice
@@ -37,7 +38,7 @@ choice
prompt "Select target Sharp Zaurus device range"
config PXA_SHARPSL_25x
- bool "Sharp PXA25x models (SL-5600 and SL-C7xx)"
+ bool "Sharp PXA25x models (SL-5600, SL-C7xx and SL-C6000x)"
select PXA25x
config PXA_SHARPSL_27x
@@ -80,6 +81,10 @@ config MACH_BORZOI
depends PXA_SHARPSL_27x
select PXA_SHARP_Cxx00
+config MACH_TOSA
+ bool "Enable Sharp SL-6000x (Tosa) Support"
+ depends PXA_SHARPSL
+
config PXA25x
bool
help
diff --git a/arch/arm/mach-pxa/Makefile b/arch/arm/mach-pxa/Makefile
index f609a0f232c..8bc72d07cea 100644
--- a/arch/arm/mach-pxa/Makefile
+++ b/arch/arm/mach-pxa/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_ARCH_PXA_IDP) += idp.o
obj-$(CONFIG_PXA_SHARP_C7xx) += corgi.o corgi_ssp.o corgi_lcd.o ssp.o
obj-$(CONFIG_PXA_SHARP_Cxx00) += spitz.o corgi_ssp.o corgi_lcd.o ssp.o
obj-$(CONFIG_MACH_POODLE) += poodle.o
+obj-$(CONFIG_MACH_TOSA) += tosa.o
# Support for blinky lights
led-y := leds.o
diff --git a/arch/arm/mach-pxa/corgi_lcd.c b/arch/arm/mach-pxa/corgi_lcd.c
index 54162ba9541..698eb06545c 100644
--- a/arch/arm/mach-pxa/corgi_lcd.c
+++ b/arch/arm/mach-pxa/corgi_lcd.c
@@ -19,6 +19,7 @@
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/module.h>
+#include <linux/string.h>
#include <asm/arch/akita.h>
#include <asm/arch/corgi.h>
#include <asm/arch/hardware.h>
diff --git a/arch/arm/mach-pxa/pm.c b/arch/arm/mach-pxa/pm.c
index ac4dd433616..f74b9af112d 100644
--- a/arch/arm/mach-pxa/pm.c
+++ b/arch/arm/mach-pxa/pm.c
@@ -12,6 +12,7 @@
*/
#include <linux/config.h>
#include <linux/init.h>
+#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/errno.h>
#include <linux/time.h>
@@ -19,6 +20,7 @@
#include <asm/hardware.h>
#include <asm/memory.h>
#include <asm/system.h>
+#include <asm/arch/pm.h>
#include <asm/arch/pxa-regs.h>
#include <asm/arch/lubbock.h>
#include <asm/mach/time.h>
@@ -72,7 +74,7 @@ enum { SLEEP_SAVE_START = 0,
};
-static int pxa_pm_enter(suspend_state_t state)
+int pxa_pm_enter(suspend_state_t state)
{
unsigned long sleep_save[SLEEP_SAVE_SIZE];
unsigned long checksum = 0;
@@ -191,6 +193,8 @@ static int pxa_pm_enter(suspend_state_t state)
return 0;
}
+EXPORT_SYMBOL_GPL(pxa_pm_enter);
+
unsigned long sleep_phys_sp(void *sp)
{
return virt_to_phys(sp);
@@ -199,21 +203,25 @@ unsigned long sleep_phys_sp(void *sp)
/*
* Called after processes are frozen, but before we shut down devices.
*/
-static int pxa_pm_prepare(suspend_state_t state)
+int pxa_pm_prepare(suspend_state_t state)
{
extern int pxa_cpu_pm_prepare(suspend_state_t state);
return pxa_cpu_pm_prepare(state);
}
+EXPORT_SYMBOL_GPL(pxa_pm_prepare);
+
/*
* Called after devices are re-setup, but before processes are thawed.
*/
-static int pxa_pm_finish(suspend_state_t state)
+int pxa_pm_finish(suspend_state_t state)
{
return 0;
}
+EXPORT_SYMBOL_GPL(pxa_pm_finish);
+
/*
* Set to PM_DISK_FIRMWARE so we can quickly veto suspend-to-disk.
*/
@@ -230,4 +238,4 @@ static int __init pxa_pm_init(void)
return 0;
}
-late_initcall(pxa_pm_init);
+device_initcall(pxa_pm_init);
diff --git a/arch/arm/mach-pxa/tosa.c b/arch/arm/mach-pxa/tosa.c
new file mode 100644
index 00000000000..400609f8b6a
--- /dev/null
+++ b/arch/arm/mach-pxa/tosa.c
@@ -0,0 +1,162 @@
+/*
+ * Support for Sharp SL-C6000x PDAs
+ * Model: (Tosa)
+ *
+ * Copyright (c) 2005 Dirk Opfer
+ *
+ * Based on code written by Sharp/Lineo for 2.4 kernels
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/major.h>
+#include <linux/fs.h>
+#include <linux/interrupt.h>
+#include <linux/mmc/host.h>
+
+#include <asm/setup.h>
+#include <asm/memory.h>
+#include <asm/mach-types.h>
+#include <asm/hardware.h>
+#include <asm/irq.h>
+#include <asm/arch/irda.h>
+#include <asm/arch/mmc.h>
+#include <asm/arch/udc.h>
+
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+#include <asm/mach/irq.h>
+
+#include <asm/arch/pxa-regs.h>
+#include <asm/arch/irq.h>
+#include <asm/arch/tosa.h>
+
+#include <asm/hardware/scoop.h>
+#include <asm/mach/sharpsl_param.h>
+
+#include "generic.h"
+
+
+/*
+ * SCOOP Device
+ */
+static struct resource tosa_scoop_resources[] = {
+ [0] = {
+ .start = TOSA_CF_PHYS,
+ .end = TOSA_CF_PHYS + 0xfff,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct scoop_config tosa_scoop_setup = {
+ .io_dir = TOSA_SCOOP_IO_DIR,
+ .io_out = TOSA_SCOOP_IO_OUT,
+
+};
+
+struct platform_device tosascoop_device = {
+ .name = "sharp-scoop",
+ .id = 0,
+ .dev = {
+ .platform_data = &tosa_scoop_setup,
+ },
+ .num_resources = ARRAY_SIZE(tosa_scoop_resources),
+ .resource = tosa_scoop_resources,
+};
+
+
+/*
+ * SCOOP Device Jacket
+ */
+static struct resource tosa_scoop_jc_resources[] = {
+ [0] = {
+ .start = TOSA_SCOOP_PHYS + 0x40,
+ .end = TOSA_SCOOP_PHYS + 0xfff,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct scoop_config tosa_scoop_jc_setup = {
+ .io_dir = TOSA_SCOOP_JC_IO_DIR,
+ .io_out = TOSA_SCOOP_JC_IO_OUT,
+};
+
+struct platform_device tosascoop_jc_device = {
+ .name = "sharp-scoop",
+ .id = 1,
+ .dev = {
+ .platform_data = &tosa_scoop_jc_setup,
+ .parent = &tosascoop_device.dev,
+ },
+ .num_resources = ARRAY_SIZE(tosa_scoop_jc_resources),
+ .resource = tosa_scoop_jc_resources,
+};
+
+static struct scoop_pcmcia_dev tosa_pcmcia_scoop[] = {
+{
+ .dev = &tosascoop_device.dev,
+ .irq = TOSA_IRQ_GPIO_CF_IRQ,
+ .cd_irq = TOSA_IRQ_GPIO_CF_CD,
+ .cd_irq_str = "PCMCIA0 CD",
+},{
+ .dev = &tosascoop_jc_device.dev,
+ .irq = TOSA_IRQ_GPIO_JC_CF_IRQ,
+ .cd_irq = -1,
+},
+};
+
+
+static struct platform_device *devices[] __initdata = {
+ &tosascoop_device,
+ &tosascoop_jc_device,
+};
+
+static void __init tosa_init(void)
+{
+ pxa_gpio_mode(TOSA_GPIO_ON_RESET | GPIO_IN);
+ pxa_gpio_mode(TOSA_GPIO_TC6393_INT | GPIO_IN);
+
+ /* setup sleep mode values */
+ PWER = 0x00000002;
+ PFER = 0x00000000;
+ PRER = 0x00000002;
+ PGSR0 = 0x00000000;
+ PGSR1 = 0x00FF0002;
+ PGSR2 = 0x00014000;
+ PCFR |= PCFR_OPDE;
+
+ // enable batt_fault
+ PMCR = 0x01;
+
+ platform_add_devices(devices, ARRAY_SIZE(devices));
+
+ scoop_num = 2;
+ scoop_devs = &tosa_pcmcia_scoop[0];
+}
+
+static void __init fixup_tosa(struct machine_desc *desc,
+ struct tag *tags, char **cmdline, struct meminfo *mi)
+{
+ sharpsl_save_param();
+ mi->nr_banks=1;
+ mi->bank[0].start = 0xa0000000;
+ mi->bank[0].node = 0;
+ mi->bank[0].size = (64*1024*1024);
+}
+
+MACHINE_START(TOSA, "SHARP Tosa")
+ .phys_ram = 0xa0000000,
+ .phys_io = 0x40000000,
+ .io_pg_offst = (io_p2v(0x40000000) >> 18) & 0xfffc,
+ .fixup = fixup_tosa,
+ .map_io = pxa_map_io,
+ .init_irq = pxa_init_irq,
+ .init_machine = tosa_init,
+ .timer = &pxa_timer,
+MACHINE_END
diff --git a/arch/arm/mach-realview/Kconfig b/arch/arm/mach-realview/Kconfig
index 4b63dc9eabf..129976866d4 100644
--- a/arch/arm/mach-realview/Kconfig
+++ b/arch/arm/mach-realview/Kconfig
@@ -8,4 +8,13 @@ config MACH_REALVIEW_EB
help
Include support for the ARM(R) RealView Emulation Baseboard platform.
+config REALVIEW_MPCORE
+ bool "Support MPcore tile"
+ depends on MACH_REALVIEW_EB
+ help
+ Enable support for the MPCore tile on the Realview platform.
+ Since there are device address and interrupt differences, a
+ kernel built with this option enabled is not compatible with
+ other tiles.
+
endmenu
diff --git a/arch/arm/mach-realview/Makefile b/arch/arm/mach-realview/Makefile
index 8d37ea1605f..011a85c1062 100644
--- a/arch/arm/mach-realview/Makefile
+++ b/arch/arm/mach-realview/Makefile
@@ -4,3 +4,4 @@
obj-y := core.o clock.o
obj-$(CONFIG_MACH_REALVIEW_EB) += realview_eb.o
+obj-$(CONFIG_SMP) += platsmp.o headsmp.o
diff --git a/arch/arm/mach-realview/core.h b/arch/arm/mach-realview/core.h
index 575599db74d..d83e8bad203 100644
--- a/arch/arm/mach-realview/core.h
+++ b/arch/arm/mach-realview/core.h
@@ -23,6 +23,7 @@
#define __ASM_ARCH_REALVIEW_H
#include <asm/hardware/amba.h>
+#include <asm/leds.h>
#include <asm/io.h>
#define __io_address(n) __io(IO_ADDRESS(n))
diff --git a/arch/arm/mach-realview/headsmp.S b/arch/arm/mach-realview/headsmp.S
new file mode 100644
index 00000000000..4075473cf68
--- /dev/null
+++ b/arch/arm/mach-realview/headsmp.S
@@ -0,0 +1,39 @@
+/*
+ * linux/arch/arm/mach-realview/headsmp.S
+ *
+ * Copyright (c) 2003 ARM Limited
+ * All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/linkage.h>
+#include <linux/init.h>
+
+ __INIT
+
+/*
+ * Realview specific entry point for secondary CPUs. This provides
+ * a "holding pen" into which all secondary cores are held until we're
+ * ready for them to initialise.
+ */
+ENTRY(realview_secondary_startup)
+ mrc p15, 0, r0, c0, c0, 5
+ and r0, r0, #15
+ adr r4, 1f
+ ldmia r4, {r5, r6}
+ sub r4, r4, r5
+ add r6, r6, r4
+pen: ldr r7, [r6]
+ cmp r7, r0
+ bne pen
+
+ /*
+ * we've been released from the holding pen: secondary_stack
+ * should now contain the SVC stack for this core
+ */
+ b secondary_startup
+
+1: .long .
+ .long pen_release
diff --git a/arch/arm/mach-realview/platsmp.c b/arch/arm/mach-realview/platsmp.c
new file mode 100644
index 00000000000..9844644d0fb
--- /dev/null
+++ b/arch/arm/mach-realview/platsmp.c
@@ -0,0 +1,195 @@
+/*
+ * linux/arch/arm/mach-realview/platsmp.c
+ *
+ * Copyright (C) 2002 ARM Ltd.
+ * All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/smp.h>
+
+#include <asm/cacheflush.h>
+#include <asm/hardware/arm_scu.h>
+#include <asm/hardware.h>
+
+#include "core.h"
+
+extern void realview_secondary_startup(void);
+
+/*
+ * control for which core is the next to come out of the secondary
+ * boot "holding pen"
+ */
+volatile int __cpuinitdata pen_release = -1;
+
+static unsigned int __init get_core_count(void)
+{
+ unsigned int ncores;
+
+ ncores = __raw_readl(IO_ADDRESS(REALVIEW_MPCORE_SCU_BASE) + SCU_CONFIG);
+
+ return (ncores & 0x03) + 1;
+}
+
+static DEFINE_SPINLOCK(boot_lock);
+
+void __cpuinit platform_secondary_init(unsigned int cpu)
+{
+ /*
+ * the primary core may have used a "cross call" soft interrupt
+ * to get this processor out of WFI in the BootMonitor - make
+ * sure that we are no longer being sent this soft interrupt
+ */
+ smp_cross_call_done(cpumask_of_cpu(cpu));
+
+ /*
+ * if any interrupts are already enabled for the primary
+ * core (e.g. timer irq), then they will not have been enabled
+ * for us: do so
+ */
+ gic_cpu_init(__io_address(REALVIEW_GIC_CPU_BASE));
+
+ /*
+ * let the primary processor know we're out of the
+ * pen, then head off into the C entry point
+ */
+ pen_release = -1;
+
+ /*
+ * Synchronise with the boot thread.
+ */
+ spin_lock(&boot_lock);
+ spin_unlock(&boot_lock);
+}
+
+int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
+{
+ unsigned long timeout;
+
+ /*
+ * set synchronisation state between this boot processor
+ * and the secondary one
+ */
+ spin_lock(&boot_lock);
+
+ /*
+ * The secondary processor is waiting to be released from
+ * the holding pen - release it, then wait for it to flag
+ * that it has been released by resetting pen_release.
+ *
+ * Note that "pen_release" is the hardware CPU ID, whereas
+ * "cpu" is Linux's internal ID.
+ */
+ pen_release = cpu;
+ flush_cache_all();
+
+ /*
+ * XXX
+ *
+ * This is a later addition to the booting protocol: the
+ * bootMonitor now puts secondary cores into WFI, so
+ * poke_milo() no longer gets the cores moving; we need
+ * to send a soft interrupt to wake the secondary core.
+ * Use smp_cross_call() for this, since there's little
+ * point duplicating the code here
+ */
+ smp_cross_call(cpumask_of_cpu(cpu));
+
+ timeout = jiffies + (1 * HZ);
+ while (time_before(jiffies, timeout)) {
+ if (pen_release == -1)
+ break;
+
+ udelay(10);
+ }
+
+ /*
+ * now the secondary core is starting up let it run its
+ * calibrations, then wait for it to finish
+ */
+ spin_unlock(&boot_lock);
+
+ return pen_release != -1 ? -ENOSYS : 0;
+}
+
+static void __init poke_milo(void)
+{
+ extern void secondary_startup(void);
+
+ /* nobody is to be released from the pen yet */
+ pen_release = -1;
+
+ /*
+ * write the address of secondary startup into the system-wide
+ * flags register, then clear the bottom two bits, which is what
+ * BootMonitor is waiting for
+ */
+#if 1
+#define REALVIEW_SYS_FLAGSS_OFFSET 0x30
+ __raw_writel(virt_to_phys(realview_secondary_startup),
+ (IO_ADDRESS(REALVIEW_SYS_BASE) +
+ REALVIEW_SYS_FLAGSS_OFFSET));
+#define REALVIEW_SYS_FLAGSC_OFFSET 0x34
+ __raw_writel(3,
+ (IO_ADDRESS(REALVIEW_SYS_BASE) +
+ REALVIEW_SYS_FLAGSC_OFFSET));
+#endif
+
+ mb();
+}
+
+void __init smp_prepare_cpus(unsigned int max_cpus)
+{
+ unsigned int ncores = get_core_count();
+ unsigned int cpu = smp_processor_id();
+ int i;
+
+ /* sanity check */
+ if (ncores == 0) {
+ printk(KERN_ERR
+ "Realview: strange CM count of 0? Default to 1\n");
+
+ ncores = 1;
+ }
+
+ if (ncores > NR_CPUS) {
+ printk(KERN_WARNING
+ "Realview: no. of cores (%d) greater than configured "
+ "maximum of %d - clipping\n",
+ ncores, NR_CPUS);
+ ncores = NR_CPUS;
+ }
+
+ smp_store_cpu_info(cpu);
+
+ /*
+ * are we trying to boot more cores than exist?
+ */
+ if (max_cpus > ncores)
+ max_cpus = ncores;
+
+ /*
+ * Initialise the possible/present maps.
+ * cpu_possible_map describes the set of CPUs which may be present
+ * cpu_present_map describes the set of CPUs populated
+ */
+ for (i = 0; i < max_cpus; i++) {
+ cpu_set(i, cpu_possible_map);
+ cpu_set(i, cpu_present_map);
+ }
+
+ /*
+ * Do we need any more CPUs? If so, then let them know where
+ * to start. Note that, on modern versions of MILO, the "poke"
+ * doesn't actually do anything until each individual core is
+ * sent a soft interrupt to get it out of WFI
+ */
+ if (max_cpus > 1)
+ poke_milo();
+}
diff --git a/arch/arm/mach-realview/realview_eb.c b/arch/arm/mach-realview/realview_eb.c
index 267bb07e39b..7dc32503fdf 100644
--- a/arch/arm/mach-realview/realview_eb.c
+++ b/arch/arm/mach-realview/realview_eb.c
@@ -136,6 +136,11 @@ static struct amba_device *amba_devs[] __initdata = {
static void __init gic_init_irq(void)
{
+#ifdef CONFIG_REALVIEW_MPCORE
+ writel(0x0000a05f, __io_address(REALVIEW_SYS_LOCK));
+ writel(0x008003c0, __io_address(REALVIEW_SYS_BASE) + 0xd8);
+ writel(0x00000000, __io_address(REALVIEW_SYS_LOCK));
+#endif
gic_dist_init(__io_address(REALVIEW_GIC_DIST_BASE));
gic_cpu_init(__io_address(REALVIEW_GIC_CPU_BASE));
}
diff --git a/arch/arm/mm/mm-armv.c b/arch/arm/mm/mm-armv.c
index fb5b40289de..9e50127be63 100644
--- a/arch/arm/mm/mm-armv.c
+++ b/arch/arm/mm/mm-armv.c
@@ -354,7 +354,7 @@ void __init build_mem_type_table(void)
{
struct cachepolicy *cp;
unsigned int cr = get_cr();
- unsigned int user_pgprot;
+ unsigned int user_pgprot, kern_pgprot;
int cpu_arch = cpu_architecture();
int i;
@@ -381,7 +381,7 @@ void __init build_mem_type_table(void)
}
cp = &cache_policies[cachepolicy];
- user_pgprot = cp->pte;
+ kern_pgprot = user_pgprot = cp->pte;
/*
* ARMv6 and above have extended page tables.
@@ -393,6 +393,7 @@ void __init build_mem_type_table(void)
*/
mem_types[MT_MEMORY].prot_sect &= ~PMD_BIT4;
mem_types[MT_ROM].prot_sect &= ~PMD_BIT4;
+
/*
* Mark cache clean areas and XIP ROM read only
* from SVC mode and no access from userspace.
@@ -412,32 +413,47 @@ void __init build_mem_type_table(void)
* (iow, non-global)
*/
user_pgprot |= L_PTE_ASID;
+
+#ifdef CONFIG_SMP
+ /*
+ * Mark memory with the "shared" attribute for SMP systems
+ */
+ user_pgprot |= L_PTE_SHARED;
+ kern_pgprot |= L_PTE_SHARED;
+ mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
+#endif
}
+ for (i = 0; i < 16; i++) {
+ unsigned long v = pgprot_val(protection_map[i]);
+ v = (v & ~(L_PTE_BUFFERABLE|L_PTE_CACHEABLE)) | user_pgprot;
+ protection_map[i] = __pgprot(v);
+ }
+
+ mem_types[MT_LOW_VECTORS].prot_pte |= kern_pgprot;
+ mem_types[MT_HIGH_VECTORS].prot_pte |= kern_pgprot;
+
if (cpu_arch >= CPU_ARCH_ARMv5) {
- mem_types[MT_LOW_VECTORS].prot_pte |= cp->pte & PTE_CACHEABLE;
- mem_types[MT_HIGH_VECTORS].prot_pte |= cp->pte & PTE_CACHEABLE;
+#ifndef CONFIG_SMP
+ /*
+ * Only use write-through for non-SMP systems
+ */
+ mem_types[MT_LOW_VECTORS].prot_pte &= ~L_PTE_BUFFERABLE;
+ mem_types[MT_HIGH_VECTORS].prot_pte &= ~L_PTE_BUFFERABLE;
+#endif
} else {
- mem_types[MT_LOW_VECTORS].prot_pte |= cp->pte;
- mem_types[MT_HIGH_VECTORS].prot_pte |= cp->pte;
mem_types[MT_MINICLEAN].prot_sect &= ~PMD_SECT_TEX(1);
}
+ pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
+ L_PTE_DIRTY | L_PTE_WRITE |
+ L_PTE_EXEC | kern_pgprot);
+
mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
mem_types[MT_ROM].prot_sect |= cp->pmd;
- for (i = 0; i < 16; i++) {
- unsigned long v = pgprot_val(protection_map[i]);
- v = (v & ~(PTE_BUFFERABLE|PTE_CACHEABLE)) | user_pgprot;
- protection_map[i] = __pgprot(v);
- }
-
- pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
- L_PTE_DIRTY | L_PTE_WRITE |
- L_PTE_EXEC | cp->pte);
-
switch (cp->pmd) {
case PMD_SECT_WT:
mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index 9bb5fff406f..92f3ca31b7b 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -12,6 +12,7 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
+#include <asm/hardware/arm_scu.h>
#include <asm/procinfo.h>
#include <asm/pgtable.h>
@@ -112,6 +113,9 @@ ENTRY(cpu_v6_dcache_clean_area)
ENTRY(cpu_v6_switch_mm)
mov r2, #0
ldr r1, [r1, #MM_CONTEXT_ID] @ get mm->context.id
+#ifdef CONFIG_SMP
+ orr r0, r0, #2 @ set shared pgtable
+#endif
mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB
mcr p15, 0, r2, c7, c10, 4 @ drain write buffer
mcr p15, 0, r0, c2, c0, 0 @ set TTB 0
@@ -140,7 +144,7 @@ ENTRY(cpu_v6_switch_mm)
ENTRY(cpu_v6_set_pte)
str r1, [r0], #-2048 @ linux version
- bic r2, r1, #0x000007f0
+ bic r2, r1, #0x000003f0
bic r2, r2, #0x00000003
orr r2, r2, #PTE_EXT_AP0 | 2
@@ -191,6 +195,23 @@ cpu_v6_name:
* - cache type register is implemented
*/
__v6_setup:
+#ifdef CONFIG_SMP
+ /* Set up the SCU on core 0 only */
+ mrc p15, 0, r0, c0, c0, 5 @ CPU core number
+ ands r0, r0, #15
+ moveq r0, #0x10000000 @ SCU_BASE
+ orreq r0, r0, #0x00100000
+ ldreq r5, [r0, #SCU_CTRL]
+ orreq r5, r5, #1
+ streq r5, [r0, #SCU_CTRL]
+
+#ifndef CONFIG_CPU_DCACHE_DISABLE
+ mrc p15, 0, r0, c1, c0, 1 @ Enable SMP/nAMP mode
+ orr r0, r0, #0x20
+ mcr p15, 0, r0, c1, c0, 1
+#endif
+#endif
+
mov r0, #0
mcr p15, 0, r0, c7, c14, 0 @ clean+invalidate D cache
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
@@ -198,6 +219,9 @@ __v6_setup:
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
mcr p15, 0, r0, c8, c7, 0 @ invalidate I + D TLBs
mcr p15, 0, r0, c2, c0, 2 @ TTB control register
+#ifdef CONFIG_SMP
+ orr r4, r4, #2 @ set shared pgtable
+#endif
mcr p15, 0, r4, c2, c0, 1 @ load TTB1
#ifdef CONFIG_VFP
mrc p15, 0, r0, c1, c0, 2
diff --git a/arch/arm/nwfpe/fpa11.h b/arch/arm/nwfpe/fpa11.h
index 9677ae8448e..da4c616b6c4 100644
--- a/arch/arm/nwfpe/fpa11.h
+++ b/arch/arm/nwfpe/fpa11.h
@@ -60,7 +60,7 @@ typedef union tagFPREG {
#ifdef CONFIG_FPE_NWFPE_XP
floatx80 fExtended;
#else
- int padding[3];
+ u32 padding[3];
#endif
} FPREG;
diff --git a/arch/arm/nwfpe/fpa11_cpdt.c b/arch/arm/nwfpe/fpa11_cpdt.c
index b0db5cbcc3b..32859fa8dcf 100644
--- a/arch/arm/nwfpe/fpa11_cpdt.c
+++ b/arch/arm/nwfpe/fpa11_cpdt.c
@@ -59,8 +59,13 @@ static inline void loadExtended(const unsigned int Fn, const unsigned int __user
p = (unsigned int *) &fpa11->fpreg[Fn].fExtended;
fpa11->fType[Fn] = typeExtended;
get_user(p[0], &pMem[0]); /* sign & exponent */
+#ifdef __ARMEB__
+ get_user(p[1], &pMem[1]); /* ms bits */
+ get_user(p[2], &pMem[2]); /* ls bits */
+#else
get_user(p[1], &pMem[2]); /* ls bits */
get_user(p[2], &pMem[1]); /* ms bits */
+#endif
}
#endif
@@ -177,8 +182,13 @@ static inline void storeExtended(const unsigned int Fn, unsigned int __user *pMe
}
put_user(val.i[0], &pMem[0]); /* sign & exp */
+#ifdef __ARMEB__
+ put_user(val.i[1], &pMem[1]); /* msw */
+ put_user(val.i[2], &pMem[2]);
+#else
put_user(val.i[1], &pMem[2]);
put_user(val.i[2], &pMem[1]); /* msw */
+#endif
}
#endif
diff --git a/arch/arm/nwfpe/fpopcode.c b/arch/arm/nwfpe/fpopcode.c
index 4c9f5703148..67ff2ab08ea 100644
--- a/arch/arm/nwfpe/fpopcode.c
+++ b/arch/arm/nwfpe/fpopcode.c
@@ -29,14 +29,14 @@
#ifdef CONFIG_FPE_NWFPE_XP
const floatx80 floatx80Constant[] = {
- {0x0000, 0x0000000000000000ULL}, /* extended 0.0 */
- {0x3fff, 0x8000000000000000ULL}, /* extended 1.0 */
- {0x4000, 0x8000000000000000ULL}, /* extended 2.0 */
- {0x4000, 0xc000000000000000ULL}, /* extended 3.0 */
- {0x4001, 0x8000000000000000ULL}, /* extended 4.0 */
- {0x4001, 0xa000000000000000ULL}, /* extended 5.0 */
- {0x3ffe, 0x8000000000000000ULL}, /* extended 0.5 */
- {0x4002, 0xa000000000000000ULL} /* extended 10.0 */
+ { .high = 0x0000, .low = 0x0000000000000000ULL},/* extended 0.0 */
+ { .high = 0x3fff, .low = 0x8000000000000000ULL},/* extended 1.0 */
+ { .high = 0x4000, .low = 0x8000000000000000ULL},/* extended 2.0 */
+ { .high = 0x4000, .low = 0xc000000000000000ULL},/* extended 3.0 */
+ { .high = 0x4001, .low = 0x8000000000000000ULL},/* extended 4.0 */
+ { .high = 0x4001, .low = 0xa000000000000000ULL},/* extended 5.0 */
+ { .high = 0x3ffe, .low = 0x8000000000000000ULL},/* extended 0.5 */
+ { .high = 0x4002, .low = 0xa000000000000000ULL},/* extended 10.0 */
};
#endif
diff --git a/arch/arm/nwfpe/softfloat-specialize b/arch/arm/nwfpe/softfloat-specialize
index acf40914476..d4a4c8e0663 100644
--- a/arch/arm/nwfpe/softfloat-specialize
+++ b/arch/arm/nwfpe/softfloat-specialize
@@ -332,6 +332,7 @@ static floatx80 commonNaNToFloatx80( commonNaNT a )
z.low = LIT64( 0xC000000000000000 ) | ( a.high>>1 );
z.high = ( ( (bits16) a.sign )<<15 ) | 0x7FFF;
+ z.__padding = 0;
return z;
}
diff --git a/arch/arm/nwfpe/softfloat.c b/arch/arm/nwfpe/softfloat.c
index f9f049132a1..0f9656e482b 100644
--- a/arch/arm/nwfpe/softfloat.c
+++ b/arch/arm/nwfpe/softfloat.c
@@ -531,6 +531,7 @@ INLINE floatx80 packFloatx80( flag zSign, int32 zExp, bits64 zSig )
z.low = zSig;
z.high = ( ( (bits16) zSign )<<15 ) + zExp;
+ z.__padding = 0;
return z;
}
@@ -2831,6 +2832,7 @@ static floatx80 subFloatx80Sigs( struct roundingData *roundData, floatx80 a, flo
roundData->exception |= float_flag_invalid;
z.low = floatx80_default_nan_low;
z.high = floatx80_default_nan_high;
+ z.__padding = 0;
return z;
}
if ( aExp == 0 ) {
@@ -2950,6 +2952,7 @@ floatx80 floatx80_mul( struct roundingData *roundData, floatx80 a, floatx80 b )
roundData->exception |= float_flag_invalid;
z.low = floatx80_default_nan_low;
z.high = floatx80_default_nan_high;
+ z.__padding = 0;
return z;
}
return packFloatx80( zSign, 0x7FFF, LIT64( 0x8000000000000000 ) );
@@ -3015,6 +3018,7 @@ floatx80 floatx80_div( struct roundingData *roundData, floatx80 a, floatx80 b )
roundData->exception |= float_flag_invalid;
z.low = floatx80_default_nan_low;
z.high = floatx80_default_nan_high;
+ z.__padding = 0;
return z;
}
roundData->exception |= float_flag_divbyzero;
@@ -3093,6 +3097,7 @@ floatx80 floatx80_rem( struct roundingData *roundData, floatx80 a, floatx80 b )
roundData->exception |= float_flag_invalid;
z.low = floatx80_default_nan_low;
z.high = floatx80_default_nan_high;
+ z.__padding = 0;
return z;
}
normalizeFloatx80Subnormal( bSig, &bExp, &bSig );
@@ -3184,6 +3189,7 @@ floatx80 floatx80_sqrt( struct roundingData *roundData, floatx80 a )
roundData->exception |= float_flag_invalid;
z.low = floatx80_default_nan_low;
z.high = floatx80_default_nan_high;
+ z.__padding = 0;
return z;
}
if ( aExp == 0 ) {
diff --git a/arch/arm/nwfpe/softfloat.h b/arch/arm/nwfpe/softfloat.h
index 14151700b6b..978c699673c 100644
--- a/arch/arm/nwfpe/softfloat.h
+++ b/arch/arm/nwfpe/softfloat.h
@@ -51,11 +51,17 @@ input or output the `floatx80' type will be defined.
Software IEC/IEEE floating-point types.
-------------------------------------------------------------------------------
*/
-typedef unsigned long int float32;
-typedef unsigned long long float64;
+typedef u32 float32;
+typedef u64 float64;
typedef struct {
- unsigned short high;
- unsigned long long low;
+#ifdef __ARMEB__
+ u16 __padding;
+ u16 high;
+#else
+ u16 high;
+ u16 __padding;
+#endif
+ u64 low;
} floatx80;
/*
diff --git a/arch/arm26/kernel/ptrace.c b/arch/arm26/kernel/ptrace.c
index cf7e977d18c..4e6b7356a72 100644
--- a/arch/arm26/kernel/ptrace.c
+++ b/arch/arm26/kernel/ptrace.c
@@ -546,7 +546,7 @@ static int ptrace_setfpregs(struct task_struct *tsk, void *ufp)
sizeof(struct user_fp)) ? -EFAULT : 0;
}
-static int do_ptrace(int request, struct task_struct *child, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
unsigned long tmp;
int ret;
@@ -665,53 +665,6 @@ static int do_ptrace(int request, struct task_struct *child, long addr, long dat
return ret;
}
-asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
-{
- struct task_struct *child;
- int ret;
-
- lock_kernel();
- ret = -EPERM;
- if (request == PTRACE_TRACEME) {
- /* are we already being traced? */
- if (current->ptrace & PT_PTRACED)
- goto out;
- ret = security_ptrace(current->parent, current);
- if (ret)
- goto out;
- /* set the ptrace bit in the process flags. */
- current->ptrace |= PT_PTRACED;
- ret = 0;
- goto out;
- }
- ret = -ESRCH;
- read_lock(&tasklist_lock);
- child = find_task_by_pid(pid);
- if (child)
- get_task_struct(child);
- read_unlock(&tasklist_lock);
- if (!child)
- goto out;
-
- ret = -EPERM;
- if (pid == 1) /* you may not mess with init */
- goto out_tsk;
-
- if (request == PTRACE_ATTACH) {
- ret = ptrace_attach(child);
- goto out_tsk;
- }
- ret = ptrace_check_attach(child, request == PTRACE_KILL);
- if (ret == 0)
- ret = do_ptrace(request, child, addr, data);
-
-out_tsk:
- put_task_struct(child);
-out:
- unlock_kernel();
- return ret;
-}
-
asmlinkage void syscall_trace(int why, struct pt_regs *regs)
{
unsigned long ip;
diff --git a/arch/cris/arch-v10/README.mm b/arch/cris/arch-v10/README.mm
index 6f08903f313..517d1f027fe 100644
--- a/arch/cris/arch-v10/README.mm
+++ b/arch/cris/arch-v10/README.mm
@@ -177,7 +177,7 @@ The example address is 0xd004000c; in binary this is:
Given the top-level Page Directory, the offset in that directory is calculated
using the upper 8 bits:
-extern inline pgd_t * pgd_offset(struct mm_struct * mm, unsigned long address)
+static inline pgd_t * pgd_offset(struct mm_struct * mm, unsigned long address)
{
return mm->pgd + (address >> PGDIR_SHIFT);
}
@@ -190,14 +190,14 @@ The pgd_t from our example will therefore be the 208'th (0xd0) entry in mm->pgd.
Since the Middle Directory does not exist, it is a unity mapping:
-extern inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
+static inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
{
return (pmd_t *) dir;
}
The Page Table provides the final lookup by using bits 13 to 23 as index:
-extern inline pte_t * pte_offset(pmd_t * dir, unsigned long address)
+static inline pte_t * pte_offset(pmd_t * dir, unsigned long address)
{
return (pte_t *) pmd_page(*dir) + ((address >> PAGE_SHIFT) &
(PTRS_PER_PTE - 1));
diff --git a/arch/cris/arch-v10/kernel/ptrace.c b/arch/cris/arch-v10/kernel/ptrace.c
index 130dd214e41..6cbd34a27b9 100644
--- a/arch/cris/arch-v10/kernel/ptrace.c
+++ b/arch/cris/arch-v10/kernel/ptrace.c
@@ -76,55 +76,11 @@ ptrace_disable(struct task_struct *child)
* (in user space) where the result of the ptrace call is written (instead of
* being returned).
*/
-asmlinkage int
-sys_ptrace(long request, long pid, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
- struct task_struct *child;
int ret;
unsigned long __user *datap = (unsigned long __user *)data;
- lock_kernel();
- ret = -EPERM;
-
- if (request == PTRACE_TRACEME) {
- /* are we already being traced? */
- if (current->ptrace & PT_PTRACED)
- goto out;
- ret = security_ptrace(current->parent, current);
- if (ret)
- goto out;
- /* set the ptrace bit in the process flags. */
- current->ptrace |= PT_PTRACED;
- ret = 0;
- goto out;
- }
-
- ret = -ESRCH;
- read_lock(&tasklist_lock);
- child = find_task_by_pid(pid);
-
- if (child)
- get_task_struct(child);
-
- read_unlock(&tasklist_lock);
-
- if (!child)
- goto out;
-
- ret = -EPERM;
-
- if (pid == 1) /* Leave the init process alone! */
- goto out_tsk;
-
- if (request == PTRACE_ATTACH) {
- ret = ptrace_attach(child);
- goto out_tsk;
- }
-
- ret = ptrace_check_attach(child, request == PTRACE_KILL);
- if (ret < 0)
- goto out_tsk;
-
switch (request) {
/* Read word at location address. */
case PTRACE_PEEKTEXT:
@@ -289,10 +245,7 @@ sys_ptrace(long request, long pid, long addr, long data)
ret = ptrace_request(child, request, addr, data);
break;
}
-out_tsk:
- put_task_struct(child);
-out:
- unlock_kernel();
+
return ret;
}
diff --git a/arch/cris/arch-v10/kernel/signal.c b/arch/cris/arch-v10/kernel/signal.c
index 693771961f8..19bcad05716 100644
--- a/arch/cris/arch-v10/kernel/signal.c
+++ b/arch/cris/arch-v10/kernel/signal.c
@@ -476,7 +476,7 @@ give_sigsegv:
* OK, we're invoking a handler
*/
-extern inline void
+static inline void
handle_signal(int canrestart, unsigned long sig,
siginfo_t *info, struct k_sigaction *ka,
sigset_t *oldset, struct pt_regs * regs)
diff --git a/arch/cris/arch-v32/drivers/cryptocop.c b/arch/cris/arch-v32/drivers/cryptocop.c
index ca72076c630..501fa52d8d3 100644
--- a/arch/cris/arch-v32/drivers/cryptocop.c
+++ b/arch/cris/arch-v32/drivers/cryptocop.c
@@ -277,7 +277,7 @@ struct file_operations cryptocop_fops = {
static void free_cdesc(struct cryptocop_dma_desc *cdesc)
{
DEBUG(printk("free_cdesc: cdesc 0x%p, from_pool=%d\n", cdesc, cdesc->from_pool));
- if (cdesc->free_buf) kfree(cdesc->free_buf);
+ kfree(cdesc->free_buf);
if (cdesc->from_pool) {
unsigned long int flags;
@@ -2950,15 +2950,15 @@ static int cryptocop_ioctl_process(struct inode *inode, struct file *filp, unsig
put_page(outpages[i]);
}
- if (digest_result) kfree(digest_result);
- if (inpages) kfree(inpages);
- if (outpages) kfree(outpages);
+ kfree(digest_result);
+ kfree(inpages);
+ kfree(outpages);
if (cop){
- if (cop->tfrm_op.indata) kfree(cop->tfrm_op.indata);
- if (cop->tfrm_op.outdata) kfree(cop->tfrm_op.outdata);
+ kfree(cop->tfrm_op.indata);
+ kfree(cop->tfrm_op.outdata);
kfree(cop);
}
- if (jc) kfree(jc);
+ kfree(jc);
DEBUG(print_lock_status());
diff --git a/arch/cris/arch-v32/kernel/ptrace.c b/arch/cris/arch-v32/kernel/ptrace.c
index 208489da2a8..5528b83a622 100644
--- a/arch/cris/arch-v32/kernel/ptrace.c
+++ b/arch/cris/arch-v32/kernel/ptrace.c
@@ -99,55 +99,11 @@ ptrace_disable(struct task_struct *child)
}
-asmlinkage int
-sys_ptrace(long request, long pid, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
- struct task_struct *child;
int ret;
unsigned long __user *datap = (unsigned long __user *)data;
- lock_kernel();
- ret = -EPERM;
-
- if (request == PTRACE_TRACEME) {
- /* are we already being traced? */
- if (current->ptrace & PT_PTRACED)
- goto out;
- ret = security_ptrace(current->parent, current);
- if (ret)
- goto out;
- /* set the ptrace bit in the process flags. */
- current->ptrace |= PT_PTRACED;
- ret = 0;
- goto out;
- }
-
- ret = -ESRCH;
- read_lock(&tasklist_lock);
- child = find_task_by_pid(pid);
-
- if (child)
- get_task_struct(child);
-
- read_unlock(&tasklist_lock);
-
- if (!child)
- goto out;
-
- ret = -EPERM;
-
- if (pid == 1) /* Leave the init process alone! */
- goto out_tsk;
-
- if (request == PTRACE_ATTACH) {
- ret = ptrace_attach(child);
- goto out_tsk;
- }
-
- ret = ptrace_check_attach(child, request == PTRACE_KILL);
- if (ret < 0)
- goto out_tsk;
-
switch (request) {
/* Read word at location address. */
case PTRACE_PEEKTEXT:
@@ -347,10 +303,7 @@ sys_ptrace(long request, long pid, long addr, long data)
ret = ptrace_request(child, request, addr, data);
break;
}
-out_tsk:
- put_task_struct(child);
-out:
- unlock_kernel();
+
return ret;
}
diff --git a/arch/cris/arch-v32/kernel/signal.c b/arch/cris/arch-v32/kernel/signal.c
index 0a3614dab88..99e59b3eacf 100644
--- a/arch/cris/arch-v32/kernel/signal.c
+++ b/arch/cris/arch-v32/kernel/signal.c
@@ -513,7 +513,7 @@ give_sigsegv:
}
/* Invoke a singal handler to, well, handle the signal. */
-extern inline void
+static inline void
handle_signal(int canrestart, unsigned long sig,
siginfo_t *info, struct k_sigaction *ka,
sigset_t *oldset, struct pt_regs * regs)
diff --git a/arch/cris/mm/ioremap.c b/arch/cris/mm/ioremap.c
index a92ac987758..1780df3ed9e 100644
--- a/arch/cris/mm/ioremap.c
+++ b/arch/cris/mm/ioremap.c
@@ -16,7 +16,7 @@
#include <asm/tlbflush.h>
#include <asm/arch/memmap.h>
-extern inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
+static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
unsigned long phys_addr, pgprot_t prot)
{
unsigned long end;
diff --git a/arch/frv/kernel/ptrace.c b/arch/frv/kernel/ptrace.c
index cb335a14a31..f953484e7d5 100644
--- a/arch/frv/kernel/ptrace.c
+++ b/arch/frv/kernel/ptrace.c
@@ -106,48 +106,11 @@ void ptrace_enable(struct task_struct *child)
child->thread.frame0->__status |= REG__STATUS_STEP;
}
-asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
- struct task_struct *child;
unsigned long tmp;
int ret;
- lock_kernel();
- ret = -EPERM;
- if (request == PTRACE_TRACEME) {
- /* are we already being traced? */
- if (current->ptrace & PT_PTRACED)
- goto out;
- ret = security_ptrace(current->parent, current);
- if (ret)
- goto out;
- /* set the ptrace bit in the process flags. */
- current->ptrace |= PT_PTRACED;
- ret = 0;
- goto out;
- }
- ret = -ESRCH;
- read_lock(&tasklist_lock);
- child = find_task_by_pid(pid);
- if (child)
- get_task_struct(child);
- read_unlock(&tasklist_lock);
- if (!child)
- goto out;
-
- ret = -EPERM;
- if (pid == 1) /* you may not mess with init */
- goto out_tsk;
-
- if (request == PTRACE_ATTACH) {
- ret = ptrace_attach(child);
- goto out_tsk;
- }
-
- ret = ptrace_check_attach(child, request == PTRACE_KILL);
- if (ret < 0)
- goto out_tsk;
-
switch (request) {
/* when I and D space are separate, these will need to be fixed. */
case PTRACE_PEEKTEXT: /* read word at location addr. */
@@ -351,10 +314,6 @@ asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
ret = -EIO;
break;
}
-out_tsk:
- put_task_struct(child);
-out:
- unlock_kernel();
return ret;
}
diff --git a/arch/h8300/kernel/ptrace.c b/arch/h8300/kernel/ptrace.c
index a569fe4aa28..0ff6f79b0fe 100644
--- a/arch/h8300/kernel/ptrace.c
+++ b/arch/h8300/kernel/ptrace.c
@@ -57,43 +57,10 @@ void ptrace_disable(struct task_struct *child)
h8300_disable_trace(child);
}
-asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
- struct task_struct *child;
int ret;
- lock_kernel();
- ret = -EPERM;
- if (request == PTRACE_TRACEME) {
- /* are we already being traced? */
- if (current->ptrace & PT_PTRACED)
- goto out;
- /* set the ptrace bit in the process flags. */
- current->ptrace |= PT_PTRACED;
- ret = 0;
- goto out;
- }
- ret = -ESRCH;
- read_lock(&tasklist_lock);
- child = find_task_by_pid(pid);
- if (child)
- get_task_struct(child);
- read_unlock(&tasklist_lock);
- if (!child)
- goto out;
-
- ret = -EPERM;
- if (pid == 1) /* you may not mess with init */
- goto out_tsk;
-
- if (request == PTRACE_ATTACH) {
- ret = ptrace_attach(child);
- goto out_tsk;
- }
- ret = ptrace_check_attach(child, request == PTRACE_KILL);
- if (ret < 0)
- goto out_tsk;
-
switch (request) {
case PTRACE_PEEKTEXT: /* read word at location addr. */
case PTRACE_PEEKDATA: {
@@ -251,10 +218,6 @@ asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
ret = -EIO;
break;
}
-out_tsk:
- put_task_struct(child);
-out:
- unlock_kernel();
return ret;
}
diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig
index bac0da731ee..dbf90ad6eac 100644
--- a/arch/i386/Kconfig
+++ b/arch/i386/Kconfig
@@ -997,8 +997,21 @@ source "drivers/Kconfig"
source "fs/Kconfig"
+menu "Instrumentation Support"
+ depends on EXPERIMENTAL
+
source "arch/i386/oprofile/Kconfig"
+config KPROBES
+ bool "Kprobes (EXPERIMENTAL)"
+ help
+ Kprobes allows you to trap at almost any kernel address and
+ execute a callback function. register_kprobe() establishes
+ a probepoint and specifies the callback. Kprobes is useful
+ for kernel debugging, non-intrusive instrumentation and testing.
+ If in doubt, say "N".
+endmenu
+
source "arch/i386/Kconfig.debug"
source "security/Kconfig"
diff --git a/arch/i386/Kconfig.debug b/arch/i386/Kconfig.debug
index 5228c40a6fb..c48b424dd64 100644
--- a/arch/i386/Kconfig.debug
+++ b/arch/i386/Kconfig.debug
@@ -22,16 +22,6 @@ config DEBUG_STACKOVERFLOW
This option will cause messages to be printed if free stack space
drops below a certain limit.
-config KPROBES
- bool "Kprobes"
- depends on DEBUG_KERNEL
- help
- Kprobes allows you to trap at almost any kernel address and
- execute a callback function. register_kprobe() establishes
- a probepoint and specifies the callback. Kprobes is useful
- for kernel debugging, non-intrusive instrumentation and testing.
- If in doubt, say "N".
-
config DEBUG_STACK_USAGE
bool "Stack utilization instrumentation"
depends on DEBUG_KERNEL
diff --git a/arch/i386/kernel/apic.c b/arch/i386/kernel/apic.c
index 7c724ffa08b..496a2c9909f 100644
--- a/arch/i386/kernel/apic.c
+++ b/arch/i386/kernel/apic.c
@@ -559,14 +559,20 @@ void __devinit setup_local_APIC(void)
* If Linux enabled the LAPIC against the BIOS default
* disable it down before re-entering the BIOS on shutdown.
* Otherwise the BIOS may get confused and not power-off.
+ * Additionally clear all LVT entries before disable_local_APIC
+ * for the case where Linux didn't enable the LAPIC.
*/
void lapic_shutdown(void)
{
- if (!cpu_has_apic || !enabled_via_apicbase)
+ if (!cpu_has_apic)
return;
local_irq_disable();
- disable_local_APIC();
+ clear_local_APIC();
+
+ if (enabled_via_apicbase)
+ disable_local_APIC();
+
local_irq_enable();
}
diff --git a/arch/i386/kernel/apm.c b/arch/i386/kernel/apm.c
index d2ef0c2aa93..86e80c55147 100644
--- a/arch/i386/kernel/apm.c
+++ b/arch/i386/kernel/apm.c
@@ -447,8 +447,7 @@ static char * apm_event_name[] = {
"system standby resume",
"capabilities change"
};
-#define NR_APM_EVENT_NAME \
- (sizeof(apm_event_name) / sizeof(apm_event_name[0]))
+#define NR_APM_EVENT_NAME ARRAY_SIZE(apm_event_name)
typedef struct lookup_t {
int key;
@@ -479,7 +478,7 @@ static const lookup_t error_table[] = {
{ APM_NO_ERROR, "BIOS did not set a return code" },
{ APM_NOT_PRESENT, "No APM present" }
};
-#define ERROR_COUNT (sizeof(error_table)/sizeof(lookup_t))
+#define ERROR_COUNT ARRAY_SIZE(error_table)
/**
* apm_error - display an APM error
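The ARRAY_SIZE() conversions here and in the later hunks replace open-coded sizeof arithmetic with the stock helper from include/linux/kernel.h, which is simply:

#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))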
diff --git a/arch/i386/kernel/cpu/common.c b/arch/i386/kernel/cpu/common.c
index 74145a33cb0..c145fb30002 100644
--- a/arch/i386/kernel/cpu/common.c
+++ b/arch/i386/kernel/cpu/common.c
@@ -30,8 +30,6 @@ static int disable_x86_serial_nr __devinitdata = 1;
struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {};
-extern void mcheck_init(struct cpuinfo_x86 *c);
-
extern int disable_pse;
static void default_init(struct cpuinfo_x86 * c)
@@ -429,9 +427,8 @@ void __devinit identify_cpu(struct cpuinfo_x86 *c)
}
/* Init Machine Check Exception if available. */
-#ifdef CONFIG_X86_MCE
mcheck_init(c);
-#endif
+
if (c == &boot_cpu_data)
sysenter_setup();
enable_sep_cpu();
diff --git a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
index caa9f771134..871366b83b3 100644
--- a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -377,10 +377,9 @@ acpi_cpufreq_cpu_init (
arg0.buffer.length = 12;
arg0.buffer.pointer = (u8 *) arg0_buf;
- data = kmalloc(sizeof(struct cpufreq_acpi_io), GFP_KERNEL);
+ data = kzalloc(sizeof(struct cpufreq_acpi_io), GFP_KERNEL);
if (!data)
return (-ENOMEM);
- memset(data, 0, sizeof(struct cpufreq_acpi_io));
acpi_io_data[cpu] = data;
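The kmalloc()+memset() pairs replaced in this and the following cpufreq hunks collapse into kzalloc(), whose behaviour is roughly:

/* behavioural sketch of kzalloc() */
void *kzalloc(size_t size, gfp_t flags)
{
        void *ret = kmalloc(size, flags);

        if (ret)
                memset(ret, 0, size);
        return ret;
}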
diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k7.c b/arch/i386/kernel/cpu/cpufreq/powernow-k7.c
index 73a5dc5b26b..edcd626001d 100644
--- a/arch/i386/kernel/cpu/cpufreq/powernow-k7.c
+++ b/arch/i386/kernel/cpu/cpufreq/powernow-k7.c
@@ -171,10 +171,9 @@ static int get_ranges (unsigned char *pst)
unsigned int speed;
u8 fid, vid;
- powernow_table = kmalloc((sizeof(struct cpufreq_frequency_table) * (number_scales + 1)), GFP_KERNEL);
+ powernow_table = kzalloc((sizeof(struct cpufreq_frequency_table) * (number_scales + 1)), GFP_KERNEL);
if (!powernow_table)
return -ENOMEM;
- memset(powernow_table, 0, (sizeof(struct cpufreq_frequency_table) * (number_scales + 1)));
for (j=0 ; j < number_scales; j++) {
fid = *pst++;
@@ -305,16 +304,13 @@ static int powernow_acpi_init(void)
goto err0;
}
- acpi_processor_perf = kmalloc(sizeof(struct acpi_processor_performance),
+ acpi_processor_perf = kzalloc(sizeof(struct acpi_processor_performance),
GFP_KERNEL);
-
if (!acpi_processor_perf) {
retval = -ENOMEM;
goto err0;
}
- memset(acpi_processor_perf, 0, sizeof(struct acpi_processor_performance));
-
if (acpi_processor_register_performance(acpi_processor_perf, 0)) {
retval = -EIO;
goto err1;
@@ -337,14 +333,12 @@ static int powernow_acpi_init(void)
goto err2;
}
- powernow_table = kmalloc((number_scales + 1) * (sizeof(struct cpufreq_frequency_table)), GFP_KERNEL);
+ powernow_table = kzalloc((number_scales + 1) * (sizeof(struct cpufreq_frequency_table)), GFP_KERNEL);
if (!powernow_table) {
retval = -ENOMEM;
goto err2;
}
- memset(powernow_table, 0, ((number_scales + 1) * sizeof(struct cpufreq_frequency_table)));
-
pc.val = (unsigned long) acpi_processor_perf->states[0].control;
for (i = 0; i < number_scales; i++) {
u8 fid, vid;
diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
index 2d5c9adba0c..68a1fc87f4c 100644
--- a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
@@ -462,7 +462,6 @@ static int check_supported_cpu(unsigned int cpu)
oldmask = current->cpus_allowed;
set_cpus_allowed(current, cpumask_of_cpu(cpu));
- schedule();
if (smp_processor_id() != cpu) {
printk(KERN_ERR "limiting to cpu %u failed\n", cpu);
@@ -497,9 +496,7 @@ static int check_supported_cpu(unsigned int cpu)
out:
set_cpus_allowed(current, oldmask);
- schedule();
return rc;
-
}
static int check_pst_table(struct powernow_k8_data *data, struct pst_s *pst, u8 maxvid)
@@ -913,7 +910,6 @@ static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsi
/* only run on specific CPU from here on */
oldmask = current->cpus_allowed;
set_cpus_allowed(current, cpumask_of_cpu(pol->cpu));
- schedule();
if (smp_processor_id() != pol->cpu) {
printk(KERN_ERR "limiting to cpu %u failed\n", pol->cpu);
@@ -968,8 +964,6 @@ static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsi
err_out:
set_cpus_allowed(current, oldmask);
- schedule();
-
return ret;
}
@@ -991,12 +985,11 @@ static int __init powernowk8_cpu_init(struct cpufreq_policy *pol)
if (!check_supported_cpu(pol->cpu))
return -ENODEV;
- data = kmalloc(sizeof(struct powernow_k8_data), GFP_KERNEL);
+ data = kzalloc(sizeof(struct powernow_k8_data), GFP_KERNEL);
if (!data) {
printk(KERN_ERR PFX "unable to alloc powernow_k8_data");
return -ENOMEM;
}
- memset(data,0,sizeof(struct powernow_k8_data));
data->cpu = pol->cpu;
@@ -1026,7 +1019,6 @@ static int __init powernowk8_cpu_init(struct cpufreq_policy *pol)
/* only run on specific CPU from here on */
oldmask = current->cpus_allowed;
set_cpus_allowed(current, cpumask_of_cpu(pol->cpu));
- schedule();
if (smp_processor_id() != pol->cpu) {
printk(KERN_ERR "limiting to cpu %u failed\n", pol->cpu);
@@ -1045,7 +1037,6 @@ static int __init powernowk8_cpu_init(struct cpufreq_policy *pol)
/* run on any CPU again */
set_cpus_allowed(current, oldmask);
- schedule();
pol->governor = CPUFREQ_DEFAULT_GOVERNOR;
pol->cpus = cpu_core_map[pol->cpu];
@@ -1080,7 +1071,6 @@ static int __init powernowk8_cpu_init(struct cpufreq_policy *pol)
err_out:
set_cpus_allowed(current, oldmask);
- schedule();
powernow_k8_cpu_exit_acpi(data);
kfree(data);
@@ -1116,17 +1106,14 @@ static unsigned int powernowk8_get (unsigned int cpu)
set_cpus_allowed(current, oldmask);
return 0;
}
- preempt_disable();
-
+
if (query_current_values_with_pending_wait(data))
goto out;
khz = find_khz_freq_from_fid(data->currfid);
- out:
- preempt_enable_no_resched();
+out:
set_cpus_allowed(current, oldmask);
-
return khz;
}
diff --git a/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
index 1465974256c..edb9873e27e 100644
--- a/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
+++ b/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
@@ -67,7 +67,7 @@ static const struct cpu_id cpu_ids[] = {
[CPU_MP4HT_D0] = {15, 3, 4 },
[CPU_MP4HT_E0] = {15, 4, 1 },
};
-#define N_IDS (sizeof(cpu_ids)/sizeof(cpu_ids[0]))
+#define N_IDS ARRAY_SIZE(cpu_ids)
struct cpu_model
{
@@ -423,12 +423,11 @@ static int centrino_cpu_init_acpi(struct cpufreq_policy *policy)
}
}
- centrino_model[cpu] = kmalloc(sizeof(struct cpu_model), GFP_KERNEL);
+ centrino_model[cpu] = kzalloc(sizeof(struct cpu_model), GFP_KERNEL);
if (!centrino_model[cpu]) {
result = -ENOMEM;
goto err_unreg;
}
- memset(centrino_model[cpu], 0, sizeof(struct cpu_model));
centrino_model[cpu]->model_name=NULL;
centrino_model[cpu]->max_freq = p.states[0].core_frequency * 1000;
diff --git a/arch/i386/kernel/cpu/mcheck/k7.c b/arch/i386/kernel/cpu/mcheck/k7.c
index 7c6b9c73522..fc5d5215e23 100644
--- a/arch/i386/kernel/cpu/mcheck/k7.c
+++ b/arch/i386/kernel/cpu/mcheck/k7.c
@@ -68,7 +68,7 @@ static fastcall void k7_machine_check(struct pt_regs * regs, long error_code)
/* AMD K7 machine check is Intel like */
-void __devinit amd_mcheck_init(struct cpuinfo_x86 *c)
+void amd_mcheck_init(struct cpuinfo_x86 *c)
{
u32 l, h;
int i;
diff --git a/arch/i386/kernel/cpu/mcheck/mce.c b/arch/i386/kernel/cpu/mcheck/mce.c
index 2cf25d2ba0f..6170af3c271 100644
--- a/arch/i386/kernel/cpu/mcheck/mce.c
+++ b/arch/i386/kernel/cpu/mcheck/mce.c
@@ -16,7 +16,7 @@
#include "mce.h"
-int mce_disabled __devinitdata = 0;
+int mce_disabled = 0;
int nr_mce_banks;
EXPORT_SYMBOL_GPL(nr_mce_banks); /* non-fatal.o */
@@ -31,7 +31,7 @@ static fastcall void unexpected_machine_check(struct pt_regs * regs, long error_
void fastcall (*machine_check_vector)(struct pt_regs *, long error_code) = unexpected_machine_check;
/* This has to be run for each processor */
-void __devinit mcheck_init(struct cpuinfo_x86 *c)
+void mcheck_init(struct cpuinfo_x86 *c)
{
if (mce_disabled==1)
return;
diff --git a/arch/i386/kernel/cpu/mcheck/p4.c b/arch/i386/kernel/cpu/mcheck/p4.c
index 1d1e885f500..fd2c459a31e 100644
--- a/arch/i386/kernel/cpu/mcheck/p4.c
+++ b/arch/i386/kernel/cpu/mcheck/p4.c
@@ -77,7 +77,7 @@ fastcall void smp_thermal_interrupt(struct pt_regs *regs)
}
/* P4/Xeon Thermal regulation detect and init */
-static void __devinit intel_init_thermal(struct cpuinfo_x86 *c)
+static void intel_init_thermal(struct cpuinfo_x86 *c)
{
u32 l, h;
unsigned int cpu = smp_processor_id();
@@ -231,7 +231,7 @@ static fastcall void intel_machine_check(struct pt_regs * regs, long error_code)
}
-void __devinit intel_p4_mcheck_init(struct cpuinfo_x86 *c)
+void intel_p4_mcheck_init(struct cpuinfo_x86 *c)
{
u32 l, h;
int i;
diff --git a/arch/i386/kernel/cpu/mcheck/p5.c b/arch/i386/kernel/cpu/mcheck/p5.c
index 3a2e24baddc..94bc43d950c 100644
--- a/arch/i386/kernel/cpu/mcheck/p5.c
+++ b/arch/i386/kernel/cpu/mcheck/p5.c
@@ -28,7 +28,7 @@ static fastcall void pentium_machine_check(struct pt_regs * regs, long error_cod
}
/* Set up machine check reporting for processors with Intel style MCE */
-void __devinit intel_p5_mcheck_init(struct cpuinfo_x86 *c)
+void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
{
u32 l, h;
diff --git a/arch/i386/kernel/cpu/mcheck/p6.c b/arch/i386/kernel/cpu/mcheck/p6.c
index 979b18bc95c..deeae42ce19 100644
--- a/arch/i386/kernel/cpu/mcheck/p6.c
+++ b/arch/i386/kernel/cpu/mcheck/p6.c
@@ -79,7 +79,7 @@ static fastcall void intel_machine_check(struct pt_regs * regs, long error_code)
}
/* Set up machine check reporting for processors with Intel style MCE */
-void __devinit intel_p6_mcheck_init(struct cpuinfo_x86 *c)
+void intel_p6_mcheck_init(struct cpuinfo_x86 *c)
{
u32 l, h;
int i;
diff --git a/arch/i386/kernel/cpu/mcheck/winchip.c b/arch/i386/kernel/cpu/mcheck/winchip.c
index 5b9d2dd411d..9e424b6c293 100644
--- a/arch/i386/kernel/cpu/mcheck/winchip.c
+++ b/arch/i386/kernel/cpu/mcheck/winchip.c
@@ -22,7 +22,7 @@ static fastcall void winchip_machine_check(struct pt_regs * regs, long error_cod
}
/* Set up machine check reporting on the Winchip C6 series */
-void __devinit winchip_mcheck_init(struct cpuinfo_x86 *c)
+void winchip_mcheck_init(struct cpuinfo_x86 *c)
{
u32 lo, hi;
machine_check_vector = winchip_machine_check;
diff --git a/arch/i386/kernel/kprobes.c b/arch/i386/kernel/kprobes.c
index 6345b430b10..32b0c24ab9a 100644
--- a/arch/i386/kernel/kprobes.c
+++ b/arch/i386/kernel/kprobes.c
@@ -31,22 +31,16 @@
#include <linux/config.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h>
-#include <linux/spinlock.h>
#include <linux/preempt.h>
#include <asm/cacheflush.h>
#include <asm/kdebug.h>
#include <asm/desc.h>
-static struct kprobe *current_kprobe;
-static unsigned long kprobe_status, kprobe_old_eflags, kprobe_saved_eflags;
-static struct kprobe *kprobe_prev;
-static unsigned long kprobe_status_prev, kprobe_old_eflags_prev, kprobe_saved_eflags_prev;
-static struct pt_regs jprobe_saved_regs;
-static long *jprobe_saved_esp;
-/* copy of the kernel stack at the probe fire time */
-static kprobe_opcode_t jprobes_stack[MAX_STACK_SIZE];
void jprobe_return_end(void);
+DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
+DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
+
/*
* returns non-zero if opcode modifies the interrupt flag.
*/
@@ -91,29 +85,30 @@ void __kprobes arch_remove_kprobe(struct kprobe *p)
{
}
-static inline void save_previous_kprobe(void)
+static inline void save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
- kprobe_prev = current_kprobe;
- kprobe_status_prev = kprobe_status;
- kprobe_old_eflags_prev = kprobe_old_eflags;
- kprobe_saved_eflags_prev = kprobe_saved_eflags;
+ kcb->prev_kprobe.kp = kprobe_running();
+ kcb->prev_kprobe.status = kcb->kprobe_status;
+ kcb->prev_kprobe.old_eflags = kcb->kprobe_old_eflags;
+ kcb->prev_kprobe.saved_eflags = kcb->kprobe_saved_eflags;
}
-static inline void restore_previous_kprobe(void)
+static inline void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
- current_kprobe = kprobe_prev;
- kprobe_status = kprobe_status_prev;
- kprobe_old_eflags = kprobe_old_eflags_prev;
- kprobe_saved_eflags = kprobe_saved_eflags_prev;
+ __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
+ kcb->kprobe_status = kcb->prev_kprobe.status;
+ kcb->kprobe_old_eflags = kcb->prev_kprobe.old_eflags;
+ kcb->kprobe_saved_eflags = kcb->prev_kprobe.saved_eflags;
}
-static inline void set_current_kprobe(struct kprobe *p, struct pt_regs *regs)
+static inline void set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
+ struct kprobe_ctlblk *kcb)
{
- current_kprobe = p;
- kprobe_saved_eflags = kprobe_old_eflags
+ __get_cpu_var(current_kprobe) = p;
+ kcb->kprobe_saved_eflags = kcb->kprobe_old_eflags
= (regs->eflags & (TF_MASK | IF_MASK));
if (is_IF_modifier(p->opcode))
- kprobe_saved_eflags &= ~IF_MASK;
+ kcb->kprobe_saved_eflags &= ~IF_MASK;
}
static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
@@ -127,6 +122,7 @@ static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
regs->eip = (unsigned long)&p->ainsn.insn;
}
+/* Called with kretprobe_lock held */
void __kprobes arch_prepare_kretprobe(struct kretprobe *rp,
struct pt_regs *regs)
{
@@ -157,9 +153,15 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
int ret = 0;
kprobe_opcode_t *addr = NULL;
unsigned long *lp;
+ struct kprobe_ctlblk *kcb;
- /* We're in an interrupt, but this is clear and BUG()-safe. */
+ /*
+ * We don't want to be preempted for the entire
+ * duration of kprobe processing
+ */
preempt_disable();
+ kcb = get_kprobe_ctlblk();
+
/* Check if the application is using LDT entry for its code segment and
* calculate the address by reading the base address from the LDT entry.
*/
@@ -173,15 +175,12 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
}
/* Check we're not actually recursing */
if (kprobe_running()) {
- /* We *are* holding lock here, so this is safe.
- Disarm the probe we just hit, and ignore it. */
p = get_kprobe(addr);
if (p) {
- if (kprobe_status == KPROBE_HIT_SS &&
+ if (kcb->kprobe_status == KPROBE_HIT_SS &&
*p->ainsn.insn == BREAKPOINT_INSTRUCTION) {
regs->eflags &= ~TF_MASK;
- regs->eflags |= kprobe_saved_eflags;
- unlock_kprobes();
+ regs->eflags |= kcb->kprobe_saved_eflags;
goto no_kprobe;
}
/* We have reentered the kprobe_handler(), since
@@ -190,26 +189,23 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
* just single step on the instruction of the new probe
* without calling any user handlers.
*/
- save_previous_kprobe();
- set_current_kprobe(p, regs);
+ save_previous_kprobe(kcb);
+ set_current_kprobe(p, regs, kcb);
p->nmissed++;
prepare_singlestep(p, regs);
- kprobe_status = KPROBE_REENTER;
+ kcb->kprobe_status = KPROBE_REENTER;
return 1;
} else {
- p = current_kprobe;
+ p = __get_cpu_var(current_kprobe);
if (p->break_handler && p->break_handler(p, regs)) {
goto ss_probe;
}
}
- /* If it's not ours, can't be delete race, (we hold lock). */
goto no_kprobe;
}
- lock_kprobes();
p = get_kprobe(addr);
if (!p) {
- unlock_kprobes();
if (regs->eflags & VM_MASK) {
/* We are in virtual-8086 mode. Return 0 */
goto no_kprobe;
@@ -232,8 +228,8 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
goto no_kprobe;
}
- kprobe_status = KPROBE_HIT_ACTIVE;
- set_current_kprobe(p, regs);
+ set_current_kprobe(p, regs, kcb);
+ kcb->kprobe_status = KPROBE_HIT_ACTIVE;
if (p->pre_handler && p->pre_handler(p, regs))
/* handler has already set things up, so skip ss setup */
@@ -241,7 +237,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
ss_probe:
prepare_singlestep(p, regs);
- kprobe_status = KPROBE_HIT_SS;
+ kcb->kprobe_status = KPROBE_HIT_SS;
return 1;
no_kprobe:
@@ -269,9 +265,10 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
struct kretprobe_instance *ri = NULL;
struct hlist_head *head;
struct hlist_node *node, *tmp;
- unsigned long orig_ret_address = 0;
+ unsigned long flags, orig_ret_address = 0;
unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline;
+ spin_lock_irqsave(&kretprobe_lock, flags);
head = kretprobe_inst_table_head(current);
/*
@@ -310,14 +307,15 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
BUG_ON(!orig_ret_address || (orig_ret_address == trampoline_address));
regs->eip = orig_ret_address;
- unlock_kprobes();
+ reset_current_kprobe();
+ spin_unlock_irqrestore(&kretprobe_lock, flags);
preempt_enable_no_resched();
- /*
- * By returning a non-zero value, we are telling
- * kprobe_handler() that we have handled unlocking
- * and re-enabling preemption.
- */
+ /*
+ * By returning a non-zero value, we are telling
+ * kprobe_handler() that we don't want the post_handler
+ * to run (and have re-enabled preemption)
+ */
return 1;
}
@@ -343,7 +341,8 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
* that is atop the stack is the address following the copied instruction.
* We need to make it the address following the original instruction.
*/
-static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
+static void __kprobes resume_execution(struct kprobe *p,
+ struct pt_regs *regs, struct kprobe_ctlblk *kcb)
{
unsigned long *tos = (unsigned long *)&regs->esp;
unsigned long next_eip = 0;
@@ -353,7 +352,7 @@ static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
switch (p->ainsn.insn[0]) {
case 0x9c: /* pushfl */
*tos &= ~(TF_MASK | IF_MASK);
- *tos |= kprobe_old_eflags;
+ *tos |= kcb->kprobe_old_eflags;
break;
case 0xc3: /* ret/lret */
case 0xcb:
@@ -394,27 +393,30 @@ static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
/*
* Interrupts are disabled on entry as trap1 is an interrupt gate and they
- * remain disabled thoroughout this function. And we hold kprobe lock.
+ * remain disabled thoroughout this function.
*/
static inline int post_kprobe_handler(struct pt_regs *regs)
{
- if (!kprobe_running())
+ struct kprobe *cur = kprobe_running();
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+ if (!cur)
return 0;
- if ((kprobe_status != KPROBE_REENTER) && current_kprobe->post_handler) {
- kprobe_status = KPROBE_HIT_SSDONE;
- current_kprobe->post_handler(current_kprobe, regs, 0);
+ if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
+ kcb->kprobe_status = KPROBE_HIT_SSDONE;
+ cur->post_handler(cur, regs, 0);
}
- resume_execution(current_kprobe, regs);
- regs->eflags |= kprobe_saved_eflags;
+ resume_execution(cur, regs, kcb);
+ regs->eflags |= kcb->kprobe_saved_eflags;
/*Restore back the original saved kprobes variables and continue. */
- if (kprobe_status == KPROBE_REENTER) {
- restore_previous_kprobe();
+ if (kcb->kprobe_status == KPROBE_REENTER) {
+ restore_previous_kprobe(kcb);
goto out;
}
- unlock_kprobes();
+ reset_current_kprobe();
out:
preempt_enable_no_resched();
@@ -429,18 +431,19 @@ out:
return 1;
}
-/* Interrupts disabled, kprobe_lock held. */
static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
- if (current_kprobe->fault_handler
- && current_kprobe->fault_handler(current_kprobe, regs, trapnr))
+ struct kprobe *cur = kprobe_running();
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+ if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
return 1;
- if (kprobe_status & KPROBE_HIT_SS) {
- resume_execution(current_kprobe, regs);
- regs->eflags |= kprobe_old_eflags;
+ if (kcb->kprobe_status & KPROBE_HIT_SS) {
+ resume_execution(cur, regs, kcb);
+ regs->eflags |= kcb->kprobe_old_eflags;
- unlock_kprobes();
+ reset_current_kprobe();
preempt_enable_no_resched();
}
return 0;
@@ -453,39 +456,41 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
unsigned long val, void *data)
{
struct die_args *args = (struct die_args *)data;
+ int ret = NOTIFY_DONE;
+
switch (val) {
case DIE_INT3:
if (kprobe_handler(args->regs))
- return NOTIFY_STOP;
+ ret = NOTIFY_STOP;
break;
case DIE_DEBUG:
if (post_kprobe_handler(args->regs))
- return NOTIFY_STOP;
+ ret = NOTIFY_STOP;
break;
case DIE_GPF:
- if (kprobe_running() &&
- kprobe_fault_handler(args->regs, args->trapnr))
- return NOTIFY_STOP;
- break;
case DIE_PAGE_FAULT:
+ /* kprobe_running() needs smp_processor_id() */
+ preempt_disable();
if (kprobe_running() &&
kprobe_fault_handler(args->regs, args->trapnr))
- return NOTIFY_STOP;
+ ret = NOTIFY_STOP;
+ preempt_enable();
break;
default:
break;
}
- return NOTIFY_DONE;
+ return ret;
}
int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
struct jprobe *jp = container_of(p, struct jprobe, kp);
unsigned long addr;
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
- jprobe_saved_regs = *regs;
- jprobe_saved_esp = &regs->esp;
- addr = (unsigned long)jprobe_saved_esp;
+ kcb->jprobe_saved_regs = *regs;
+ kcb->jprobe_saved_esp = &regs->esp;
+ addr = (unsigned long)(kcb->jprobe_saved_esp);
/*
* TBD: As Linus pointed out, gcc assumes that the callee
@@ -494,7 +499,8 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
* we also save and restore enough stack bytes to cover
* the argument area.
*/
- memcpy(jprobes_stack, (kprobe_opcode_t *) addr, MIN_STACK_SIZE(addr));
+ memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr,
+ MIN_STACK_SIZE(addr));
regs->eflags &= ~IF_MASK;
regs->eip = (unsigned long)(jp->entry);
return 1;
@@ -502,36 +508,40 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
void __kprobes jprobe_return(void)
{
- preempt_enable_no_resched();
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
asm volatile (" xchgl %%ebx,%%esp \n"
" int3 \n"
" .globl jprobe_return_end \n"
" jprobe_return_end: \n"
" nop \n"::"b"
- (jprobe_saved_esp):"memory");
+ (kcb->jprobe_saved_esp):"memory");
}
int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
u8 *addr = (u8 *) (regs->eip - 1);
- unsigned long stack_addr = (unsigned long)jprobe_saved_esp;
+ unsigned long stack_addr = (unsigned long)(kcb->jprobe_saved_esp);
struct jprobe *jp = container_of(p, struct jprobe, kp);
if ((addr > (u8 *) jprobe_return) && (addr < (u8 *) jprobe_return_end)) {
- if (&regs->esp != jprobe_saved_esp) {
+ if (&regs->esp != kcb->jprobe_saved_esp) {
struct pt_regs *saved_regs =
- container_of(jprobe_saved_esp, struct pt_regs, esp);
+ container_of(kcb->jprobe_saved_esp,
+ struct pt_regs, esp);
printk("current esp %p does not match saved esp %p\n",
- &regs->esp, jprobe_saved_esp);
+ &regs->esp, kcb->jprobe_saved_esp);
printk("Saved registers for jprobe %p\n", jp);
show_registers(saved_regs);
printk("Current registers\n");
show_registers(regs);
BUG();
}
- *regs = jprobe_saved_regs;
- memcpy((kprobe_opcode_t *) stack_addr, jprobes_stack,
+ *regs = kcb->jprobe_saved_regs;
+ memcpy((kprobe_opcode_t *) stack_addr, kcb->jprobes_stack,
MIN_STACK_SIZE(stack_addr));
+ preempt_enable_no_resched();
return 1;
}
return 0;
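The rewritten handlers above drop the global kprobe state and lock in favour of a per-CPU control block; the accessors they now use come from the generic kprobes header and look roughly like this:

/* approximate generic helpers from include/linux/kprobes.h */
DECLARE_PER_CPU(struct kprobe *, current_kprobe);
DECLARE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

static inline struct kprobe *kprobe_running(void)
{
        return __get_cpu_var(current_kprobe);
}

static inline void reset_current_kprobe(void)
{
        __get_cpu_var(current_kprobe) = NULL;
}

static inline struct kprobe_ctlblk *get_kprobe_ctlblk(void)
{
        return &__get_cpu_var(kprobe_ctlblk);
}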
diff --git a/arch/i386/kernel/ldt.c b/arch/i386/kernel/ldt.c
index fe1ffa55587..983f95707e1 100644
--- a/arch/i386/kernel/ldt.c
+++ b/arch/i386/kernel/ldt.c
@@ -18,6 +18,7 @@
#include <asm/system.h>
#include <asm/ldt.h>
#include <asm/desc.h>
+#include <asm/mmu_context.h>
#ifdef CONFIG_SMP /* avoids "defined but not used" warnig */
static void flush_ldt(void *null)
diff --git a/arch/i386/kernel/mca.c b/arch/i386/kernel/mca.c
index 8600faeea29..558bb207720 100644
--- a/arch/i386/kernel/mca.c
+++ b/arch/i386/kernel/mca.c
@@ -132,7 +132,7 @@ static struct resource mca_standard_resources[] = {
{ .start = 0x100, .end = 0x107, .name = "POS (MCA)" }
};
-#define MCA_STANDARD_RESOURCES (sizeof(mca_standard_resources)/sizeof(struct resource))
+#define MCA_STANDARD_RESOURCES ARRAY_SIZE(mca_standard_resources)
/**
* mca_read_and_store_pos - read the POS registers into a memory buffer
diff --git a/arch/i386/kernel/ptrace.c b/arch/i386/kernel/ptrace.c
index efd11f09c99..5ffbb4b7ad0 100644
--- a/arch/i386/kernel/ptrace.c
+++ b/arch/i386/kernel/ptrace.c
@@ -354,49 +354,12 @@ ptrace_set_thread_area(struct task_struct *child,
return 0;
}
-asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
- struct task_struct *child;
struct user * dummy = NULL;
int i, ret;
unsigned long __user *datap = (unsigned long __user *)data;
- lock_kernel();
- ret = -EPERM;
- if (request == PTRACE_TRACEME) {
- /* are we already being traced? */
- if (current->ptrace & PT_PTRACED)
- goto out;
- ret = security_ptrace(current->parent, current);
- if (ret)
- goto out;
- /* set the ptrace bit in the process flags. */
- current->ptrace |= PT_PTRACED;
- ret = 0;
- goto out;
- }
- ret = -ESRCH;
- read_lock(&tasklist_lock);
- child = find_task_by_pid(pid);
- if (child)
- get_task_struct(child);
- read_unlock(&tasklist_lock);
- if (!child)
- goto out;
-
- ret = -EPERM;
- if (pid == 1) /* you may not mess with init */
- goto out_tsk;
-
- if (request == PTRACE_ATTACH) {
- ret = ptrace_attach(child);
- goto out_tsk;
- }
-
- ret = ptrace_check_attach(child, request == PTRACE_KILL);
- if (ret < 0)
- goto out_tsk;
-
switch (request) {
/* when I and D space are separate, these will need to be fixed. */
case PTRACE_PEEKTEXT: /* read word at location addr. */
@@ -663,10 +626,7 @@ asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
ret = ptrace_request(child, request, addr, data);
break;
}
-out_tsk:
- put_task_struct(child);
-out:
- unlock_kernel();
+ out_tsk:
return ret;
}
diff --git a/arch/i386/kernel/reboot_fixups.c b/arch/i386/kernel/reboot_fixups.c
index c9b87330aee..10e21a4773d 100644
--- a/arch/i386/kernel/reboot_fixups.c
+++ b/arch/i386/kernel/reboot_fixups.c
@@ -10,6 +10,7 @@
#include <asm/delay.h>
#include <linux/pci.h>
+#include <linux/reboot_fixups.h>
static void cs5530a_warm_reset(struct pci_dev *dev)
{
@@ -42,7 +43,7 @@ void mach_reboot_fixups(void)
struct pci_dev *dev;
int i;
- for (i=0; i < (sizeof(fixups_table)/sizeof(fixups_table[0])); i++) {
+ for (i=0; i < ARRAY_SIZE(fixups_table); i++) {
cur = &(fixups_table[i]);
dev = pci_get_device(cur->vendor, cur->device, NULL);
if (!dev)
diff --git a/arch/i386/kernel/scx200.c b/arch/i386/kernel/scx200.c
index 69e203a0d33..9c968ae67c4 100644
--- a/arch/i386/kernel/scx200.c
+++ b/arch/i386/kernel/scx200.c
@@ -12,6 +12,7 @@
#include <linux/pci.h>
#include <linux/scx200.h>
+#include <linux/scx200_gpio.h>
/* Verify that the configuration block really is there */
#define scx200_cb_probe(base) (inw((base) + SCx200_CBA) == (base))
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c
index 01b618e73ec..47ec76794d0 100644
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -68,11 +68,9 @@ EXPORT_SYMBOL(smp_num_siblings);
/* Package ID of each logical CPU */
int phys_proc_id[NR_CPUS] __read_mostly = {[0 ... NR_CPUS-1] = BAD_APICID};
-EXPORT_SYMBOL(phys_proc_id);
/* Core ID of each logical CPU */
int cpu_core_id[NR_CPUS] __read_mostly = {[0 ... NR_CPUS-1] = BAD_APICID};
-EXPORT_SYMBOL(cpu_core_id);
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_sibling_map);
@@ -612,7 +610,7 @@ static inline void __inquire_remote_apic(int apicid)
printk("Inquiring remote APIC #%d...\n", apicid);
- for (i = 0; i < sizeof(regs) / sizeof(*regs); i++) {
+ for (i = 0; i < ARRAY_SIZE(regs); i++) {
printk("... APIC #%d %s: ", apicid, names[i]);
/*
diff --git a/arch/i386/oprofile/Kconfig b/arch/i386/oprofile/Kconfig
index 5ade19801b9..d8a84088471 100644
--- a/arch/i386/oprofile/Kconfig
+++ b/arch/i386/oprofile/Kconfig
@@ -1,7 +1,3 @@
-
-menu "Profiling support"
- depends on EXPERIMENTAL
-
config PROFILING
bool "Profiling support (EXPERIMENTAL)"
help
@@ -19,5 +15,3 @@ config OPROFILE
If unsure, say N.
-endmenu
-
diff --git a/arch/i386/power/cpu.c b/arch/i386/power/cpu.c
index 1f1572692e0..50a0bef8c85 100644
--- a/arch/i386/power/cpu.c
+++ b/arch/i386/power/cpu.c
@@ -118,6 +118,7 @@ void __restore_processor_state(struct saved_context *ctxt)
fix_processor_context();
do_fpu_end();
mtrr_ap_init();
+ mcheck_init(&boot_cpu_data);
}
void restore_processor_state(void)
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 3b4248cff9a..d4de8a4814b 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -191,6 +191,7 @@ config IOSAPIC
config IA64_SGI_SN_XP
tristate "Support communication between SGI SSIs"
+ depends on IA64_GENERIC || IA64_SGI_SN2
select IA64_UNCACHED_ALLOCATOR
help
An SGI machine can be divided into multiple Single System
@@ -426,8 +427,21 @@ config GENERIC_PENDING_IRQ
source "arch/ia64/hp/sim/Kconfig"
+menu "Instrumentation Support"
+ depends on EXPERIMENTAL
+
source "arch/ia64/oprofile/Kconfig"
+config KPROBES
+ bool "Kprobes (EXPERIMENTAL)"
+ help
+ Kprobes allows you to trap at almost any kernel address and
+ execute a callback function. register_kprobe() establishes
+ a probepoint and specifies the callback. Kprobes is useful
+ for kernel debugging, non-intrusive instrumentation and testing.
+ If in doubt, say "N".
+endmenu
+
source "arch/ia64/Kconfig.debug"
source "security/Kconfig"
diff --git a/arch/ia64/Kconfig.debug b/arch/ia64/Kconfig.debug
index fda67ac993d..de9d507ba0f 100644
--- a/arch/ia64/Kconfig.debug
+++ b/arch/ia64/Kconfig.debug
@@ -2,17 +2,6 @@ menu "Kernel hacking"
source "lib/Kconfig.debug"
-config KPROBES
- bool "Kprobes"
- depends on DEBUG_KERNEL
- help
- Kprobes allows you to trap at almost any kernel address and
- execute a callback function. register_kprobe() establishes
- a probepoint and specifies the callback. Kprobes is useful
- for kernel debugging, non-intrusive instrumentation and testing.
- If in doubt, say "N".
-
-
choice
prompt "Physical memory granularity"
default IA64_GRANULE_64MB
diff --git a/arch/ia64/hp/sim/simserial.c b/arch/ia64/hp/sim/simserial.c
index b42ec37be51..19ee635eeb7 100644
--- a/arch/ia64/hp/sim/simserial.c
+++ b/arch/ia64/hp/sim/simserial.c
@@ -642,10 +642,8 @@ static void rs_close(struct tty_struct *tty, struct file * filp)
info->event = 0;
info->tty = 0;
if (info->blocked_open) {
- if (info->close_delay) {
- current->state = TASK_INTERRUPTIBLE;
- schedule_timeout(info->close_delay);
- }
+ if (info->close_delay)
+ schedule_timeout_interruptible(info->close_delay);
wake_up_interruptible(&info->open_wait);
}
info->flags &= ~(ASYNC_NORMAL_ACTIVE|ASYNC_CLOSING);
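schedule_timeout_interruptible() is a thin wrapper that folds the explicit task-state assignment into the sleep, equivalent to the removed pair here (and in the atari hwclk hunk further down):

/* kernel/timer.c */
signed long __sched schedule_timeout_interruptible(signed long timeout)
{
        __set_current_state(TASK_INTERRUPTIBLE);
        return schedule_timeout(timeout);
}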
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index 471086b808a..96736a119c9 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -26,7 +26,6 @@
#include <linux/config.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h>
-#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/preempt.h>
@@ -38,13 +37,8 @@
extern void jprobe_inst_return(void);
-/* kprobe_status settings */
-#define KPROBE_HIT_ACTIVE 0x00000001
-#define KPROBE_HIT_SS 0x00000002
-
-static struct kprobe *current_kprobe, *kprobe_prev;
-static unsigned long kprobe_status, kprobe_status_prev;
-static struct pt_regs jprobe_saved_regs;
+DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
+DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
enum instruction_type {A, I, M, F, B, L, X, u};
static enum instruction_type bundle_encoding[32][3] = {
@@ -313,21 +307,22 @@ static int __kprobes valid_kprobe_addr(int template, int slot,
return 0;
}
-static inline void save_previous_kprobe(void)
+static inline void save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
- kprobe_prev = current_kprobe;
- kprobe_status_prev = kprobe_status;
+ kcb->prev_kprobe.kp = kprobe_running();
+ kcb->prev_kprobe.status = kcb->kprobe_status;
}
-static inline void restore_previous_kprobe(void)
+static inline void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
- current_kprobe = kprobe_prev;
- kprobe_status = kprobe_status_prev;
+ __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
+ kcb->kprobe_status = kcb->prev_kprobe.status;
}
-static inline void set_current_kprobe(struct kprobe *p)
+static inline void set_current_kprobe(struct kprobe *p,
+ struct kprobe_ctlblk *kcb)
{
- current_kprobe = p;
+ __get_cpu_var(current_kprobe) = p;
}
static void kretprobe_trampoline(void)
@@ -347,10 +342,11 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
struct kretprobe_instance *ri = NULL;
struct hlist_head *head;
struct hlist_node *node, *tmp;
- unsigned long orig_ret_address = 0;
+ unsigned long flags, orig_ret_address = 0;
unsigned long trampoline_address =
((struct fnptr *)kretprobe_trampoline)->ip;
+ spin_lock_irqsave(&kretprobe_lock, flags);
head = kretprobe_inst_table_head(current);
/*
@@ -389,17 +385,19 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
BUG_ON(!orig_ret_address || (orig_ret_address == trampoline_address));
regs->cr_iip = orig_ret_address;
- unlock_kprobes();
+ reset_current_kprobe();
+ spin_unlock_irqrestore(&kretprobe_lock, flags);
preempt_enable_no_resched();
- /*
- * By returning a non-zero value, we are telling
- * kprobe_handler() that we have handled unlocking
- * and re-enabling preemption.
- */
+ /*
+ * By returning a non-zero value, we are telling
+ * kprobe_handler() that we don't want the post_handler
+ * to run (and have re-enabled preemption)
+ */
return 1;
}
+/* Called with kretprobe_lock held */
void __kprobes arch_prepare_kretprobe(struct kretprobe *rp,
struct pt_regs *regs)
{
@@ -606,17 +604,22 @@ static int __kprobes pre_kprobes_handler(struct die_args *args)
int ret = 0;
struct pt_regs *regs = args->regs;
kprobe_opcode_t *addr = (kprobe_opcode_t *)instruction_pointer(regs);
+ struct kprobe_ctlblk *kcb;
+ /*
+ * We don't want to be preempted for the entire
+ * duration of kprobe processing
+ */
preempt_disable();
+ kcb = get_kprobe_ctlblk();
/* Handle recursion cases */
if (kprobe_running()) {
p = get_kprobe(addr);
if (p) {
- if ( (kprobe_status == KPROBE_HIT_SS) &&
+ if ((kcb->kprobe_status == KPROBE_HIT_SS) &&
(p->ainsn.inst_flag == INST_FLAG_BREAK_INST)) {
ia64_psr(regs)->ss = 0;
- unlock_kprobes();
goto no_kprobe;
}
/* We have reentered the pre_kprobe_handler(), since
@@ -625,17 +628,17 @@ static int __kprobes pre_kprobes_handler(struct die_args *args)
* just single step on the instruction of the new probe
* without calling any user handlers.
*/
- save_previous_kprobe();
- set_current_kprobe(p);
+ save_previous_kprobe(kcb);
+ set_current_kprobe(p, kcb);
p->nmissed++;
prepare_ss(p, regs);
- kprobe_status = KPROBE_REENTER;
+ kcb->kprobe_status = KPROBE_REENTER;
return 1;
} else if (args->err == __IA64_BREAK_JPROBE) {
/*
* jprobe instrumented function just completed
*/
- p = current_kprobe;
+ p = __get_cpu_var(current_kprobe);
if (p->break_handler && p->break_handler(p, regs)) {
goto ss_probe;
}
@@ -645,10 +648,8 @@ static int __kprobes pre_kprobes_handler(struct die_args *args)
}
}
- lock_kprobes();
p = get_kprobe(addr);
if (!p) {
- unlock_kprobes();
if (!is_ia64_break_inst(regs)) {
/*
* The breakpoint instruction was removed right
@@ -665,8 +666,8 @@ static int __kprobes pre_kprobes_handler(struct die_args *args)
goto no_kprobe;
}
- kprobe_status = KPROBE_HIT_ACTIVE;
- set_current_kprobe(p);
+ set_current_kprobe(p, kcb);
+ kcb->kprobe_status = KPROBE_HIT_ACTIVE;
if (p->pre_handler && p->pre_handler(p, regs))
/*
@@ -678,7 +679,7 @@ static int __kprobes pre_kprobes_handler(struct die_args *args)
ss_probe:
prepare_ss(p, regs);
- kprobe_status = KPROBE_HIT_SS;
+ kcb->kprobe_status = KPROBE_HIT_SS;
return 1;
no_kprobe:
@@ -688,23 +689,25 @@ no_kprobe:
static int __kprobes post_kprobes_handler(struct pt_regs *regs)
{
- if (!kprobe_running())
+ struct kprobe *cur = kprobe_running();
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+ if (!cur)
return 0;
- if ((kprobe_status != KPROBE_REENTER) && current_kprobe->post_handler) {
- kprobe_status = KPROBE_HIT_SSDONE;
- current_kprobe->post_handler(current_kprobe, regs, 0);
+ if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
+ kcb->kprobe_status = KPROBE_HIT_SSDONE;
+ cur->post_handler(cur, regs, 0);
}
- resume_execution(current_kprobe, regs);
+ resume_execution(cur, regs);
/*Restore back the original saved kprobes variables and continue. */
- if (kprobe_status == KPROBE_REENTER) {
- restore_previous_kprobe();
+ if (kcb->kprobe_status == KPROBE_REENTER) {
+ restore_previous_kprobe(kcb);
goto out;
}
-
- unlock_kprobes();
+ reset_current_kprobe();
out:
preempt_enable_no_resched();
@@ -713,16 +716,15 @@ out:
static int __kprobes kprobes_fault_handler(struct pt_regs *regs, int trapnr)
{
- if (!kprobe_running())
- return 0;
+ struct kprobe *cur = kprobe_running();
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
- if (current_kprobe->fault_handler &&
- current_kprobe->fault_handler(current_kprobe, regs, trapnr))
+ if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
return 1;
- if (kprobe_status & KPROBE_HIT_SS) {
- resume_execution(current_kprobe, regs);
- unlock_kprobes();
+ if (kcb->kprobe_status & KPROBE_HIT_SS) {
+ resume_execution(cur, regs);
+ reset_current_kprobe();
preempt_enable_no_resched();
}
@@ -733,31 +735,38 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
unsigned long val, void *data)
{
struct die_args *args = (struct die_args *)data;
+ int ret = NOTIFY_DONE;
+
switch(val) {
case DIE_BREAK:
if (pre_kprobes_handler(args))
- return NOTIFY_STOP;
+ ret = NOTIFY_STOP;
break;
case DIE_SS:
if (post_kprobes_handler(args->regs))
- return NOTIFY_STOP;
+ ret = NOTIFY_STOP;
break;
case DIE_PAGE_FAULT:
- if (kprobes_fault_handler(args->regs, args->trapnr))
- return NOTIFY_STOP;
+ /* kprobe_running() needs smp_processor_id() */
+ preempt_disable();
+ if (kprobe_running() &&
+ kprobes_fault_handler(args->regs, args->trapnr))
+ ret = NOTIFY_STOP;
+ preempt_enable();
default:
break;
}
- return NOTIFY_DONE;
+ return ret;
}
int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
struct jprobe *jp = container_of(p, struct jprobe, kp);
unsigned long addr = ((struct fnptr *)(jp->entry))->ip;
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
/* save architectural state */
- jprobe_saved_regs = *regs;
+ kcb->jprobe_saved_regs = *regs;
/* after rfi, execute the jprobe instrumented function */
regs->cr_iip = addr & ~0xFULL;
@@ -775,7 +784,10 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
- *regs = jprobe_saved_regs;
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+ *regs = kcb->jprobe_saved_regs;
+ preempt_enable_no_resched();
return 1;
}
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index f7dfc107cb7..410d4804fa6 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -4940,7 +4940,7 @@ abort_locked:
if (call_made && PFM_CMD_RW_ARG(cmd) && copy_to_user(arg, args_k, base_sz*count)) ret = -EFAULT;
error_args:
- if (args_k) kfree(args_k);
+ kfree(args_k);
DPRINT(("cmd=%s ret=%ld\n", PFM_CMD_NAME(cmd), ret));
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index fc56ca2da35..3af6de36a48 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -92,6 +92,13 @@ extern void efi_initialize_iomem_resources(struct resource *,
extern char _text[], _end[], _etext[];
unsigned long ia64_max_cacheline_size;
+
+int dma_get_cache_alignment(void)
+{
+ return ia64_max_cacheline_size;
+}
+EXPORT_SYMBOL(dma_get_cache_alignment);
+
unsigned long ia64_iobase; /* virtual address for I/O accesses */
EXPORT_SYMBOL(ia64_iobase);
struct io_space io_space[MAX_IO_SPACES];
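dma_get_cache_alignment() gives drivers the boundary to pad streaming-DMA buffers to; an illustrative (hypothetical) use, not taken from this patch:

size_t len = 60;                                 /* hypothetical payload size */
size_t padded = ALIGN(len, dma_get_cache_alignment());
void *buf = kmalloc(padded, GFP_KERNEL);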
diff --git a/arch/ia64/oprofile/Kconfig b/arch/ia64/oprofile/Kconfig
index 56e6f614b04..97271ab484d 100644
--- a/arch/ia64/oprofile/Kconfig
+++ b/arch/ia64/oprofile/Kconfig
@@ -1,7 +1,3 @@
-
-menu "Profiling support"
- depends on EXPERIMENTAL
-
config PROFILING
bool "Profiling support (EXPERIMENTAL)"
help
@@ -22,5 +18,3 @@ config OPROFILE
If unsure, say N.
-endmenu
-
diff --git a/arch/m68k/atari/time.c b/arch/m68k/atari/time.c
index 6df7fb60dfe..e79bbc94216 100644
--- a/arch/m68k/atari/time.c
+++ b/arch/m68k/atari/time.c
@@ -212,10 +212,8 @@ int atari_tt_hwclk( int op, struct rtc_time *t )
* additionally the RTC_SET bit is set to prevent an update cycle.
*/
- while( RTC_READ(RTC_FREQ_SELECT) & RTC_UIP ) {
- current->state = TASK_INTERRUPTIBLE;
- schedule_timeout(HWCLK_POLL_INTERVAL);
- }
+ while( RTC_READ(RTC_FREQ_SELECT) & RTC_UIP )
+ schedule_timeout_interruptible(HWCLK_POLL_INTERVAL);
local_irq_save(flags);
RTC_WRITE( RTC_CONTROL, ctrl | RTC_SET );
diff --git a/arch/m68k/kernel/ptrace.c b/arch/m68k/kernel/ptrace.c
index f7f1d2e5b90..7e54422685c 100644
--- a/arch/m68k/kernel/ptrace.c
+++ b/arch/m68k/kernel/ptrace.c
@@ -121,48 +121,11 @@ void ptrace_disable(struct task_struct *child)
child->thread.work.syscall_trace = 0;
}
-asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
- struct task_struct *child;
unsigned long tmp;
int i, ret = 0;
- lock_kernel();
- if (request == PTRACE_TRACEME) {
- /* are we already being traced? */
- if (current->ptrace & PT_PTRACED) {
- ret = -EPERM;
- goto out;
- }
- /* set the ptrace bit in the process flags. */
- current->ptrace |= PT_PTRACED;
- goto out;
- }
- read_lock(&tasklist_lock);
- child = find_task_by_pid(pid);
- if (child)
- get_task_struct(child);
- read_unlock(&tasklist_lock);
- if (unlikely(!child)) {
- ret = -ESRCH;
- goto out;
- }
-
- /* you may not mess with init */
- if (unlikely(pid == 1)) {
- ret = -EPERM;
- goto out_tsk;
- }
-
- if (request == PTRACE_ATTACH) {
- ret = ptrace_attach(child);
- goto out_tsk;
- }
-
- ret = ptrace_check_attach(child, request == PTRACE_KILL);
- if (ret)
- goto out_tsk;
-
switch (request) {
/* when I and D space are separate, these will need to be fixed. */
case PTRACE_PEEKTEXT: /* read word at location addr. */
@@ -317,14 +280,10 @@ asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
ret = ptrace_request(child, request, addr, data);
break;
}
-out_tsk:
- put_task_struct(child);
-out:
- unlock_kernel();
+
return ret;
out_eio:
- ret = -EIO;
- goto out_tsk;
+ return -EIO;
}
asmlinkage void syscall_trace(void)
diff --git a/arch/m68knommu/Kconfig b/arch/m68knommu/Kconfig
index 8520df9cee6..b96498120fe 100644
--- a/arch/m68knommu/Kconfig
+++ b/arch/m68knommu/Kconfig
@@ -71,6 +71,11 @@ config M5206e
help
Motorola ColdFire 5206e processor support.
+config M520x
+ bool "MCF520x"
+ help
+ Freescale Coldfire 5207/5208 processor support.
+
config M523x
bool "MCF523x"
help
@@ -120,7 +125,7 @@ config M527x
config COLDFIRE
bool
- depends on (M5206 || M5206e || M523x || M5249 || M527x || M5272 || M528x || M5307 || M5407)
+ depends on (M5206 || M5206e || M520x || M523x || M5249 || M527x || M5272 || M528x || M5307 || M5407)
default y
choice
@@ -322,6 +327,12 @@ config ELITE
help
Support for the Motorola M5206eLITE board.
+config M5208EVB
+ bool "Freescale M5208EVB board support"
+ depends on M520x
+ help
+ Support for the Freescale Coldfire M5208EVB.
+
config M5235EVB
bool "Freescale M5235EVB support"
depends on M523x
@@ -465,10 +476,10 @@ config ARNEWSH
default y
depends on (ARN5206 || ARN5307)
-config MOTOROLA
+config FREESCALE
bool
default y
- depends on (M5206eC3 || M5235EVB || M5249C3 || M5271EVB || M5272C3 || M5275EVB || M5282EVB || M5307C3 || M5407C3)
+ depends on (M5206eC3 || M5208EVB || M5235EVB || M5249C3 || M5271EVB || M5272C3 || M5275EVB || M5282EVB || M5307C3 || M5407C3)
config HW_FEITH
bool
diff --git a/arch/m68knommu/Makefile b/arch/m68knommu/Makefile
index b8fdf191b8f..b6b5c14e55f 100644
--- a/arch/m68knommu/Makefile
+++ b/arch/m68knommu/Makefile
@@ -14,6 +14,7 @@ platform-$(CONFIG_M68VZ328) := 68VZ328
platform-$(CONFIG_M68360) := 68360
platform-$(CONFIG_M5206) := 5206
platform-$(CONFIG_M5206e) := 5206e
+platform-$(CONFIG_M520x) := 520x
platform-$(CONFIG_M523x) := 523x
platform-$(CONFIG_M5249) := 5249
platform-$(CONFIG_M527x) := 527x
@@ -29,7 +30,7 @@ board-$(CONFIG_UCDIMM) := ucdimm
board-$(CONFIG_UCQUICC) := uCquicc
board-$(CONFIG_DRAGEN2) := de2
board-$(CONFIG_ARNEWSH) := ARNEWSH
-board-$(CONFIG_MOTOROLA) := MOTOROLA
+board-$(CONFIG_FREESCALE) := FREESCALE
board-$(CONFIG_M5235EVB) := M5235EVB
board-$(CONFIG_M5271EVB) := M5271EVB
board-$(CONFIG_M5275EVB) := M5275EVB
@@ -41,6 +42,7 @@ board-$(CONFIG_SECUREEDGEMP3) := MP3
board-$(CONFIG_CLEOPATRA) := CLEOPATRA
board-$(CONFIG_senTec) := senTec
board-$(CONFIG_SNEHA) := SNEHA
+board-$(CONFIG_M5208EVB) := M5208EVB
board-$(CONFIG_MOD5272) := MOD5272
BOARD := $(board-y)
@@ -56,6 +58,7 @@ MODEL := $(model-y)
#
cpuclass-$(CONFIG_M5206) := 5307
cpuclass-$(CONFIG_M5206e) := 5307
+cpuclass-$(CONFIG_M520x) := 5307
cpuclass-$(CONFIG_M523x) := 5307
cpuclass-$(CONFIG_M5249) := 5307
cpuclass-$(CONFIG_M527x) := 5307
@@ -80,6 +83,7 @@ export PLATFORM BOARD MODEL CPUCLASS
#
cflags-$(CONFIG_M5206) := -m5200 -Wa,-S -Wa,-m5200
cflags-$(CONFIG_M5206e) := -m5200 -Wa,-S -Wa,-m5200
+cflags-$(CONFIG_M520x) := -m5307 -Wa,-S -Wa,-m5307
cflags-$(CONFIG_M523x) := -m5307 -Wa,-S -Wa,-m5307
cflags-$(CONFIG_M5249) := -m5200 -Wa,-S -Wa,-m5200
cflags-$(CONFIG_M527x) := -m5307 -Wa,-S -Wa,-m5307
@@ -95,7 +99,6 @@ cflags-$(CONFIG_M68360) := -m68332
AFLAGS += $(cflags-y)
CFLAGS += $(cflags-y)
-CFLAGS += -fno-builtin
CFLAGS += -O1 -g
CFLAGS += -D__linux__
CFLAGS += -DUTS_SYSNAME=\"uClinux\"
diff --git a/arch/m68knommu/kernel/asm-offsets.c b/arch/m68knommu/kernel/asm-offsets.c
index cd3ffe12653..b988c7bdc6e 100644
--- a/arch/m68knommu/kernel/asm-offsets.c
+++ b/arch/m68knommu/kernel/asm-offsets.c
@@ -15,6 +15,7 @@
#include <linux/hardirq.h>
#include <asm/bootinfo.h>
#include <asm/irq.h>
+#include <asm/irqnode.h>
#include <asm/thread_info.h>
#define DEFINE(sym, val) \
diff --git a/arch/m68knommu/kernel/ptrace.c b/arch/m68knommu/kernel/ptrace.c
index 621d7b91ccf..262ab8c72e5 100644
--- a/arch/m68knommu/kernel/ptrace.c
+++ b/arch/m68knommu/kernel/ptrace.c
@@ -101,43 +101,10 @@ void ptrace_disable(struct task_struct *child)
put_reg(child, PT_SR, tmp);
}
-asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
- struct task_struct *child;
int ret;
- lock_kernel();
- ret = -EPERM;
- if (request == PTRACE_TRACEME) {
- /* are we already being traced? */
- if (current->ptrace & PT_PTRACED)
- goto out;
- /* set the ptrace bit in the process flags. */
- current->ptrace |= PT_PTRACED;
- ret = 0;
- goto out;
- }
- ret = -ESRCH;
- read_lock(&tasklist_lock);
- child = find_task_by_pid(pid);
- if (child)
- get_task_struct(child);
- read_unlock(&tasklist_lock);
- if (!child)
- goto out;
-
- ret = -EPERM;
- if (pid == 1) /* you may not mess with init */
- goto out_tsk;
-
- if (request == PTRACE_ATTACH) {
- ret = ptrace_attach(child);
- goto out_tsk;
- }
- ret = ptrace_check_attach(child, request == PTRACE_KILL);
- if (ret < 0)
- goto out_tsk;
-
switch (request) {
/* when I and D space are separate, these will need to be fixed. */
case PTRACE_PEEKTEXT: /* read word at location addr. */
@@ -357,10 +324,6 @@ asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
ret = -EIO;
break;
}
-out_tsk:
- put_task_struct(child);
-out:
- unlock_kernel();
return ret;
}
diff --git a/arch/m68knommu/kernel/setup.c b/arch/m68knommu/kernel/setup.c
index a220345e974..abb80fa2b94 100644
--- a/arch/m68knommu/kernel/setup.c
+++ b/arch/m68knommu/kernel/setup.c
@@ -107,6 +107,9 @@ void (*mach_power_off)( void ) = NULL;
#if defined(CONFIG_M5206e)
#define CPU "COLDFIRE(m5206e)"
#endif
+#if defined(CONFIG_M520x)
+ #define CPU "COLDFIRE(m520x)"
+#endif
#if defined(CONFIG_M523x)
#define CPU "COLDFIRE(m523x)"
#endif
@@ -132,7 +135,7 @@ void (*mach_power_off)( void ) = NULL;
#define CPU "COLDFIRE(m5407)"
#endif
#ifndef CPU
- #define CPU "UNKOWN"
+ #define CPU "UNKNOWN"
#endif
/* (es) */
diff --git a/arch/m68knommu/kernel/vmlinux.lds.S b/arch/m68knommu/kernel/vmlinux.lds.S
index 47f06787190..0eab92ca4b9 100644
--- a/arch/m68knommu/kernel/vmlinux.lds.S
+++ b/arch/m68knommu/kernel/vmlinux.lds.S
@@ -125,6 +125,14 @@
#endif
/*
+ * The Freescale 5208EVB board has 32MB of RAM.
+ */
+#if defined(CONFIG_M5208EVB)
+#define RAM_START 0x40020000
+#define RAM_LENGTH 0x01e00000
+#endif
+
+/*
* The senTec COBRA5272 board has nearly the same memory layout as
* the M5272C3. We assume 16MiB ram.
*/
@@ -275,6 +283,7 @@ SECTIONS {
*(__ksymtab_strings)
/* Built-in module parameters */
+ . = ALIGN(4) ;
__start___param = .;
*(__param)
__stop___param = .;
diff --git a/arch/m68knommu/platform/520x/Makefile b/arch/m68knommu/platform/520x/Makefile
new file mode 100644
index 00000000000..e861b05106b
--- /dev/null
+++ b/arch/m68knommu/platform/520x/Makefile
@@ -0,0 +1,19 @@
+#
+# Makefile for the M5208 specific file.
+#
+
+#
+# If you want to play with the HW breakpoints then you will
+# need to define this, which will give you a stack backtrace
+# on the console port whenever a DBG interrupt occurs. You have to
+# set up your HW breakpoints to trigger a DBG interrupt:
+#
+# EXTRA_CFLAGS += -DTRAP_DBG_INTERRUPT
+# EXTRA_AFLAGS += -DTRAP_DBG_INTERRUPT
+#
+
+ifdef CONFIG_FULLDEBUG
+AFLAGS += -DDEBUGGER_COMPATIBLE_CACHE=1
+endif
+
+obj-y := config.o
diff --git a/arch/m68knommu/platform/520x/config.c b/arch/m68knommu/platform/520x/config.c
new file mode 100644
index 00000000000..71dea2e0f45
--- /dev/null
+++ b/arch/m68knommu/platform/520x/config.c
@@ -0,0 +1,65 @@
+/***************************************************************************/
+
+/*
+ * linux/arch/m68knommu/platform/520x/config.c
+ *
+ * Copyright (C) 2005, Freescale (www.freescale.com)
+ * Copyright (C) 2005, Intec Automation (mike@steroidmicros.com)
+ * Copyright (C) 1999-2003, Greg Ungerer (gerg@snapgear.com)
+ * Copyright (C) 2001-2003, SnapGear Inc. (www.snapgear.com)
+ */
+
+/***************************************************************************/
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/param.h>
+#include <asm/machdep.h>
+#include <asm/dma.h>
+
+/***************************************************************************/
+
+/*
+ * DMA channel base address table.
+ */
+unsigned int dma_base_addr[MAX_M68K_DMA_CHANNELS];
+unsigned int dma_device_address[MAX_M68K_DMA_CHANNELS];
+
+/***************************************************************************/
+
+void coldfire_pit_tick(void);
+void coldfire_pit_init(irqreturn_t (*handler)(int, void *, struct pt_regs *));
+unsigned long coldfire_pit_offset(void);
+void coldfire_trap_init(void);
+void coldfire_reset(void);
+
+/***************************************************************************/
+
+/*
+ * Program the vector to be auto-vectored.
+ */
+
+void mcf_autovector(unsigned int vec)
+{
+ /* Everything is auto-vectored on the 520x devices */
+}
+
+/***************************************************************************/
+
+void config_BSP(char *commandp, int size)
+{
+#ifdef CONFIG_BOOTPARAM
+ strncpy(commandp, CONFIG_BOOTPARAM_STRING, size);
+ commandp[size-1] = 0;
+#else
+ memset(commandp, 0, size);
+#endif
+
+ mach_sched_init = coldfire_pit_init;
+ mach_tick = coldfire_pit_tick;
+ mach_gettimeoffset = coldfire_pit_offset;
+ mach_trap_init = coldfire_trap_init;
+ mach_reset = coldfire_reset;
+}
+
+/***************************************************************************/
diff --git a/arch/m68knommu/platform/5307/Makefile b/arch/m68knommu/platform/5307/Makefile
index 6fe5a2b8fb0..8d1619dc1ea 100644
--- a/arch/m68knommu/platform/5307/Makefile
+++ b/arch/m68knommu/platform/5307/Makefile
@@ -19,6 +19,7 @@ endif
obj-$(CONFIG_COLDFIRE) += entry.o vectors.o ints.o
obj-$(CONFIG_M5206) += timers.o
obj-$(CONFIG_M5206e) += timers.o
+obj-$(CONFIG_M520x) += pit.o
obj-$(CONFIG_M523x) += pit.o
obj-$(CONFIG_M5249) += timers.o
obj-$(CONFIG_M527x) += pit.o
diff --git a/arch/m68knommu/platform/5307/head.S b/arch/m68knommu/platform/5307/head.S
index 7f4ba837901..c30c462b99b 100644
--- a/arch/m68knommu/platform/5307/head.S
+++ b/arch/m68knommu/platform/5307/head.S
@@ -113,6 +113,9 @@
#define MEM_BASE 0x02000000
#define VBR_BASE 0x20000000 /* vectors in SRAM */
#endif
+#if defined(CONFIG_M5208EVB)
+#define MEM_BASE 0x40000000
+#endif
#ifndef MEM_BASE
#define MEM_BASE 0x00000000 /* memory base at address 0 */
diff --git a/arch/m68knommu/platform/5307/ints.c b/arch/m68knommu/platform/5307/ints.c
index 0117754d44f..a134fb2f056 100644
--- a/arch/m68knommu/platform/5307/ints.c
+++ b/arch/m68knommu/platform/5307/ints.c
@@ -26,6 +26,7 @@
#include <asm/system.h>
#include <asm/irq.h>
+#include <asm/irqnode.h>
#include <asm/traps.h>
#include <asm/page.h>
#include <asm/machdep.h>
diff --git a/arch/m68knommu/platform/5307/pit.c b/arch/m68knommu/platform/5307/pit.c
index a9b2c2e7e28..323f2677e49 100644
--- a/arch/m68knommu/platform/5307/pit.c
+++ b/arch/m68knommu/platform/5307/pit.c
@@ -3,7 +3,7 @@
/*
* pit.c -- Motorola ColdFire PIT timer. Currently this type of
* hardware timer only exists in the Motorola ColdFire
- * 5270/5271 and 5282 CPUs.
+ * 5270/5271, 5282 and other CPUs.
*
* Copyright (C) 1999-2004, Greg Ungerer (gerg@snapgear.com)
* Copyright (C) 2001-2004, SnapGear Inc. (www.snapgear.com)
@@ -47,10 +47,10 @@ void coldfire_pit_init(irqreturn_t (*handler)(int, void *, struct pt_regs *))
icrp = (volatile unsigned char *) (MCF_IPSBAR + MCFICM_INTC0 +
MCFINTC_ICR0 + MCFINT_PIT1);
- *icrp = 0x2b; /* PIT1 with level 5, priority 3 */
+ *icrp = ICR_INTRCONF;
- imrp = (volatile unsigned long *) (MCF_IPSBAR + MCFICM_INTC0 + MCFINTC_IMRH);
- *imrp &= ~(1 << (MCFINT_PIT1 - 32));
+ imrp = (volatile unsigned long *) (MCF_IPSBAR + MCFICM_INTC0 + MCFPIT_IMR);
+ *imrp &= ~MCFPIT_IMR_IBIT;
/* Set up PIT timer 1 as poll clock */
tp = (volatile struct mcfpit *) (MCF_IPSBAR + MCFPIT_BASE1);
@@ -70,7 +70,7 @@ unsigned long coldfire_pit_offset(void)
unsigned long pmr, pcntr, offset;
tp = (volatile struct mcfpit *) (MCF_IPSBAR + MCFPIT_BASE1);
- ipr = (volatile unsigned long *) (MCF_IPSBAR + MCFICM_INTC0 + MCFINTC_IPRH);
+ ipr = (volatile unsigned long *) (MCF_IPSBAR + MCFICM_INTC0 + MCFPIT_IMR);
pmr = *(&tp->pmr);
pcntr = *(&tp->pcntr);
@@ -80,7 +80,7 @@ unsigned long coldfire_pit_offset(void)
* timer interupt is pending, then add on a ticks worth of time.
*/
offset = ((pmr - pcntr) * (1000000 / HZ)) / pmr;
- if ((offset < (1000000 / HZ / 2)) && (*ipr & (1 << (MCFINT_PIT1 - 32))))
+ if ((offset < (1000000 / HZ / 2)) && (*ipr & MCFPIT_IMR_IBIT))
offset += 1000000 / HZ;
return offset;
}
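An aside on the hunk above: the offset calculation converts the PIT down-counter into microseconds elapsed within the current tick, and adds a whole tick when the counter reads early but an interrupt is already pending (i.e. the counter has wrapped unserviced). A standalone sketch of that arithmetic follows; the values HZ = 100 and pmr = 20000 are made up purely for illustration and are not taken from the patch.

#include <stdio.h>

#define HZ 100        /* assumed tick rate for the example only */

static unsigned long pit_offset(unsigned long pmr, unsigned long pcntr,
                                int irq_pending)
{
	/* fraction of the current tick already elapsed, in microseconds */
	unsigned long offset = ((pmr - pcntr) * (1000000 / HZ)) / pmr;

	/* early in the tick but an interrupt is pending: the counter has
	 * wrapped, so account for the full tick that has not been serviced */
	if ((offset < (1000000 / HZ / 2)) && irq_pending)
		offset += 1000000 / HZ;
	return offset;
}

int main(void)
{
	/* counter three quarters through its period: prints 7500 at HZ=100 */
	printf("%lu\n", pit_offset(20000, 5000, 0));
	return 0;
}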
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 0097a0d53b3..e380a8322a9 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -958,7 +958,7 @@ config SOC_PNX8550
bool
select DMA_NONCOHERENT
select HW_HAS_PCI
- select SYS_HAS_CPU_R4X00
+ select SYS_HAS_CPU_MIPS32_R1
select SYS_SUPPORTS_32BIT_KERNEL
config SWAP_IO_SPACE
diff --git a/arch/mips/boot/.gitignore b/arch/mips/boot/.gitignore
new file mode 100644
index 00000000000..ba63401c6e1
--- /dev/null
+++ b/arch/mips/boot/.gitignore
@@ -0,0 +1,4 @@
+mkboot
+elf2ecoff
+zImage
+zImage.tmp
diff --git a/arch/mips/configs/pnx8550-jbs_defconfig b/arch/mips/configs/pnx8550-jbs_defconfig
index 95f84d71191..555837e4c06 100644
--- a/arch/mips/configs/pnx8550-jbs_defconfig
+++ b/arch/mips/configs/pnx8550-jbs_defconfig
@@ -129,7 +129,7 @@ CONFIG_MIPS_L1_CACHE_SHIFT=5
#
# CPU selection
#
-# CONFIG_CPU_MIPS32_R1 is not set
+CONFIG_CPU_MIPS32_R1=y
# CONFIG_CPU_MIPS32_R2 is not set
# CONFIG_CPU_MIPS64_R1 is not set
# CONFIG_CPU_MIPS64_R2 is not set
@@ -137,7 +137,7 @@ CONFIG_MIPS_L1_CACHE_SHIFT=5
# CONFIG_CPU_TX39XX is not set
# CONFIG_CPU_VR41XX is not set
# CONFIG_CPU_R4300 is not set
-CONFIG_CPU_R4X00=y
+# CONFIG_CPU_R4X00 is not set
# CONFIG_CPU_TX49XX is not set
# CONFIG_CPU_R5000 is not set
# CONFIG_CPU_R5432 is not set
@@ -148,10 +148,11 @@ CONFIG_CPU_R4X00=y
# CONFIG_CPU_RM7000 is not set
# CONFIG_CPU_RM9000 is not set
# CONFIG_CPU_SB1 is not set
-CONFIG_SYS_HAS_CPU_R4X00=y
+CONFIG_SYS_HAS_CPU_MIPS32_R1=y
+CONFIG_CPU_MIPS32=y
+CONFIG_CPU_MIPSR1=y
CONFIG_SYS_SUPPORTS_32BIT_KERNEL=y
CONFIG_CPU_SUPPORTS_32BIT_KERNEL=y
-CONFIG_CPU_SUPPORTS_64BIT_KERNEL=y
#
# Kernel type
@@ -162,11 +163,11 @@ CONFIG_PAGE_SIZE_4KB=y
# CONFIG_PAGE_SIZE_8KB is not set
# CONFIG_PAGE_SIZE_16KB is not set
# CONFIG_PAGE_SIZE_64KB is not set
+CONFIG_CPU_HAS_PREFETCH=y
# CONFIG_MIPS_MT is not set
# CONFIG_64BIT_PHYS_ADDR is not set
# CONFIG_CPU_ADVANCED is not set
CONFIG_CPU_HAS_LLSC=y
-CONFIG_CPU_HAS_LLDSCD=y
CONFIG_CPU_HAS_SYNC=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_IRQ_PROBE=y
diff --git a/arch/mips/configs/pnx8550-v2pci_defconfig b/arch/mips/configs/pnx8550-v2pci_defconfig
index deb24c29ac0..05e65206a7b 100644
--- a/arch/mips/configs/pnx8550-v2pci_defconfig
+++ b/arch/mips/configs/pnx8550-v2pci_defconfig
@@ -128,7 +128,7 @@ CONFIG_MIPS_L1_CACHE_SHIFT=5
#
# CPU selection
#
-# CONFIG_CPU_MIPS32_R1 is not set
+CONFIG_CPU_MIPS32_R1=y
# CONFIG_CPU_MIPS32_R2 is not set
# CONFIG_CPU_MIPS64_R1 is not set
# CONFIG_CPU_MIPS64_R2 is not set
@@ -136,7 +136,7 @@ CONFIG_MIPS_L1_CACHE_SHIFT=5
# CONFIG_CPU_TX39XX is not set
# CONFIG_CPU_VR41XX is not set
# CONFIG_CPU_R4300 is not set
-CONFIG_CPU_R4X00=y
+# CONFIG_CPU_R4X00 is not set
# CONFIG_CPU_TX49XX is not set
# CONFIG_CPU_R5000 is not set
# CONFIG_CPU_R5432 is not set
@@ -147,10 +147,11 @@ CONFIG_CPU_R4X00=y
# CONFIG_CPU_RM7000 is not set
# CONFIG_CPU_RM9000 is not set
# CONFIG_CPU_SB1 is not set
-CONFIG_SYS_HAS_CPU_R4X00=y
+CONFIG_SYS_HAS_CPU_MIPS32_R1=y
+CONFIG_CPU_MIPS32=y
+CONFIG_CPU_MIPSR1=y
CONFIG_SYS_SUPPORTS_32BIT_KERNEL=y
CONFIG_CPU_SUPPORTS_32BIT_KERNEL=y
-CONFIG_CPU_SUPPORTS_64BIT_KERNEL=y
#
# Kernel type
@@ -161,6 +162,7 @@ CONFIG_PAGE_SIZE_4KB=y
# CONFIG_PAGE_SIZE_8KB is not set
# CONFIG_PAGE_SIZE_16KB is not set
# CONFIG_PAGE_SIZE_64KB is not set
+CONFIG_CPU_HAS_PREFETCH=y
# CONFIG_MIPS_MT is not set
# CONFIG_64BIT_PHYS_ADDR is not set
CONFIG_CPU_ADVANCED=y
diff --git a/arch/mips/ddb5xxx/common/rtc_ds1386.c b/arch/mips/ddb5xxx/common/rtc_ds1386.c
index f5b11508ff2..995896ac0e3 100644
--- a/arch/mips/ddb5xxx/common/rtc_ds1386.c
+++ b/arch/mips/ddb5xxx/common/rtc_ds1386.c
@@ -41,7 +41,9 @@ rtc_ds1386_get_time(void)
u8 byte;
u8 temp;
unsigned int year, month, day, hour, minute, second;
+ unsigned long flags;
+ spin_lock_irqsave(&rtc_lock, flags);
/* let us freeze external registers */
byte = READ_RTC(0xB);
byte &= 0x3f;
@@ -60,6 +62,7 @@ rtc_ds1386_get_time(void)
/* enable time transfer */
byte |= 0x80;
WRITE_RTC(0xB, byte);
+ spin_unlock_irqrestore(&rtc_lock, flags);
/* calc hour */
if (temp & 0x40) {
@@ -81,7 +84,9 @@ rtc_ds1386_set_time(unsigned long t)
u8 byte;
u8 temp;
u8 year, month, day, hour, minute, second;
+ unsigned long flags;
+ spin_lock_irqsave(&rtc_lock, flags);
/* let us freeze external registers */
byte = READ_RTC(0xB);
byte &= 0x3f;
@@ -133,6 +138,7 @@ rtc_ds1386_set_time(unsigned long t)
if (second != READ_RTC(0x1)) {
WRITE_RTC(0x1, second);
}
+ spin_unlock_irqrestore(&rtc_lock, flags);
return 0;
}
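This and the following RTC hunks all apply the same fix: chip register accesses that previously ran unprotected are now serialized on the global rtc_lock with local interrupts disabled. A minimal sketch of the pattern is below; rtc_read_registers() merely stands in for the chip-specific accesses and is not a real kernel function.

#include <linux/spinlock.h>

extern spinlock_t rtc_lock;	/* global RTC lock provided by the kernel */

static unsigned long example_rtc_get_time(void)
{
	unsigned long flags;
	unsigned long t;

	spin_lock_irqsave(&rtc_lock, flags);	/* take the lock, mask local IRQs */
	t = rtc_read_registers();		/* hypothetical chip access */
	spin_unlock_irqrestore(&rtc_lock, flags);

	return t;
}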
diff --git a/arch/mips/dec/time.c b/arch/mips/dec/time.c
index dc7091caa7a..17482234413 100644
--- a/arch/mips/dec/time.c
+++ b/arch/mips/dec/time.c
@@ -37,10 +37,25 @@
#include <asm/dec/machtype.h>
+/*
+ * Returns true if a clock update is in progress
+ */
+static inline unsigned char dec_rtc_is_updating(void)
+{
+ unsigned char uip;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rtc_lock, flags);
+ uip = (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP);
+ spin_unlock_irqrestore(&rtc_lock, flags);
+ return uip;
+}
+
static unsigned long dec_rtc_get_time(void)
{
unsigned int year, mon, day, hour, min, sec, real_year;
int i;
+ unsigned long flags;
/* The Linux interpretation of the DS1287 clock register contents:
* When the Update-In-Progress (UIP) flag goes from 1 to 0, the
@@ -49,11 +64,12 @@ static unsigned long dec_rtc_get_time(void)
*/
/* read RTC exactly on falling edge of update flag */
for (i = 0; i < 1000000; i++) /* may take up to 1 second... */
- if (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP)
+ if (dec_rtc_is_updating())
break;
for (i = 0; i < 1000000; i++) /* must try at least 2.228 ms */
- if (!(CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP))
+ if (!dec_rtc_is_updating())
break;
+ spin_lock_irqsave(&rtc_lock, flags);
/* Isn't this overkill? UIP above should guarantee consistency */
do {
sec = CMOS_READ(RTC_SECONDS);
@@ -77,6 +93,7 @@ static unsigned long dec_rtc_get_time(void)
* of unused BBU RAM locations.
*/
real_year = CMOS_READ(RTC_DEC_YEAR);
+ spin_unlock_irqrestore(&rtc_lock, flags);
year += real_year - 72 + 2000;
return mktime(year, mon, day, hour, min, sec);
@@ -95,6 +112,8 @@ static int dec_rtc_set_mmss(unsigned long nowtime)
int real_seconds, real_minutes, cmos_minutes;
unsigned char save_control, save_freq_select;
+ /* irq are locally disabled here */
+ spin_lock(&rtc_lock);
/* tell the clock it's being set */
save_control = CMOS_READ(RTC_CONTROL);
CMOS_WRITE((save_control | RTC_SET), RTC_CONTROL);
@@ -141,6 +160,7 @@ static int dec_rtc_set_mmss(unsigned long nowtime)
*/
CMOS_WRITE(save_control, RTC_CONTROL);
CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
+ spin_unlock(&rtc_lock);
return retval;
}
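The dec_rtc_is_updating() helper added above lets the caller wait for the falling edge of the Update-In-Progress bit before reading: once UIP drops, the DS1287 registers stay stable long enough to be read consistently. A sketch of that protocol in isolation, where rtc_uip() and rtc_read_time() are placeholders for the CMOS accesses rather than real functions:

static unsigned long read_rtc_stable(void)
{
	int i;

	for (i = 0; i < 1000000; i++)	/* wait for an update cycle to begin */
		if (rtc_uip())
			break;
	for (i = 0; i < 1000000; i++)	/* ... and for it to complete */
		if (!rtc_uip())
			break;
	return rtc_read_time();		/* registers are now stable */
}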
diff --git a/arch/mips/jmr3927/common/rtc_ds1742.c b/arch/mips/jmr3927/common/rtc_ds1742.c
index 1ae4318e135..8b407d7dc46 100644
--- a/arch/mips/jmr3927/common/rtc_ds1742.c
+++ b/arch/mips/jmr3927/common/rtc_ds1742.c
@@ -57,7 +57,9 @@ rtc_ds1742_get_time(void)
{
unsigned int year, month, day, hour, minute, second;
unsigned int century;
+ unsigned long flags;
+ spin_lock_irqsave(&rtc_lock, flags);
CMOS_WRITE(RTC_READ, RTC_CONTROL);
second = BCD2BIN(CMOS_READ(RTC_SECONDS) & RTC_SECONDS_MASK);
minute = BCD2BIN(CMOS_READ(RTC_MINUTES));
@@ -67,6 +69,7 @@ rtc_ds1742_get_time(void)
year = BCD2BIN(CMOS_READ(RTC_YEAR));
century = BCD2BIN(CMOS_READ(RTC_CENTURY) & RTC_CENTURY_MASK);
CMOS_WRITE(0, RTC_CONTROL);
+ spin_unlock_irqrestore(&rtc_lock, flags);
year += century * 100;
@@ -81,7 +84,9 @@ rtc_ds1742_set_time(unsigned long t)
u8 year, month, day, hour, minute, second;
u8 cmos_year, cmos_month, cmos_day, cmos_hour, cmos_minute, cmos_second;
int cmos_century;
+ unsigned long flags;
+ spin_lock_irqsave(&rtc_lock, flags);
CMOS_WRITE(RTC_READ, RTC_CONTROL);
cmos_second = (u8)(CMOS_READ(RTC_SECONDS) & RTC_SECONDS_MASK);
cmos_minute = (u8)CMOS_READ(RTC_MINUTES);
@@ -139,6 +144,7 @@ rtc_ds1742_set_time(unsigned long t)
/* RTC_CENTURY and RTC_CONTROL share same address... */
CMOS_WRITE(cmos_century, RTC_CONTROL);
+ spin_unlock_irqrestore(&rtc_lock, flags);
return 0;
}
diff --git a/arch/mips/kernel/irixsig.c b/arch/mips/kernel/irixsig.c
index 908e6368420..dd118c60bcd 100644
--- a/arch/mips/kernel/irixsig.c
+++ b/arch/mips/kernel/irixsig.c
@@ -502,8 +502,7 @@ asmlinkage int irix_sigpoll_sys(unsigned long __user *set,
while(1) {
long tmp = 0;
- current->state = TASK_INTERRUPTIBLE;
- expire = schedule_timeout(expire);
+ expire = schedule_timeout_interruptible(expire);
for (i=0; i<=4; i++)
tmp |= (current->pending.signal.sig[i] & kset.sig[i]);
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index f1b0f3e1f95..510da5fda56 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -174,51 +174,10 @@ int ptrace_setfpregs (struct task_struct *child, __u32 __user *data)
return 0;
}
-asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
- struct task_struct *child;
int ret;
-#if 0
- printk("ptrace(r=%d,pid=%d,addr=%08lx,data=%08lx)\n",
- (int) request, (int) pid, (unsigned long) addr,
- (unsigned long) data);
-#endif
- lock_kernel();
- ret = -EPERM;
- if (request == PTRACE_TRACEME) {
- /* are we already being traced? */
- if (current->ptrace & PT_PTRACED)
- goto out;
- if ((ret = security_ptrace(current->parent, current)))
- goto out;
- /* set the ptrace bit in the process flags. */
- current->ptrace |= PT_PTRACED;
- ret = 0;
- goto out;
- }
- ret = -ESRCH;
- read_lock(&tasklist_lock);
- child = find_task_by_pid(pid);
- if (child)
- get_task_struct(child);
- read_unlock(&tasklist_lock);
- if (!child)
- goto out;
-
- ret = -EPERM;
- if (pid == 1) /* you may not mess with init */
- goto out_tsk;
-
- if (request == PTRACE_ATTACH) {
- ret = ptrace_attach(child);
- goto out_tsk;
- }
-
- ret = ptrace_check_attach(child, request == PTRACE_KILL);
- if (ret < 0)
- goto out_tsk;
-
switch (request) {
/* when I and D space are separate, these will need to be fixed. */
case PTRACE_PEEKTEXT: /* read word at location addr. */
@@ -319,7 +278,7 @@ asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
if (!cpu_has_dsp) {
tmp = 0;
ret = -EIO;
- goto out_tsk;
+ goto out;
}
if (child->thread.dsp.used_dsp) {
dregs = __get_dsp_regs(child);
@@ -333,14 +292,14 @@ asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
if (!cpu_has_dsp) {
tmp = 0;
ret = -EIO;
- goto out_tsk;
+ goto out;
}
tmp = child->thread.dsp.dspcontrol;
break;
default:
tmp = 0;
ret = -EIO;
- goto out_tsk;
+ goto out;
}
ret = put_user(tmp, (unsigned long __user *) data);
break;
@@ -495,11 +454,7 @@ asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
ret = ptrace_request(child, request, addr, data);
break;
}
-
-out_tsk:
- put_task_struct(child);
-out:
- unlock_kernel();
+ out:
return ret;
}
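The boilerplate deleted here (TRACEME handling, pid lookup, the init check, attach and attach-check) moves into the generic ptrace syscall, which calls the per-architecture hook as its last step. Below is a rough reconstruction of that caller, pieced together from the removed lines; the helper names are assumptions rather than quotes from kernel/ptrace.c.

asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
{
	struct task_struct *child;
	long ret;

	lock_kernel();
	if (request == PTRACE_TRACEME) {
		ret = ptrace_traceme();		/* was open-coded in every arch */
		goto out;
	}

	child = ptrace_get_task_struct(pid);	/* find_task_by_pid + get_task_struct */
	if (IS_ERR(child)) {
		ret = PTR_ERR(child);
		goto out;
	}

	if (request == PTRACE_ATTACH) {
		ret = ptrace_attach(child);
	} else {
		ret = ptrace_check_attach(child, request == PTRACE_KILL);
		if (ret >= 0)
			ret = arch_ptrace(child, request, addr, data);
	}

	put_task_struct(child);
out:
	unlock_kernel();
	return ret;
}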
diff --git a/arch/mips/kernel/rtlx.c b/arch/mips/kernel/rtlx.c
index 8c81f3cb4e2..1d855112bac 100644
--- a/arch/mips/kernel/rtlx.c
+++ b/arch/mips/kernel/rtlx.c
@@ -20,42 +20,42 @@
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/init.h>
-#include <asm/uaccess.h>
-#include <linux/slab.h>
-#include <linux/list.h>
-#include <linux/vmalloc.h>
-#include <linux/elf.h>
-#include <linux/seq_file.h>
-#include <linux/syscalls.h>
-#include <linux/moduleloader.h>
-#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <asm/mipsmtregs.h>
-#include <asm/cacheflush.h>
-#include <asm/atomic.h>
+#include <asm/bitops.h>
#include <asm/cpu.h>
#include <asm/processor.h>
-#include <asm/system.h>
#include <asm/rtlx.h>
+#include <asm/uaccess.h>
-#define RTLX_MAJOR 64
#define RTLX_TARG_VPE 1
-struct rtlx_info *rtlx;
+static struct rtlx_info *rtlx;
static int major;
static char module_name[] = "rtlx";
-static inline int spacefree(int read, int write, int size);
+static struct irqaction irq;
+static int irq_num;
+
+static inline int spacefree(int read, int write, int size)
+{
+ if (read == write) {
+ /*
+	 * never fill the buffer completely, so the indexes are equal
+	 * only when the buffer is empty, and unequal whenever data is available
+ */
+ return size - 1;
+ }
+
+ return ((read + size - write) % size) - 1;
+}
static struct chan_waitqueues {
wait_queue_head_t rt_queue;
wait_queue_head_t lx_queue;
} channel_wqs[RTLX_CHANNELS];
-static struct irqaction irq;
-static int irq_num;
-
extern void *vpe_get_shared(int index);
static void rtlx_dispatch(struct pt_regs *regs)
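The spacefree() helper moved into the hunk above is the usual one-slot-reserved ring-buffer computation: read == write means empty, and one byte is always sacrificed so that a full buffer never looks empty. A standalone check of the arithmetic (user-space C, for illustration only):

#include <assert.h>

static int spacefree(int read, int write, int size)
{
	if (read == write)
		return size - 1;			/* empty: one slot kept unused */
	return ((read + size - write) % size) - 1;
}

int main(void)
{
	assert(spacefree(0, 0, 8) == 7);	/* empty buffer */
	assert(spacefree(0, 7, 8) == 0);	/* full: write just behind read */
	assert(spacefree(3, 5, 8) == 5);	/* 2 bytes queued, 5 free */
	return 0;
}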
@@ -63,9 +63,8 @@ static void rtlx_dispatch(struct pt_regs *regs)
do_IRQ(MIPSCPU_INT_BASE + MIPS_CPU_RTLX_IRQ, regs);
}
-irqreturn_t rtlx_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+static irqreturn_t rtlx_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
- irqreturn_t r = IRQ_HANDLED;
int i;
for (i = 0; i < RTLX_CHANNELS; i++) {
@@ -75,30 +74,7 @@ irqreturn_t rtlx_interrupt(int irq, void *dev_id, struct pt_regs *regs)
wake_up_interruptible(&channel_wqs[i].lx_queue);
}
- return r;
-}
-
-void dump_rtlx(void)
-{
- int i;
-
- printk("id 0x%lx state %d\n", rtlx->id, rtlx->state);
-
- for (i = 0; i < RTLX_CHANNELS; i++) {
- struct rtlx_channel *chan = &rtlx->channel[i];
-
- printk(" rt_state %d lx_state %d buffer_size %d\n",
- chan->rt_state, chan->lx_state, chan->buffer_size);
-
- printk(" rt_read %d rt_write %d\n",
- chan->rt_read, chan->rt_write);
-
- printk(" lx_read %d lx_write %d\n",
- chan->lx_read, chan->lx_write);
-
- printk(" rt_buffer <%s>\n", chan->rt_buffer);
- printk(" lx_buffer <%s>\n", chan->lx_buffer);
- }
+ return IRQ_HANDLED;
}
/* call when we have the address of the shared structure from the SP side. */
@@ -108,7 +84,7 @@ static int rtlx_init(struct rtlx_info *rtlxi)
if (rtlxi->id != RTLX_ID) {
printk(KERN_WARNING "no valid RTLX id at 0x%p\n", rtlxi);
- return (-ENOEXEC);
+ return -ENOEXEC;
}
/* initialise the wait queues */
@@ -120,9 +96,8 @@ static int rtlx_init(struct rtlx_info *rtlxi)
/* set up for interrupt handling */
memset(&irq, 0, sizeof(struct irqaction));
- if (cpu_has_vint) {
+ if (cpu_has_vint)
set_vi_handler(MIPS_CPU_RTLX_IRQ, rtlx_dispatch);
- }
irq_num = MIPSCPU_INT_BASE + MIPS_CPU_RTLX_IRQ;
irq.handler = rtlx_interrupt;
@@ -132,7 +107,8 @@ static int rtlx_init(struct rtlx_info *rtlxi)
setup_irq(irq_num, &irq);
rtlx = rtlxi;
- return (0);
+
+ return 0;
}
/* only allow one open process at a time to open each channel */
@@ -147,36 +123,36 @@ static int rtlx_open(struct inode *inode, struct file *filp)
if (rtlx == NULL) {
struct rtlx_info **p;
if( (p = vpe_get_shared(RTLX_TARG_VPE)) == NULL) {
- printk(" vpe_get_shared is NULL. Has an SP program been loaded?\n");
- return (-EFAULT);
+ printk(KERN_ERR "vpe_get_shared is NULL. "
+ "Has an SP program been loaded?\n");
+ return -EFAULT;
}
if (*p == NULL) {
- printk(" vpe_shared %p %p\n", p, *p);
- return (-EFAULT);
+ printk(KERN_ERR "vpe_shared %p %p\n", p, *p);
+ return -EFAULT;
}
if ((ret = rtlx_init(*p)) < 0)
- return (ret);
+ return ret;
}
chan = &rtlx->channel[minor];
- /* already open? */
- if (chan->lx_state == RTLX_STATE_OPENED)
- return (-EBUSY);
+ if (test_and_set_bit(RTLX_STATE_OPENED, &chan->lx_state))
+ return -EBUSY;
- chan->lx_state = RTLX_STATE_OPENED;
- return (0);
+ return 0;
}
static int rtlx_release(struct inode *inode, struct file *filp)
{
- int minor;
+ int minor = MINOR(inode->i_rdev);
- minor = MINOR(inode->i_rdev);
- rtlx->channel[minor].lx_state = RTLX_STATE_UNUSED;
- return (0);
+ clear_bit(RTLX_STATE_OPENED, &rtlx->channel[minor].lx_state);
+ smp_mb__after_clear_bit();
+
+ return 0;
}
static unsigned int rtlx_poll(struct file *file, poll_table * wait)
@@ -199,12 +175,13 @@ static unsigned int rtlx_poll(struct file *file, poll_table * wait)
if (spacefree(chan->rt_read, chan->rt_write, chan->buffer_size))
mask |= POLLOUT | POLLWRNORM;
- return (mask);
+ return mask;
}
static ssize_t rtlx_read(struct file *file, char __user * buffer, size_t count,
loff_t * ppos)
{
+ unsigned long failed;
size_t fl = 0L;
int minor;
struct rtlx_channel *lx;
@@ -216,7 +193,7 @@ static ssize_t rtlx_read(struct file *file, char __user * buffer, size_t count,
/* data available? */
if (lx->lx_write == lx->lx_read) {
if (file->f_flags & O_NONBLOCK)
- return (0); // -EAGAIN makes cat whinge
+ return 0; /* -EAGAIN makes cat whinge */
/* go to sleep */
add_wait_queue(&channel_wqs[minor].lx_queue, &wait);
@@ -232,39 +209,39 @@ static ssize_t rtlx_read(struct file *file, char __user * buffer, size_t count,
}
/* find out how much in total */
- count = min( count,
- (size_t)(lx->lx_write + lx->buffer_size - lx->lx_read) % lx->buffer_size);
+ count = min(count,
+ (size_t)(lx->lx_write + lx->buffer_size - lx->lx_read) % lx->buffer_size);
/* then how much from the read pointer onwards */
- fl = min( count, (size_t)lx->buffer_size - lx->lx_read);
+ fl = min(count, (size_t)lx->buffer_size - lx->lx_read);
- copy_to_user (buffer, &lx->lx_buffer[lx->lx_read], fl);
+ failed = copy_to_user (buffer, &lx->lx_buffer[lx->lx_read], fl);
+ if (failed) {
+ count = fl - failed;
+ goto out;
+ }
/* and if there is anything left at the beginning of the buffer */
- if ( count - fl )
- copy_to_user (buffer + fl, lx->lx_buffer, count - fl);
+ if (count - fl) {
+ failed = copy_to_user (buffer + fl, lx->lx_buffer, count - fl);
+ if (failed) {
+ count -= failed;
+ goto out;
+ }
+ }
+out:
/* update the index */
lx->lx_read += count;
lx->lx_read %= lx->buffer_size;
- return (count);
-}
-
-static inline int spacefree(int read, int write, int size)
-{
- if (read == write) {
- /* never fill the buffer completely, so indexes are always equal if empty
- and only empty, or !equal if data available */
- return (size - 1);
- }
-
- return ((read + size - write) % size) - 1;
+ return count;
}
static ssize_t rtlx_write(struct file *file, const char __user * buffer,
size_t count, loff_t * ppos)
{
+ unsigned long failed;
int minor;
struct rtlx_channel *rt;
size_t fl;
@@ -277,7 +254,7 @@ static ssize_t rtlx_write(struct file *file, const char __user * buffer,
if (!spacefree(rt->rt_read, rt->rt_write, rt->buffer_size)) {
if (file->f_flags & O_NONBLOCK)
- return (-EAGAIN);
+ return -EAGAIN;
add_wait_queue(&channel_wqs[minor].rt_queue, &wait);
set_current_state(TASK_INTERRUPTIBLE);
@@ -290,52 +267,64 @@ static ssize_t rtlx_write(struct file *file, const char __user * buffer,
}
/* total number of bytes to copy */
- count = min( count, (size_t)spacefree(rt->rt_read, rt->rt_write, rt->buffer_size) );
+ count = min(count, (size_t)spacefree(rt->rt_read, rt->rt_write, rt->buffer_size) );
/* first bit from write pointer to the end of the buffer, or count */
fl = min(count, (size_t) rt->buffer_size - rt->rt_write);
- copy_from_user(&rt->rt_buffer[rt->rt_write], buffer, fl);
+ failed = copy_from_user(&rt->rt_buffer[rt->rt_write], buffer, fl);
+ if (failed) {
+ count = fl - failed;
+ goto out;
+ }
/* if there's any left copy to the beginning of the buffer */
- if( count - fl )
- copy_from_user(rt->rt_buffer, buffer + fl, count - fl);
+ if (count - fl) {
+ failed = copy_from_user(rt->rt_buffer, buffer + fl, count - fl);
+ if (failed) {
+ count -= failed;
+ goto out;
+ }
+ }
+out:
rt->rt_write += count;
rt->rt_write %= rt->buffer_size;
- return(count);
+ return count;
}
static struct file_operations rtlx_fops = {
- .owner = THIS_MODULE,
- .open = rtlx_open,
- .release = rtlx_release,
- .write = rtlx_write,
- .read = rtlx_read,
- .poll = rtlx_poll
+ .owner = THIS_MODULE,
+ .open = rtlx_open,
+ .release = rtlx_release,
+ .write = rtlx_write,
+ .read = rtlx_read,
+ .poll = rtlx_poll
};
-static int rtlx_module_init(void)
+static char register_chrdev_failed[] __initdata =
+ KERN_ERR "rtlx_module_init: unable to register device\n";
+
+static int __init rtlx_module_init(void)
{
- if ((major = register_chrdev(RTLX_MAJOR, module_name, &rtlx_fops)) < 0) {
- printk("rtlx_module_init: unable to register device\n");
- return (-EBUSY);
+ major = register_chrdev(0, module_name, &rtlx_fops);
+ if (major < 0) {
+ printk(register_chrdev_failed);
+ return major;
}
- if (major == 0)
- major = RTLX_MAJOR;
-
- return (0);
+ return 0;
}
-static void rtlx_module_exit(void)
+static void __exit rtlx_module_exit(void)
{
unregister_chrdev(major, module_name);
}
module_init(rtlx_module_init);
module_exit(rtlx_module_exit);
+
MODULE_DESCRIPTION("MIPS RTLX");
-MODULE_AUTHOR("Elizabeth Clarke, MIPS Technologies, Inc");
+MODULE_AUTHOR("Elizabeth Clarke, MIPS Technologies, Inc.");
MODULE_LICENSE("GPL");
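One more note on the rtlx_read()/rtlx_write() changes above: copy_to_user() and copy_from_user() return the number of bytes that could not be copied, so on a short copy the code now trims the transfer count before advancing the ring indexes instead of pretending the whole chunk went through. A minimal sketch of that convention, where copy_chunk() is a hypothetical helper:

static size_t copy_chunk(char __user *dst, const char *src, size_t len)
{
	unsigned long failed;

	failed = copy_to_user(dst, src, len);	/* returns bytes NOT copied */
	return len - failed;			/* bytes actually delivered */
}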
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index 9202a17db8f..05e09eedabf 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -384,9 +384,6 @@ give_sigsegv:
return 0;
}
-extern void setup_rt_frame_n32(struct k_sigaction * ka,
- struct pt_regs *regs, int signr, sigset_t *set, siginfo_t *info);
-
static inline int handle_signal(unsigned long sig, siginfo_t *info,
struct k_sigaction *ka, sigset_t *oldset, struct pt_regs *regs)
{
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c
index dbe82130312..e315d3f6aa6 100644
--- a/arch/mips/kernel/signal32.c
+++ b/arch/mips/kernel/signal32.c
@@ -647,8 +647,8 @@ static inline void *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
return (void *)((sp - frame_size) & ALMASK);
}
-void setup_frame_32(struct k_sigaction * ka, struct pt_regs *regs,
- int signr, sigset_t *set)
+int setup_frame_32(struct k_sigaction * ka, struct pt_regs *regs,
+ int signr, sigset_t *set)
{
struct sigframe *frame;
int err = 0;
@@ -694,13 +694,15 @@ void setup_frame_32(struct k_sigaction * ka, struct pt_regs *regs,
current->comm, current->pid,
frame, regs->cp0_epc, frame->sf_code);
#endif
- return;
+ return 1;
give_sigsegv:
force_sigsegv(signr, current);
+ return 0;
}
-void setup_rt_frame_32(struct k_sigaction * ka, struct pt_regs *regs, int signr, sigset_t *set, siginfo_t *info)
+int setup_rt_frame_32(struct k_sigaction * ka, struct pt_regs *regs,
+ int signr, sigset_t *set, siginfo_t *info)
{
struct rt_sigframe32 *frame;
int err = 0;
@@ -763,10 +765,11 @@ void setup_rt_frame_32(struct k_sigaction * ka, struct pt_regs *regs, int signr,
current->comm, current->pid,
frame, regs->cp0_epc, frame->rs_code);
#endif
- return;
+ return 1;
give_sigsegv:
force_sigsegv(signr, current);
+ return 0;
}
static inline int handle_signal(unsigned long sig, siginfo_t *info,
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c
index 97fefcc9dbe..06be405be39 100644
--- a/arch/mips/kernel/vpe.c
+++ b/arch/mips/kernel/vpe.c
@@ -58,10 +58,6 @@
typedef void *vpe_handle;
-// defined here because the kernel module loader doesn't have
-// anything to do with it.
-#define SHN_MIPS_SCOMMON 0xff03
-
#ifndef ARCH_SHF_SMALL
#define ARCH_SHF_SMALL 0
#endif
@@ -69,11 +65,8 @@ typedef void *vpe_handle;
/* If this is set, the section belongs in the init part of the module */
#define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1))
-// temp number,
-#define VPE_MAJOR 63
-
static char module_name[] = "vpe";
-static int major = 0;
+static int major;
/* grab the likely amount of memory we will need. */
#ifdef CONFIG_MIPS_VPE_LOADER_TOM
@@ -98,22 +91,7 @@ enum tc_state {
TC_STATE_DYNAMIC
};
-struct vpe;
-typedef struct tc {
- enum tc_state state;
- int index;
-
- /* parent VPE */
- struct vpe *pvpe;
-
- /* The list of TC's with this VPE */
- struct list_head tc;
-
- /* The global list of tc's */
- struct list_head list;
-} tc_t;
-
-typedef struct vpe {
+struct vpe {
enum vpe_state state;
/* (device) minor associated with this vpe */
@@ -135,7 +113,21 @@ typedef struct vpe {
/* shared symbol address */
void *shared_ptr;
-} vpe_t;
+};
+
+struct tc {
+ enum tc_state state;
+ int index;
+
+ /* parent VPE */
+ struct vpe *pvpe;
+
+ /* The list of TC's with this VPE */
+ struct list_head tc;
+
+ /* The global list of tc's */
+ struct list_head list;
+};
struct vpecontrol_ {
/* Virtual processing elements */
@@ -146,7 +138,7 @@ struct vpecontrol_ {
} vpecontrol;
static void release_progmem(void *ptr);
-static void dump_vpe(vpe_t * v);
+static void dump_vpe(struct vpe * v);
extern void save_gp_address(unsigned int secbase, unsigned int rel);
/* get the vpe associated with this minor */
@@ -197,13 +189,11 @@ struct vpe *alloc_vpe(int minor)
{
struct vpe *v;
- if ((v = kmalloc(sizeof(struct vpe), GFP_KERNEL)) == NULL) {
+ if ((v = kzalloc(sizeof(struct vpe), GFP_KERNEL)) == NULL) {
printk(KERN_WARNING "VPE: alloc_vpe no mem\n");
return NULL;
}
- memset(v, 0, sizeof(struct vpe));
-
INIT_LIST_HEAD(&v->tc);
list_add_tail(&v->list, &vpecontrol.vpe_list);
@@ -216,13 +206,11 @@ struct tc *alloc_tc(int index)
{
struct tc *t;
- if ((t = kmalloc(sizeof(struct tc), GFP_KERNEL)) == NULL) {
+ if ((t = kzalloc(sizeof(struct tc), GFP_KERNEL)) == NULL) {
printk(KERN_WARNING "VPE: alloc_tc no mem\n");
return NULL;
}
- memset(t, 0, sizeof(struct tc));
-
INIT_LIST_HEAD(&t->tc);
list_add_tail(&t->list, &vpecontrol.tc_list);
@@ -412,16 +400,17 @@ static int apply_r_mips_26(struct module *me, uint32_t *location,
return -ENOEXEC;
}
-/* Not desperately convinced this is a good check of an overflow condition
- anyway. But it gets in the way of handling undefined weak symbols which
- we want to set to zero.
- if ((v & 0xf0000000) != (((unsigned long)location + 4) & 0xf0000000)) {
- printk(KERN_ERR
- "module %s: relocation overflow\n",
- me->name);
- return -ENOEXEC;
- }
-*/
+/*
+ * Not desperately convinced this is a good check of an overflow condition
+ * anyway. But it gets in the way of handling undefined weak symbols which
+ * we want to set to zero.
+ * if ((v & 0xf0000000) != (((unsigned long)location + 4) & 0xf0000000)) {
+ * printk(KERN_ERR
+ * "module %s: relocation overflow\n",
+ * me->name);
+ * return -ENOEXEC;
+ * }
+ */
*location = (*location & ~0x03ffffff) |
((*location + (v >> 2)) & 0x03ffffff);
@@ -681,7 +670,7 @@ static void dump_tclist(void)
}
/* We are prepared so configure and start the VPE... */
-int vpe_run(vpe_t * v)
+int vpe_run(struct vpe * v)
{
unsigned long val;
struct tc *t;
@@ -772,7 +761,7 @@ int vpe_run(vpe_t * v)
return 0;
}
-static unsigned long find_vpe_symbols(vpe_t * v, Elf_Shdr * sechdrs,
+static unsigned long find_vpe_symbols(struct vpe * v, Elf_Shdr * sechdrs,
unsigned int symindex, const char *strtab,
struct module *mod)
{
@@ -792,10 +781,12 @@ static unsigned long find_vpe_symbols(vpe_t * v, Elf_Shdr * sechdrs,
return 0;
}
-/* Allocates a VPE with some program code space(the load address), copies the contents
- of the program (p)buffer performing relocatations/etc, free's it when finished.
+/*
+ * Allocates a VPE with some program code space (the load address), copies
+ * the contents of the program (p)buffer performing relocations/etc,
+ * frees it when finished.
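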
*/
-int vpe_elfload(vpe_t * v)
+int vpe_elfload(struct vpe * v)
{
Elf_Ehdr *hdr;
Elf_Shdr *sechdrs;
@@ -931,7 +922,7 @@ cleanup:
return err;
}
-static void dump_vpe(vpe_t * v)
+static void dump_vpe(struct vpe * v)
{
struct tc *t;
@@ -947,7 +938,7 @@ static void dump_vpe(vpe_t * v)
static int vpe_open(struct inode *inode, struct file *filp)
{
int minor;
- vpe_t *v;
+ struct vpe *v;
/* assume only 1 device at the mo. */
if ((minor = MINOR(inode->i_rdev)) != 1) {
@@ -1001,7 +992,7 @@ static int vpe_open(struct inode *inode, struct file *filp)
static int vpe_release(struct inode *inode, struct file *filp)
{
int minor, ret = 0;
- vpe_t *v;
+ struct vpe *v;
Elf_Ehdr *hdr;
minor = MINOR(inode->i_rdev);
@@ -1035,7 +1026,7 @@ static ssize_t vpe_write(struct file *file, const char __user * buffer,
{
int minor;
size_t ret = count;
- vpe_t *v;
+ struct vpe *v;
minor = MINOR(file->f_dentry->d_inode->i_rdev);
if ((v = get_vpe(minor)) == NULL)
@@ -1180,14 +1171,11 @@ static int __init vpe_module_init(void)
return -ENODEV;
}
- if ((major = register_chrdev(VPE_MAJOR, module_name, &vpe_fops) < 0)) {
+ if ((major = register_chrdev(0, module_name, &vpe_fops) < 0)) {
printk("VPE loader: unable to register character device\n");
- return -EBUSY;
+ return major;
}
- if (major == 0)
- major = VPE_MAJOR;
-
dmt();
dvpe();
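Both this vpe hunk and the earlier rtlx one drop their hard-coded device majors: passing 0 to register_chrdev() asks the kernel to allocate a free major and return it (or a negative errno), which the module then reuses when unregistering. A minimal sketch of the idiom, with example_fops as a placeholder:

static struct file_operations example_fops;	/* placeholder fops */
static int major;

static int __init example_init(void)
{
	major = register_chrdev(0, "example", &example_fops);
	if (major < 0)
		return major;	/* propagate the errno rather than masking it */
	return 0;
}

static void __exit example_exit(void)
{
	unregister_chrdev(major, "example");
}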
diff --git a/arch/mips/lasat/ds1603.c b/arch/mips/lasat/ds1603.c
index 9d7812e03dc..7dced67c55e 100644
--- a/arch/mips/lasat/ds1603.c
+++ b/arch/mips/lasat/ds1603.c
@@ -8,6 +8,7 @@
#include <asm/lasat/lasat.h>
#include <linux/delay.h>
#include <asm/lasat/ds1603.h>
+#include <asm/time.h>
#include "ds1603.h"
@@ -138,19 +139,27 @@ static void rtc_end_op(void)
unsigned long ds1603_read(void)
{
unsigned long word;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rtc_lock, flags);
rtc_init_op();
rtc_write_byte(READ_TIME_CMD);
word = rtc_read_word();
rtc_end_op();
+ spin_unlock_irqrestore(&rtc_lock, flags);
return word;
}
int ds1603_set(unsigned long time)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rtc_lock, flags);
rtc_init_op();
rtc_write_byte(SET_TIME_CMD);
rtc_write_word(time);
rtc_end_op();
+ spin_unlock_irqrestore(&rtc_lock, flags);
return 0;
}
diff --git a/arch/mips/momentum/jaguar_atx/setup.c b/arch/mips/momentum/jaguar_atx/setup.c
index 768bf440645..bab192ddc18 100644
--- a/arch/mips/momentum/jaguar_atx/setup.c
+++ b/arch/mips/momentum/jaguar_atx/setup.c
@@ -149,7 +149,9 @@ arch_initcall(per_cpu_mappings);
unsigned long m48t37y_get_time(void)
{
unsigned int year, month, day, hour, min, sec;
+ unsigned long flags;
+ spin_lock_irqsave(&rtc_lock, flags);
/* stop the update */
rtc_base[0x7ff8] = 0x40;
@@ -166,6 +168,7 @@ unsigned long m48t37y_get_time(void)
/* start the update */
rtc_base[0x7ff8] = 0x00;
+ spin_unlock_irqrestore(&rtc_lock, flags);
return mktime(year, month, day, hour, min, sec);
}
@@ -173,11 +176,13 @@ unsigned long m48t37y_get_time(void)
int m48t37y_set_time(unsigned long sec)
{
struct rtc_time tm;
+ unsigned long flags;
/* convert to a more useful format -- note months count from 0 */
to_tm(sec, &tm);
tm.tm_mon += 1;
+ spin_lock_irqsave(&rtc_lock, flags);
/* enable writing */
rtc_base[0x7ff8] = 0x80;
@@ -201,6 +206,7 @@ int m48t37y_set_time(unsigned long sec)
/* disable writing */
rtc_base[0x7ff8] = 0x00;
+ spin_unlock_irqrestore(&rtc_lock, flags);
return 0;
}
diff --git a/arch/mips/momentum/ocelot_3/setup.c b/arch/mips/momentum/ocelot_3/setup.c
index a7803e08f9d..c9b7ff8148e 100644
--- a/arch/mips/momentum/ocelot_3/setup.c
+++ b/arch/mips/momentum/ocelot_3/setup.c
@@ -135,7 +135,9 @@ void setup_wired_tlb_entries(void)
unsigned long m48t37y_get_time(void)
{
unsigned int year, month, day, hour, min, sec;
+ unsigned long flags;
+ spin_lock_irqsave(&rtc_lock, flags);
/* stop the update */
rtc_base[0x7ff8] = 0x40;
@@ -152,6 +154,7 @@ unsigned long m48t37y_get_time(void)
/* start the update */
rtc_base[0x7ff8] = 0x00;
+ spin_unlock_irqrestore(&rtc_lock, flags);
return mktime(year, month, day, hour, min, sec);
}
@@ -159,11 +162,13 @@ unsigned long m48t37y_get_time(void)
int m48t37y_set_time(unsigned long sec)
{
struct rtc_time tm;
+ unsigned long flags;
/* convert to a more useful format -- note months count from 0 */
to_tm(sec, &tm);
tm.tm_mon += 1;
+ spin_lock_irqsave(&rtc_lock, flags);
/* enable writing */
rtc_base[0x7ff8] = 0x80;
@@ -187,6 +192,7 @@ int m48t37y_set_time(unsigned long sec)
/* disable writing */
rtc_base[0x7ff8] = 0x00;
+ spin_unlock_irqrestore(&rtc_lock, flags);
return 0;
}
diff --git a/arch/mips/momentum/ocelot_c/setup.c b/arch/mips/momentum/ocelot_c/setup.c
index ce70fc96f16..2755c154747 100644
--- a/arch/mips/momentum/ocelot_c/setup.c
+++ b/arch/mips/momentum/ocelot_c/setup.c
@@ -140,7 +140,9 @@ unsigned long m48t37y_get_time(void)
unsigned char* rtc_base = (unsigned char*)0xfc800000;
#endif
unsigned int year, month, day, hour, min, sec;
+ unsigned long flags;
+ spin_lock_irqsave(&rtc_lock, flags);
/* stop the update */
rtc_base[0x7ff8] = 0x40;
@@ -157,6 +159,7 @@ unsigned long m48t37y_get_time(void)
/* start the update */
rtc_base[0x7ff8] = 0x00;
+ spin_unlock_irqrestore(&rtc_lock, flags);
return mktime(year, month, day, hour, min, sec);
}
@@ -169,11 +172,13 @@ int m48t37y_set_time(unsigned long sec)
unsigned char* rtc_base = (unsigned char*)0xfc800000;
#endif
struct rtc_time tm;
+ unsigned long flags;
/* convert to a more useful format -- note months count from 0 */
to_tm(sec, &tm);
tm.tm_mon += 1;
+ spin_lock_irqsave(&rtc_lock, flags);
/* enable writing */
rtc_base[0x7ff8] = 0x80;
@@ -197,6 +202,7 @@ int m48t37y_set_time(unsigned long sec)
/* disable writing */
rtc_base[0x7ff8] = 0x00;
+ spin_unlock_irqrestore(&rtc_lock, flags);
return 0;
}
diff --git a/arch/mips/pmc-sierra/yosemite/setup.c b/arch/mips/pmc-sierra/yosemite/setup.c
index bdc2ab55bed..059755b5ed5 100644
--- a/arch/mips/pmc-sierra/yosemite/setup.c
+++ b/arch/mips/pmc-sierra/yosemite/setup.c
@@ -73,7 +73,9 @@ void __init bus_error_init(void)
unsigned long m48t37y_get_time(void)
{
unsigned int year, month, day, hour, min, sec;
+ unsigned long flags;
+ spin_lock_irqsave(&rtc_lock, flags);
/* Stop the update to the time */
m48t37_base->control = 0x40;
@@ -88,6 +90,7 @@ unsigned long m48t37y_get_time(void)
/* Start the update to the time again */
m48t37_base->control = 0x00;
+ spin_unlock_irqrestore(&rtc_lock, flags);
return mktime(year, month, day, hour, min, sec);
}
@@ -95,11 +98,13 @@ unsigned long m48t37y_get_time(void)
int m48t37y_set_time(unsigned long sec)
{
struct rtc_time tm;
+ unsigned long flags;
/* convert to a more useful format -- note months count from 0 */
to_tm(sec, &tm);
tm.tm_mon += 1;
+ spin_lock_irqsave(&rtc_lock, flags);
/* enable writing */
m48t37_base->control = 0x80;
@@ -123,6 +128,7 @@ int m48t37y_set_time(unsigned long sec)
/* disable writing */
m48t37_base->control = 0x00;
+ spin_unlock_irqrestore(&rtc_lock, flags);
return 0;
}
diff --git a/arch/mips/sgi-ip22/ip22-time.c b/arch/mips/sgi-ip22/ip22-time.c
index df9b5694328..b7300cc5c5a 100644
--- a/arch/mips/sgi-ip22/ip22-time.c
+++ b/arch/mips/sgi-ip22/ip22-time.c
@@ -35,7 +35,9 @@ static unsigned long indy_rtc_get_time(void)
{
unsigned int yrs, mon, day, hrs, min, sec;
unsigned int save_control;
+ unsigned long flags;
+ spin_lock_irqsave(&rtc_lock, flags);
save_control = hpc3c0->rtcregs[RTC_CMD] & 0xff;
hpc3c0->rtcregs[RTC_CMD] = save_control | RTC_TE;
@@ -47,6 +49,7 @@ static unsigned long indy_rtc_get_time(void)
yrs = BCD2BIN(hpc3c0->rtcregs[RTC_YEAR] & 0xff);
hpc3c0->rtcregs[RTC_CMD] = save_control;
+ spin_unlock_irqrestore(&rtc_lock, flags);
if (yrs < 45)
yrs += 30;
@@ -60,6 +63,7 @@ static int indy_rtc_set_time(unsigned long tim)
{
struct rtc_time tm;
unsigned int save_control;
+ unsigned long flags;
to_tm(tim, &tm);
@@ -68,6 +72,7 @@ static int indy_rtc_set_time(unsigned long tim)
if (tm.tm_year >= 100)
tm.tm_year -= 100;
+ spin_lock_irqsave(&rtc_lock, flags);
save_control = hpc3c0->rtcregs[RTC_CMD] & 0xff;
hpc3c0->rtcregs[RTC_CMD] = save_control | RTC_TE;
@@ -80,6 +85,7 @@ static int indy_rtc_set_time(unsigned long tim)
hpc3c0->rtcregs[RTC_HUNDREDTH_SECOND] = 0;
hpc3c0->rtcregs[RTC_CMD] = save_control;
+ spin_unlock_irqrestore(&rtc_lock, flags);
return 0;
}
diff --git a/arch/mips/sibyte/swarm/rtc_m41t81.c b/arch/mips/sibyte/swarm/rtc_m41t81.c
index 5b4fc26c1b3..c13914bdda5 100644
--- a/arch/mips/sibyte/swarm/rtc_m41t81.c
+++ b/arch/mips/sibyte/swarm/rtc_m41t81.c
@@ -144,6 +144,7 @@ static int m41t81_write(uint8_t addr, int b)
int m41t81_set_time(unsigned long t)
{
struct rtc_time tm;
+ unsigned long flags;
to_tm(t, &tm);
@@ -153,6 +154,7 @@ int m41t81_set_time(unsigned long t)
* believe we should finish writing min within a second.
*/
+ spin_lock_irqsave(&rtc_lock, flags);
tm.tm_sec = BIN2BCD(tm.tm_sec);
m41t81_write(M41T81REG_SC, tm.tm_sec);
@@ -180,6 +182,7 @@ int m41t81_set_time(unsigned long t)
tm.tm_year %= 100;
tm.tm_year = BIN2BCD(tm.tm_year);
m41t81_write(M41T81REG_YR, tm.tm_year);
+ spin_unlock_irqrestore(&rtc_lock, flags);
return 0;
}
@@ -187,19 +190,23 @@ int m41t81_set_time(unsigned long t)
unsigned long m41t81_get_time(void)
{
unsigned int year, mon, day, hour, min, sec;
+ unsigned long flags;
/*
* min is valid if two reads of sec are the same.
*/
for (;;) {
+ spin_lock_irqsave(&rtc_lock, flags);
sec = m41t81_read(M41T81REG_SC);
min = m41t81_read(M41T81REG_MN);
if (sec == m41t81_read(M41T81REG_SC)) break;
+ spin_unlock_irqrestore(&rtc_lock, flags);
}
hour = m41t81_read(M41T81REG_HR) & 0x3f;
day = m41t81_read(M41T81REG_DT);
mon = m41t81_read(M41T81REG_MO);
year = m41t81_read(M41T81REG_YR);
+ spin_unlock_irqrestore(&rtc_lock, flags);
sec = BCD2BIN(sec);
min = BCD2BIN(min);
diff --git a/arch/mips/sibyte/swarm/rtc_xicor1241.c b/arch/mips/sibyte/swarm/rtc_xicor1241.c
index d9ff9323f24..f4a17883641 100644
--- a/arch/mips/sibyte/swarm/rtc_xicor1241.c
+++ b/arch/mips/sibyte/swarm/rtc_xicor1241.c
@@ -113,9 +113,11 @@ int xicor_set_time(unsigned long t)
{
struct rtc_time tm;
int tmp;
+ unsigned long flags;
to_tm(t, &tm);
+ spin_lock_irqsave(&rtc_lock, flags);
/* unlock writes to the CCR */
xicor_write(X1241REG_SR, X1241REG_SR_WEL);
xicor_write(X1241REG_SR, X1241REG_SR_WEL | X1241REG_SR_RWEL);
@@ -160,6 +162,7 @@ int xicor_set_time(unsigned long t)
xicor_write(X1241REG_HR, tmp);
xicor_write(X1241REG_SR, 0);
+ spin_unlock_irqrestore(&rtc_lock, flags);
return 0;
}
@@ -167,7 +170,9 @@ int xicor_set_time(unsigned long t)
unsigned long xicor_get_time(void)
{
unsigned int year, mon, day, hour, min, sec, y2k;
+ unsigned long flags;
+ spin_lock_irqsave(&rtc_lock, flags);
sec = xicor_read(X1241REG_SC);
min = xicor_read(X1241REG_MN);
hour = xicor_read(X1241REG_HR);
@@ -183,6 +188,7 @@ unsigned long xicor_get_time(void)
mon = xicor_read(X1241REG_MO);
year = xicor_read(X1241REG_YR);
y2k = xicor_read(X1241REG_Y2K);
+ spin_unlock_irqrestore(&rtc_lock, flags);
sec = BCD2BIN(sec);
min = BCD2BIN(min);
diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c
index 18130c3748f..b6fe202a620 100644
--- a/arch/parisc/kernel/ptrace.c
+++ b/arch/parisc/kernel/ptrace.c
@@ -78,52 +78,13 @@ void ptrace_disable(struct task_struct *child)
pa_psw(child)->l = 0;
}
-long sys_ptrace(long request, long pid, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
- struct task_struct *child;
long ret;
#ifdef DEBUG_PTRACE
long oaddr=addr, odata=data;
#endif
- lock_kernel();
- ret = -EPERM;
- if (request == PTRACE_TRACEME) {
- /* are we already being traced? */
- if (current->ptrace & PT_PTRACED)
- goto out;
-
- ret = security_ptrace(current->parent, current);
- if (ret)
- goto out;
-
- /* set the ptrace bit in the process flags. */
- current->ptrace |= PT_PTRACED;
- ret = 0;
- goto out;
- }
-
- ret = -ESRCH;
- read_lock(&tasklist_lock);
- child = find_task_by_pid(pid);
- if (child)
- get_task_struct(child);
- read_unlock(&tasklist_lock);
- if (!child)
- goto out;
- ret = -EPERM;
- if (pid == 1) /* no messing around with init! */
- goto out_tsk;
-
- if (request == PTRACE_ATTACH) {
- ret = ptrace_attach(child);
- goto out_tsk;
- }
-
- ret = ptrace_check_attach(child, request == PTRACE_KILL);
- if (ret < 0)
- goto out_tsk;
-
switch (request) {
case PTRACE_PEEKTEXT: /* read word at location addr. */
case PTRACE_PEEKDATA: {
@@ -383,11 +344,11 @@ long sys_ptrace(long request, long pid, long addr, long data)
case PTRACE_GETEVENTMSG:
ret = put_user(child->ptrace_message, (unsigned int __user *) data);
- goto out_tsk;
+ goto out;
default:
ret = ptrace_request(child, request, addr, data);
- goto out_tsk;
+ goto out;
}
out_wake_notrap:
@@ -396,10 +357,7 @@ out_wake:
wake_up_process(child);
ret = 0;
out_tsk:
- put_task_struct(child);
-out:
- unlock_kernel();
- DBG("sys_ptrace(%ld, %d, %lx, %lx) returning %ld\n",
+ DBG("arch_ptrace(%ld, %d, %lx, %lx) returning %ld\n",
request, pid, oaddr, odata, ret);
return ret;
}
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index f4e25c648fb..1493c7896fe 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -404,6 +404,14 @@ config CPU_FREQ_PMAC
this currently includes some models of iBook & Titanium
PowerBook.
+config CPU_FREQ_PMAC64
+ bool "Support for some Apple G5s"
+ depends on CPU_FREQ && PMAC_SMU && PPC64
+ select CPU_FREQ_TABLE
+ help
+ This adds support for frequency switching on Apple iMac G5,
+ and some of the more recent desktop G5 machines as well.
+
config PPC601_SYNC_FIX
bool "Workarounds for PPC601 bugs"
depends on 6xx && (PPC_PREP || PPC_PMAC)
@@ -484,6 +492,7 @@ source "fs/Kconfig.binfmt"
config FORCE_MAX_ZONEORDER
int
depends on PPC64
+ default "9" if PPC_64K_PAGES
default "13"
config MATH_EMULATION
@@ -603,6 +612,16 @@ config NODES_SPAN_OTHER_NODES
def_bool y
depends on NEED_MULTIPLE_NODES
+config PPC_64K_PAGES
+ bool "64k page size"
+ depends on PPC64
+ help
+ This option changes the kernel logical page size to 64k. On machines
+ without processor support for 64k pages, the kernel will simulate
+ them by loading each individual 4k page on demand transparently,
+ while on hardware with such support, it will be used to map
+ normal application pages.
+
config SCHED_SMT
bool "SMT (Hyperthreading) scheduler support"
depends on PPC64 && SMP
@@ -907,8 +926,21 @@ source "arch/powerpc/platforms/iseries/Kconfig"
source "lib/Kconfig"
+menu "Instrumentation Support"
+ depends on EXPERIMENTAL
+
source "arch/powerpc/oprofile/Kconfig"
+config KPROBES
+ bool "Kprobes (EXPERIMENTAL)"
+ help
+ Kprobes allows you to trap at almost any kernel address and
+ execute a callback function. register_kprobe() establishes
+ a probepoint and specifies the callback. Kprobes is useful
+ for kernel debugging, non-intrusive instrumentation and testing.
+ If in doubt, say "N".
+endmenu
+
source "arch/powerpc/Kconfig.debug"
source "security/Kconfig"
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index 0baf64ec80d..30a30bf559e 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -9,16 +9,6 @@ config DEBUG_STACKOVERFLOW
This option will cause messages to be printed if free stack space
drops below a certain limit.
-config KPROBES
- bool "Kprobes"
- depends on DEBUG_KERNEL && PPC64
- help
- Kprobes allows you to trap at almost any kernel address and
- execute a callback function. register_kprobe() establishes
- a probepoint and specifies the callback. Kprobes is useful
- for kernel debugging, non-intrusive instrumentation and testing.
- If in doubt, say "N".
-
config DEBUG_STACK_USAGE
bool "Stack utilization instrumentation"
depends on DEBUG_KERNEL && PPC64
diff --git a/arch/powerpc/configs/g5_defconfig b/arch/powerpc/configs/g5_defconfig
index 6323065fbf2..e76854f8c12 100644
--- a/arch/powerpc/configs/g5_defconfig
+++ b/arch/powerpc/configs/g5_defconfig
@@ -1,18 +1,32 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.14-rc4
-# Thu Oct 20 08:30:23 2005
+# Linux kernel version: 2.6.14
+# Mon Nov 7 13:37:59 2005
#
+CONFIG_PPC64=y
CONFIG_64BIT=y
+CONFIG_PPC_MERGE=y
CONFIG_MMU=y
+CONFIG_GENERIC_HARDIRQS=y
CONFIG_RWSEM_XCHGADD_ALGORITHM=y
CONFIG_GENERIC_CALIBRATE_DELAY=y
-CONFIG_GENERIC_ISA_DMA=y
+CONFIG_PPC=y
CONFIG_EARLY_PRINTK=y
CONFIG_COMPAT=y
+CONFIG_SYSVIPC_COMPAT=y
CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
CONFIG_ARCH_MAY_HAVE_PC_FDC=y
-CONFIG_FORCE_MAX_ZONEORDER=13
+
+#
+# Processor support
+#
+CONFIG_POWER4_ONLY=y
+CONFIG_POWER4=y
+CONFIG_PPC_FPU=y
+CONFIG_ALTIVEC=y
+CONFIG_PPC_STD_MMU=y
+CONFIG_SMP=y
+CONFIG_NR_CPUS=2
#
# Code maturity level options
@@ -67,30 +81,60 @@ CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
CONFIG_KMOD=y
CONFIG_STOP_MACHINE=y
-CONFIG_SYSVIPC_COMPAT=y
#
# Platform support
#
-# CONFIG_PPC_ISERIES is not set
CONFIG_PPC_MULTIPLATFORM=y
+# CONFIG_PPC_ISERIES is not set
+# CONFIG_EMBEDDED6xx is not set
+# CONFIG_APUS is not set
# CONFIG_PPC_PSERIES is not set
-# CONFIG_PPC_BPA is not set
CONFIG_PPC_PMAC=y
+CONFIG_PPC_PMAC64=y
# CONFIG_PPC_MAPLE is not set
-CONFIG_PPC=y
-CONFIG_PPC64=y
+# CONFIG_PPC_CELL is not set
CONFIG_PPC_OF=y
-CONFIG_MPIC=y
-CONFIG_ALTIVEC=y
-CONFIG_KEXEC=y
CONFIG_U3_DART=y
-CONFIG_PPC_PMAC64=y
-CONFIG_BOOTX_TEXT=y
-CONFIG_POWER4_ONLY=y
+CONFIG_MPIC=y
+# CONFIG_PPC_RTAS is not set
+# CONFIG_MMIO_NVRAM is not set
+# CONFIG_PPC_MPC106 is not set
+CONFIG_GENERIC_TBSYNC=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_TABLE=y
+# CONFIG_CPU_FREQ_DEBUG is not set
+CONFIG_CPU_FREQ_STAT=y
+# CONFIG_CPU_FREQ_STAT_DETAILS is not set
+CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y
+# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set
+CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+# CONFIG_CPU_FREQ_GOV_ONDEMAND is not set
+# CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set
+CONFIG_CPU_FREQ_PMAC64=y
+# CONFIG_WANT_EARLY_SERIAL is not set
+
+#
+# Kernel options
+#
+# CONFIG_HZ_100 is not set
+CONFIG_HZ_250=y
+# CONFIG_HZ_1000 is not set
+CONFIG_HZ=250
+CONFIG_PREEMPT_NONE=y
+# CONFIG_PREEMPT_VOLUNTARY is not set
+# CONFIG_PREEMPT is not set
+# CONFIG_PREEMPT_BKL is not set
+CONFIG_BINFMT_ELF=y
+# CONFIG_BINFMT_MISC is not set
+CONFIG_FORCE_MAX_ZONEORDER=13
CONFIG_IOMMU_VMERGE=y
-CONFIG_SMP=y
-CONFIG_NR_CPUS=2
+# CONFIG_HOTPLUG_CPU is not set
+CONFIG_KEXEC=y
+CONFIG_IRQ_ALL_CPUS=y
+# CONFIG_NUMA is not set
CONFIG_ARCH_SELECT_MEMORY_MODEL=y
CONFIG_ARCH_FLATMEM_ENABLE=y
CONFIG_SELECT_MEMORY_MODEL=y
@@ -100,28 +144,21 @@ CONFIG_FLATMEM_MANUAL=y
CONFIG_FLATMEM=y
CONFIG_FLAT_NODE_MEM_MAP=y
# CONFIG_SPARSEMEM_STATIC is not set
-# CONFIG_NUMA is not set
+CONFIG_SPLIT_PTLOCK_CPUS=4
+# CONFIG_PPC_64K_PAGES is not set
# CONFIG_SCHED_SMT is not set
-CONFIG_PREEMPT_NONE=y
-# CONFIG_PREEMPT_VOLUNTARY is not set
-# CONFIG_PREEMPT is not set
-# CONFIG_PREEMPT_BKL is not set
-# CONFIG_HZ_100 is not set
-CONFIG_HZ_250=y
-# CONFIG_HZ_1000 is not set
-CONFIG_HZ=250
-CONFIG_GENERIC_HARDIRQS=y
-CONFIG_SECCOMP=y
-CONFIG_BINFMT_ELF=y
-# CONFIG_BINFMT_MISC is not set
-# CONFIG_HOTPLUG_CPU is not set
CONFIG_PROC_DEVICETREE=y
# CONFIG_CMDLINE_BOOL is not set
+# CONFIG_PM is not set
+CONFIG_SECCOMP=y
CONFIG_ISA_DMA_API=y
#
-# Bus Options
+# Bus options
#
+CONFIG_GENERIC_ISA_DMA=y
+# CONFIG_PPC_I8259 is not set
+# CONFIG_PPC_INDIRECT_PCI is not set
CONFIG_PCI=y
CONFIG_PCI_DOMAINS=y
CONFIG_PCI_LEGACY_PROC=y
@@ -136,6 +173,7 @@ CONFIG_PCI_LEGACY_PROC=y
# PCI Hotplug Support
#
# CONFIG_HOTPLUG_PCI is not set
+CONFIG_KERNEL_START=0xc000000000000000
#
# Networking
@@ -276,6 +314,10 @@ CONFIG_LLC=y
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+
+#
+# QoS and/or fair queueing
+#
# CONFIG_NET_SCHED is not set
CONFIG_NET_CLS_ROUTE=y
@@ -348,6 +390,11 @@ CONFIG_IOSCHED_NOOP=y
CONFIG_IOSCHED_AS=y
CONFIG_IOSCHED_DEADLINE=y
CONFIG_IOSCHED_CFQ=y
+CONFIG_DEFAULT_AS=y
+# CONFIG_DEFAULT_DEADLINE is not set
+# CONFIG_DEFAULT_CFQ is not set
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="anticipatory"
# CONFIG_ATA_OVER_ETH is not set
#
@@ -449,6 +496,7 @@ CONFIG_SCSI_SPI_ATTRS=y
#
# SCSI low-level drivers
#
+# CONFIG_ISCSI_TCP is not set
# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
# CONFIG_SCSI_3W_9XXX is not set
# CONFIG_SCSI_ACARD is not set
@@ -465,10 +513,12 @@ CONFIG_SCSI_SATA_SVW=y
# CONFIG_SCSI_ATA_PIIX is not set
# CONFIG_SCSI_SATA_MV is not set
# CONFIG_SCSI_SATA_NV is not set
-# CONFIG_SCSI_SATA_PROMISE is not set
+# CONFIG_SCSI_PDC_ADMA is not set
# CONFIG_SCSI_SATA_QSTOR is not set
+# CONFIG_SCSI_SATA_PROMISE is not set
# CONFIG_SCSI_SATA_SX4 is not set
# CONFIG_SCSI_SATA_SIL is not set
+# CONFIG_SCSI_SATA_SIL24 is not set
# CONFIG_SCSI_SATA_SIS is not set
# CONFIG_SCSI_SATA_ULI is not set
# CONFIG_SCSI_SATA_VIA is not set
@@ -567,6 +617,9 @@ CONFIG_IEEE1394_RAWIO=y
CONFIG_ADB_PMU=y
CONFIG_PMAC_SMU=y
CONFIG_THERM_PM72=y
+CONFIG_WINDFARM=y
+CONFIG_WINDFARM_PM81=y
+CONFIG_WINDFARM_PM91=y
#
# Network device support
@@ -603,6 +656,7 @@ CONFIG_SUNGEM=y
# CONFIG_NET_TULIP is not set
# CONFIG_HP100 is not set
# CONFIG_NET_PCI is not set
+# CONFIG_FEC_8XX is not set
#
# Ethernet (1000 Mbit)
@@ -768,6 +822,7 @@ CONFIG_MAX_RAW_DEVS=256
# TPM devices
#
# CONFIG_TCG_TPM is not set
+# CONFIG_TELCLOCK is not set
#
# I2C support
@@ -820,6 +875,7 @@ CONFIG_I2C_PMAC_SMU=y
# CONFIG_SENSORS_PCF8591 is not set
# CONFIG_SENSORS_RTC8564 is not set
# CONFIG_SENSORS_MAX6875 is not set
+# CONFIG_RTC_X1205_I2C is not set
# CONFIG_I2C_DEBUG_CORE is not set
# CONFIG_I2C_DEBUG_ALGO is not set
# CONFIG_I2C_DEBUG_BUS is not set
@@ -876,10 +932,9 @@ CONFIG_FB_OF=y
# CONFIG_FB_ASILIANT is not set
# CONFIG_FB_IMSTT is not set
# CONFIG_FB_VGA16 is not set
-# CONFIG_FB_NVIDIA is not set
-CONFIG_FB_RIVA=y
-# CONFIG_FB_RIVA_I2C is not set
-# CONFIG_FB_RIVA_DEBUG is not set
+CONFIG_FB_NVIDIA=y
+CONFIG_FB_NVIDIA_I2C=y
+# CONFIG_FB_RIVA is not set
# CONFIG_FB_MATROX is not set
# CONFIG_FB_RADEON_OLD is not set
CONFIG_FB_RADEON=y
@@ -924,7 +979,96 @@ CONFIG_LCD_DEVICE=y
#
# Sound
#
-# CONFIG_SOUND is not set
+CONFIG_SOUND=m
+
+#
+# Advanced Linux Sound Architecture
+#
+CONFIG_SND=m
+CONFIG_SND_TIMER=m
+CONFIG_SND_PCM=m
+CONFIG_SND_HWDEP=m
+CONFIG_SND_RAWMIDI=m
+CONFIG_SND_SEQUENCER=m
+# CONFIG_SND_SEQ_DUMMY is not set
+CONFIG_SND_OSSEMUL=y
+CONFIG_SND_MIXER_OSS=m
+CONFIG_SND_PCM_OSS=m
+CONFIG_SND_SEQUENCER_OSS=y
+# CONFIG_SND_VERBOSE_PRINTK is not set
+# CONFIG_SND_DEBUG is not set
+CONFIG_SND_GENERIC_DRIVER=y
+
+#
+# Generic devices
+#
+# CONFIG_SND_DUMMY is not set
+# CONFIG_SND_VIRMIDI is not set
+# CONFIG_SND_MTPAV is not set
+# CONFIG_SND_SERIAL_U16550 is not set
+# CONFIG_SND_MPU401 is not set
+
+#
+# PCI devices
+#
+# CONFIG_SND_ALI5451 is not set
+# CONFIG_SND_ATIIXP is not set
+# CONFIG_SND_ATIIXP_MODEM is not set
+# CONFIG_SND_AU8810 is not set
+# CONFIG_SND_AU8820 is not set
+# CONFIG_SND_AU8830 is not set
+# CONFIG_SND_AZT3328 is not set
+# CONFIG_SND_BT87X is not set
+# CONFIG_SND_CS46XX is not set
+# CONFIG_SND_CS4281 is not set
+# CONFIG_SND_EMU10K1 is not set
+# CONFIG_SND_EMU10K1X is not set
+# CONFIG_SND_CA0106 is not set
+# CONFIG_SND_KORG1212 is not set
+# CONFIG_SND_MIXART is not set
+# CONFIG_SND_NM256 is not set
+# CONFIG_SND_RME32 is not set
+# CONFIG_SND_RME96 is not set
+# CONFIG_SND_RME9652 is not set
+# CONFIG_SND_HDSP is not set
+# CONFIG_SND_HDSPM is not set
+# CONFIG_SND_TRIDENT is not set
+# CONFIG_SND_YMFPCI is not set
+# CONFIG_SND_AD1889 is not set
+# CONFIG_SND_ALS4000 is not set
+# CONFIG_SND_CMIPCI is not set
+# CONFIG_SND_ENS1370 is not set
+# CONFIG_SND_ENS1371 is not set
+# CONFIG_SND_ES1938 is not set
+# CONFIG_SND_ES1968 is not set
+# CONFIG_SND_MAESTRO3 is not set
+# CONFIG_SND_FM801 is not set
+# CONFIG_SND_ICE1712 is not set
+# CONFIG_SND_ICE1724 is not set
+# CONFIG_SND_INTEL8X0 is not set
+# CONFIG_SND_INTEL8X0M is not set
+# CONFIG_SND_SONICVIBES is not set
+# CONFIG_SND_VIA82XX is not set
+# CONFIG_SND_VIA82XX_MODEM is not set
+# CONFIG_SND_VX222 is not set
+# CONFIG_SND_HDA_INTEL is not set
+
+#
+# ALSA PowerMac devices
+#
+CONFIG_SND_POWERMAC=m
+CONFIG_SND_POWERMAC_AUTO_DRC=y
+
+#
+# USB devices
+#
+CONFIG_SND_USB_AUDIO=m
+# CONFIG_SND_USB_USX2Y is not set
+
+#
+# Open Sound System
+#
+# CONFIG_SOUND_PRIME is not set
#
# USB support
@@ -958,12 +1102,16 @@ CONFIG_USB_OHCI_LITTLE_ENDIAN=y
#
# USB Device Class drivers
#
-# CONFIG_USB_BLUETOOTH_TTY is not set
+# CONFIG_OBSOLETE_OSS_USB_DRIVER is not set
CONFIG_USB_ACM=m
CONFIG_USB_PRINTER=y
#
-# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' may also be needed; see USB_STORAGE Help for more information
+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+#
+
+#
+# may also be needed; see USB_STORAGE Help for more information
#
CONFIG_USB_STORAGE=y
# CONFIG_USB_STORAGE_DEBUG is not set
@@ -1074,6 +1222,7 @@ CONFIG_USB_SERIAL_KEYSPAN_USA49WLC=y
CONFIG_USB_SERIAL_KLSI=m
CONFIG_USB_SERIAL_KOBIL_SCT=m
CONFIG_USB_SERIAL_MCT_U232=m
+# CONFIG_USB_SERIAL_NOKIA_DKU2 is not set
CONFIG_USB_SERIAL_PL2303=m
# CONFIG_USB_SERIAL_HP4X is not set
CONFIG_USB_SERIAL_SAFE=m
@@ -1311,6 +1460,20 @@ CONFIG_NLS_ISO8859_15=y
CONFIG_NLS_UTF8=y
#
+# Library routines
+#
+CONFIG_CRC_CCITT=m
+# CONFIG_CRC16 is not set
+CONFIG_CRC32=y
+CONFIG_LIBCRC32C=m
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZLIB_DEFLATE=m
+CONFIG_TEXTSEARCH=y
+CONFIG_TEXTSEARCH_KMP=m
+CONFIG_TEXTSEARCH_BM=m
+CONFIG_TEXTSEARCH_FSM=m
+
+#
# Profiling support
#
CONFIG_PROFILING=y
@@ -1331,12 +1494,14 @@ CONFIG_DETECT_SOFTLOCKUP=y
# CONFIG_DEBUG_KOBJECT is not set
# CONFIG_DEBUG_INFO is not set
CONFIG_DEBUG_FS=y
+# CONFIG_DEBUG_VM is not set
+# CONFIG_RCU_TORTURE_TEST is not set
# CONFIG_DEBUG_STACKOVERFLOW is not set
# CONFIG_KPROBES is not set
# CONFIG_DEBUG_STACK_USAGE is not set
# CONFIG_DEBUGGER is not set
-# CONFIG_PPCDBG is not set
CONFIG_IRQSTACKS=y
+CONFIG_BOOTX_TEXT=y
#
# Security options
@@ -1376,17 +1541,3 @@ CONFIG_CRYPTO_TEST=m
#
# Hardware crypto devices
#
-
-#
-# Library routines
-#
-CONFIG_CRC_CCITT=m
-# CONFIG_CRC16 is not set
-CONFIG_CRC32=y
-CONFIG_LIBCRC32C=m
-CONFIG_ZLIB_INFLATE=y
-CONFIG_ZLIB_DEFLATE=m
-CONFIG_TEXTSEARCH=y
-CONFIG_TEXTSEARCH_KMP=m
-CONFIG_TEXTSEARCH_BM=m
-CONFIG_TEXTSEARCH_FSM=m
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index bc5a3689cc0..b7575725199 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -125,6 +125,9 @@ int main(void)
DEFINE(PACASLBCACHE, offsetof(struct paca_struct, slb_cache));
DEFINE(PACASLBCACHEPTR, offsetof(struct paca_struct, slb_cache_ptr));
DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id));
+#ifdef CONFIG_PPC_64K_PAGES
+ DEFINE(PACAPGDIR, offsetof(struct paca_struct, pgdir));
+#endif
#ifdef CONFIG_HUGETLB_PAGE
DEFINE(PACALOWHTLBAREAS, offsetof(struct paca_struct, context.low_htlb_areas));
DEFINE(PACAHIGHHTLBAREAS, offsetof(struct paca_struct, context.high_htlb_areas));
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index b91345fa080..cc4e9eb1c13 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -240,7 +240,7 @@ struct cpu_spec cpu_specs[] = {
.oprofile_model = &op_model_power4,
#endif
},
- { /* Power5 */
+ { /* Power5 GR */
.pvr_mask = 0xffff0000,
.pvr_value = 0x003a0000,
.cpu_name = "POWER5 (gr)",
@@ -255,7 +255,7 @@ struct cpu_spec cpu_specs[] = {
.oprofile_model = &op_model_power4,
#endif
},
- { /* Power5 */
+ { /* Power5 GS */
.pvr_mask = 0xffff0000,
.pvr_value = 0x003b0000,
.cpu_name = "POWER5 (gs)",
@@ -929,6 +929,16 @@ struct cpu_spec cpu_specs[] = {
.icache_bsize = 32,
.dcache_bsize = 32,
},
+ { /* 440SPe Rev. A */
+ .pvr_mask = 0xff000fff,
+ .pvr_value = 0x53000890,
+ .cpu_name = "440SPe Rev. A",
+ .cpu_features = CPU_FTR_SPLIT_ID_CACHE |
+ CPU_FTR_USE_TB,
+ .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ },
#endif /* CONFIG_44x */
#ifdef CONFIG_FSL_BOOKE
{ /* e200z5 */
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 45d81976987..16ab40daa73 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -195,11 +195,11 @@ exception_marker:
#define EX_R12 24
#define EX_R13 32
#define EX_SRR0 40
-#define EX_R3 40 /* SLB miss saves R3, but not SRR0 */
#define EX_DAR 48
-#define EX_LR 48 /* SLB miss saves LR, but not DAR */
#define EX_DSISR 56
#define EX_CCR 60
+#define EX_R3 64
+#define EX_LR 72
#define EXCEPTION_PROLOG_PSERIES(area, label) \
mfspr r13,SPRN_SPRG3; /* get paca address into r13 */ \
@@ -419,17 +419,22 @@ data_access_slb_pSeries:
mtspr SPRN_SPRG1,r13
RUNLATCH_ON(r13)
mfspr r13,SPRN_SPRG3 /* get paca address into r13 */
+ std r3,PACA_EXSLB+EX_R3(r13)
+ mfspr r3,SPRN_DAR
std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */
+ mfcr r9
+#ifdef __DISABLED__
+ /* Keep that around for when we re-implement dynamic VSIDs */
+ cmpdi r3,0
+ bge slb_miss_user_pseries
+#endif /* __DISABLED__ */
std r10,PACA_EXSLB+EX_R10(r13)
std r11,PACA_EXSLB+EX_R11(r13)
std r12,PACA_EXSLB+EX_R12(r13)
- std r3,PACA_EXSLB+EX_R3(r13)
- mfspr r9,SPRN_SPRG1
- std r9,PACA_EXSLB+EX_R13(r13)
- mfcr r9
+ mfspr r10,SPRN_SPRG1
+ std r10,PACA_EXSLB+EX_R13(r13)
mfspr r12,SPRN_SRR1 /* and SRR1 */
- mfspr r3,SPRN_DAR
- b .do_slb_miss /* Rel. branch works in real mode */
+ b .slb_miss_realmode /* Rel. branch works in real mode */
STD_EXCEPTION_PSERIES(0x400, instruction_access)
@@ -440,17 +445,22 @@ instruction_access_slb_pSeries:
mtspr SPRN_SPRG1,r13
RUNLATCH_ON(r13)
mfspr r13,SPRN_SPRG3 /* get paca address into r13 */
+ std r3,PACA_EXSLB+EX_R3(r13)
+ mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */
std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */
+ mfcr r9
+#ifdef __DISABLED__
+ /* Keep that around for when we re-implement dynamic VSIDs */
+ cmpdi r3,0
+ bge slb_miss_user_pseries
+#endif /* __DISABLED__ */
std r10,PACA_EXSLB+EX_R10(r13)
std r11,PACA_EXSLB+EX_R11(r13)
std r12,PACA_EXSLB+EX_R12(r13)
- std r3,PACA_EXSLB+EX_R3(r13)
- mfspr r9,SPRN_SPRG1
- std r9,PACA_EXSLB+EX_R13(r13)
- mfcr r9
+ mfspr r10,SPRN_SPRG1
+ std r10,PACA_EXSLB+EX_R13(r13)
mfspr r12,SPRN_SRR1 /* and SRR1 */
- mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */
- b .do_slb_miss /* Rel. branch works in real mode */
+ b .slb_miss_realmode /* Rel. branch works in real mode */
STD_EXCEPTION_PSERIES(0x500, hardware_interrupt)
STD_EXCEPTION_PSERIES(0x600, alignment)
@@ -509,6 +519,38 @@ _GLOBAL(do_stab_bolted_pSeries)
EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted)
/*
+ * We have some room here; we use it to put the pSeries SLB miss user
+ * trampoline code, so that it sits reasonably far away from
+ * slb_miss_user_common and avoids problems with rfid.
+ *
+ * This is used when the SLB miss handler has to go virtual, which
+ * doesn't happen for now but will once we re-implement dynamic VSIDs
+ * for shared page tables.
+ */
+#ifdef __DISABLED__
+slb_miss_user_pseries:
+ std r10,PACA_EXGEN+EX_R10(r13)
+ std r11,PACA_EXGEN+EX_R11(r13)
+ std r12,PACA_EXGEN+EX_R12(r13)
+ mfspr r10,SPRG1
+ ld r11,PACA_EXSLB+EX_R9(r13)
+ ld r12,PACA_EXSLB+EX_R3(r13)
+ std r10,PACA_EXGEN+EX_R13(r13)
+ std r11,PACA_EXGEN+EX_R9(r13)
+ std r12,PACA_EXGEN+EX_R3(r13)
+ clrrdi r12,r13,32
+ mfmsr r10
+ mfspr r11,SRR0 /* save SRR0 */
+ ori r12,r12,slb_miss_user_common@l /* virt addr of handler */
+ ori r10,r10,MSR_IR|MSR_DR|MSR_RI
+ mtspr SRR0,r12
+ mfspr r12,SRR1 /* and SRR1 */
+ mtspr SRR1,r10
+ rfid
+ b . /* prevent spec. execution */
+#endif /* __DISABLED__ */
+
+/*
* Vectors for the FWNMI option. Share common code.
*/
.globl system_reset_fwnmi
@@ -559,22 +601,59 @@ END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
.globl data_access_slb_iSeries
data_access_slb_iSeries:
mtspr SPRN_SPRG1,r13 /* save r13 */
- EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
+ mfspr r13,SPRN_SPRG3 /* get paca address into r13 */
std r3,PACA_EXSLB+EX_R3(r13)
- ld r12,PACALPPACA+LPPACASRR1(r13)
mfspr r3,SPRN_DAR
- b .do_slb_miss
+ std r9,PACA_EXSLB+EX_R9(r13)
+ mfcr r9
+#ifdef __DISABLED__
+ cmpdi r3,0
+ bge slb_miss_user_iseries
+#endif
+ std r10,PACA_EXSLB+EX_R10(r13)
+ std r11,PACA_EXSLB+EX_R11(r13)
+ std r12,PACA_EXSLB+EX_R12(r13)
+ mfspr r10,SPRN_SPRG1
+ std r10,PACA_EXSLB+EX_R13(r13)
+ ld r12,PACALPPACA+LPPACASRR1(r13);
+ b .slb_miss_realmode
STD_EXCEPTION_ISERIES(0x400, instruction_access, PACA_EXGEN)
.globl instruction_access_slb_iSeries
instruction_access_slb_iSeries:
mtspr SPRN_SPRG1,r13 /* save r13 */
- EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
+ mfspr r13,SPRN_SPRG3 /* get paca address into r13 */
std r3,PACA_EXSLB+EX_R3(r13)
- ld r12,PACALPPACA+LPPACASRR1(r13)
- ld r3,PACALPPACA+LPPACASRR0(r13)
- b .do_slb_miss
+ ld r3,PACALPPACA+LPPACASRR0(r13) /* get SRR0 value */
+ std r9,PACA_EXSLB+EX_R9(r13)
+ mfcr r9
+#ifdef __DISABLED__
+ cmpdi r3,0
+ bge .slb_miss_user_iseries
+#endif
+ std r10,PACA_EXSLB+EX_R10(r13)
+ std r11,PACA_EXSLB+EX_R11(r13)
+ std r12,PACA_EXSLB+EX_R12(r13)
+ mfspr r10,SPRN_SPRG1
+ std r10,PACA_EXSLB+EX_R13(r13)
+ ld r12,PACALPPACA+LPPACASRR1(r13);
+ b .slb_miss_realmode
+
+#ifdef __DISABLED__
+slb_miss_user_iseries:
+ std r10,PACA_EXGEN+EX_R10(r13)
+ std r11,PACA_EXGEN+EX_R11(r13)
+ std r12,PACA_EXGEN+EX_R12(r13)
+ mfspr r10,SPRG1
+ ld r11,PACA_EXSLB+EX_R9(r13)
+ ld r12,PACA_EXSLB+EX_R3(r13)
+ std r10,PACA_EXGEN+EX_R13(r13)
+ std r11,PACA_EXGEN+EX_R9(r13)
+ std r12,PACA_EXGEN+EX_R3(r13)
+ EXCEPTION_PROLOG_ISERIES_2
+ b slb_miss_user_common
+#endif
MASKABLE_EXCEPTION_ISERIES(0x500, hardware_interrupt)
STD_EXCEPTION_ISERIES(0x600, alignment, PACA_EXGEN)
@@ -809,6 +888,126 @@ instruction_access_common:
li r5,0x400
b .do_hash_page /* Try to handle as hpte fault */
+/*
+ * Here is the common SLB miss user handler, used when going to virtual
+ * mode for SLB misses; it is currently not used.
+ */
+#ifdef __DISABLED__
+ .align 7
+ .globl slb_miss_user_common
+slb_miss_user_common:
+ mflr r10
+ std r3,PACA_EXGEN+EX_DAR(r13)
+ stw r9,PACA_EXGEN+EX_CCR(r13)
+ std r10,PACA_EXGEN+EX_LR(r13)
+ std r11,PACA_EXGEN+EX_SRR0(r13)
+ bl .slb_allocate_user
+
+ ld r10,PACA_EXGEN+EX_LR(r13)
+ ld r3,PACA_EXGEN+EX_R3(r13)
+ lwz r9,PACA_EXGEN+EX_CCR(r13)
+ ld r11,PACA_EXGEN+EX_SRR0(r13)
+ mtlr r10
+ beq- slb_miss_fault
+
+ andi. r10,r12,MSR_RI /* check for unrecoverable exception */
+ beq- unrecov_user_slb
+ mfmsr r10
+
+.machine push
+.machine "power4"
+ mtcrf 0x80,r9
+.machine pop
+
+ clrrdi r10,r10,2 /* clear RI before setting SRR0/1 */
+ mtmsrd r10,1
+
+ mtspr SRR0,r11
+ mtspr SRR1,r12
+
+ ld r9,PACA_EXGEN+EX_R9(r13)
+ ld r10,PACA_EXGEN+EX_R10(r13)
+ ld r11,PACA_EXGEN+EX_R11(r13)
+ ld r12,PACA_EXGEN+EX_R12(r13)
+ ld r13,PACA_EXGEN+EX_R13(r13)
+ rfid
+ b .
+
+slb_miss_fault:
+ EXCEPTION_PROLOG_COMMON(0x380, PACA_EXGEN)
+ ld r4,PACA_EXGEN+EX_DAR(r13)
+ li r5,0
+ std r4,_DAR(r1)
+ std r5,_DSISR(r1)
+ b .handle_page_fault
+
+unrecov_user_slb:
+ EXCEPTION_PROLOG_COMMON(0x4200, PACA_EXGEN)
+ DISABLE_INTS
+ bl .save_nvgprs
+1: addi r3,r1,STACK_FRAME_OVERHEAD
+ bl .unrecoverable_exception
+ b 1b
+
+#endif /* __DISABLED__ */
+
+
+/*
+ * r13 points to the PACA, r9 contains the saved CR,
+ * r12 contains the saved SRR1, SRR0 is still ready for return
+ * r3 has the faulting address
+ * r9 - r13 are saved in paca->exslb.
+ * r3 is saved in paca->slb_r3
+ * We assume we aren't going to take any exceptions during this procedure.
+ */
+_GLOBAL(slb_miss_realmode)
+ mflr r10
+
+ stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
+ std r10,PACA_EXSLB+EX_LR(r13) /* save LR */
+
+ bl .slb_allocate_realmode
+
+ /* All done -- return from exception. */
+
+ ld r10,PACA_EXSLB+EX_LR(r13)
+ ld r3,PACA_EXSLB+EX_R3(r13)
+ lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
+#ifdef CONFIG_PPC_ISERIES
+ ld r11,PACALPPACA+LPPACASRR0(r13) /* get SRR0 value */
+#endif /* CONFIG_PPC_ISERIES */
+
+ mtlr r10
+
+ andi. r10,r12,MSR_RI /* check for unrecoverable exception */
+ beq- unrecov_slb
+
+.machine push
+.machine "power4"
+ mtcrf 0x80,r9
+ mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */
+.machine pop
+
+#ifdef CONFIG_PPC_ISERIES
+ mtspr SPRN_SRR0,r11
+ mtspr SPRN_SRR1,r12
+#endif /* CONFIG_PPC_ISERIES */
+ ld r9,PACA_EXSLB+EX_R9(r13)
+ ld r10,PACA_EXSLB+EX_R10(r13)
+ ld r11,PACA_EXSLB+EX_R11(r13)
+ ld r12,PACA_EXSLB+EX_R12(r13)
+ ld r13,PACA_EXSLB+EX_R13(r13)
+ rfid
+ b . /* prevent speculative execution */
+
+unrecov_slb:
+ EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
+ DISABLE_INTS
+ bl .save_nvgprs
+1: addi r3,r1,STACK_FRAME_OVERHEAD
+ bl .unrecoverable_exception
+ b 1b
+
.align 7
.globl hardware_interrupt_common
.globl hardware_interrupt_entry
@@ -1139,62 +1338,6 @@ _GLOBAL(do_stab_bolted)
b . /* prevent speculative execution */
/*
- * r13 points to the PACA, r9 contains the saved CR,
- * r11 and r12 contain the saved SRR0 and SRR1.
- * r3 has the faulting address
- * r9 - r13 are saved in paca->exslb.
- * r3 is saved in paca->slb_r3
- * We assume we aren't going to take any exceptions during this procedure.
- */
-_GLOBAL(do_slb_miss)
- mflr r10
-
- stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
- std r10,PACA_EXSLB+EX_LR(r13) /* save LR */
-
- bl .slb_allocate /* handle it */
-
- /* All done -- return from exception. */
-
- ld r10,PACA_EXSLB+EX_LR(r13)
- ld r3,PACA_EXSLB+EX_R3(r13)
- lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
-#ifdef CONFIG_PPC_ISERIES
- ld r11,PACALPPACA+LPPACASRR0(r13) /* get SRR0 value */
-#endif /* CONFIG_PPC_ISERIES */
-
- mtlr r10
-
- andi. r10,r12,MSR_RI /* check for unrecoverable exception */
- beq- unrecov_slb
-
-.machine push
-.machine "power4"
- mtcrf 0x80,r9
- mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */
-.machine pop
-
-#ifdef CONFIG_PPC_ISERIES
- mtspr SPRN_SRR0,r11
- mtspr SPRN_SRR1,r12
-#endif /* CONFIG_PPC_ISERIES */
- ld r9,PACA_EXSLB+EX_R9(r13)
- ld r10,PACA_EXSLB+EX_R10(r13)
- ld r11,PACA_EXSLB+EX_R11(r13)
- ld r12,PACA_EXSLB+EX_R12(r13)
- ld r13,PACA_EXSLB+EX_R13(r13)
- rfid
- b . /* prevent speculative execution */
-
-unrecov_slb:
- EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
- DISABLE_INTS
- bl .save_nvgprs
-1: addi r3,r1,STACK_FRAME_OVERHEAD
- bl .unrecoverable_exception
- b 1b
-
-/*
* Space for CPU0's segment table.
*
* On iSeries, the hypervisor must fill in at least one entry before
@@ -1569,7 +1712,10 @@ _GLOBAL(__secondary_start)
#endif
/* Initialize the first segment table (or SLB) entry */
ld r3,PACASTABVIRT(r13) /* get addr of segment table */
+BEGIN_FTR_SECTION
bl .stab_initialize
+END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
+ bl .slb_initialize
/* Initialize the kernel stack. Just a repeat for iSeries. */
LOADADDR(r3,current_set)
diff --git a/arch/powerpc/kernel/lparmap.c b/arch/powerpc/kernel/lparmap.c
index eded971d1bf..5a05a797485 100644
--- a/arch/powerpc/kernel/lparmap.c
+++ b/arch/powerpc/kernel/lparmap.c
@@ -25,7 +25,7 @@ const struct LparMap __attribute__((__section__(".text"))) xLparMap = {
.xRanges = {
{ .xPages = HvPagesToMap,
.xOffset = 0,
- .xVPN = KERNEL_VSID(KERNELBASE) << (SID_SHIFT - PAGE_SHIFT),
+ .xVPN = KERNEL_VSID(KERNELBASE) << (SID_SHIFT - HW_PAGE_SHIFT),
},
},
};
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index b3e95ff0dba..ae1433da09b 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -604,6 +604,76 @@ _GLOBAL(real_writeb)
#endif /* defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE) */
/*
+ * SCOM access functions for 970 (FX only for now)
+ *
+ * unsigned long scom970_read(unsigned int address);
+ * void scom970_write(unsigned int address, unsigned long value);
+ *
+ * The address passed in is the 24-bit register address. This code
+ * is 970-specific and does not check the status bits, so you should
+ * know what you are doing.
+ */
+_GLOBAL(scom970_read)
+ /* interrupts off */
+ mfmsr r4
+ ori r0,r4,MSR_EE
+ xori r0,r0,MSR_EE
+ mtmsrd r0,1
+
+ /* rotate the 24-bit SCOM address 8 bits left and mask out its low 8 bits
+ * (including parity). On current CPUs they must be 0'd,
+ * and finally OR in the RW bit
+ */
+ rlwinm r3,r3,8,0,15
+ ori r3,r3,0x8000
+
+ /* do the actual scom read */
+ sync
+ mtspr SPRN_SCOMC,r3
+ isync
+ mfspr r3,SPRN_SCOMD
+ isync
+ mfspr r0,SPRN_SCOMC
+ isync
+
+ /* XXX: fixup result on some buggy 970's (ouch ! we lost a bit, bah
+ * that's the best we can do). Not implemented yet as we don't use
+ * the scom on any of the bogus CPUs yet, but may have to be done
+ * ultimately
+ */
+
+ /* restore interrupts */
+ mtmsrd r4,1
+ blr
+
+
+_GLOBAL(scom970_write)
+ /* interrupts off */
+ mfmsr r5
+ ori r0,r5,MSR_EE
+ xori r0,r0,MSR_EE
+ mtmsrd r0,1
+
+ /* rotate the 24-bit SCOM address 8 bits left and mask out its low 8 bits
+ * (including parity). On current CPUs they must be 0'd.
+ */
+
+ rlwinm r3,r3,8,0,15
+
+ sync
+ mtspr SPRN_SCOMD,r4 /* write data */
+ isync
+ mtspr SPRN_SCOMC,r3 /* write command */
+ isync
+ mfspr 3,SPRN_SCOMC
+ isync
+
+ /* restore interrupts */
+ mtmsrd r5,1
+ blr
+
+
+/*
* Create a kernel thread
* kernel_thread(fn, arg, flags)
*/
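
The comment block above documents the new 970 SCOM accessors added to misc_64.S. As a minimal sketch of how they might be called from C, using only the prototypes given in that comment (the SCOM register address 0x280001, the helper name and the debug print are illustrative assumptions, not part of this patch):

#include <linux/kernel.h>

/* Prototypes as documented above (misc_64.S); assumed to be declared in a header */
extern unsigned long scom970_read(unsigned int address);
extern void scom970_write(unsigned int address, unsigned long value);

static void example_scom_poke(void)
{
	/* 0x280001 is a made-up 24-bit SCOM register address, for illustration only */
	unsigned long val = scom970_read(0x280001);

	/* The accessors do not check status bits; the caller decides what is valid */
	printk(KERN_DEBUG "SCOM 0x280001 = 0x%lx\n", val);
	scom970_write(0x280001, val | 0x1);
}
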
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 96843211cc5..de69fb37c73 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -46,10 +46,10 @@
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/prom.h>
+#include <asm/machdep.h>
#ifdef CONFIG_PPC64
#include <asm/firmware.h>
#include <asm/time.h>
-#include <asm/machdep.h>
#endif
extern unsigned long _get_SP(void);
@@ -203,10 +203,8 @@ int dump_spe(struct pt_regs *regs, elf_vrregset_t *evrregs)
int set_dabr(unsigned long dabr)
{
-#ifdef CONFIG_PPC64
if (ppc_md.set_dabr)
return ppc_md.set_dabr(dabr);
-#endif
mtspr(SPRN_DABR, dabr);
return 0;
@@ -554,12 +552,10 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
#ifdef CONFIG_PPC64
if (cpu_has_feature(CPU_FTR_SLB)) {
unsigned long sp_vsid = get_kernel_vsid(sp);
+ unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;
sp_vsid <<= SLB_VSID_SHIFT;
- sp_vsid |= SLB_VSID_KERNEL;
- if (cpu_has_feature(CPU_FTR_16M_PAGE))
- sp_vsid |= SLB_VSID_L;
-
+ sp_vsid |= SLB_VSID_KERNEL | llp;
p->thread.ksp_vsid = sp_vsid;
}
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index eec2da69550..f645adb5753 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -724,10 +724,10 @@ static inline char *find_flat_dt_string(u32 offset)
* used to extract the memory informations at boot before we can
* unflatten the tree
*/
-static int __init scan_flat_dt(int (*it)(unsigned long node,
- const char *uname, int depth,
- void *data),
- void *data)
+int __init of_scan_flat_dt(int (*it)(unsigned long node,
+ const char *uname, int depth,
+ void *data),
+ void *data)
{
unsigned long p = ((unsigned long)initial_boot_params) +
initial_boot_params->off_dt_struct;
@@ -784,8 +784,8 @@ static int __init scan_flat_dt(int (*it)(unsigned long node,
* This function can be used within scan_flattened_dt callback to get
* access to properties
*/
-static void* __init get_flat_dt_prop(unsigned long node, const char *name,
- unsigned long *size)
+void* __init of_get_flat_dt_prop(unsigned long node, const char *name,
+ unsigned long *size)
{
unsigned long p = node;
@@ -1087,7 +1087,7 @@ void __init unflatten_device_tree(void)
static int __init early_init_dt_scan_cpus(unsigned long node,
const char *uname, int depth, void *data)
{
- char *type = get_flat_dt_prop(node, "device_type", NULL);
+ char *type = of_get_flat_dt_prop(node, "device_type", NULL);
u32 *prop;
unsigned long size = 0;
@@ -1095,19 +1095,6 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
if (type == NULL || strcmp(type, "cpu") != 0)
return 0;
-#ifdef CONFIG_PPC_PSERIES
- /* On LPAR, look for the first ibm,pft-size property for the hash table size
- */
- if (systemcfg->platform == PLATFORM_PSERIES_LPAR && ppc64_pft_size == 0) {
- u32 *pft_size;
- pft_size = get_flat_dt_prop(node, "ibm,pft-size", NULL);
- if (pft_size != NULL) {
- /* pft_size[0] is the NUMA CEC cookie */
- ppc64_pft_size = pft_size[1];
- }
- }
-#endif
-
boot_cpuid = 0;
boot_cpuid_phys = 0;
if (initial_boot_params && initial_boot_params->version >= 2) {
@@ -1117,8 +1104,9 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
boot_cpuid_phys = initial_boot_params->boot_cpuid_phys;
} else {
/* Check if it's the boot-cpu, set it's hw index now */
- if (get_flat_dt_prop(node, "linux,boot-cpu", NULL) != NULL) {
- prop = get_flat_dt_prop(node, "reg", NULL);
+ if (of_get_flat_dt_prop(node,
+ "linux,boot-cpu", NULL) != NULL) {
+ prop = of_get_flat_dt_prop(node, "reg", NULL);
if (prop != NULL)
boot_cpuid_phys = *prop;
}
@@ -1127,14 +1115,14 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
#ifdef CONFIG_ALTIVEC
/* Check if we have a VMX and eventually update CPU features */
- prop = (u32 *)get_flat_dt_prop(node, "ibm,vmx", &size);
+ prop = (u32 *)of_get_flat_dt_prop(node, "ibm,vmx", &size);
if (prop && (*prop) > 0) {
cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;
}
/* Same goes for Apple's "altivec" property */
- prop = (u32 *)get_flat_dt_prop(node, "altivec", NULL);
+ prop = (u32 *)of_get_flat_dt_prop(node, "altivec", NULL);
if (prop) {
cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;
@@ -1147,7 +1135,7 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
* this by looking at the size of the ibm,ppc-interrupt-server#s
* property
*/
- prop = (u32 *)get_flat_dt_prop(node, "ibm,ppc-interrupt-server#s",
+ prop = (u32 *)of_get_flat_dt_prop(node, "ibm,ppc-interrupt-server#s",
&size);
cur_cpu_spec->cpu_features &= ~CPU_FTR_SMT;
if (prop && ((size / sizeof(u32)) > 1))
@@ -1170,7 +1158,7 @@ static int __init early_init_dt_scan_chosen(unsigned long node,
return 0;
/* get platform type */
- prop = (u32 *)get_flat_dt_prop(node, "linux,platform", NULL);
+ prop = (u32 *)of_get_flat_dt_prop(node, "linux,platform", NULL);
if (prop == NULL)
return 0;
#ifdef CONFIG_PPC64
@@ -1183,21 +1171,21 @@ static int __init early_init_dt_scan_chosen(unsigned long node,
#ifdef CONFIG_PPC64
/* check if iommu is forced on or off */
- if (get_flat_dt_prop(node, "linux,iommu-off", NULL) != NULL)
+ if (of_get_flat_dt_prop(node, "linux,iommu-off", NULL) != NULL)
iommu_is_off = 1;
- if (get_flat_dt_prop(node, "linux,iommu-force-on", NULL) != NULL)
+ if (of_get_flat_dt_prop(node, "linux,iommu-force-on", NULL) != NULL)
iommu_force_on = 1;
#endif
- lprop = get_flat_dt_prop(node, "linux,memory-limit", NULL);
+ lprop = of_get_flat_dt_prop(node, "linux,memory-limit", NULL);
if (lprop)
memory_limit = *lprop;
#ifdef CONFIG_PPC64
- lprop = get_flat_dt_prop(node, "linux,tce-alloc-start", NULL);
+ lprop = of_get_flat_dt_prop(node, "linux,tce-alloc-start", NULL);
if (lprop)
tce_alloc_start = *lprop;
- lprop = get_flat_dt_prop(node, "linux,tce-alloc-end", NULL);
+ lprop = of_get_flat_dt_prop(node, "linux,tce-alloc-end", NULL);
if (lprop)
tce_alloc_end = *lprop;
#endif
@@ -1209,9 +1197,9 @@ static int __init early_init_dt_scan_chosen(unsigned long node,
{
u64 *basep, *entryp;
- basep = get_flat_dt_prop(node, "linux,rtas-base", NULL);
- entryp = get_flat_dt_prop(node, "linux,rtas-entry", NULL);
- prop = get_flat_dt_prop(node, "linux,rtas-size", NULL);
+ basep = of_get_flat_dt_prop(node, "linux,rtas-base", NULL);
+ entryp = of_get_flat_dt_prop(node, "linux,rtas-entry", NULL);
+ prop = of_get_flat_dt_prop(node, "linux,rtas-size", NULL);
if (basep && entryp && prop) {
rtas.base = *basep;
rtas.entry = *entryp;
@@ -1232,11 +1220,11 @@ static int __init early_init_dt_scan_root(unsigned long node,
if (depth != 0)
return 0;
- prop = get_flat_dt_prop(node, "#size-cells", NULL);
+ prop = of_get_flat_dt_prop(node, "#size-cells", NULL);
dt_root_size_cells = (prop == NULL) ? 1 : *prop;
DBG("dt_root_size_cells = %x\n", dt_root_size_cells);
- prop = get_flat_dt_prop(node, "#address-cells", NULL);
+ prop = of_get_flat_dt_prop(node, "#address-cells", NULL);
dt_root_addr_cells = (prop == NULL) ? 2 : *prop;
DBG("dt_root_addr_cells = %x\n", dt_root_addr_cells);
@@ -1271,7 +1259,7 @@ static unsigned long __init dt_mem_next_cell(int s, cell_t **cellp)
static int __init early_init_dt_scan_memory(unsigned long node,
const char *uname, int depth, void *data)
{
- char *type = get_flat_dt_prop(node, "device_type", NULL);
+ char *type = of_get_flat_dt_prop(node, "device_type", NULL);
cell_t *reg, *endp;
unsigned long l;
@@ -1279,7 +1267,7 @@ static int __init early_init_dt_scan_memory(unsigned long node,
if (type == NULL || strcmp(type, "memory") != 0)
return 0;
- reg = (cell_t *)get_flat_dt_prop(node, "reg", &l);
+ reg = (cell_t *)of_get_flat_dt_prop(node, "reg", &l);
if (reg == NULL)
return 0;
@@ -1343,12 +1331,12 @@ void __init early_init_devtree(void *params)
* device-tree, including the platform type, initrd location and
* size, TCE reserve, and more ...
*/
- scan_flat_dt(early_init_dt_scan_chosen, NULL);
+ of_scan_flat_dt(early_init_dt_scan_chosen, NULL);
/* Scan memory nodes and rebuild LMBs */
lmb_init();
- scan_flat_dt(early_init_dt_scan_root, NULL);
- scan_flat_dt(early_init_dt_scan_memory, NULL);
+ of_scan_flat_dt(early_init_dt_scan_root, NULL);
+ of_scan_flat_dt(early_init_dt_scan_memory, NULL);
lmb_enforce_memory_limit(memory_limit);
lmb_analyze();
#ifdef CONFIG_PPC64
@@ -1363,10 +1351,10 @@ void __init early_init_devtree(void *params)
DBG("Scanning CPUs ...\n");
- /* Retreive hash table size from flattened tree plus other
- * CPU related informations (altivec support, boot CPU ID, ...)
+ /* Retrieve CPU-related information from the flat tree
+ * (altivec support, boot CPU ID, ...)
*/
- scan_flat_dt(early_init_dt_scan_cpus, NULL);
+ of_scan_flat_dt(early_init_dt_scan_cpus, NULL);
DBG(" <- early_init_devtree()\n");
}
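
The hunks above rename the flat device-tree walkers to of_scan_flat_dt() and of_get_flat_dt_prop() and drop the static qualifier so that other early-boot code can use them. A minimal caller sketch using the signatures visible in this diff (the callback name, the property looked up and the early_probe() wrapper are illustrative assumptions; a non-zero return from the callback stops the scan):

#include <linux/init.h>
#include <linux/string.h>

extern int of_scan_flat_dt(int (*it)(unsigned long node, const char *uname,
				     int depth, void *data),
			   void *data);
extern void *of_get_flat_dt_prop(unsigned long node, const char *name,
				 unsigned long *size);

/* Callback: remember the first "linux,example" property found on a depth-1 node */
static int __init example_dt_scan(unsigned long node, const char *uname,
				  int depth, void *data)
{
	unsigned long size;
	void *prop;

	if (depth != 1)
		return 0;			/* keep scanning */

	prop = of_get_flat_dt_prop(node, "linux,example", &size);
	if (prop == NULL)
		return 0;			/* keep scanning */

	*(void **)data = prop;
	return 1;				/* stop the scan */
}

static void __init early_probe(void)
{
	void *prop = NULL;

	of_scan_flat_dt(example_dt_scan, &prop);
}
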
@@ -1986,14 +1974,29 @@ EXPORT_SYMBOL(get_property);
/*
* Add a property to a node
*/
-void prom_add_property(struct device_node* np, struct property* prop)
+int prom_add_property(struct device_node* np, struct property* prop)
{
- struct property **next = &np->properties;
+ struct property **next;
prop->next = NULL;
- while (*next)
+ write_lock(&devtree_lock);
+ next = &np->properties;
+ while (*next) {
+ if (strcmp(prop->name, (*next)->name) == 0) {
+ /* duplicate ! don't insert it */
+ write_unlock(&devtree_lock);
+ return -1;
+ }
next = &(*next)->next;
+ }
*next = prop;
+ write_unlock(&devtree_lock);
+
+ /* try to add to proc as well if it was initialized */
+ if (np->pde)
+ proc_device_tree_add_prop(np->pde, prop);
+
+ return 0;
}
/* I quickly hacked that one, check against spec ! */
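
prom_add_property() now takes the devtree lock, refuses duplicate property names (returning -1) and mirrors the new property into /proc when the node already has a procfs entry. A minimal caller sketch based on the signature in the hunk above (the property name, its value and the warning message are illustrative assumptions):

#include <linux/kernel.h>
#include <linux/types.h>
#include <asm/prom.h>

static u32 example_val = 1;

static struct property example_prop = {
	.name	= "linux,example",		/* made-up property name */
	.length	= sizeof(example_val),
	.value	= (void *)&example_val,
};

static void example_add_prop(struct device_node *np)
{
	/* Returns non-zero if a property of the same name already exists */
	if (prom_add_property(np, &example_prop) != 0)
		printk(KERN_WARNING "linux,example already present on %s\n",
		       np->full_name);
}
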
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index c758b6624d7..6dc33d19fc2 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -403,19 +403,19 @@ static int __init prom_next_node(phandle *nodep)
}
}
-static int __init prom_getprop(phandle node, const char *pname,
+static int inline prom_getprop(phandle node, const char *pname,
void *value, size_t valuelen)
{
return call_prom("getprop", 4, 1, node, ADDR(pname),
(u32)(unsigned long) value, (u32) valuelen);
}
-static int __init prom_getproplen(phandle node, const char *pname)
+static int inline prom_getproplen(phandle node, const char *pname)
{
return call_prom("getproplen", 2, 1, node, ADDR(pname));
}
-static int __init prom_setprop(phandle node, const char *pname,
+static int inline prom_setprop(phandle node, const char *pname,
void *value, size_t valuelen)
{
return call_prom("setprop", 4, 1, node, ADDR(pname),
@@ -1408,8 +1408,9 @@ static int __init prom_find_machine_type(void)
struct prom_t *_prom = &RELOC(prom);
char compat[256];
int len, i = 0;
+#ifdef CONFIG_PPC64
phandle rtas;
-
+#endif
len = prom_getprop(_prom->root, "compatible",
compat, sizeof(compat)-1);
if (len > 0) {
@@ -1872,7 +1873,7 @@ static void __init fixup_device_tree(void)
if (prom_getprop(u3, "device-rev", &u3_rev, sizeof(u3_rev))
== PROM_ERROR)
return;
- if (u3_rev != 0x35 && u3_rev != 0x37)
+ if (u3_rev < 0x35 || u3_rev > 0x39)
return;
/* does it need fixup ? */
if (prom_getproplen(i2c, "interrupts") > 0)
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index 568ea335d61..3d2abd95c7a 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -248,46 +248,10 @@ void ptrace_disable(struct task_struct *child)
clear_single_step(child);
}
-long sys_ptrace(long request, long pid, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
- struct task_struct *child;
int ret = -EPERM;
- lock_kernel();
- if (request == PTRACE_TRACEME) {
- /* are we already being traced? */
- if (current->ptrace & PT_PTRACED)
- goto out;
- ret = security_ptrace(current->parent, current);
- if (ret)
- goto out;
- /* set the ptrace bit in the process flags. */
- current->ptrace |= PT_PTRACED;
- ret = 0;
- goto out;
- }
- ret = -ESRCH;
- read_lock(&tasklist_lock);
- child = find_task_by_pid(pid);
- if (child)
- get_task_struct(child);
- read_unlock(&tasklist_lock);
- if (!child)
- goto out;
-
- ret = -EPERM;
- if (pid == 1) /* you may not mess with init */
- goto out_tsk;
-
- if (request == PTRACE_ATTACH) {
- ret = ptrace_attach(child);
- goto out_tsk;
- }
-
- ret = ptrace_check_attach(child, request == PTRACE_KILL);
- if (ret < 0)
- goto out_tsk;
-
switch (request) {
/* when I and D space are separate, these will need to be fixed. */
case PTRACE_PEEKTEXT: /* read word at location addr. */
@@ -540,10 +504,7 @@ long sys_ptrace(long request, long pid, long addr, long data)
ret = ptrace_request(child, request, addr, data);
break;
}
-out_tsk:
- put_task_struct(child);
-out:
- unlock_kernel();
+
return ret;
}
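
With this change the powerpc entry point becomes arch_ptrace(), and the boilerplate deleted above (PTRACE_TRACEME handling, task lookup, attach and attach-check) is expected to live in the generic sys_ptrace() wrapper. A rough sketch of that flow, reconstructed from the removed lines; the helper names ptrace_traceme() and ptrace_get_task_struct() are assumptions about the common code and are not shown in this diff:

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/sched.h>
#include <linux/smp_lock.h>

/* Illustrative sketch only; not the actual kernel/ptrace.c implementation */
long sys_ptrace_sketch(long request, long pid, long addr, long data)
{
	struct task_struct *child;
	long ret;

	lock_kernel();
	if (request == PTRACE_TRACEME) {
		ret = ptrace_traceme();			/* assumed helper */
		goto out;
	}

	child = ptrace_get_task_struct(pid);		/* assumed helper */
	if (IS_ERR(child)) {
		ret = PTR_ERR(child);
		goto out;
	}

	if (request == PTRACE_ATTACH) {
		ret = ptrace_attach(child);
	} else {
		ret = ptrace_check_attach(child, request == PTRACE_KILL);
		if (ret >= 0)
			ret = arch_ptrace(child, request, addr, data);
	}
	put_task_struct(child);
out:
	unlock_kernel();
	return ret;
}
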
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index b7fc2d88495..9d4e07f6f1e 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -17,6 +17,7 @@
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/init.h>
+#include <linux/delay.h>
#include <asm/prom.h>
#include <asm/rtas.h>
@@ -83,7 +84,7 @@ void call_rtas_display_status_delay(unsigned char c)
while (width-- > 0)
call_rtas_display_status(' ');
width = 16;
- udelay(500000);
+ mdelay(500);
pending_newline = 1;
} else {
if (pending_newline) {
@@ -608,7 +609,6 @@ asmlinkage int ppc_rtas(struct rtas_args __user *uargs)
return 0;
}
-#ifdef CONFIG_SMP
/* This version can't take the spinlock, because it never returns */
struct rtas_args rtas_stop_self_args = {
@@ -633,7 +633,6 @@ void rtas_stop_self(void)
panic("Alas, I survived.\n");
}
-#endif
/*
* Call early during boot, before mem init or bootmem, to retreive the RTAS
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index d43fa8c0e5a..e22856ecb5a 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -405,6 +405,46 @@ static int __init set_preferred_console(void)
console_initcall(set_preferred_console);
#endif /* CONFIG_PPC_MULTIPLATFORM */
+void __init check_for_initrd(void)
+{
+#ifdef CONFIG_BLK_DEV_INITRD
+ unsigned long *prop;
+
+ DBG(" -> check_for_initrd()\n");
+
+ if (of_chosen) {
+ prop = (unsigned long *)get_property(of_chosen,
+ "linux,initrd-start", NULL);
+ if (prop != NULL) {
+ initrd_start = (unsigned long)__va(*prop);
+ prop = (unsigned long *)get_property(of_chosen,
+ "linux,initrd-end", NULL);
+ if (prop != NULL) {
+ initrd_end = (unsigned long)__va(*prop);
+ initrd_below_start_ok = 1;
+ } else
+ initrd_start = 0;
+ }
+ }
+
+ /* If we were passed an initrd, set the ROOT_DEV properly if the values
+ * look sensible. If not, clear initrd reference.
+ */
+ if (initrd_start >= KERNELBASE && initrd_end >= KERNELBASE &&
+ initrd_end > initrd_start)
+ ROOT_DEV = Root_RAM0;
+ else {
+ printk("Bogus initrd %08lx %08lx\n", initrd_start, initrd_end);
+ initrd_start = initrd_end = 0;
+ }
+
+ if (initrd_start)
+ printk("Found initrd at 0x%lx:0x%lx\n", initrd_start, initrd_end);
+
+ DBG(" <- check_for_initrd()\n");
+#endif /* CONFIG_BLK_DEV_INITRD */
+}
+
#ifdef CONFIG_SMP
/**
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index b45eedbb4b3..3af2631e3fa 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -286,6 +286,7 @@ void __init setup_arch(char **cmdline_p)
loops_per_jiffy = 500000000 / HZ;
unflatten_device_tree();
+ check_for_initrd();
finish_device_tree();
smp_setup_cpu_maps();
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 6b52cce872b..0471e843b6c 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -41,7 +41,6 @@
#include <asm/elf.h>
#include <asm/machdep.h>
#include <asm/paca.h>
-#include <asm/ppcdebug.h>
#include <asm/time.h>
#include <asm/cputable.h>
#include <asm/sections.h>
@@ -60,6 +59,7 @@
#include <asm/firmware.h>
#include <asm/systemcfg.h>
#include <asm/xmon.h>
+#include <asm/udbg.h>
#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
@@ -244,12 +244,6 @@ void __init early_setup(unsigned long dt_ptr)
DBG(" -> early_setup()\n");
/*
- * Fill the default DBG level (do we want to keep
- * that old mecanism around forever ?)
- */
- ppcdbg_initialize();
-
- /*
* Do early initializations using the flattened device
* tree, like retreiving the physical memory map or
* calculating/retreiving the hash table size
@@ -277,16 +271,21 @@ void __init early_setup(unsigned long dt_ptr)
DBG("Found, Initializing memory management...\n");
/*
- * Initialize stab / SLB management
+ * Initialize the MMU Hash table and create the linear mapping
+ * of memory. Has to be done before stab/slb initialization as
+ * this is currently where the page size encoding is obtained
*/
- if (!firmware_has_feature(FW_FEATURE_ISERIES))
- stab_initialize(lpaca->stab_real);
+ htab_initialize();
/*
- * Initialize the MMU Hash table and create the linear mapping
- * of memory
+ * Initialize stab / SLB management except on iSeries
*/
- htab_initialize();
+ if (!firmware_has_feature(FW_FEATURE_ISERIES)) {
+ if (cpu_has_feature(CPU_FTR_SLB))
+ slb_initialize();
+ else
+ stab_initialize(lpaca->stab_real);
+ }
DBG(" <- early_setup()\n");
}
@@ -396,43 +395,6 @@ static void __init initialize_cache_info(void)
DBG(" <- initialize_cache_info()\n");
}
-static void __init check_for_initrd(void)
-{
-#ifdef CONFIG_BLK_DEV_INITRD
- u64 *prop;
-
- DBG(" -> check_for_initrd()\n");
-
- if (of_chosen) {
- prop = (u64 *)get_property(of_chosen,
- "linux,initrd-start", NULL);
- if (prop != NULL) {
- initrd_start = (unsigned long)__va(*prop);
- prop = (u64 *)get_property(of_chosen,
- "linux,initrd-end", NULL);
- if (prop != NULL) {
- initrd_end = (unsigned long)__va(*prop);
- initrd_below_start_ok = 1;
- } else
- initrd_start = 0;
- }
- }
-
- /* If we were passed an initrd, set the ROOT_DEV properly if the values
- * look sensible. If not, clear initrd reference.
- */
- if (initrd_start >= KERNELBASE && initrd_end >= KERNELBASE &&
- initrd_end > initrd_start)
- ROOT_DEV = Root_RAM0;
- else
- initrd_start = initrd_end = 0;
-
- if (initrd_start)
- printk("Found initrd at 0x%lx:0x%lx\n", initrd_start, initrd_end);
-
- DBG(" <- check_for_initrd()\n");
-#endif /* CONFIG_BLK_DEV_INITRD */
-}
/*
* Do some initial setup of the system. The parameters are those which
@@ -516,7 +478,6 @@ void __init setup_system(void)
printk("-----------------------------------------------------\n");
printk("ppc64_pft_size = 0x%lx\n", ppc64_pft_size);
- printk("ppc64_debug_switch = 0x%lx\n", ppc64_debug_switch);
printk("ppc64_interrupt_controller = 0x%ld\n", ppc64_interrupt_controller);
printk("systemcfg = 0x%p\n", systemcfg);
printk("systemcfg->platform = 0x%x\n", systemcfg->platform);
@@ -552,10 +513,12 @@ static void __init irqstack_early_init(void)
* SLB misses on them.
*/
for_each_cpu(i) {
- softirq_ctx[i] = (struct thread_info *)__va(lmb_alloc_base(THREAD_SIZE,
- THREAD_SIZE, 0x10000000));
- hardirq_ctx[i] = (struct thread_info *)__va(lmb_alloc_base(THREAD_SIZE,
- THREAD_SIZE, 0x10000000));
+ softirq_ctx[i] = (struct thread_info *)
+ __va(lmb_alloc_base(THREAD_SIZE,
+ THREAD_SIZE, 0x10000000));
+ hardirq_ctx[i] = (struct thread_info *)
+ __va(lmb_alloc_base(THREAD_SIZE,
+ THREAD_SIZE, 0x10000000));
}
}
#else
@@ -583,8 +546,8 @@ static void __init emergency_stack_init(void)
limit = min(0x10000000UL, lmb.rmo_size);
for_each_cpu(i)
- paca[i].emergency_sp = __va(lmb_alloc_base(PAGE_SIZE, 128,
- limit)) + PAGE_SIZE;
+ paca[i].emergency_sp =
+ __va(lmb_alloc_base(HW_PAGE_SIZE, 128, limit)) + HW_PAGE_SIZE;
}
/*
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 876c57c1136..081d931eae4 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -44,7 +44,6 @@
#include <asm/cacheflush.h>
#ifdef CONFIG_PPC64
#include "ppc32.h"
-#include <asm/ppcdebug.h>
#include <asm/unistd.h>
#include <asm/vdso.h>
#else
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index ec9d0984b6a..58194e15071 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -33,7 +33,6 @@
#include <asm/ucontext.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
-#include <asm/ppcdebug.h>
#include <asm/unistd.h>
#include <asm/cacheflush.h>
#include <asm/vdso.h>
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 1794a694a92..5c330c3366e 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -40,7 +40,6 @@
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/time.h>
-#include <asm/xmon.h>
#include <asm/machdep.h>
#include <asm/cputable.h>
#include <asm/system.h>
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 6996a593dcb..a6282b625b4 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -61,6 +61,7 @@
#include <asm/prom.h>
#include <asm/irq.h>
#include <asm/div64.h>
+#include <asm/smp.h>
#ifdef CONFIG_PPC64
#include <asm/systemcfg.h>
#include <asm/firmware.h>
@@ -69,6 +70,7 @@
#include <asm/iseries/it_lp_queue.h>
#include <asm/iseries/hv_call_xm.h>
#endif
+#include <asm/smp.h>
/* keep track of when we need to update the rtc */
time_t last_rtc_update;
@@ -118,10 +120,6 @@ static unsigned adjusting_time = 0;
unsigned long ppc_proc_freq;
unsigned long ppc_tb_freq;
-#ifdef CONFIG_PPC32 /* XXX for now */
-#define boot_cpuid 0
-#endif
-
u64 tb_last_jiffy __cacheline_aligned_in_smp;
unsigned long tb_last_stamp;
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 07e5ee40b87..32f215825e8 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -39,7 +39,6 @@
#include <asm/io.h>
#include <asm/machdep.h>
#include <asm/rtas.h>
-#include <asm/xmon.h>
#include <asm/pmc.h>
#ifdef CONFIG_PPC32
#include <asm/reg.h>
@@ -748,22 +747,12 @@ static int check_bug_trap(struct pt_regs *regs)
return 0;
if (bug->line & BUG_WARNING_TRAP) {
/* this is a WARN_ON rather than BUG/BUG_ON */
-#ifdef CONFIG_XMON
- xmon_printf(KERN_ERR "Badness in %s at %s:%ld\n",
- bug->function, bug->file,
- bug->line & ~BUG_WARNING_TRAP);
-#endif /* CONFIG_XMON */
printk(KERN_ERR "Badness in %s at %s:%ld\n",
bug->function, bug->file,
bug->line & ~BUG_WARNING_TRAP);
dump_stack();
return 1;
}
-#ifdef CONFIG_XMON
- xmon_printf(KERN_CRIT "kernel BUG in %s at %s:%ld!\n",
- bug->function, bug->file, bug->line);
- xmon(regs);
-#endif /* CONFIG_XMON */
printk(KERN_CRIT "kernel BUG in %s at %s:%ld!\n",
bug->function, bug->file, bug->line);
diff --git a/arch/powerpc/lib/copypage_64.S b/arch/powerpc/lib/copypage_64.S
index 733d61618bb..40523b14010 100644
--- a/arch/powerpc/lib/copypage_64.S
+++ b/arch/powerpc/lib/copypage_64.S
@@ -11,7 +11,7 @@
#include <asm/processor.h>
#include <asm/ppc_asm.h>
-_GLOBAL(copy_page)
+_GLOBAL(copy_4K_page)
std r31,-8(1)
std r30,-16(1)
std r29,-24(1)
diff --git a/arch/powerpc/lib/copyuser_64.S b/arch/powerpc/lib/copyuser_64.S
index a0b3fbbd6fb..6d69ef39b7d 100644
--- a/arch/powerpc/lib/copyuser_64.S
+++ b/arch/powerpc/lib/copyuser_64.S
@@ -24,7 +24,7 @@ _GLOBAL(__copy_tofrom_user)
std r4,-16(r1)
std r5,-8(r1)
dcbt 0,r4
- beq .Lcopy_page
+ beq .Lcopy_page_4K
andi. r6,r6,7
mtcrf 0x01,r5
blt cr1,.Lshort_copy
@@ -366,7 +366,7 @@ _GLOBAL(__copy_tofrom_user)
* above (following the .Ldst_aligned label) but it runs slightly
* slower on POWER3.
*/
-.Lcopy_page:
+.Lcopy_page_4K:
std r31,-32(1)
std r30,-40(1)
std r29,-48(1)
diff --git a/arch/powerpc/lib/locks.c b/arch/powerpc/lib/locks.c
index 2a912f411eb..35bd03c41dd 100644
--- a/arch/powerpc/lib/locks.c
+++ b/arch/powerpc/lib/locks.c
@@ -23,6 +23,7 @@
#if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES)
#include <asm/hvcall.h>
#include <asm/iseries/hv_call.h>
+#include <asm/smp.h>
void __spin_yield(raw_spinlock_t *lock)
{
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 841d8b6323a..93d4fbfdb72 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -389,5 +389,22 @@ void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
}
/* kernel has accessed a bad area */
+
+ printk(KERN_ALERT "Unable to handle kernel paging request for ");
+ switch (regs->trap) {
+ case 0x300:
+ case 0x380:
+ printk("data at address 0x%08lx\n", regs->dar);
+ break;
+ case 0x400:
+ case 0x480:
+ printk("instruction fetch\n");
+ break;
+ default:
+ printk("unknown fault\n");
+ }
+ printk(KERN_ALERT "Faulting instruction address: 0x%08lx\n",
+ regs->nip);
+
die("Kernel access of bad area", regs, sig);
}
diff --git a/arch/powerpc/mm/hash_low_64.S b/arch/powerpc/mm/hash_low_64.S
index d6ed9102eee..e0d02c4a261 100644
--- a/arch/powerpc/mm/hash_low_64.S
+++ b/arch/powerpc/mm/hash_low_64.S
@@ -1,7 +1,7 @@
/*
* ppc64 MMU hashtable management routines
*
- * (c) Copyright IBM Corp. 2003
+ * (c) Copyright IBM Corp. 2003, 2005
*
* Maintained by: Benjamin Herrenschmidt
* <benh@kernel.crashing.org>
@@ -10,6 +10,7 @@
* described in the kernel's COPYING file.
*/
+#include <linux/config.h>
#include <asm/reg.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
@@ -42,14 +43,24 @@
/* Save non-volatile offsets */
#define STK_REG(i) (112 + ((i)-14)*8)
+
+#ifndef CONFIG_PPC_64K_PAGES
+
+/*****************************************************************************
+ * *
+ * 4K SW & 4K HW pages implementation *
+ * *
+ *****************************************************************************/
+
+
/*
- * _hash_page(unsigned long ea, unsigned long access, unsigned long vsid,
- * pte_t *ptep, unsigned long trap, int local)
+ * _hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
+ * pte_t *ptep, unsigned long trap, int local)
*
- * Adds a page to the hash table. This is the non-LPAR version for now
+ * Adds a 4K page to the hash table in a segment of 4K pages only
*/
-_GLOBAL(__hash_page)
+_GLOBAL(__hash_page_4K)
mflr r0
std r0,16(r1)
stdu r1,-STACKFRAMESIZE(r1)
@@ -88,7 +99,8 @@ _GLOBAL(__hash_page)
/* If so, just bail out and refault if needed. Someone else
* is changing this PTE anyway and might hash it.
*/
- bne- bail_ok
+ bne- htab_bail_ok
+
/* Prepare new PTE value (turn access RW into DIRTY, then
* add BUSY,HASHPTE and ACCESSED)
*/
@@ -118,10 +130,10 @@ _GLOBAL(__hash_page)
/* Convert linux PTE bits into HW equivalents */
andi. r3,r30,0x1fe /* Get basic set of flags */
- xori r3,r3,HW_NO_EXEC /* _PAGE_EXEC -> NOEXEC */
+ xori r3,r3,HPTE_R_N /* _PAGE_EXEC -> NOEXEC */
rlwinm r0,r30,32-9+1,30,30 /* _PAGE_RW -> _PAGE_USER (r0) */
rlwinm r4,r30,32-7+1,30,30 /* _PAGE_DIRTY -> _PAGE_USER (r4) */
- and r0,r0,r4 /* _PAGE_RW & _PAGE_DIRTY -> r0 bit 30 */
+ and r0,r0,r4 /* _PAGE_RW & _PAGE_DIRTY ->r0 bit 30*/
andc r0,r30,r0 /* r0 = pte & ~r0 */
rlwimi r3,r0,32-1,31,31 /* Insert result into PP lsb */
@@ -158,19 +170,21 @@ htab_insert_pte:
andc r30,r30,r0
ori r30,r30,_PAGE_HASHPTE
- /* page number in r5 */
- rldicl r5,r31,64-PTE_SHIFT,PTE_SHIFT
+ /* physical address r5 */
+ rldicl r5,r31,64-PTE_RPN_SHIFT,PTE_RPN_SHIFT
+ sldi r5,r5,PAGE_SHIFT
/* Calculate primary group hash */
and r0,r28,r27
- rldicr r3,r0,3,63-3 /* r0 = (hash & mask) << 3 */
+ rldicr r3,r0,3,63-3 /* r3 = (hash & mask) << 3 */
/* Call ppc_md.hpte_insert */
- ld r7,STK_PARM(r4)(r1) /* Retreive new pp bits */
+ ld r6,STK_PARM(r4)(r1) /* Retrieve new pp bits */
mr r4,r29 /* Retreive va */
- li r6,0 /* no vflags */
+ li r7,0 /* !bolted, !secondary */
+ li r8,MMU_PAGE_4K /* page size */
_GLOBAL(htab_call_hpte_insert1)
- bl . /* Will be patched by htab_finish_init() */
+ bl . /* Patched by htab_finish_init() */
cmpdi 0,r3,0
bge htab_pte_insert_ok /* Insertion successful */
cmpdi 0,r3,-2 /* Critical failure */
@@ -178,19 +192,21 @@ _GLOBAL(htab_call_hpte_insert1)
/* Now try secondary slot */
- /* page number in r5 */
- rldicl r5,r31,64-PTE_SHIFT,PTE_SHIFT
+ /* physical address r5 */
+ rldicl r5,r31,64-PTE_RPN_SHIFT,PTE_RPN_SHIFT
+ sldi r5,r5,PAGE_SHIFT
/* Calculate secondary group hash */
andc r0,r27,r28
rldicr r3,r0,3,63-3 /* r0 = (~hash & mask) << 3 */
/* Call ppc_md.hpte_insert */
- ld r7,STK_PARM(r4)(r1) /* Retreive new pp bits */
+ ld r6,STK_PARM(r4)(r1) /* Retrieve new pp bits */
mr r4,r29 /* Retreive va */
- li r6,HPTE_V_SECONDARY@l /* secondary slot */
+ li r7,HPTE_V_SECONDARY /* !bolted, secondary */
+ li r8,MMU_PAGE_4K /* page size */
_GLOBAL(htab_call_hpte_insert2)
- bl . /* Will be patched by htab_finish_init() */
+ bl . /* Patched by htab_finish_init() */
cmpdi 0,r3,0
bge+ htab_pte_insert_ok /* Insertion successful */
cmpdi 0,r3,-2 /* Critical failure */
@@ -207,14 +223,14 @@ _GLOBAL(htab_call_hpte_insert2)
rldicr r3,r0,3,63-3 /* r0 = (hash & mask) << 3 */
/* Call ppc_md.hpte_remove */
_GLOBAL(htab_call_hpte_remove)
- bl . /* Will be patched by htab_finish_init() */
+ bl . /* Patched by htab_finish_init() */
/* Try all again */
b htab_insert_pte
-bail_ok:
+htab_bail_ok:
li r3,0
- b bail
+ b htab_bail
htab_pte_insert_ok:
/* Insert slot number & secondary bit in PTE */
@@ -227,7 +243,7 @@ htab_write_out_pte:
ld r6,STK_PARM(r6)(r1)
std r30,0(r6)
li r3, 0
-bail:
+htab_bail:
ld r27,STK_REG(r27)(r1)
ld r28,STK_REG(r28)(r1)
ld r29,STK_REG(r29)(r1)
@@ -256,10 +272,10 @@ htab_modify_pte:
/* Call ppc_md.hpte_updatepp */
mr r5,r29 /* va */
- li r6,0 /* large is 0 */
+ li r6,MMU_PAGE_4K /* page size */
ld r7,STK_PARM(r8)(r1) /* get "local" param */
_GLOBAL(htab_call_hpte_updatepp)
- bl . /* Will be patched by htab_finish_init() */
+ bl . /* Patched by htab_finish_init() */
/* if we failed because typically the HPTE wasn't really here
* we try an insertion.
@@ -276,13 +292,556 @@ htab_wrong_access:
/* Bail out clearing reservation */
stdcx. r31,0,r6
li r3,1
- b bail
+ b htab_bail
+
+htab_pte_insert_failure:
+ /* Bail out restoring old PTE */
+ ld r6,STK_PARM(r6)(r1)
+ std r31,0(r6)
+ li r3,-1
+ b htab_bail
+
+
+#else /* CONFIG_PPC_64K_PAGES */
+
+
+/*****************************************************************************
+ * *
+ * 64K SW & 4K or 64K HW in a 4K segment pages implementation *
+ * *
+ *****************************************************************************/
+
+/* _hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
+ * pte_t *ptep, unsigned long trap, int local)
+ */
+
+/*
+ * For now, we do NOT implement Admixed pages
+ */
+_GLOBAL(__hash_page_4K)
+ mflr r0
+ std r0,16(r1)
+ stdu r1,-STACKFRAMESIZE(r1)
+ /* Save all params that we need after a function call */
+ std r6,STK_PARM(r6)(r1)
+ std r8,STK_PARM(r8)(r1)
+
+ /* Add _PAGE_PRESENT to access */
+ ori r4,r4,_PAGE_PRESENT
+
+ /* Save non-volatile registers.
+ * r31 will hold "old PTE"
+ * r30 is "new PTE"
+ * r29 is "va"
+ * r28 is a hash value
+ * r27 is hashtab mask (maybe dynamically patched instead?)
+ * r26 is the hidx mask
+ * r25 is the index in combo page
+ */
+ std r25,STK_REG(r25)(r1)
+ std r26,STK_REG(r26)(r1)
+ std r27,STK_REG(r27)(r1)
+ std r28,STK_REG(r28)(r1)
+ std r29,STK_REG(r29)(r1)
+ std r30,STK_REG(r30)(r1)
+ std r31,STK_REG(r31)(r1)
+
+ /* Step 1:
+ *
+ * Check permissions, atomically mark the linux PTE busy
+ * and hashed.
+ */
+1:
+ ldarx r31,0,r6
+ /* Check access rights (access & ~(pte_val(*ptep))) */
+ andc. r0,r4,r31
+ bne- htab_wrong_access
+ /* Check if PTE is busy */
+ andi. r0,r31,_PAGE_BUSY
+ /* If so, just bail out and refault if needed. Someone else
+ * is changing this PTE anyway and might hash it.
+ */
+ bne- htab_bail_ok
+ /* Prepare new PTE value (turn access RW into DIRTY, then
+ * add BUSY and ACCESSED)
+ */
+ rlwinm r30,r4,32-9+7,31-7,31-7 /* _PAGE_RW -> _PAGE_DIRTY */
+ or r30,r30,r31
+ ori r30,r30,_PAGE_BUSY | _PAGE_ACCESSED | _PAGE_HASHPTE
+ /* Write the linux PTE atomically (setting busy) */
+ stdcx. r30,0,r6
+ bne- 1b
+ isync
+
+ /* Step 2:
+ *
+ * Insert/Update the HPTE in the hash table. At this point,
+ * r4 (access) is reusable; we use it for the new HPTE flags
+ */
+
+ /* Load the hidx index */
+ rldicl r25,r3,64-12,60
+
+ /* Calc va and put it in r29 */
+ rldicr r29,r5,28,63-28 /* r29 = (vsid << 28) */
+ rldicl r3,r3,0,36 /* r3 = (ea & 0x0fffffff) */
+ or r29,r3,r29 /* r29 = va */
+
+ /* Calculate hash value for primary slot and store it in r28 */
+ rldicl r5,r5,0,25 /* vsid & 0x0000007fffffffff */
+ rldicl r0,r3,64-12,48 /* (ea >> 12) & 0xffff */
+ xor r28,r5,r0
+
+ /* Convert linux PTE bits into HW equivalents */
+ andi. r3,r30,0x1fe /* Get basic set of flags */
+ xori r3,r3,HPTE_R_N /* _PAGE_EXEC -> NOEXEC */
+ rlwinm r0,r30,32-9+1,30,30 /* _PAGE_RW -> _PAGE_USER (r0) */
+ rlwinm r4,r30,32-7+1,30,30 /* _PAGE_DIRTY -> _PAGE_USER (r4) */
+ and r0,r0,r4 /* _PAGE_RW & _PAGE_DIRTY ->r0 bit 30*/
+ andc r0,r30,r0 /* r0 = pte & ~r0 */
+ rlwimi r3,r0,32-1,31,31 /* Insert result into PP lsb */
+
+ /* We eventually do the icache sync here (maybe inline that
+ * code rather than call a C function...)
+ */
+BEGIN_FTR_SECTION
+ mr r4,r30
+ mr r5,r7
+ bl .hash_page_do_lazy_icache
+END_FTR_SECTION(CPU_FTR_NOEXECUTE|CPU_FTR_COHERENT_ICACHE, CPU_FTR_NOEXECUTE)
+
+ /* At this point, r3 contains new PP bits, save them in
+ * place of "access" in the param area (sic)
+ */
+ std r3,STK_PARM(r4)(r1)
+
+ /* Get htab_hash_mask */
+ ld r4,htab_hash_mask@got(2)
+ ld r27,0(r4) /* htab_hash_mask -> r27 */
+
+ /* Check if we may already be in the hashtable, in this case, we
+ * go to out-of-line code to try to modify the HPTE. We look for
+ * the bit at (1 >> (index + 32))
+ */
+ andi. r0,r31,_PAGE_HASHPTE
+ li r26,0 /* Default hidx */
+ beq htab_insert_pte
+ ld r6,STK_PARM(r6)(r1)
+ ori r26,r6,0x8000 /* Load the hidx mask */
+ ld r26,0(r26)
+ addi r5,r25,36 /* Check actual HPTE_SUB bit, this */
+ rldcr. r0,r31,r5,0 /* must match pgtable.h definition */
+ bne htab_modify_pte
+
+htab_insert_pte:
+ /* real page number in r5, PTE RPN value + index */
+ rldicl r5,r31,64-PTE_RPN_SHIFT,PTE_RPN_SHIFT
+ sldi r5,r5,PAGE_SHIFT-HW_PAGE_SHIFT
+ add r5,r5,r25
+ sldi r5,r5,HW_PAGE_SHIFT
+
+ /* Calculate primary group hash */
+ and r0,r28,r27
+ rldicr r3,r0,3,63-3 /* r0 = (hash & mask) << 3 */
+
+ /* Call ppc_md.hpte_insert */
+ ld r6,STK_PARM(r4)(r1) /* Retrieve new pp bits */
+ mr r4,r29 /* Retrieve va */
+ li r7,0 /* !bolted, !secondary */
+ li r8,MMU_PAGE_4K /* page size */
+_GLOBAL(htab_call_hpte_insert1)
+ bl . /* patched by htab_finish_init() */
+ cmpdi 0,r3,0
+ bge htab_pte_insert_ok /* Insertion successful */
+ cmpdi 0,r3,-2 /* Critical failure */
+ beq- htab_pte_insert_failure
+
+ /* Now try secondary slot */
+
+ /* real page number in r5, PTE RPN value + index */
+ rldicl r5,r31,64-PTE_RPN_SHIFT,PTE_RPN_SHIFT
+ sldi r5,r5,PAGE_SHIFT-HW_PAGE_SHIFT
+ add r5,r5,r25
+ sldi r5,r5,HW_PAGE_SHIFT
+
+ /* Calculate secondary group hash */
+ andc r0,r27,r28
+ rldicr r3,r0,3,63-3 /* r0 = (~hash & mask) << 3 */
+
+ /* Call ppc_md.hpte_insert */
+ ld r6,STK_PARM(r4)(r1) /* Retrieve new pp bits */
+ mr r4,r29 /* Retrieve va */
+ li r7,HPTE_V_SECONDARY /* !bolted, secondary */
+ li r8,MMU_PAGE_4K /* page size */
+_GLOBAL(htab_call_hpte_insert2)
+ bl . /* patched by htab_finish_init() */
+ cmpdi 0,r3,0
+ bge+ htab_pte_insert_ok /* Insertion successful */
+ cmpdi 0,r3,-2 /* Critical failure */
+ beq- htab_pte_insert_failure
+
+ /* Both are full, we need to evict something */
+ mftb r0
+ /* Pick a random group based on TB */
+ andi. r0,r0,1
+ mr r5,r28
+ bne 2f
+ not r5,r5
+2: and r0,r5,r27
+ rldicr r3,r0,3,63-3 /* r0 = (hash & mask) << 3 */
+ /* Call ppc_md.hpte_remove */
+_GLOBAL(htab_call_hpte_remove)
+ bl . /* patched by htab_finish_init() */
+
+ /* Try all again */
+ b htab_insert_pte
+
+htab_bail_ok:
+ li r3,0
+ b htab_bail
+
+htab_pte_insert_ok:
+ /* Insert slot number & secondary bit in PTE second half,
+ * clear _PAGE_BUSY and set appropriate HPTE slot bit
+ */
+ ld r6,STK_PARM(r6)(r1)
+ li r0,_PAGE_BUSY
+ andc r30,r30,r0
+ /* HPTE SUB bit */
+ li r0,1
+ subfic r5,r25,27 /* Must match bit position in */
+ sld r0,r0,r5 /* pgtable.h */
+ or r30,r30,r0
+ /* hindx */
+ sldi r5,r25,2
+ sld r3,r3,r5
+ li r4,0xf
+ sld r4,r4,r5
+ andc r26,r26,r4
+ or r26,r26,r3
+ ori r5,r6,0x8000
+ std r26,0(r5)
+ lwsync
+ std r30,0(r6)
+ li r3, 0
+htab_bail:
+ ld r25,STK_REG(r25)(r1)
+ ld r26,STK_REG(r26)(r1)
+ ld r27,STK_REG(r27)(r1)
+ ld r28,STK_REG(r28)(r1)
+ ld r29,STK_REG(r29)(r1)
+ ld r30,STK_REG(r30)(r1)
+ ld r31,STK_REG(r31)(r1)
+ addi r1,r1,STACKFRAMESIZE
+ ld r0,16(r1)
+ mtlr r0
+ blr
+
+htab_modify_pte:
+ /* Keep PP bits in r4 and slot idx from the PTE around in r3 */
+ mr r4,r3
+ sldi r5,r25,2
+ srd r3,r26,r5
+
+ /* Secondary group? If yes, get an inverted hash value */
+ mr r5,r28
+ andi. r0,r3,0x8 /* page secondary ? */
+ beq 1f
+ not r5,r5
+1: andi. r3,r3,0x7 /* extract idx alone */
+
+ /* Calculate proper slot value for ppc_md.hpte_updatepp */
+ and r0,r5,r27
+ rldicr r0,r0,3,63-3 /* r0 = (hash & mask) << 3 */
+ add r3,r0,r3 /* add slot idx */
+
+ /* Call ppc_md.hpte_updatepp */
+ mr r5,r29 /* va */
+ li r6,MMU_PAGE_4K /* page size */
+ ld r7,STK_PARM(r8)(r1) /* get "local" param */
+_GLOBAL(htab_call_hpte_updatepp)
+ bl . /* patched by htab_finish_init() */
+
+ /* if we failed because typically the HPTE wasn't really here
+ * we try an insertion.
+ */
+ cmpdi 0,r3,-1
+ beq- htab_insert_pte
+
+ /* Clear the BUSY bit and Write out the PTE */
+ li r0,_PAGE_BUSY
+ andc r30,r30,r0
+ ld r6,STK_PARM(r6)(r1)
+ std r30,0(r6)
+ li r3,0
+ b htab_bail
+
+htab_wrong_access:
+ /* Bail out clearing reservation */
+ stdcx. r31,0,r6
+ li r3,1
+ b htab_bail
htab_pte_insert_failure:
/* Bail out restoring old PTE */
ld r6,STK_PARM(r6)(r1)
std r31,0(r6)
li r3,-1
- b bail
+ b htab_bail
+
+
+/*****************************************************************************
+ * *
+ * 64K SW & 64K HW in a 64K segment pages implementation *
+ * *
+ *****************************************************************************/
+
+_GLOBAL(__hash_page_64K)
+ mflr r0
+ std r0,16(r1)
+ stdu r1,-STACKFRAMESIZE(r1)
+ /* Save all params that we need after a function call */
+ std r6,STK_PARM(r6)(r1)
+ std r8,STK_PARM(r8)(r1)
+
+ /* Add _PAGE_PRESENT to access */
+ ori r4,r4,_PAGE_PRESENT
+
+ /* Save non-volatile registers.
+ * r31 will hold "old PTE"
+ * r30 is "new PTE"
+ * r29 is "va"
+ * r28 is a hash value
+ * r27 is hashtab mask (maybe dynamically patched instead?)
+ */
+ std r27,STK_REG(r27)(r1)
+ std r28,STK_REG(r28)(r1)
+ std r29,STK_REG(r29)(r1)
+ std r30,STK_REG(r30)(r1)
+ std r31,STK_REG(r31)(r1)
+
+ /* Step 1:
+ *
+ * Check permissions, atomically mark the linux PTE busy
+ * and hashed.
+ */
+1:
+ ldarx r31,0,r6
+ /* Check access rights (access & ~(pte_val(*ptep))) */
+ andc. r0,r4,r31
+ bne- ht64_wrong_access
+ /* Check if PTE is busy */
+ andi. r0,r31,_PAGE_BUSY
+ /* If so, just bail out and refault if needed. Someone else
+ * is changing this PTE anyway and might hash it.
+ */
+ bne- ht64_bail_ok
+ /* Prepare new PTE value (turn access RW into DIRTY, then
+ * add BUSY,HASHPTE and ACCESSED)
+ */
+ rlwinm r30,r4,32-9+7,31-7,31-7 /* _PAGE_RW -> _PAGE_DIRTY */
+ or r30,r30,r31
+ ori r30,r30,_PAGE_BUSY | _PAGE_ACCESSED | _PAGE_HASHPTE
+ /* Write the linux PTE atomically (setting busy) */
+ stdcx. r30,0,r6
+ bne- 1b
+ isync
+
+ /* Step 2:
+ *
+ * Insert/Update the HPTE in the hash table. At this point,
+ * r4 (access) is reusable; we use it for the new HPTE flags
+ */
+
+ /* Calc va and put it in r29 */
+ rldicr r29,r5,28,63-28
+ rldicl r3,r3,0,36
+ or r29,r3,r29
+
+ /* Calculate hash value for primary slot and store it in r28 */
+ rldicl r5,r5,0,25 /* vsid & 0x0000007fffffffff */
+ rldicl r0,r3,64-16,52 /* (ea >> 16) & 0xfff */
+ xor r28,r5,r0
+
+ /* Convert linux PTE bits into HW equivalents */
+ andi. r3,r30,0x1fe /* Get basic set of flags */
+ xori r3,r3,HPTE_R_N /* _PAGE_EXEC -> NOEXEC */
+ rlwinm r0,r30,32-9+1,30,30 /* _PAGE_RW -> _PAGE_USER (r0) */
+ rlwinm r4,r30,32-7+1,30,30 /* _PAGE_DIRTY -> _PAGE_USER (r4) */
+ and r0,r0,r4 /* _PAGE_RW & _PAGE_DIRTY ->r0 bit 30*/
+ andc r0,r30,r0 /* r0 = pte & ~r0 */
+ rlwimi r3,r0,32-1,31,31 /* Insert result into PP lsb */
+
+ /* We eventually do the icache sync here (maybe inline that
+ * code rather than call a C function...)
+ */
+BEGIN_FTR_SECTION
+ mr r4,r30
+ mr r5,r7
+ bl .hash_page_do_lazy_icache
+END_FTR_SECTION(CPU_FTR_NOEXECUTE|CPU_FTR_COHERENT_ICACHE, CPU_FTR_NOEXECUTE)
+
+ /* At this point, r3 contains new PP bits, save them in
+ * place of "access" in the param area (sic)
+ */
+ std r3,STK_PARM(r4)(r1)
+
+ /* Get htab_hash_mask */
+ ld r4,htab_hash_mask@got(2)
+ ld r27,0(r4) /* htab_hash_mask -> r27 */
+
+ /* Check if we may already be in the hashtable, in this case, we
+ * go to out-of-line code to try to modify the HPTE
+ */
+ andi. r0,r31,_PAGE_HASHPTE
+ bne ht64_modify_pte
+
+ht64_insert_pte:
+ /* Clear hpte bits in new pte (we also clear BUSY btw) and
+ * add _PAGE_HASHPTE
+ */
+ lis r0,_PAGE_HPTEFLAGS@h
+ ori r0,r0,_PAGE_HPTEFLAGS@l
+ andc r30,r30,r0
+ ori r30,r30,_PAGE_HASHPTE
+
+ /* Physical address in r5 */
+ rldicl r5,r31,64-PTE_RPN_SHIFT,PTE_RPN_SHIFT
+ sldi r5,r5,PAGE_SHIFT
+
+ /* Calculate primary group hash */
+ and r0,r28,r27
+ rldicr r3,r0,3,63-3 /* r0 = (hash & mask) << 3 */
+
+ /* Call ppc_md.hpte_insert */
+ ld r6,STK_PARM(r4)(r1) /* Retrieve new pp bits */
+ mr r4,r29 /* Retrieve va */
+ li r7,0 /* !bolted, !secondary */
+ li r8,MMU_PAGE_64K
+_GLOBAL(ht64_call_hpte_insert1)
+ bl . /* patched by htab_finish_init() */
+ cmpdi 0,r3,0
+ bge ht64_pte_insert_ok /* Insertion successful */
+ cmpdi 0,r3,-2 /* Critical failure */
+ beq- ht64_pte_insert_failure
+
+ /* Now try secondary slot */
+
+ /* Physical address in r5 */
+ rldicl r5,r31,64-PTE_RPN_SHIFT,PTE_RPN_SHIFT
+ sldi r5,r5,PAGE_SHIFT
+
+ /* Calculate secondary group hash */
+ andc r0,r27,r28
+ rldicr r3,r0,3,63-3 /* r0 = (~hash & mask) << 3 */
+
+ /* Call ppc_md.hpte_insert */
+ ld r6,STK_PARM(r4)(r1) /* Retrieve new pp bits */
+ mr r4,r29 /* Retrieve va */
+ li r7,HPTE_V_SECONDARY /* !bolted, secondary */
+ li r8,MMU_PAGE_64K
+_GLOBAL(ht64_call_hpte_insert2)
+ bl . /* patched by htab_finish_init() */
+ cmpdi 0,r3,0
+ bge+ ht64_pte_insert_ok /* Insertion successful */
+ cmpdi 0,r3,-2 /* Critical failure */
+ beq- ht64_pte_insert_failure
+
+ /* Both are full, we need to evict something */
+ mftb r0
+ /* Pick a random group based on TB */
+ andi. r0,r0,1
+ mr r5,r28
+ bne 2f
+ not r5,r5
+2: and r0,r5,r27
+ rldicr r3,r0,3,63-3 /* r0 = (hash & mask) << 3 */
+ /* Call ppc_md.hpte_remove */
+_GLOBAL(ht64_call_hpte_remove)
+ bl . /* patched by htab_finish_init() */
+
+ /* Try all again */
+ b ht64_insert_pte
+
+ht64_bail_ok:
+ li r3,0
+ b ht64_bail
+
+ht64_pte_insert_ok:
+ /* Insert slot number & secondary bit in PTE */
+ rldimi r30,r3,12,63-15
+
+ /* Write out the PTE with a normal write
+ * (maybe adding an eieio here would still be good?)
+ */
+ht64_write_out_pte:
+ ld r6,STK_PARM(r6)(r1)
+ std r30,0(r6)
+ li r3, 0
+ht64_bail:
+ ld r27,STK_REG(r27)(r1)
+ ld r28,STK_REG(r28)(r1)
+ ld r29,STK_REG(r29)(r1)
+ ld r30,STK_REG(r30)(r1)
+ ld r31,STK_REG(r31)(r1)
+ addi r1,r1,STACKFRAMESIZE
+ ld r0,16(r1)
+ mtlr r0
+ blr
+
+ht64_modify_pte:
+ /* Keep PP bits in r4 and slot idx from the PTE around in r3 */
+ mr r4,r3
+ rlwinm r3,r31,32-12,29,31
+
+ /* Secondary group? If yes, get an inverted hash value */
+ mr r5,r28
+ andi. r0,r31,_PAGE_F_SECOND
+ beq 1f
+ not r5,r5
+1:
+ /* Calculate proper slot value for ppc_md.hpte_updatepp */
+ and r0,r5,r27
+ rldicr r0,r0,3,63-3 /* r0 = (hash & mask) << 3 */
+ add r3,r0,r3 /* add slot idx */
+
+ /* Call ppc_md.hpte_updatepp */
+ mr r5,r29 /* va */
+ li r6,MMU_PAGE_64K
+ ld r7,STK_PARM(r8)(r1) /* get "local" param */
+_GLOBAL(ht64_call_hpte_updatepp)
+ bl . /* patched by htab_finish_init() */
+
+ /* If we failed (typically because the HPTE wasn't really here),
+ * we try an insertion.
+ */
+ cmpdi 0,r3,-1
+ beq- ht64_insert_pte
+
+ /* Clear the BUSY bit and Write out the PTE */
+ li r0,_PAGE_BUSY
+ andc r30,r30,r0
+ b ht64_write_out_pte
+
+ht64_wrong_access:
+ /* Bail out clearing reservation */
+ stdcx. r31,0,r6
+ li r3,1
+ b ht64_bail
+
+ht64_pte_insert_failure:
+ /* Bail out restoring old PTE */
+ ld r6,STK_PARM(r6)(r1)
+ std r31,0(r6)
+ li r3,-1
+ b ht64_bail
+
+
+#endif /* CONFIG_PPC_64K_PAGES */
+/*****************************************************************************
+ * *
+ * Huge pages implementation is in hugetlbpage.c *
+ * *
+ *****************************************************************************/
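The flow above -- try the primary PTE group, then the secondary, then evict -- reduces to simple arithmetic on the hash value. A minimal, self-contained C sketch of that arithmetic (stand-in mask value, illustrative only, not kernel code):

/*
 * Primary group starts at (hash & htab_hash_mask) * HPTES_PER_GROUP;
 * the secondary group uses the complemented hash; when both groups are
 * full, one of the two is chosen from the timebase low bit before an
 * entry is evicted.
 */
#include <stdio.h>
#include <stdint.h>

#define HPTES_PER_GROUP 8

static uint64_t group_base(uint64_t hash, uint64_t mask, int secondary)
{
	if (secondary)
		hash = ~hash;
	return (hash & mask) * HPTES_PER_GROUP;
}

int main(void)
{
	uint64_t hash = 0x123456789abcdefULL;
	uint64_t mask = (1ULL << 27) - 1;	/* stand-in htab_hash_mask */

	printf("primary group starts at slot %llu\n",
	       (unsigned long long)group_base(hash, mask, 0));
	printf("secondary group starts at slot %llu\n",
	       (unsigned long long)group_base(hash, mask, 1));
	return 0;
}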
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index 174d14576c2..d96bcfe4c6f 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -9,6 +9,9 @@
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
+
+#undef DEBUG_LOW
+
#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/threads.h>
@@ -22,11 +25,84 @@
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/cputable.h>
+#include <asm/udbg.h>
+
+#ifdef DEBUG_LOW
+#define DBG_LOW(fmt...) udbg_printf(fmt)
+#else
+#define DBG_LOW(fmt...)
+#endif
#define HPTE_LOCK_BIT 3
static DEFINE_SPINLOCK(native_tlbie_lock);
+static inline void __tlbie(unsigned long va, unsigned int psize)
+{
+ unsigned int penc;
+
+ /* clear top 16 bits, non SLS segment */
+ va &= ~(0xffffULL << 48);
+
+ switch (psize) {
+ case MMU_PAGE_4K:
+ va &= ~0xffful;
+ asm volatile("tlbie %0,0" : : "r" (va) : "memory");
+ break;
+ default:
+ penc = mmu_psize_defs[psize].penc;
+ va &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
+ va |= (0x7f >> (8 - penc)) << 12;
+ asm volatile("tlbie %0,1" : : "r" (va) : "memory");
+ break;
+ }
+}
+
+static inline void __tlbiel(unsigned long va, unsigned int psize)
+{
+ unsigned int penc;
+
+ /* clear top 16 bits, non SLS segment */
+ va &= ~(0xffffULL << 48);
+
+ switch (psize) {
+ case MMU_PAGE_4K:
+ va &= ~0xffful;
+ asm volatile(".long 0x7c000224 | (%0 << 11) | (0 << 21)"
+ : : "r"(va) : "memory");
+ break;
+ default:
+ penc = mmu_psize_defs[psize].penc;
+ va &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
+ va |= (0x7f >> (8 - penc)) << 12;
+ asm volatile(".long 0x7c000224 | (%0 << 11) | (1 << 21)"
+ : : "r"(va) : "memory");
+ break;
+ }
+
+}
+
+static inline void tlbie(unsigned long va, int psize, int local)
+{
+ unsigned int use_local = local && cpu_has_feature(CPU_FTR_TLBIEL);
+ int lock_tlbie = !cpu_has_feature(CPU_FTR_LOCKLESS_TLBIE);
+
+ if (use_local)
+ use_local = mmu_psize_defs[psize].tlbiel;
+ if (lock_tlbie && !use_local)
+ spin_lock(&native_tlbie_lock);
+ asm volatile("ptesync": : :"memory");
+ if (use_local) {
+ __tlbiel(va, psize);
+ asm volatile("ptesync": : :"memory");
+ } else {
+ __tlbie(va, psize);
+ asm volatile("eieio; tlbsync; ptesync": : :"memory");
+ }
+ if (lock_tlbie && !use_local)
+ spin_unlock(&native_tlbie_lock);
+}
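The new tlbie() helper centralises the old "tlbiel if local, tlbie otherwise" decision and adds the per-page-size capability check. A rough model of that decision, with stand-in types rather than the kernel's (illustrative only):

#include <stdbool.h>

/*
 * The local form (tlbiel) is used only when the flush is CPU-local,
 * the CPU advertises CPU_FTR_TLBIEL and the page size supports it;
 * otherwise a global tlbie is issued, taking native_tlbie_lock on
 * CPUs that lack lockless tlbie.
 */
struct psize_caps {
	bool tlbiel;	/* stand-in for mmu_psize_defs[psize].tlbiel */
};

static bool use_local_tlbiel(bool cpu_has_tlbiel, bool local,
			     const struct psize_caps *caps)
{
	return local && cpu_has_tlbiel && caps->tlbiel;
}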
+
static inline void native_lock_hpte(hpte_t *hptep)
{
unsigned long *word = &hptep->v;
@@ -48,13 +124,19 @@ static inline void native_unlock_hpte(hpte_t *hptep)
}
long native_hpte_insert(unsigned long hpte_group, unsigned long va,
- unsigned long prpn, unsigned long vflags,
- unsigned long rflags)
+ unsigned long pa, unsigned long rflags,
+ unsigned long vflags, int psize)
{
hpte_t *hptep = htab_address + hpte_group;
unsigned long hpte_v, hpte_r;
int i;
+ if (!(vflags & HPTE_V_BOLTED)) {
+ DBG_LOW(" insert(group=%lx, va=%016lx, pa=%016lx,"
+ " rflags=%lx, vflags=%lx, psize=%d)\n",
+ hpte_group, va, pa, rflags, vflags, psize);
+ }
+
for (i = 0; i < HPTES_PER_GROUP; i++) {
if (! (hptep->v & HPTE_V_VALID)) {
/* retry with lock held */
@@ -70,10 +152,13 @@ long native_hpte_insert(unsigned long hpte_group, unsigned long va,
if (i == HPTES_PER_GROUP)
return -1;
- hpte_v = (va >> 23) << HPTE_V_AVPN_SHIFT | vflags | HPTE_V_VALID;
- if (vflags & HPTE_V_LARGE)
- va &= ~(1UL << HPTE_V_AVPN_SHIFT);
- hpte_r = (prpn << HPTE_R_RPN_SHIFT) | rflags;
+ hpte_v = hpte_encode_v(va, psize) | vflags | HPTE_V_VALID;
+ hpte_r = hpte_encode_r(pa, psize) | rflags;
+
+ if (!(vflags & HPTE_V_BOLTED)) {
+ DBG_LOW(" i=%x hpte_v=%016lx, hpte_r=%016lx\n",
+ i, hpte_v, hpte_r);
+ }
hptep->r = hpte_r;
/* Guarantee the second dword is visible before the valid bit */
@@ -96,6 +181,8 @@ static long native_hpte_remove(unsigned long hpte_group)
int slot_offset;
unsigned long hpte_v;
+ DBG_LOW(" remove(group=%lx)\n", hpte_group);
+
/* pick a random entry to start at */
slot_offset = mftb() & 0x7;
@@ -126,34 +213,51 @@ static long native_hpte_remove(unsigned long hpte_group)
return i;
}
-static inline void set_pp_bit(unsigned long pp, hpte_t *addr)
+static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
+ unsigned long va, int psize, int local)
{
- unsigned long old;
- unsigned long *p = &addr->r;
-
- __asm__ __volatile__(
- "1: ldarx %0,0,%3\n\
- rldimi %0,%2,0,61\n\
- stdcx. %0,0,%3\n\
- bne 1b"
- : "=&r" (old), "=m" (*p)
- : "r" (pp), "r" (p), "m" (*p)
- : "cc");
+ hpte_t *hptep = htab_address + slot;
+ unsigned long hpte_v, want_v;
+ int ret = 0;
+
+ want_v = hpte_encode_v(va, psize);
+
+ DBG_LOW(" update(va=%016lx, avpnv=%016lx, hash=%016lx, newpp=%x)",
+ va, want_v & HPTE_V_AVPN, slot, newpp);
+
+ native_lock_hpte(hptep);
+
+ hpte_v = hptep->v;
+
+ /* Even if we miss, we need to invalidate the TLB */
+ if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID)) {
+ DBG_LOW(" -> miss\n");
+ native_unlock_hpte(hptep);
+ ret = -1;
+ } else {
+ DBG_LOW(" -> hit\n");
+ /* Update the HPTE */
+ hptep->r = (hptep->r & ~(HPTE_R_PP | HPTE_R_N)) |
+ (newpp & (HPTE_R_PP | HPTE_R_N));
+ native_unlock_hpte(hptep);
+ }
+
+ /* Ensure it is out of the tlb too. */
+ tlbie(va, psize, local);
+
+ return ret;
}
-/*
- * Only works on small pages. Yes its ugly to have to check each slot in
- * the group but we only use this during bootup.
- */
-static long native_hpte_find(unsigned long vpn)
+static long native_hpte_find(unsigned long va, int psize)
{
hpte_t *hptep;
unsigned long hash;
unsigned long i, j;
long slot;
- unsigned long hpte_v;
+ unsigned long want_v, hpte_v;
- hash = hpt_hash(vpn, 0);
+ hash = hpt_hash(va, mmu_psize_defs[psize].shift);
+ want_v = hpte_encode_v(va, psize);
for (j = 0; j < 2; j++) {
slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
@@ -161,7 +265,7 @@ static long native_hpte_find(unsigned long vpn)
hptep = htab_address + slot;
hpte_v = hptep->v;
- if ((HPTE_V_AVPN_VAL(hpte_v) == (vpn >> 11))
+ if (HPTE_V_COMPARE(hpte_v, want_v)
&& (hpte_v & HPTE_V_VALID)
&& ( !!(hpte_v & HPTE_V_SECONDARY) == j)) {
/* HPTE matches */
@@ -177,127 +281,101 @@ static long native_hpte_find(unsigned long vpn)
return -1;
}
-static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
- unsigned long va, int large, int local)
-{
- hpte_t *hptep = htab_address + slot;
- unsigned long hpte_v;
- unsigned long avpn = va >> 23;
- int ret = 0;
-
- if (large)
- avpn &= ~1;
-
- native_lock_hpte(hptep);
-
- hpte_v = hptep->v;
-
- /* Even if we miss, we need to invalidate the TLB */
- if ((HPTE_V_AVPN_VAL(hpte_v) != avpn)
- || !(hpte_v & HPTE_V_VALID)) {
- native_unlock_hpte(hptep);
- ret = -1;
- } else {
- set_pp_bit(newpp, hptep);
- native_unlock_hpte(hptep);
- }
-
- /* Ensure it is out of the tlb too */
- if (cpu_has_feature(CPU_FTR_TLBIEL) && !large && local) {
- tlbiel(va);
- } else {
- int lock_tlbie = !cpu_has_feature(CPU_FTR_LOCKLESS_TLBIE);
-
- if (lock_tlbie)
- spin_lock(&native_tlbie_lock);
- tlbie(va, large);
- if (lock_tlbie)
- spin_unlock(&native_tlbie_lock);
- }
-
- return ret;
-}
-
/*
* Update the page protection bits. Intended to be used to create
* guard pages for kernel data structures on pages which are bolted
* in the HPT. Assumes pages being operated on will not be stolen.
- * Does not work on large pages.
*
* No need to lock here because we should be the only user.
*/
-static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea)
+static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
+ int psize)
{
- unsigned long vsid, va, vpn, flags = 0;
+ unsigned long vsid, va;
long slot;
hpte_t *hptep;
- int lock_tlbie = !cpu_has_feature(CPU_FTR_LOCKLESS_TLBIE);
vsid = get_kernel_vsid(ea);
va = (vsid << 28) | (ea & 0x0fffffff);
- vpn = va >> PAGE_SHIFT;
- slot = native_hpte_find(vpn);
+ slot = native_hpte_find(va, psize);
if (slot == -1)
panic("could not find page to bolt\n");
hptep = htab_address + slot;
- set_pp_bit(newpp, hptep);
+ /* Update the HPTE */
+ hptep->r = (hptep->r & ~(HPTE_R_PP | HPTE_R_N)) |
+ (newpp & (HPTE_R_PP | HPTE_R_N));
- /* Ensure it is out of the tlb too */
- if (lock_tlbie)
- spin_lock_irqsave(&native_tlbie_lock, flags);
- tlbie(va, 0);
- if (lock_tlbie)
- spin_unlock_irqrestore(&native_tlbie_lock, flags);
+ /* Ensure it is out of the tlb too. */
+ tlbie(va, psize, 0);
}
static void native_hpte_invalidate(unsigned long slot, unsigned long va,
- int large, int local)
+ int psize, int local)
{
hpte_t *hptep = htab_address + slot;
unsigned long hpte_v;
- unsigned long avpn = va >> 23;
+ unsigned long want_v;
unsigned long flags;
- int lock_tlbie = !cpu_has_feature(CPU_FTR_LOCKLESS_TLBIE);
-
- if (large)
- avpn &= ~1;
local_irq_save(flags);
- native_lock_hpte(hptep);
+ DBG_LOW(" invalidate(va=%016lx, hash: %x)\n", va, slot);
+
+ want_v = hpte_encode_v(va, psize);
+ native_lock_hpte(hptep);
hpte_v = hptep->v;
/* Even if we miss, we need to invalidate the TLB */
- if ((HPTE_V_AVPN_VAL(hpte_v) != avpn)
- || !(hpte_v & HPTE_V_VALID)) {
+ if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
native_unlock_hpte(hptep);
- } else {
+ else
/* Invalidate the hpte. NOTE: this also unlocks it */
hptep->v = 0;
- }
- /* Invalidate the tlb */
- if (cpu_has_feature(CPU_FTR_TLBIEL) && !large && local) {
- tlbiel(va);
- } else {
- if (lock_tlbie)
- spin_lock(&native_tlbie_lock);
- tlbie(va, large);
- if (lock_tlbie)
- spin_unlock(&native_tlbie_lock);
- }
+ /* Invalidate the TLB */
+ tlbie(va, psize, local);
+
local_irq_restore(flags);
}
/*
+ * XXX This needs fixing based on page size. It's only used by
+ * native_hpte_clear() for now which needs fixing too so they
+ * make a good pair...
+ */
+static unsigned long slot2va(unsigned long hpte_v, unsigned long slot)
+{
+ unsigned long avpn = HPTE_V_AVPN_VAL(hpte_v);
+ unsigned long va;
+
+ va = avpn << 23;
+
+ if (! (hpte_v & HPTE_V_LARGE)) {
+ unsigned long vpi, pteg;
+
+ pteg = slot / HPTES_PER_GROUP;
+ if (hpte_v & HPTE_V_SECONDARY)
+ pteg = ~pteg;
+
+ vpi = ((va >> 28) ^ pteg) & htab_hash_mask;
+
+ va |= vpi << PAGE_SHIFT;
+ }
+
+ return va;
+}
+
+/*
* clear all mappings on kexec. All cpus are in real mode (or they will
* be when they isi), and we are the only one left. We rely on our kernel
* mapping being 0xC0's and the hardware ignoring those two real bits.
*
* TODO: add batching support when enabled. remember, no dynamic memory here,
* athough there is the control page available...
+ *
+ * XXX FIXME: 4k only for now !
*/
static void native_hpte_clear(void)
{
@@ -327,7 +405,7 @@ static void native_hpte_clear(void)
if (hpte_v & HPTE_V_VALID) {
hptep->v = 0;
- tlbie(slot2va(hpte_v, slot), hpte_v & HPTE_V_LARGE);
+ tlbie(slot2va(hpte_v, slot), MMU_PAGE_4K, 0);
}
}
@@ -335,59 +413,59 @@ static void native_hpte_clear(void)
local_irq_restore(flags);
}
+/*
+ * Batched hash table flush, we batch the tlbie's to avoid taking/releasing
+ * the lock all the time
+ */
static void native_flush_hash_range(unsigned long number, int local)
{
- unsigned long va, vpn, hash, secondary, slot, flags, avpn;
- int i, j;
+ unsigned long va, hash, index, hidx, shift, slot;
hpte_t *hptep;
unsigned long hpte_v;
+ unsigned long want_v;
+ unsigned long flags;
+ real_pte_t pte;
struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
- unsigned long large = batch->large;
+ unsigned long psize = batch->psize;
+ int i;
local_irq_save(flags);
- j = 0;
for (i = 0; i < number; i++) {
- va = batch->vaddr[j];
- if (large)
- vpn = va >> HPAGE_SHIFT;
- else
- vpn = va >> PAGE_SHIFT;
- hash = hpt_hash(vpn, large);
- secondary = (pte_val(batch->pte[i]) & _PAGE_SECONDARY) >> 15;
- if (secondary)
- hash = ~hash;
- slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
- slot += (pte_val(batch->pte[i]) & _PAGE_GROUP_IX) >> 12;
-
- hptep = htab_address + slot;
-
- avpn = va >> 23;
- if (large)
- avpn &= ~0x1UL;
-
- native_lock_hpte(hptep);
-
- hpte_v = hptep->v;
-
- /* Even if we miss, we need to invalidate the TLB */
- if ((HPTE_V_AVPN_VAL(hpte_v) != avpn)
- || !(hpte_v & HPTE_V_VALID)) {
- native_unlock_hpte(hptep);
- } else {
- /* Invalidate the hpte. NOTE: this also unlocks it */
- hptep->v = 0;
- }
-
- j++;
+ va = batch->vaddr[i];
+ pte = batch->pte[i];
+
+ pte_iterate_hashed_subpages(pte, psize, va, index, shift) {
+ hash = hpt_hash(va, shift);
+ hidx = __rpte_to_hidx(pte, index);
+ if (hidx & _PTEIDX_SECONDARY)
+ hash = ~hash;
+ slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
+ slot += hidx & _PTEIDX_GROUP_IX;
+ hptep = htab_address + slot;
+ want_v = hpte_encode_v(va, psize);
+ native_lock_hpte(hptep);
+ hpte_v = hptep->v;
+ if (!HPTE_V_COMPARE(hpte_v, want_v) ||
+ !(hpte_v & HPTE_V_VALID))
+ native_unlock_hpte(hptep);
+ else
+ hptep->v = 0;
+ } pte_iterate_hashed_end();
}
- if (cpu_has_feature(CPU_FTR_TLBIEL) && !large && local) {
+ if (cpu_has_feature(CPU_FTR_TLBIEL) &&
+ mmu_psize_defs[psize].tlbiel && local) {
asm volatile("ptesync":::"memory");
-
- for (i = 0; i < j; i++)
- __tlbiel(batch->vaddr[i]);
-
+ for (i = 0; i < number; i++) {
+ va = batch->vaddr[i];
+ pte = batch->pte[i];
+
+ pte_iterate_hashed_subpages(pte, psize, va, index,
+ shift) {
+ __tlbiel(va, psize);
+ } pte_iterate_hashed_end();
+ }
asm volatile("ptesync":::"memory");
} else {
int lock_tlbie = !cpu_has_feature(CPU_FTR_LOCKLESS_TLBIE);
@@ -396,10 +474,15 @@ static void native_flush_hash_range(unsigned long number, int local)
spin_lock(&native_tlbie_lock);
asm volatile("ptesync":::"memory");
-
- for (i = 0; i < j; i++)
- __tlbie(batch->vaddr[i], large);
-
+ for (i = 0; i < number; i++) {
+ va = batch->vaddr[i];
+ pte = batch->pte[i];
+
+ pte_iterate_hashed_subpages(pte, psize, va, index,
+ shift) {
+ __tlbie(va, psize);
+ } pte_iterate_hashed_end();
+ }
asm volatile("eieio; tlbsync; ptesync":::"memory");
if (lock_tlbie)
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 6e9e05cce02..22e47487613 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -19,6 +19,7 @@
*/
#undef DEBUG
+#undef DEBUG_LOW
#include <linux/config.h>
#include <linux/spinlock.h>
@@ -32,7 +33,6 @@
#include <linux/init.h>
#include <linux/signal.h>
-#include <asm/ppcdebug.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
@@ -59,6 +59,15 @@
#define DBG(fmt...)
#endif
+#ifdef DEBUG_LOW
+#define DBG_LOW(fmt...) udbg_printf(fmt)
+#else
+#define DBG_LOW(fmt...)
+#endif
+
+#define KB (1024)
+#define MB (1024*KB)
+
/*
* Note: pte --> Linux PTE
* HPTE --> PowerPC Hashed Page Table Entry
@@ -77,91 +86,292 @@ extern unsigned long dart_tablebase;
hpte_t *htab_address;
unsigned long htab_hash_mask;
-
unsigned long _SDR1;
+struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
+int mmu_linear_psize = MMU_PAGE_4K;
+int mmu_virtual_psize = MMU_PAGE_4K;
+#ifdef CONFIG_HUGETLB_PAGE
+int mmu_huge_psize = MMU_PAGE_16M;
+unsigned int HPAGE_SHIFT;
+#endif
-#define KB (1024)
-#define MB (1024*KB)
-
-static inline void loop_forever(void)
-{
- volatile unsigned long x = 1;
- for(;x;x|=1)
- ;
-}
+/* These are definitions of page-size arrays to be used when none
+ * is provided by the firmware.
+ */
-static inline void create_pte_mapping(unsigned long start, unsigned long end,
- unsigned long mode, int large)
+/* Pre-POWER4 CPUs (4k pages only)
+ */
+struct mmu_psize_def mmu_psize_defaults_old[] = {
+ [MMU_PAGE_4K] = {
+ .shift = 12,
+ .sllp = 0,
+ .penc = 0,
+ .avpnm = 0,
+ .tlbiel = 0,
+ },
+};
+
+/* POWER4, GPUL, POWER5
+ *
+ * Support for 16Mb large pages
+ */
+struct mmu_psize_def mmu_psize_defaults_gp[] = {
+ [MMU_PAGE_4K] = {
+ .shift = 12,
+ .sllp = 0,
+ .penc = 0,
+ .avpnm = 0,
+ .tlbiel = 1,
+ },
+ [MMU_PAGE_16M] = {
+ .shift = 24,
+ .sllp = SLB_VSID_L,
+ .penc = 0,
+ .avpnm = 0x1UL,
+ .tlbiel = 0,
+ },
+};
+
+
+int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
+ unsigned long pstart, unsigned long mode, int psize)
{
- unsigned long addr;
- unsigned int step;
+ unsigned long vaddr, paddr;
+ unsigned int step, shift;
unsigned long tmp_mode;
- unsigned long vflags;
+ int ret = 0;
- if (large) {
- step = 16*MB;
- vflags = HPTE_V_BOLTED | HPTE_V_LARGE;
- } else {
- step = 4*KB;
- vflags = HPTE_V_BOLTED;
- }
+ shift = mmu_psize_defs[psize].shift;
+ step = 1 << shift;
- for (addr = start; addr < end; addr += step) {
+ for (vaddr = vstart, paddr = pstart; vaddr < vend;
+ vaddr += step, paddr += step) {
unsigned long vpn, hash, hpteg;
- unsigned long vsid = get_kernel_vsid(addr);
- unsigned long va = (vsid << 28) | (addr & 0xfffffff);
- int ret = -1;
-
- if (large)
- vpn = va >> HPAGE_SHIFT;
- else
- vpn = va >> PAGE_SHIFT;
-
+ unsigned long vsid = get_kernel_vsid(vaddr);
+ unsigned long va = (vsid << 28) | (vaddr & 0x0fffffff);
+ vpn = va >> shift;
tmp_mode = mode;
/* Make non-kernel text non-executable */
- if (!in_kernel_text(addr))
- tmp_mode = mode | HW_NO_EXEC;
-
- hash = hpt_hash(vpn, large);
+ if (!in_kernel_text(vaddr))
+ tmp_mode = mode | HPTE_R_N;
+ hash = hpt_hash(va, shift);
hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
+ /* The crap below can be cleaned once ppc_md.probe() can
+ * set up the hash callbacks, thus we can just use the
+ * normal insert callback here.
+ */
#ifdef CONFIG_PPC_ISERIES
- if (systemcfg->platform & PLATFORM_ISERIES_LPAR)
- ret = iSeries_hpte_bolt_or_insert(hpteg, va,
- virt_to_abs(addr) >> PAGE_SHIFT,
- vflags, tmp_mode);
+ if (systemcfg->platform == PLATFORM_ISERIES_LPAR)
+ ret = iSeries_hpte_insert(hpteg, va,
+ virt_to_abs(paddr),
+ tmp_mode,
+ HPTE_V_BOLTED,
+ psize);
else
#endif
#ifdef CONFIG_PPC_PSERIES
if (systemcfg->platform & PLATFORM_LPAR)
ret = pSeries_lpar_hpte_insert(hpteg, va,
- virt_to_abs(addr) >> PAGE_SHIFT,
- vflags, tmp_mode);
+ virt_to_abs(paddr),
+ tmp_mode,
+ HPTE_V_BOLTED,
+ psize);
else
#endif
#ifdef CONFIG_PPC_MULTIPLATFORM
ret = native_hpte_insert(hpteg, va,
- virt_to_abs(addr) >> PAGE_SHIFT,
- vflags, tmp_mode);
+ virt_to_abs(paddr),
+ tmp_mode, HPTE_V_BOLTED,
+ psize);
#endif
+ if (ret < 0)
+ break;
+ }
+ return ret < 0 ? ret : 0;
+}
- if (ret == -1) {
- ppc64_terminate_msg(0x20, "create_pte_mapping");
- loop_forever();
+static int __init htab_dt_scan_page_sizes(unsigned long node,
+ const char *uname, int depth,
+ void *data)
+{
+ char *type = of_get_flat_dt_prop(node, "device_type", NULL);
+ u32 *prop;
+ unsigned long size = 0;
+
+ /* We are scanning "cpu" nodes only */
+ if (type == NULL || strcmp(type, "cpu") != 0)
+ return 0;
+
+ prop = (u32 *)of_get_flat_dt_prop(node,
+ "ibm,segment-page-sizes", &size);
+ if (prop != NULL) {
+ DBG("Page sizes from device-tree:\n");
+ size /= 4;
+ cur_cpu_spec->cpu_features &= ~(CPU_FTR_16M_PAGE);
+ while(size > 0) {
+ unsigned int shift = prop[0];
+ unsigned int slbenc = prop[1];
+ unsigned int lpnum = prop[2];
+ unsigned int lpenc = 0;
+ struct mmu_psize_def *def;
+ int idx = -1;
+
+ size -= 3; prop += 3;
+ while(size > 0 && lpnum) {
+ if (prop[0] == shift)
+ lpenc = prop[1];
+ prop += 2; size -= 2;
+ lpnum--;
+ }
+ switch(shift) {
+ case 0xc:
+ idx = MMU_PAGE_4K;
+ break;
+ case 0x10:
+ idx = MMU_PAGE_64K;
+ break;
+ case 0x14:
+ idx = MMU_PAGE_1M;
+ break;
+ case 0x18:
+ idx = MMU_PAGE_16M;
+ cur_cpu_spec->cpu_features |= CPU_FTR_16M_PAGE;
+ break;
+ case 0x22:
+ idx = MMU_PAGE_16G;
+ break;
+ }
+ if (idx < 0)
+ continue;
+ def = &mmu_psize_defs[idx];
+ def->shift = shift;
+ if (shift <= 23)
+ def->avpnm = 0;
+ else
+ def->avpnm = (1 << (shift - 23)) - 1;
+ def->sllp = slbenc;
+ def->penc = lpenc;
+ /* We don't know for sure what's up with tlbiel, so
+ * for now we only set it for 4K and 64K pages
+ */
+ if (idx == MMU_PAGE_4K || idx == MMU_PAGE_64K)
+ def->tlbiel = 1;
+ else
+ def->tlbiel = 0;
+
+ DBG(" %d: shift=%02x, sllp=%04x, avpnm=%08x, "
+ "tlbiel=%d, penc=%d\n",
+ idx, shift, def->sllp, def->avpnm, def->tlbiel,
+ def->penc);
}
+ return 1;
+ }
+ return 0;
+}
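The property walked above is a flat array of cells: base page shift, SLB encoding, number of actual page sizes supported under that base, then one (shift, penc) pair per actual size. A purely hypothetical value list showing that shape (numbers are illustrative only, not taken from real firmware):

/* Hypothetical "ibm,segment-page-sizes" cells matching the parser above */
static const unsigned int example_segment_page_sizes[] = {
	0x0c, 0x0000, 1,  0x0c, 0,	/* 4K base size,  4K actual pages  */
	0x18, 0x0100, 1,  0x18, 0,	/* 16M base size, 16M actual pages */
};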
+
+
+static void __init htab_init_page_sizes(void)
+{
+ int rc;
+
+ /* Default to 4K pages only */
+ memcpy(mmu_psize_defs, mmu_psize_defaults_old,
+ sizeof(mmu_psize_defaults_old));
+
+ /*
+ * Try to find the available page sizes in the device-tree
+ */
+ rc = of_scan_flat_dt(htab_dt_scan_page_sizes, NULL);
+ if (rc != 0) /* Found */
+ goto found;
+
+ /*
+ * Not in the device-tree, let's fallback on known size
+ * list for 16M capable GP & GR
+ */
+ if ((systemcfg->platform != PLATFORM_ISERIES_LPAR) &&
+ cpu_has_feature(CPU_FTR_16M_PAGE))
+ memcpy(mmu_psize_defs, mmu_psize_defaults_gp,
+ sizeof(mmu_psize_defaults_gp));
+ found:
+ /*
+ * Pick a size for the linear mapping. Currently, we only support
+ * 16M, 1M and 4K which is the default
+ */
+ if (mmu_psize_defs[MMU_PAGE_16M].shift)
+ mmu_linear_psize = MMU_PAGE_16M;
+ else if (mmu_psize_defs[MMU_PAGE_1M].shift)
+ mmu_linear_psize = MMU_PAGE_1M;
+
+ /*
+ * Pick a size for the ordinary pages. Default is 4K, we support
+ * 64K if cache inhibited large pages are supported by the
+ * processor
+ */
+#ifdef CONFIG_PPC_64K_PAGES
+ if (mmu_psize_defs[MMU_PAGE_64K].shift &&
+ cpu_has_feature(CPU_FTR_CI_LARGE_PAGE))
+ mmu_virtual_psize = MMU_PAGE_64K;
+#endif
+
+ printk(KERN_INFO "Page orders: linear mapping = %d, others = %d\n",
+ mmu_psize_defs[mmu_linear_psize].shift,
+ mmu_psize_defs[mmu_virtual_psize].shift);
+
+#ifdef CONFIG_HUGETLB_PAGE
+ /* Init large page size. Currently, we pick 16M or 1M depending
+ * on what is available
+ */
+ if (mmu_psize_defs[MMU_PAGE_16M].shift)
+ mmu_huge_psize = MMU_PAGE_16M;
+ /* With 4k/4level pagetables, we can't (for now) cope with a
+ * huge page size < PMD_SIZE */
+ else if (mmu_psize_defs[MMU_PAGE_1M].shift)
+ mmu_huge_psize = MMU_PAGE_1M;
+
+ /* Calculate HPAGE_SHIFT and sanity check it */
+ if (mmu_psize_defs[mmu_huge_psize].shift > MIN_HUGEPTE_SHIFT &&
+ mmu_psize_defs[mmu_huge_psize].shift < SID_SHIFT)
+ HPAGE_SHIFT = mmu_psize_defs[mmu_huge_psize].shift;
+ else
+ HPAGE_SHIFT = 0; /* No huge pages dude ! */
+#endif /* CONFIG_HUGETLB_PAGE */
+}
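Spelled out on its own, the preference order used above for the linear-mapping page size is just a cascade over the advertised shifts. A small sketch with stand-in types (not the kernel's definitions):

enum { PSIZE_4K, PSIZE_1M, PSIZE_16M, PSIZE_COUNT };

struct psize_def_sketch {
	unsigned int shift;	/* zero means the CPU did not advertise it */
};

static int pick_linear_psize(const struct psize_def_sketch defs[PSIZE_COUNT])
{
	if (defs[PSIZE_16M].shift)
		return PSIZE_16M;
	if (defs[PSIZE_1M].shift)
		return PSIZE_1M;
	return PSIZE_4K;
}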
+
+static int __init htab_dt_scan_pftsize(unsigned long node,
+ const char *uname, int depth,
+ void *data)
+{
+ char *type = of_get_flat_dt_prop(node, "device_type", NULL);
+ u32 *prop;
+
+ /* We are scanning "cpu" nodes only */
+ if (type == NULL || strcmp(type, "cpu") != 0)
+ return 0;
+
+ prop = (u32 *)of_get_flat_dt_prop(node, "ibm,pft-size", NULL);
+ if (prop != NULL) {
+ /* pft_size[0] is the NUMA CEC cookie */
+ ppc64_pft_size = prop[1];
+ return 1;
}
+ return 0;
}
-static unsigned long get_hashtable_size(void)
+static unsigned long __init htab_get_table_size(void)
{
unsigned long rnd_mem_size, pteg_count;
- /* If hash size wasn't obtained in prom.c, we calculate it now based on
- * the total RAM size
+ /* If hash size isn't already provided by the platform, we try to
+ * retrieve it from the device-tree. If it's not there either, we
+ * calculate it now based on the total RAM size
*/
+ if (ppc64_pft_size == 0)
+ of_scan_flat_dt(htab_dt_scan_pftsize, NULL);
if (ppc64_pft_size)
return 1UL << ppc64_pft_size;
@@ -181,25 +391,23 @@ void __init htab_initialize(void)
unsigned long table, htab_size_bytes;
unsigned long pteg_count;
unsigned long mode_rw;
- int i, use_largepages = 0;
unsigned long base = 0, size = 0;
+ int i;
+
extern unsigned long tce_alloc_start, tce_alloc_end;
DBG(" -> htab_initialize()\n");
+ /* Initialize page sizes */
+ htab_init_page_sizes();
+
/*
* Calculate the required size of the htab. We want the number of
* PTEGs to equal one half the number of real pages.
*/
- htab_size_bytes = get_hashtable_size();
+ htab_size_bytes = htab_get_table_size();
pteg_count = htab_size_bytes >> 7;
- /* For debug, make the HTAB 1/8 as big as it normally would be. */
- ifppcdebug(PPCDBG_HTABSIZE) {
- pteg_count >>= 3;
- htab_size_bytes = pteg_count << 7;
- }
-
htab_hash_mask = pteg_count - 1;
if (systemcfg->platform & PLATFORM_LPAR) {
@@ -211,14 +419,11 @@ void __init htab_initialize(void)
* the absolute address space.
*/
table = lmb_alloc(htab_size_bytes, htab_size_bytes);
+ BUG_ON(table == 0);
DBG("Hash table allocated at %lx, size: %lx\n", table,
htab_size_bytes);
- if ( !table ) {
- ppc64_terminate_msg(0x20, "hpt space");
- loop_forever();
- }
htab_address = abs_to_virt(table);
/* htab absolute addr + encoded htabsize */
@@ -234,8 +439,6 @@ void __init htab_initialize(void)
* _NOT_ map it to avoid cache paradoxes as it's remapped non
* cacheable later on
*/
- if (cpu_has_feature(CPU_FTR_16M_PAGE))
- use_largepages = 1;
/* create bolted the linear mapping in the hash table */
for (i=0; i < lmb.memory.cnt; i++) {
@@ -246,27 +449,32 @@ void __init htab_initialize(void)
#ifdef CONFIG_U3_DART
/* Do not map the DART space. Fortunately, it will be aligned
- * in such a way that it will not cross two lmb regions and will
- * fit within a single 16Mb page.
- * The DART space is assumed to be a full 16Mb region even if we
- * only use 2Mb of that space. We will use more of it later for
- * AGP GART. We have to use a full 16Mb large page.
+ * in such a way that it will not cross two lmb regions and
+ * will fit within a single 16Mb page.
+ * The DART space is assumed to be a full 16Mb region even if
+ * we only use 2Mb of that space. We will use more of it later
+ * for AGP GART. We have to use a full 16Mb large page.
*/
DBG("DART base: %lx\n", dart_tablebase);
if (dart_tablebase != 0 && dart_tablebase >= base
&& dart_tablebase < (base + size)) {
if (base != dart_tablebase)
- create_pte_mapping(base, dart_tablebase, mode_rw,
- use_largepages);
+ BUG_ON(htab_bolt_mapping(base, dart_tablebase,
+ base, mode_rw,
+ mmu_linear_psize));
if ((base + size) > (dart_tablebase + 16*MB))
- create_pte_mapping(dart_tablebase + 16*MB, base + size,
- mode_rw, use_largepages);
+ BUG_ON(htab_bolt_mapping(dart_tablebase+16*MB,
+ base + size,
+ dart_tablebase+16*MB,
+ mode_rw,
+ mmu_linear_psize));
continue;
}
#endif /* CONFIG_U3_DART */
- create_pte_mapping(base, base + size, mode_rw, use_largepages);
- }
+ BUG_ON(htab_bolt_mapping(base, base + size, base,
+ mode_rw, mmu_linear_psize));
+ }
/*
* If we have a memory_limit and we've allocated TCEs then we need to
@@ -282,8 +490,9 @@ void __init htab_initialize(void)
if (base + size >= tce_alloc_start)
tce_alloc_start = base + size + 1;
- create_pte_mapping(tce_alloc_start, tce_alloc_end,
- mode_rw, use_largepages);
+ BUG_ON(htab_bolt_mapping(tce_alloc_start, tce_alloc_end,
+ tce_alloc_start, mode_rw,
+ mmu_linear_psize));
}
DBG(" <- htab_initialize()\n");
@@ -309,7 +518,7 @@ unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap)
__flush_dcache_icache(page_address(page));
set_bit(PG_arch_1, &page->flags);
} else
- pp |= HW_NO_EXEC;
+ pp |= HPTE_R_N;
}
return pp;
}
@@ -325,94 +534,169 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
unsigned long vsid;
struct mm_struct *mm;
pte_t *ptep;
- int ret;
- int user_region = 0;
- int local = 0;
cpumask_t tmp;
+ int rc, user_region = 0, local = 0;
- if ((ea & ~REGION_MASK) >= PGTABLE_RANGE)
- return 1;
+ DBG_LOW("hash_page(ea=%016lx, access=%lx, trap=%lx\n",
+ ea, access, trap);
+
+ if ((ea & ~REGION_MASK) >= PGTABLE_RANGE) {
+ DBG_LOW(" out of pgtable range !\n");
+ return 1;
+ }
+ /* Get region & vsid */
switch (REGION_ID(ea)) {
case USER_REGION_ID:
user_region = 1;
mm = current->mm;
- if (! mm)
+ if (! mm) {
+ DBG_LOW(" user region with no mm !\n");
return 1;
-
+ }
vsid = get_vsid(mm->context.id, ea);
break;
case VMALLOC_REGION_ID:
mm = &init_mm;
vsid = get_kernel_vsid(ea);
break;
-#if 0
- case KERNEL_REGION_ID:
- /*
- * Should never get here - entire 0xC0... region is bolted.
- * Send the problem up to do_page_fault
- */
-#endif
default:
/* Not a valid range
* Send the problem up to do_page_fault
*/
return 1;
- break;
}
+ DBG_LOW(" mm=%p, mm->pgdir=%p, vsid=%016lx\n", mm, mm->pgd, vsid);
+ /* Get pgdir */
pgdir = mm->pgd;
-
if (pgdir == NULL)
return 1;
+ /* Check CPU locality */
tmp = cpumask_of_cpu(smp_processor_id());
if (user_region && cpus_equal(mm->cpu_vm_mask, tmp))
local = 1;
- /* Is this a huge page ? */
- if (unlikely(in_hugepage_area(mm->context, ea)))
- ret = hash_huge_page(mm, access, ea, vsid, local);
- else {
- ptep = find_linux_pte(pgdir, ea);
- if (ptep == NULL)
- return 1;
- ret = __hash_page(ea, access, vsid, ptep, trap, local);
+ /* Handle hugepage regions */
+ if (unlikely(in_hugepage_area(mm->context, ea))) {
+ DBG_LOW(" -> huge page !\n");
+ return hash_huge_page(mm, access, ea, vsid, local);
+ }
+
+ /* Get PTE and page size from page tables */
+ ptep = find_linux_pte(pgdir, ea);
+ if (ptep == NULL || !pte_present(*ptep)) {
+ DBG_LOW(" no PTE !\n");
+ return 1;
+ }
+
+#ifndef CONFIG_PPC_64K_PAGES
+ DBG_LOW(" i-pte: %016lx\n", pte_val(*ptep));
+#else
+ DBG_LOW(" i-pte: %016lx %016lx\n", pte_val(*ptep),
+ pte_val(*(ptep + PTRS_PER_PTE)));
+#endif
+ /* Pre-check access permissions (will be re-checked atomically
+ * in __hash_page_XX but this pre-check is a fast path
+ */
+ if (access & ~pte_val(*ptep)) {
+ DBG_LOW(" no access !\n");
+ return 1;
}
- return ret;
+ /* Do actual hashing */
+#ifndef CONFIG_PPC_64K_PAGES
+ rc = __hash_page_4K(ea, access, vsid, ptep, trap, local);
+#else
+ if (mmu_virtual_psize == MMU_PAGE_64K)
+ rc = __hash_page_64K(ea, access, vsid, ptep, trap, local);
+ else
+ rc = __hash_page_4K(ea, access, vsid, ptep, trap, local);
+#endif /* CONFIG_PPC_64K_PAGES */
+
+#ifndef CONFIG_PPC_64K_PAGES
+ DBG_LOW(" o-pte: %016lx\n", pte_val(*ptep));
+#else
+ DBG_LOW(" o-pte: %016lx %016lx\n", pte_val(*ptep),
+ pte_val(*(ptep + PTRS_PER_PTE)));
+#endif
+ DBG_LOW(" -> rc=%d\n", rc);
+ return rc;
}
-void flush_hash_page(unsigned long va, pte_t pte, int local)
+void hash_preload(struct mm_struct *mm, unsigned long ea,
+ unsigned long access, unsigned long trap)
{
- unsigned long vpn, hash, secondary, slot;
- unsigned long huge = pte_huge(pte);
+ unsigned long vsid;
+ void *pgdir;
+ pte_t *ptep;
+ cpumask_t mask;
+ unsigned long flags;
+ int local = 0;
+
+ /* We don't want huge pages prefaulted for now
+ */
+ if (unlikely(in_hugepage_area(mm->context, ea)))
+ return;
- if (huge)
- vpn = va >> HPAGE_SHIFT;
+ DBG_LOW("hash_preload(mm=%p, mm->pgdir=%p, ea=%016lx, access=%lx,"
+ " trap=%lx\n", mm, mm->pgd, ea, access, trap);
+
+ /* Get PTE, VSID, access mask */
+ pgdir = mm->pgd;
+ if (pgdir == NULL)
+ return;
+ ptep = find_linux_pte(pgdir, ea);
+ if (!ptep)
+ return;
+ vsid = get_vsid(mm->context.id, ea);
+
+ /* Hash it in */
+ local_irq_save(flags);
+ mask = cpumask_of_cpu(smp_processor_id());
+ if (cpus_equal(mm->cpu_vm_mask, mask))
+ local = 1;
+#ifndef CONFIG_PPC_64K_PAGES
+ __hash_page_4K(ea, access, vsid, ptep, trap, local);
+#else
+ if (mmu_virtual_psize == MMU_PAGE_64K)
+ __hash_page_64K(ea, access, vsid, ptep, trap, local);
else
- vpn = va >> PAGE_SHIFT;
- hash = hpt_hash(vpn, huge);
- secondary = (pte_val(pte) & _PAGE_SECONDARY) >> 15;
- if (secondary)
- hash = ~hash;
- slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
- slot += (pte_val(pte) & _PAGE_GROUP_IX) >> 12;
-
- ppc_md.hpte_invalidate(slot, va, huge, local);
+ __hash_page_4K(ea, access, vsid, ptep, trap, local);
+#endif /* CONFIG_PPC_64K_PAGES */
+ local_irq_restore(flags);
+}
+
+void flush_hash_page(unsigned long va, real_pte_t pte, int psize, int local)
+{
+ unsigned long hash, index, shift, hidx, slot;
+
+ DBG_LOW("flush_hash_page(va=%016x)\n", va);
+ pte_iterate_hashed_subpages(pte, psize, va, index, shift) {
+ hash = hpt_hash(va, shift);
+ hidx = __rpte_to_hidx(pte, index);
+ if (hidx & _PTEIDX_SECONDARY)
+ hash = ~hash;
+ slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
+ slot += hidx & _PTEIDX_GROUP_IX;
+ DBG_LOW(" sub %d: hash=%x, hidx=%x\n", index, slot, hidx);
+ ppc_md.hpte_invalidate(slot, va, psize, local);
+ } pte_iterate_hashed_end();
}
void flush_hash_range(unsigned long number, int local)
{
- if (ppc_md.flush_hash_range) {
+ if (ppc_md.flush_hash_range)
ppc_md.flush_hash_range(number, local);
- } else {
+ else {
int i;
struct ppc64_tlb_batch *batch =
&__get_cpu_var(ppc64_tlb_batch);
for (i = 0; i < number; i++)
- flush_hash_page(batch->vaddr[i], batch->pte[i], local);
+ flush_hash_page(batch->vaddr[i], batch->pte[i],
+ batch->psize, local);
}
}
@@ -452,6 +736,18 @@ void __init htab_finish_init(void)
extern unsigned int *htab_call_hpte_remove;
extern unsigned int *htab_call_hpte_updatepp;
+#ifdef CONFIG_PPC_64K_PAGES
+ extern unsigned int *ht64_call_hpte_insert1;
+ extern unsigned int *ht64_call_hpte_insert2;
+ extern unsigned int *ht64_call_hpte_remove;
+ extern unsigned int *ht64_call_hpte_updatepp;
+
+ make_bl(ht64_call_hpte_insert1, ppc_md.hpte_insert);
+ make_bl(ht64_call_hpte_insert2, ppc_md.hpte_insert);
+ make_bl(ht64_call_hpte_remove, ppc_md.hpte_remove);
+ make_bl(ht64_call_hpte_updatepp, ppc_md.hpte_updatepp);
+#endif /* CONFIG_PPC_64K_PAGES */
+
make_bl(htab_call_hpte_insert1, ppc_md.hpte_insert);
make_bl(htab_call_hpte_insert2, ppc_md.hpte_insert);
make_bl(htab_call_hpte_remove, ppc_md.hpte_remove);
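These make_bl() calls rewrite the "bl ." placeholders (both the 4K and the new 64K variants) so they branch to whichever hpte_insert/remove/updatepp routine the platform registered. As an illustration of the instruction encoding such patching has to produce -- a sketch, not the kernel's make_bl() implementation:

#include <stdint.h>

/*
 * A PowerPC branch-and-link is opcode 18 with LK=1; bits 6-29 hold the
 * signed branch displacement divided by four, so a word-aligned byte
 * offset can be masked and OR-ed straight into the instruction word.
 */
static uint32_t bl_to(uintptr_t call_site, uintptr_t target)
{
	int32_t disp = (int32_t)(target - call_site);

	return 0x48000001u | ((uint32_t)disp & 0x03fffffcu);
}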
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 0ea0994ed97..426c269e552 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -47,10 +47,25 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
pu = pud_offset(pg, addr);
if (!pud_none(*pu)) {
pm = pmd_offset(pu, addr);
+#ifdef CONFIG_PPC_64K_PAGES
+ /* Currently, we use the normal PTE offset within full
+ * size PTE pages, thus our huge PTEs are scattered in
+ * the PTE page and we do waste some. We may change
+ * that in the future, but the current mechanism keeps
+ * things much simpler
+ */
+ if (!pmd_none(*pm)) {
+ /* Note: pte_offset_* are all equivalent on
+ * ppc64 as we don't have HIGHMEM
+ */
+ pt = pte_offset_kernel(pm, addr);
+ return pt;
+ }
+#else /* CONFIG_PPC_64K_PAGES */
+ /* On 4k pages, we put huge PTEs in the PMD page */
pt = (pte_t *)pm;
- BUG_ON(!pmd_none(*pm)
- && !(pte_present(*pt) && pte_huge(*pt)));
return pt;
+#endif /* CONFIG_PPC_64K_PAGES */
}
}
@@ -74,9 +89,16 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
if (pu) {
pm = pmd_alloc(mm, pu, addr);
if (pm) {
+#ifdef CONFIG_PPC_64K_PAGES
+ /* See comment in huge_pte_offset. Note that if we ever
+ * want to put the page size in the PMD, we would have
+ * to open code our own pte_alloc* function in order
+ * to populate and set the size atomically
+ */
+ pt = pte_alloc_map(mm, pm, addr);
+#else /* CONFIG_PPC_64K_PAGES */
pt = (pte_t *)pm;
- BUG_ON(!pmd_none(*pm)
- && !(pte_present(*pt) && pte_huge(*pt)));
+#endif /* CONFIG_PPC_64K_PAGES */
return pt;
}
}
@@ -84,35 +106,29 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
return NULL;
}
-#define HUGEPTE_BATCH_SIZE (HPAGE_SIZE / PMD_SIZE)
-
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte)
{
- int i;
-
if (pte_present(*ptep)) {
- pte_clear(mm, addr, ptep);
+ /* We open-code pte_clear because we need to pass the right
+ * argument to hpte_update (huge / !huge)
+ */
+ unsigned long old = pte_update(ptep, ~0UL);
+ if (old & _PAGE_HASHPTE)
+ hpte_update(mm, addr & HPAGE_MASK, ptep, old, 1);
flush_tlb_pending();
}
-
- for (i = 0; i < HUGEPTE_BATCH_SIZE; i++) {
- *ptep = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
- ptep++;
- }
+ *ptep = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
}
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
{
unsigned long old = pte_update(ptep, ~0UL);
- int i;
if (old & _PAGE_HASHPTE)
- hpte_update(mm, addr, old, 0);
-
- for (i = 1; i < HUGEPTE_BATCH_SIZE; i++)
- ptep[i] = __pte(0);
+ hpte_update(mm, addr & HPAGE_MASK, ptep, old, 1);
+ *ptep = __pte(0);
return __pte(old);
}
@@ -196,6 +212,12 @@ static int prepare_high_area_for_htlb(struct mm_struct *mm, unsigned long area)
BUG_ON(area >= NUM_HIGH_AREAS);
+ /* Hack, so that each address is controlled by exactly one
+ * of the high or low area bitmaps, the first high area starts
+ * at 4GB, not 0 */
+ if (start == 0)
+ start = 0x100000000UL;
+
/* Check no VMAs are in the region */
vma = find_vma(mm, start);
if (vma && (vma->vm_start < end))
@@ -563,6 +585,8 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
int lastshift;
u16 areamask, curareas;
+ if (HPAGE_SHIFT == 0)
+ return -EINVAL;
if (len & ~HPAGE_MASK)
return -EINVAL;
@@ -619,19 +643,15 @@ int hash_huge_page(struct mm_struct *mm, unsigned long access,
unsigned long ea, unsigned long vsid, int local)
{
pte_t *ptep;
- unsigned long va, vpn;
- pte_t old_pte, new_pte;
- unsigned long rflags, prpn;
+ unsigned long old_pte, new_pte;
+ unsigned long va, rflags, pa;
long slot;
int err = 1;
- spin_lock(&mm->page_table_lock);
-
ptep = huge_pte_offset(mm, ea);
/* Search the Linux page table for a match with va */
va = (vsid << 28) | (ea & 0x0fffffff);
- vpn = va >> HPAGE_SHIFT;
/*
* If no pte found or not present, send the problem up to
@@ -640,8 +660,6 @@ int hash_huge_page(struct mm_struct *mm, unsigned long access,
if (unlikely(!ptep || pte_none(*ptep)))
goto out;
-/* BUG_ON(pte_bad(*ptep)); */
-
/*
* Check the user's access rights to the page. If access should be
* prevented then send the problem up to do_page_fault.
@@ -661,58 +679,64 @@ int hash_huge_page(struct mm_struct *mm, unsigned long access,
*/
- old_pte = *ptep;
- new_pte = old_pte;
-
- rflags = 0x2 | (! (pte_val(new_pte) & _PAGE_RW));
+ do {
+ old_pte = pte_val(*ptep);
+ if (old_pte & _PAGE_BUSY)
+ goto out;
+ new_pte = old_pte | _PAGE_BUSY |
+ _PAGE_ACCESSED | _PAGE_HASHPTE;
+ } while(old_pte != __cmpxchg_u64((unsigned long *)ptep,
+ old_pte, new_pte));
+
+ rflags = 0x2 | (!(new_pte & _PAGE_RW));
/* _PAGE_EXEC -> HW_NO_EXEC since it's inverted */
- rflags |= ((pte_val(new_pte) & _PAGE_EXEC) ? 0 : HW_NO_EXEC);
+ rflags |= ((new_pte & _PAGE_EXEC) ? 0 : HPTE_R_N);
/* Check if pte already has an hpte (case 2) */
- if (unlikely(pte_val(old_pte) & _PAGE_HASHPTE)) {
+ if (unlikely(old_pte & _PAGE_HASHPTE)) {
/* There MIGHT be an HPTE for this pte */
unsigned long hash, slot;
- hash = hpt_hash(vpn, 1);
- if (pte_val(old_pte) & _PAGE_SECONDARY)
+ hash = hpt_hash(va, HPAGE_SHIFT);
+ if (old_pte & _PAGE_F_SECOND)
hash = ~hash;
slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
- slot += (pte_val(old_pte) & _PAGE_GROUP_IX) >> 12;
+ slot += (old_pte & _PAGE_F_GIX) >> 12;
if (ppc_md.hpte_updatepp(slot, rflags, va, 1, local) == -1)
- pte_val(old_pte) &= ~_PAGE_HPTEFLAGS;
+ old_pte &= ~_PAGE_HPTEFLAGS;
}
- if (likely(!(pte_val(old_pte) & _PAGE_HASHPTE))) {
- unsigned long hash = hpt_hash(vpn, 1);
+ if (likely(!(old_pte & _PAGE_HASHPTE))) {
+ unsigned long hash = hpt_hash(va, HPAGE_SHIFT);
unsigned long hpte_group;
- prpn = pte_pfn(old_pte);
+ pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT;
repeat:
hpte_group = ((hash & htab_hash_mask) *
HPTES_PER_GROUP) & ~0x7UL;
- /* Update the linux pte with the HPTE slot */
- pte_val(new_pte) &= ~_PAGE_HPTEFLAGS;
- pte_val(new_pte) |= _PAGE_HASHPTE;
+ /* clear HPTE slot information in new PTE */
+ new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE;
/* Add in WIMG bits */
/* XXX We should store these in the pte */
+ /* --BenH: I think they are ... */
rflags |= _PAGE_COHERENT;
- slot = ppc_md.hpte_insert(hpte_group, va, prpn,
- HPTE_V_LARGE, rflags);
+ /* Insert into the hash table, primary slot */
+ slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags, 0,
+ mmu_huge_psize);
/* Primary is full, try the secondary */
if (unlikely(slot == -1)) {
- pte_val(new_pte) |= _PAGE_SECONDARY;
+ new_pte |= _PAGE_F_SECOND;
hpte_group = ((~hash & htab_hash_mask) *
HPTES_PER_GROUP) & ~0x7UL;
- slot = ppc_md.hpte_insert(hpte_group, va, prpn,
- HPTE_V_LARGE |
+ slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags,
HPTE_V_SECONDARY,
- rflags);
+ mmu_huge_psize);
if (slot == -1) {
if (mftb() & 0x1)
hpte_group = ((hash & htab_hash_mask) *
@@ -726,20 +750,18 @@ repeat:
if (unlikely(slot == -2))
panic("hash_huge_page: pte_insert failed\n");
- pte_val(new_pte) |= (slot<<12) & _PAGE_GROUP_IX;
-
- /*
- * No need to use ldarx/stdcx here because all who
- * might be updating the pte will hold the
- * page_table_lock
- */
- *ptep = new_pte;
+ new_pte |= (slot << 12) & _PAGE_F_GIX;
}
+ /*
+ * No need to use ldarx/stdcx here because all who
+ * might be updating the pte will hold the
+ * page_table_lock
+ */
+ *ptep = __pte(new_pte & ~_PAGE_BUSY);
+
err = 0;
out:
- spin_unlock(&mm->page_table_lock);
-
return err;
}
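The do/while above takes the PTE "lock" by atomically setting _PAGE_BUSY (along with ACCESSED and HASHPTE) and bailing out if another CPU already holds it. A self-contained userspace model of that handshake, using GCC atomic builtins and stand-in bit values (not kernel code):

#include <stdint.h>
#include <stdbool.h>

#define PAGE_BUSY	0x1ULL	/* stand-in flag bits */
#define PAGE_ACCESSED	0x2ULL
#define PAGE_HASHPTE	0x4ULL

static bool lock_pte(uint64_t *ptep)
{
	uint64_t old, new;

	do {
		old = *ptep;
		if (old & PAGE_BUSY)
			return false;	/* someone else is updating it */
		new = old | PAGE_BUSY | PAGE_ACCESSED | PAGE_HASHPTE;
	} while (!__atomic_compare_exchange_n(ptep, &old, new, false,
					      __ATOMIC_ACQUIRE,
					      __ATOMIC_RELAXED));
	return true;
}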
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index b0fc822ec29..ce974c83d88 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -57,7 +57,6 @@
#include <asm/processor.h>
#include <asm/mmzone.h>
#include <asm/cputable.h>
-#include <asm/ppcdebug.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/iommu.h>
@@ -188,12 +187,21 @@ static void zero_ctor(void *addr, kmem_cache_t *cache, unsigned long flags)
memset(addr, 0, kmem_cache_size(cache));
}
+#ifdef CONFIG_PPC_64K_PAGES
+static const int pgtable_cache_size[2] = {
+ PTE_TABLE_SIZE, PGD_TABLE_SIZE
+};
+static const char *pgtable_cache_name[ARRAY_SIZE(pgtable_cache_size)] = {
+ "pte_pmd_cache", "pgd_cache",
+};
+#else
static const int pgtable_cache_size[2] = {
PTE_TABLE_SIZE, PMD_TABLE_SIZE
};
static const char *pgtable_cache_name[ARRAY_SIZE(pgtable_cache_size)] = {
"pgd_pte_cache", "pud_pmd_cache",
};
+#endif /* CONFIG_PPC_64K_PAGES */
kmem_cache_t *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)];
@@ -201,19 +209,14 @@ void pgtable_cache_init(void)
{
int i;
- BUILD_BUG_ON(PTE_TABLE_SIZE != pgtable_cache_size[PTE_CACHE_NUM]);
- BUILD_BUG_ON(PMD_TABLE_SIZE != pgtable_cache_size[PMD_CACHE_NUM]);
- BUILD_BUG_ON(PUD_TABLE_SIZE != pgtable_cache_size[PUD_CACHE_NUM]);
- BUILD_BUG_ON(PGD_TABLE_SIZE != pgtable_cache_size[PGD_CACHE_NUM]);
-
for (i = 0; i < ARRAY_SIZE(pgtable_cache_size); i++) {
int size = pgtable_cache_size[i];
const char *name = pgtable_cache_name[i];
pgtable_cache[i] = kmem_cache_create(name,
size, size,
- SLAB_HWCACHE_ALIGN
- | SLAB_MUST_HWCACHE_ALIGN,
+ SLAB_HWCACHE_ALIGN |
+ SLAB_MUST_HWCACHE_ALIGN,
zero_ctor,
NULL);
if (! pgtable_cache[i])
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 117b00012e1..6f55efd9be9 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -61,6 +61,9 @@ int init_bootmem_done;
int mem_init_done;
unsigned long memory_limit;
+extern void hash_preload(struct mm_struct *mm, unsigned long ea,
+ unsigned long access, unsigned long trap);
+
/*
* This is called by /dev/mem to know if a given address has to
* be mapped non-cacheable or not
@@ -355,7 +358,7 @@ void __init mem_init(void)
}
codesize = (unsigned long)&_sdata - (unsigned long)&_stext;
- datasize = (unsigned long)&__init_begin - (unsigned long)&_sdata;
+ datasize = (unsigned long)&_edata - (unsigned long)&_sdata;
initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin;
bsssize = (unsigned long)&__bss_stop - (unsigned long)&__bss_start;
@@ -493,18 +496,10 @@ EXPORT_SYMBOL(flush_icache_user_range);
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
pte_t pte)
{
- /* handle i-cache coherency */
- unsigned long pfn = pte_pfn(pte);
-#ifdef CONFIG_PPC32
- pmd_t *pmd;
-#else
- unsigned long vsid;
- void *pgdir;
- pte_t *ptep;
- int local = 0;
- cpumask_t tmp;
- unsigned long flags;
+#ifdef CONFIG_PPC_STD_MMU
+ unsigned long access = 0, trap;
#endif
+ unsigned long pfn = pte_pfn(pte);
/* handle i-cache coherency */
if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE) &&
@@ -535,30 +530,21 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
if (!pte_young(pte) || address >= TASK_SIZE)
return;
-#ifdef CONFIG_PPC32
- if (Hash == 0)
- return;
- pmd = pmd_offset(pgd_offset(vma->vm_mm, address), address);
- if (!pmd_none(*pmd))
- add_hash_page(vma->vm_mm->context, address, pmd_val(*pmd));
-#else
- pgdir = vma->vm_mm->pgd;
- if (pgdir == NULL)
- return;
- ptep = find_linux_pte(pgdir, address);
- if (!ptep)
+ /* We try to figure out if we are coming from an instruction
+ * access fault and pass that down to __hash_page so we avoid
+ * double-faulting on execution of fresh text. We have to test
+ * for regs NULL since init will get here first thing at boot
+ *
+ * We also avoid filling the hash if not coming from a fault
+ */
+ if (current->thread.regs == NULL)
return;
-
- vsid = get_vsid(vma->vm_mm->context.id, address);
-
- local_irq_save(flags);
- tmp = cpumask_of_cpu(smp_processor_id());
- if (cpus_equal(vma->vm_mm->cpu_vm_mask, tmp))
- local = 1;
-
- __hash_page(address, 0, vsid, ptep, 0x300, local);
- local_irq_restore(flags);
-#endif
-#endif
+ trap = TRAP(current->thread.regs);
+ if (trap == 0x400)
+ access |= _PAGE_EXEC;
+ else if (trap != 0x300)
+ return;
+ hash_preload(vma->vm_mm, address, access, trap);
+#endif /* CONFIG_PPC_STD_MMU */
}
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 4035cad8d7f..da09ba03c42 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -21,6 +21,7 @@
#include <asm/machdep.h>
#include <asm/abs_addr.h>
#include <asm/system.h>
+#include <asm/smp.h>
static int numa_enabled = 1;
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index b79a7820613..900842451bd 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -59,7 +59,6 @@
#include <asm/processor.h>
#include <asm/mmzone.h>
#include <asm/cputable.h>
-#include <asm/ppcdebug.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/iommu.h>
@@ -101,7 +100,6 @@ static int map_io_page(unsigned long ea, unsigned long pa, int flags)
pud_t *pudp;
pmd_t *pmdp;
pte_t *ptep;
- unsigned long vsid;
if (mem_init_done) {
pgdp = pgd_offset_k(ea);
@@ -117,28 +115,15 @@ static int map_io_page(unsigned long ea, unsigned long pa, int flags)
set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
__pgprot(flags)));
} else {
- unsigned long va, vpn, hash, hpteg;
-
/*
* If the mm subsystem is not fully up, we cannot create a
* linux page table entry for this mapping. Simply bolt an
* entry in the hardware page table.
+ *
*/
- vsid = get_kernel_vsid(ea);
- va = (vsid << 28) | (ea & 0xFFFFFFF);
- vpn = va >> PAGE_SHIFT;
-
- hash = hpt_hash(vpn, 0);
-
- hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
-
- /* Panic if a pte grpup is full */
- if (ppc_md.hpte_insert(hpteg, va, pa >> PAGE_SHIFT,
- HPTE_V_BOLTED,
- _PAGE_NO_CACHE|_PAGE_GUARDED|PP_RWXX)
- == -1) {
- panic("map_io_page: could not insert mapping");
- }
+ if (htab_bolt_mapping(ea, ea + PAGE_SIZE, pa, flags,
+ mmu_virtual_psize))
+ panic("Can't map bolted IO mapping");
}
return 0;
}
diff --git a/arch/powerpc/mm/ppc_mmu_32.c b/arch/powerpc/mm/ppc_mmu_32.c
index cef9e83cc7e..ed7fcfe5fd3 100644
--- a/arch/powerpc/mm/ppc_mmu_32.c
+++ b/arch/powerpc/mm/ppc_mmu_32.c
@@ -179,6 +179,21 @@ void __init setbat(int index, unsigned long virt, unsigned long phys,
}
/*
+ * Preload a translation in the hash table
+ */
+void hash_preload(struct mm_struct *mm, unsigned long ea,
+ unsigned long access, unsigned long trap)
+{
+ pmd_t *pmd;
+
+ if (Hash == 0)
+ return;
+ pmd = pmd_offset(pgd_offset(mm, ea), ea);
+ if (!pmd_none(*pmd))
+ add_hash_page(mm->context, ea, pmd_val(*pmd));
+}
+
+/*
* Initialize the hash table and patch the instructions in hashtable.S.
*/
void __init MMU_init_hw(void)
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 0473953f6a3..60e852f2f8e 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -14,14 +14,32 @@
* 2 of the License, or (at your option) any later version.
*/
+#undef DEBUG
+
#include <linux/config.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/paca.h>
#include <asm/cputable.h>
+#include <asm/cacheflush.h>
+
+#ifdef DEBUG
+#define DBG(fmt...) udbg_printf(fmt)
+#else
+#define DBG(fmt...)
+#endif
-extern void slb_allocate(unsigned long ea);
+extern void slb_allocate_realmode(unsigned long ea);
+extern void slb_allocate_user(unsigned long ea);
+
+static void slb_allocate(unsigned long ea)
+{
+ /* Currently, we do real mode for all SLBs including user, but
+ * that will change if we bring back dynamic VSIDs
+ */
+ slb_allocate_realmode(ea);
+}
static inline unsigned long mk_esid_data(unsigned long ea, unsigned long slot)
{
@@ -46,13 +64,15 @@ static void slb_flush_and_rebolt(void)
{
/* If you change this make sure you change SLB_NUM_BOLTED
* appropriately too. */
- unsigned long ksp_flags = SLB_VSID_KERNEL;
+ unsigned long linear_llp, virtual_llp, lflags, vflags;
unsigned long ksp_esid_data;
WARN_ON(!irqs_disabled());
- if (cpu_has_feature(CPU_FTR_16M_PAGE))
- ksp_flags |= SLB_VSID_L;
+ linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
+ virtual_llp = mmu_psize_defs[mmu_virtual_psize].sllp;
+ lflags = SLB_VSID_KERNEL | linear_llp;
+ vflags = SLB_VSID_KERNEL | virtual_llp;
ksp_esid_data = mk_esid_data(get_paca()->kstack, 2);
if ((ksp_esid_data & ESID_MASK) == KERNELBASE)
@@ -67,9 +87,9 @@ static void slb_flush_and_rebolt(void)
/* Slot 2 - kernel stack */
"slbmte %2,%3\n"
"isync"
- :: "r"(mk_vsid_data(VMALLOCBASE, SLB_VSID_KERNEL)),
+ :: "r"(mk_vsid_data(VMALLOCBASE, vflags)),
"r"(mk_esid_data(VMALLOCBASE, 1)),
- "r"(mk_vsid_data(ksp_esid_data, ksp_flags)),
+ "r"(mk_vsid_data(ksp_esid_data, lflags)),
"r"(ksp_esid_data)
: "memory");
}
@@ -102,6 +122,9 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
get_paca()->slb_cache_ptr = 0;
get_paca()->context = mm->context;
+#ifdef CONFIG_PPC_64K_PAGES
+ get_paca()->pgdir = mm->pgd;
+#endif /* CONFIG_PPC_64K_PAGES */
/*
* preload some userspace segments into the SLB.
@@ -131,28 +154,77 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
slb_allocate(unmapped_base);
}
+static inline void patch_slb_encoding(unsigned int *insn_addr,
+ unsigned int immed)
+{
+ /* Assume the instruction had a "0" immediate value, just
+ * "or" in the new value
+ */
+ *insn_addr |= immed;
+ flush_icache_range((unsigned long)insn_addr, 4+
+ (unsigned long)insn_addr);
+}
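Each instruction patched below was assembled as "li rN,0", so its 16-bit immediate field starts out zero and the SLB flags can simply be OR-ed in, which is all the helper above does. A small illustration of why that works on the raw word (not kernel code):

#include <stdint.h>

/* "li rN,0" is addi rN,0,0; the immediate occupies the low 16 bits. */
static uint32_t patch_li_immediate(uint32_t li_rN_0, uint16_t flags)
{
	return li_rN_0 | flags;
}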
+
void slb_initialize(void)
{
+ unsigned long linear_llp, virtual_llp;
+ static int slb_encoding_inited;
+ extern unsigned int *slb_miss_kernel_load_linear;
+ extern unsigned int *slb_miss_kernel_load_virtual;
+ extern unsigned int *slb_miss_user_load_normal;
+#ifdef CONFIG_HUGETLB_PAGE
+ extern unsigned int *slb_miss_user_load_huge;
+ unsigned long huge_llp;
+
+ huge_llp = mmu_psize_defs[mmu_huge_psize].sllp;
+#endif
+
+ /* Prepare our SLB miss handler based on our page size */
+ linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
+ virtual_llp = mmu_psize_defs[mmu_virtual_psize].sllp;
+ if (!slb_encoding_inited) {
+ slb_encoding_inited = 1;
+ patch_slb_encoding(slb_miss_kernel_load_linear,
+ SLB_VSID_KERNEL | linear_llp);
+ patch_slb_encoding(slb_miss_kernel_load_virtual,
+ SLB_VSID_KERNEL | virtual_llp);
+ patch_slb_encoding(slb_miss_user_load_normal,
+ SLB_VSID_USER | virtual_llp);
+
+ DBG("SLB: linear LLP = %04x\n", linear_llp);
+ DBG("SLB: virtual LLP = %04x\n", virtual_llp);
+#ifdef CONFIG_HUGETLB_PAGE
+ patch_slb_encoding(slb_miss_user_load_huge,
+ SLB_VSID_USER | huge_llp);
+ DBG("SLB: huge LLP = %04x\n", huge_llp);
+#endif
+ }
+
/* On iSeries the bolted entries have already been set up by
* the hypervisor from the lparMap data in head.S */
#ifndef CONFIG_PPC_ISERIES
- unsigned long flags = SLB_VSID_KERNEL;
+ {
+ unsigned long lflags, vflags;
- /* Invalidate the entire SLB (even slot 0) & all the ERATS */
- if (cpu_has_feature(CPU_FTR_16M_PAGE))
- flags |= SLB_VSID_L;
+ lflags = SLB_VSID_KERNEL | linear_llp;
+ vflags = SLB_VSID_KERNEL | virtual_llp;
- asm volatile("isync":::"memory");
- asm volatile("slbmte %0,%0"::"r" (0) : "memory");
+ /* Invalidate the entire SLB (even slot 0) & all the ERATS */
+ asm volatile("isync":::"memory");
+ asm volatile("slbmte %0,%0"::"r" (0) : "memory");
asm volatile("isync; slbia; isync":::"memory");
- create_slbe(KERNELBASE, flags, 0);
- create_slbe(VMALLOCBASE, SLB_VSID_KERNEL, 1);
+ create_slbe(KERNELBASE, lflags, 0);
+
+ /* VMALLOC space has 4K pages always for now */
+ create_slbe(VMALLOCBASE, vflags, 1);
+
/* We don't bolt the stack for the time being - we're in boot,
* so the stack is in the bolted segment. By the time it goes
* elsewhere, we'll call _switch() which will bolt in the new
* one. */
asm volatile("isync":::"memory");
-#endif
+ }
+#endif /* CONFIG_PPC_ISERIES */
get_paca()->stab_rr = SLB_NUM_BOLTED;
}
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index a3a03da503b..950ffc5848c 100644
--- a/arch/powerpc/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
@@ -18,61 +18,28 @@
#include <linux/config.h>
#include <asm/processor.h>
-#include <asm/page.h>
-#include <asm/mmu.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
+#include <asm/page.h>
+#include <asm/mmu.h>
+#include <asm/pgtable.h>
-/* void slb_allocate(unsigned long ea);
+/* void slb_allocate_realmode(unsigned long ea);
*
* Create an SLB entry for the given EA (user or kernel).
* r3 = faulting address, r13 = PACA
* r9, r10, r11 are clobbered by this function
* No other registers are examined or changed.
*/
-_GLOBAL(slb_allocate)
- /*
- * First find a slot, round robin. Previously we tried to find
- * a free slot first but that took too long. Unfortunately we
- * dont have any LRU information to help us choose a slot.
- */
-#ifdef CONFIG_PPC_ISERIES
- /*
- * On iSeries, the "bolted" stack segment can be cast out on
- * shared processor switch so we need to check for a miss on
- * it and restore it to the right slot.
- */
- ld r9,PACAKSAVE(r13)
- clrrdi r9,r9,28
- clrrdi r11,r3,28
- li r10,SLB_NUM_BOLTED-1 /* Stack goes in last bolted slot */
- cmpld r9,r11
- beq 3f
-#endif /* CONFIG_PPC_ISERIES */
-
- ld r10,PACASTABRR(r13)
- addi r10,r10,1
- /* use a cpu feature mask if we ever change our slb size */
- cmpldi r10,SLB_NUM_ENTRIES
-
- blt+ 4f
- li r10,SLB_NUM_BOLTED
-
-4:
- std r10,PACASTABRR(r13)
-3:
- /* r3 = faulting address, r10 = entry */
+_GLOBAL(slb_allocate_realmode)
+ /* r3 = faulting address */
srdi r9,r3,60 /* get region */
- srdi r3,r3,28 /* get esid */
+ srdi r10,r3,28 /* get esid */
cmpldi cr7,r9,0xc /* cmp KERNELBASE for later use */
- rldimi r10,r3,28,0 /* r10= ESID<<28 | entry */
- oris r10,r10,SLB_ESID_V@h /* r10 |= SLB_ESID_V */
-
- /* r3 = esid, r10 = esid_data, cr7 = <>KERNELBASE */
-
+ /* r3 = address, r10 = esid, cr7 = <>KERNELBASE */
blt cr7,0f /* user or kernel? */
/* kernel address: proto-VSID = ESID */
@@ -81,43 +48,166 @@ _GLOBAL(slb_allocate)
* top segment. That's ok, the scramble below will translate
* it to VSID 0, which is reserved as a bad VSID - one which
* will never have any pages in it. */
- li r11,SLB_VSID_KERNEL
-BEGIN_FTR_SECTION
- bne cr7,9f
- li r11,(SLB_VSID_KERNEL|SLB_VSID_L)
-END_FTR_SECTION_IFSET(CPU_FTR_16M_PAGE)
- b 9f
-0: /* user address: proto-VSID = context<<15 | ESID */
- srdi. r9,r3,USER_ESID_BITS
+ /* Check if hitting the linear mapping of the vmalloc/ioremap
+ * kernel space
+ */
+ bne cr7,1f
+
+ /* Linear mapping encoding bits, the "li" instruction below will
+ * be patched by the kernel at boot
+ */
+_GLOBAL(slb_miss_kernel_load_linear)
+ li r11,0
+ b slb_finish_load
+
+1: /* vmalloc/ioremap mapping encoding bits, the "li" instruction below
+ * will be patched by the kernel at boot
+ */
+_GLOBAL(slb_miss_kernel_load_virtual)
+ li r11,0
+ b slb_finish_load
+
+
+0: /* user address: proto-VSID = context << 15 | ESID. First check
+ * if the address is within the boundaries of the user region
+ */
+ srdi. r9,r10,USER_ESID_BITS
bne- 8f /* invalid ea bits set */
+ /* Figure out if the segment contains huge pages */
#ifdef CONFIG_HUGETLB_PAGE
BEGIN_FTR_SECTION
+ b 1f
+END_FTR_SECTION_IFCLR(CPU_FTR_16M_PAGE)
+ cmpldi r10,16
+
+ lhz r9,PACALOWHTLBAREAS(r13)
+ mr r11,r10
+ blt 5f
+
lhz r9,PACAHIGHHTLBAREAS(r13)
- srdi r11,r3,(HTLB_AREA_SHIFT-SID_SHIFT)
- srd r9,r9,r11
- lhz r11,PACALOWHTLBAREAS(r13)
- srd r11,r11,r3
- or r9,r9,r11
-END_FTR_SECTION_IFSET(CPU_FTR_16M_PAGE)
+ srdi r11,r10,(HTLB_AREA_SHIFT-SID_SHIFT)
+
+5: srd r9,r9,r11
+ andi. r9,r9,1
+ beq 1f
+_GLOBAL(slb_miss_user_load_huge)
+ li r11,0
+ b 2f
+1:
#endif /* CONFIG_HUGETLB_PAGE */
- li r11,SLB_VSID_USER
+_GLOBAL(slb_miss_user_load_normal)
+ li r11,0
-#ifdef CONFIG_HUGETLB_PAGE
-BEGIN_FTR_SECTION
- rldimi r11,r9,8,55 /* shift masked bit into SLB_VSID_L */
-END_FTR_SECTION_IFSET(CPU_FTR_16M_PAGE)
-#endif /* CONFIG_HUGETLB_PAGE */
+2:
+ ld r9,PACACONTEXTID(r13)
+ rldimi r10,r9,USER_ESID_BITS,0
+ b slb_finish_load
+8: /* invalid EA */
+ li r10,0 /* BAD_VSID */
+ li r11,SLB_VSID_USER /* flags don't much matter */
+ b slb_finish_load
+
+#ifdef __DISABLED__
+
+/* void slb_allocate_user(unsigned long ea);
+ *
+ * Create an SLB entry for the given EA (user or kernel).
+ * r3 = faulting address, r13 = PACA
+ * r9, r10, r11 are clobbered by this function
+ * No other registers are examined or changed.
+ *
+ * It is called with translation enabled in order to be able to walk the
+ * page tables. This is not currently used.
+ */
+_GLOBAL(slb_allocate_user)
+ /* r3 = faulting address */
+ srdi r10,r3,28 /* get esid */
+
+ crset 4*cr7+lt /* set "user" flag for later */
+
+ /* check if we fit in the range covered by the pagetables*/
+ srdi. r9,r3,PGTABLE_EADDR_SIZE
+ crnot 4*cr0+eq,4*cr0+eq
+ beqlr
+
+ /* now we need to get to the page tables in order to get the page
+ * size encoding from the PMD. In the future, we'll be able to deal
+ * with 1T segments too by getting the encoding from the PGD instead
+ */
+ ld r9,PACAPGDIR(r13)
+ cmpldi cr0,r9,0
+ beqlr
+ rlwinm r11,r10,8,25,28
+ ldx r9,r9,r11 /* get pgd_t */
+ cmpldi cr0,r9,0
+ beqlr
+ rlwinm r11,r10,3,17,28
+ ldx r9,r9,r11 /* get pmd_t */
+ cmpldi cr0,r9,0
+ beqlr
+
+ /* build vsid flags */
+ andi. r11,r9,SLB_VSID_LLP
+ ori r11,r11,SLB_VSID_USER
+
+ /* get context to calculate proto-VSID */
ld r9,PACACONTEXTID(r13)
- rldimi r3,r9,USER_ESID_BITS,0
+ rldimi r10,r9,USER_ESID_BITS,0
+
+ /* fall through slb_finish_load */
+
+#endif /* __DISABLED__ */
+
+
+/*
+ * Finish loading of an SLB entry and return
+ *
+ * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9, cr7 = <>KERNELBASE
+ */
+slb_finish_load:
+ ASM_VSID_SCRAMBLE(r10,r9)
+ rldimi r11,r10,SLB_VSID_SHIFT,16 /* combine VSID and flags */
+
+ /* r3 = EA, r11 = VSID data */
+ /*
+ * Find a slot, round robin. Previously we tried to find a
+ * free slot first but that took too long. Unfortunately we
+ * dont have any LRU information to help us choose a slot.
+ */
+#ifdef CONFIG_PPC_ISERIES
+ /*
+ * On iSeries, the "bolted" stack segment can be cast out on
+ * shared processor switch so we need to check for a miss on
+ * it and restore it to the right slot.
+ */
+ ld r9,PACAKSAVE(r13)
+ clrrdi r9,r9,28
+ clrrdi r3,r3,28
+ li r10,SLB_NUM_BOLTED-1 /* Stack goes in last bolted slot */
+ cmpld r9,r3
+ beq 3f
+#endif /* CONFIG_PPC_ISERIES */
+
+ ld r10,PACASTABRR(r13)
+ addi r10,r10,1
+ /* use a cpu feature mask if we ever change our slb size */
+ cmpldi r10,SLB_NUM_ENTRIES
+
+ blt+ 4f
+ li r10,SLB_NUM_BOLTED
-9: /* r3 = protovsid, r11 = flags, r10 = esid_data, cr7 = <>KERNELBASE */
- ASM_VSID_SCRAMBLE(r3,r9)
+4:
+ std r10,PACASTABRR(r13)
+
+3:
+ rldimi r3,r10,0,36 /* r3= EA[0:35] | entry */
+ oris r10,r3,SLB_ESID_V@h /* r3 |= SLB_ESID_V */
- rldimi r11,r3,SLB_VSID_SHIFT,16 /* combine VSID and flags */
+ /* r3 = ESID data, r11 = VSID data */
/*
* No need for an isync before or after this slbmte. The exception
@@ -125,7 +215,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_16M_PAGE)
*/
slbmte r11,r10
- bgelr cr7 /* we're done for kernel addresses */
+ /* we're done for kernel addresses */
+ crclr 4*cr0+eq /* set result to "success" */
+ bgelr cr7
/* Update the slb cache */
lhz r3,PACASLBCACHEPTR(r13) /* offset = paca->slb_cache_ptr */
@@ -143,9 +235,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_16M_PAGE)
li r3,SLB_CACHE_ENTRIES+1
2:
sth r3,PACASLBCACHEPTR(r13) /* paca->slb_cache_ptr = offset */
+ crclr 4*cr0+eq /* set result to "success" */
blr
-8: /* invalid EA */
- li r3,0 /* BAD_VSID */
- li r11,SLB_VSID_USER /* flags don't much matter */
- b 9b
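
The rewritten slb_finish_load keeps the old round-robin victim selection: the next slot comes from paca->stab_rr, and on overflow it wraps back to the first non-bolted slot rather than slot 0, so bolted kernel entries are never evicted. A small C sketch of that wrap, with made-up sizes:

#include <stdio.h>

#define SLB_NUM_ENTRIES 64   /* illustrative SLB size        */
#define SLB_NUM_BOLTED   3   /* slots 0..2 are never evicted */

/* Pick the next SLB slot to replace, round robin over non-bolted slots. */
static unsigned next_slb_slot(unsigned *stab_rr)
{
        unsigned slot = *stab_rr + 1;

        if (slot >= SLB_NUM_ENTRIES)
                slot = SLB_NUM_BOLTED;
        *stab_rr = slot;
        return slot;
}

int main(void)
{
        unsigned rr = SLB_NUM_BOLTED;

        for (int i = 0; i < 70; i++)
                printf("%u ", next_slb_slot(&rr));
        printf("\n");
        return 0;
}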
diff --git a/arch/powerpc/mm/stab.c b/arch/powerpc/mm/stab.c
index 1b83f002bf2..fa325dbf98f 100644
--- a/arch/powerpc/mm/stab.c
+++ b/arch/powerpc/mm/stab.c
@@ -26,7 +26,6 @@ struct stab_entry {
unsigned long vsid_data;
};
-/* Both the segment table and SLB code uses the following cache */
#define NR_STAB_CACHE_ENTRIES 8
DEFINE_PER_CPU(long, stab_cache_ptr);
DEFINE_PER_CPU(long, stab_cache[NR_STAB_CACHE_ENTRIES]);
@@ -186,7 +185,7 @@ void switch_stab(struct task_struct *tsk, struct mm_struct *mm)
/* Never flush the first entry. */
ste += 1;
for (entry = 1;
- entry < (PAGE_SIZE / sizeof(struct stab_entry));
+ entry < (HW_PAGE_SIZE / sizeof(struct stab_entry));
entry++, ste++) {
unsigned long ea;
ea = ste->esid_data & ESID_MASK;
@@ -200,6 +199,10 @@ void switch_stab(struct task_struct *tsk, struct mm_struct *mm)
__get_cpu_var(stab_cache_ptr) = 0;
+#ifdef CONFIG_PPC_64K_PAGES
+ get_paca()->pgdir = mm->pgd;
+#endif /* CONFIG_PPC_64K_PAGES */
+
/* Now preload some entries for the new task */
if (test_tsk_thread_flag(tsk, TIF_32BIT))
unmapped_base = TASK_UNMAPPED_BASE_USER32;
@@ -223,8 +226,6 @@ void switch_stab(struct task_struct *tsk, struct mm_struct *mm)
asm volatile("sync" : : : "memory");
}
-extern void slb_initialize(void);
-
/*
* Allocate segment tables for secondary CPUs. These must all go in
* the first (bolted) segment, so that do_stab_bolted won't get a
@@ -243,18 +244,21 @@ void stabs_alloc(void)
if (cpu == 0)
continue; /* stab for CPU 0 is statically allocated */
- newstab = lmb_alloc_base(PAGE_SIZE, PAGE_SIZE, 1<<SID_SHIFT);
+ newstab = lmb_alloc_base(HW_PAGE_SIZE, HW_PAGE_SIZE,
+ 1<<SID_SHIFT);
if (! newstab)
panic("Unable to allocate segment table for CPU %d.\n",
cpu);
newstab += KERNELBASE;
- memset((void *)newstab, 0, PAGE_SIZE);
+ memset((void *)newstab, 0, HW_PAGE_SIZE);
paca[cpu].stab_addr = newstab;
paca[cpu].stab_real = virt_to_abs(newstab);
- printk(KERN_DEBUG "Segment table for CPU %d at 0x%lx virtual, 0x%lx absolute\n", cpu, paca[cpu].stab_addr, paca[cpu].stab_real);
+ printk(KERN_DEBUG "Segment table for CPU %d at 0x%lx "
+ "virtual, 0x%lx absolute\n",
+ cpu, paca[cpu].stab_addr, paca[cpu].stab_real);
}
}
@@ -267,13 +271,9 @@ void stab_initialize(unsigned long stab)
{
unsigned long vsid = get_kernel_vsid(KERNELBASE);
- if (cpu_has_feature(CPU_FTR_SLB)) {
- slb_initialize();
- } else {
- asm volatile("isync; slbia; isync":::"memory");
- make_ste(stab, GET_ESID(KERNELBASE), vsid);
+ asm volatile("isync; slbia; isync":::"memory");
+ make_ste(stab, GET_ESID(KERNELBASE), vsid);
- /* Order update */
- asm volatile("sync":::"memory");
- }
+ /* Order update */
+ asm volatile("sync":::"memory");
}
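
The stab.c changes swap PAGE_SIZE for HW_PAGE_SIZE because, with 64K kernel pages configured, PAGE_SIZE no longer matches the 4K hardware page the segment table is defined in terms of; the table still occupies one hardware page. A quick arithmetic sketch (sizes assumed, not taken from the headers):

#include <stdio.h>

int main(void)
{
        const unsigned long hw_page_size = 4096;   /* hardware page            */
        const unsigned long page_size    = 65536;  /* kernel page, 64K config  */
        const unsigned long ste_size     = 16;     /* stab_entry: two u64s     */

        /* The segment table occupies one *hardware* page, so the entry count
         * must be derived from HW_PAGE_SIZE, not PAGE_SIZE. */
        printf("entries with HW_PAGE_SIZE: %lu\n", hw_page_size / ste_size);
        printf("entries if (wrongly) using PAGE_SIZE: %lu\n",
               page_size / ste_size);
        return 0;
}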
diff --git a/arch/powerpc/mm/tlb_64.c b/arch/powerpc/mm/tlb_64.c
index 09ab81a10f4..53e31b834ac 100644
--- a/arch/powerpc/mm/tlb_64.c
+++ b/arch/powerpc/mm/tlb_64.c
@@ -21,6 +21,7 @@
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
+
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/mm.h>
@@ -30,7 +31,7 @@
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
-#include <linux/highmem.h>
+#include <asm/bug.h>
DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
@@ -126,28 +127,46 @@ void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf)
* (if we remove it we should clear the _PTE_HPTEFLAGS bits).
*/
void hpte_update(struct mm_struct *mm, unsigned long addr,
- unsigned long pte, int wrprot)
+ pte_t *ptep, unsigned long pte, int huge)
{
struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
unsigned long vsid;
+ unsigned int psize = mmu_virtual_psize;
int i;
i = batch->index;
+ /* We mask the address for the base page size. Huge pages will
+ * have applied their own masking already
+ */
+ addr &= PAGE_MASK;
+
+ /* Get page size (maybe move back to caller) */
+ if (huge) {
+#ifdef CONFIG_HUGETLB_PAGE
+ psize = mmu_huge_psize;
+#else
+ BUG();
+#endif
+ }
+
/*
* This can happen when we are in the middle of a TLB batch and
* we encounter memory pressure (eg copy_page_range when it tries
* to allocate a new pte). If we have to reclaim memory and end
* up scanning and resetting referenced bits then our batch context
* will change mid stream.
+ *
+ * We also need to ensure only one page size is present in a given
+ * batch
*/
- if (i != 0 && (mm != batch->mm || batch->large != pte_huge(pte))) {
+ if (i != 0 && (mm != batch->mm || batch->psize != psize)) {
flush_tlb_pending();
i = 0;
}
if (i == 0) {
batch->mm = mm;
- batch->large = pte_huge(pte);
+ batch->psize = psize;
}
if (addr < KERNELBASE) {
vsid = get_vsid(mm->context.id, addr);
@@ -155,7 +174,7 @@ void hpte_update(struct mm_struct *mm, unsigned long addr,
} else
vsid = get_kernel_vsid(addr);
batch->vaddr[i] = (vsid << 28 ) | (addr & 0x0fffffff);
- batch->pte[i] = __pte(pte);
+ batch->pte[i] = __real_pte(__pte(pte), ptep);
batch->index = ++i;
if (i >= PPC64_TLB_BATCH_NR)
flush_tlb_pending();
@@ -177,7 +196,8 @@ void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
local = 1;
if (i == 1)
- flush_hash_page(batch->vaddr[0], batch->pte[0], local);
+ flush_hash_page(batch->vaddr[0], batch->pte[0],
+ batch->psize, local);
else
flush_hash_range(i, local);
batch->index = 0;
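
The new rule in hpte_update is that a pending TLB batch may only hold entries for a single mm and a single page size; anything else forces an early flush before the new entry is queued. A compact sketch of that gating logic, with simplified types:

#include <stddef.h>
#include <stdio.h>

struct tlb_batch {
        void  *mm;      /* owning address space                    */
        int    psize;   /* page size of every entry in the batch   */
        size_t index;   /* number of queued entries                */
};

/* Nonzero if the pending batch must be flushed before queueing an
 * entry for (mm, psize). */
static int batch_needs_flush(const struct tlb_batch *b, void *mm, int psize)
{
        return b->index != 0 && (b->mm != mm || b->psize != psize);
}

int main(void)
{
        struct tlb_batch b = { .mm = (void *)0x1, .psize = 0, .index = 4 };

        printf("%d\n", batch_needs_flush(&b, (void *)0x1, 1)); /* 1: psize changed   */
        printf("%d\n", batch_needs_flush(&b, (void *)0x1, 0)); /* 0: same mm & psize */
        return 0;
}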
diff --git a/arch/powerpc/oprofile/Kconfig b/arch/powerpc/oprofile/Kconfig
index 19d37730b66..eb2dece76a5 100644
--- a/arch/powerpc/oprofile/Kconfig
+++ b/arch/powerpc/oprofile/Kconfig
@@ -1,7 +1,3 @@
-
-menu "Profiling support"
- depends on EXPERIMENTAL
-
config PROFILING
bool "Profiling support (EXPERIMENTAL)"
help
@@ -19,5 +15,3 @@ config OPROFILE
If unsure, say N.
-endmenu
-
diff --git a/arch/powerpc/oprofile/op_model_power4.c b/arch/powerpc/oprofile/op_model_power4.c
index 88644931584..c4ee5478427 100644
--- a/arch/powerpc/oprofile/op_model_power4.c
+++ b/arch/powerpc/oprofile/op_model_power4.c
@@ -17,6 +17,7 @@
#include <asm/systemcfg.h>
#include <asm/rtas.h>
#include <asm/oprofile_impl.h>
+#include <asm/reg.h>
#define dbg(args...)
@@ -81,6 +82,26 @@ static void power4_reg_setup(struct op_counter_config *ctr,
extern void ppc64_enable_pmcs(void);
+/*
+ * Older CPUs require the MMCRA sample bit to be always set, but newer
+ * CPUs only want it set for some groups. Eventually we will remove all
+ * knowledge of this bit in the kernel, oprofile userspace should be
+ * setting it when required.
+ *
+ * In order to keep current installations working we force the bit for
+ * those older CPUs. Once everyone has updated their oprofile userspace we
+ * can remove this hack.
+ */
+static inline int mmcra_must_set_sample(void)
+{
+ if (__is_processor(PV_POWER4) || __is_processor(PV_POWER4p) ||
+ __is_processor(PV_970) || __is_processor(PV_970FX) ||
+ __is_processor(PV_970MP))
+ return 1;
+
+ return 0;
+}
+
static void power4_cpu_setup(void *unused)
{
unsigned int mmcr0 = mmcr0_val;
@@ -98,7 +119,8 @@ static void power4_cpu_setup(void *unused)
mtspr(SPRN_MMCR1, mmcr1_val);
- mmcra |= MMCRA_SAMPLE_ENABLE;
+ if (mmcra_must_set_sample())
+ mmcra |= MMCRA_SAMPLE_ENABLE;
mtspr(SPRN_MMCRA, mmcra);
dbg("setup on cpu %d, mmcr0 %lx\n", smp_processor_id(),
diff --git a/arch/powerpc/platforms/iseries/htab.c b/arch/powerpc/platforms/iseries/htab.c
index b3c6c3374ca..30bdcf3925d 100644
--- a/arch/powerpc/platforms/iseries/htab.c
+++ b/arch/powerpc/platforms/iseries/htab.c
@@ -39,15 +39,16 @@ static inline void iSeries_hunlock(unsigned long slot)
spin_unlock(&iSeries_hlocks[(slot >> 4) & 0x3f]);
}
-static long iSeries_hpte_insert(unsigned long hpte_group, unsigned long va,
- unsigned long prpn, unsigned long vflags,
- unsigned long rflags)
+long iSeries_hpte_insert(unsigned long hpte_group, unsigned long va,
+ unsigned long pa, unsigned long rflags,
+ unsigned long vflags, int psize)
{
- unsigned long arpn;
long slot;
hpte_t lhpte;
int secondary = 0;
+ BUG_ON(psize != MMU_PAGE_4K);
+
/*
* The hypervisor tries both primary and secondary.
* If we are being called to insert in the secondary,
@@ -59,8 +60,19 @@ static long iSeries_hpte_insert(unsigned long hpte_group, unsigned long va,
iSeries_hlock(hpte_group);
- slot = HvCallHpt_findValid(&lhpte, va >> PAGE_SHIFT);
- BUG_ON(lhpte.v & HPTE_V_VALID);
+ slot = HvCallHpt_findValid(&lhpte, va >> HW_PAGE_SHIFT);
+ if (unlikely(lhpte.v & HPTE_V_VALID)) {
+ if (vflags & HPTE_V_BOLTED) {
+ HvCallHpt_setSwBits(slot, 0x10, 0);
+ HvCallHpt_setPp(slot, PP_RWXX);
+ iSeries_hunlock(hpte_group);
+ if (slot < 0)
+ return 0x8 | (slot & 7);
+ else
+ return slot & 7;
+ }
+ BUG();
+ }
if (slot == -1) { /* No available entry found in either group */
iSeries_hunlock(hpte_group);
@@ -73,10 +85,9 @@ static long iSeries_hpte_insert(unsigned long hpte_group, unsigned long va,
slot &= 0x7fffffffffffffff;
}
- arpn = phys_to_abs(prpn << PAGE_SHIFT) >> PAGE_SHIFT;
- lhpte.v = (va >> 23) << HPTE_V_AVPN_SHIFT | vflags | HPTE_V_VALID;
- lhpte.r = (arpn << HPTE_R_RPN_SHIFT) | rflags;
+ lhpte.v = hpte_encode_v(va, MMU_PAGE_4K) | vflags | HPTE_V_VALID;
+ lhpte.r = hpte_encode_r(phys_to_abs(pa), MMU_PAGE_4K) | rflags;
/* Now fill in the actual HPTE */
HvCallHpt_addValidate(slot, secondary, &lhpte);
@@ -86,25 +97,6 @@ static long iSeries_hpte_insert(unsigned long hpte_group, unsigned long va,
return (secondary << 3) | (slot & 7);
}
-long iSeries_hpte_bolt_or_insert(unsigned long hpte_group,
- unsigned long va, unsigned long prpn, unsigned long vflags,
- unsigned long rflags)
-{
- long slot;
- hpte_t lhpte;
-
- slot = HvCallHpt_findValid(&lhpte, va >> PAGE_SHIFT);
-
- if (lhpte.v & HPTE_V_VALID) {
- /* Bolt the existing HPTE */
- HvCallHpt_setSwBits(slot, 0x10, 0);
- HvCallHpt_setPp(slot, PP_RWXX);
- return 0;
- }
-
- return iSeries_hpte_insert(hpte_group, va, prpn, vflags, rflags);
-}
-
static unsigned long iSeries_hpte_getword0(unsigned long slot)
{
hpte_t hpte;
@@ -150,15 +142,17 @@ static long iSeries_hpte_remove(unsigned long hpte_group)
* bits 61..63 : PP2,PP1,PP0
*/
static long iSeries_hpte_updatepp(unsigned long slot, unsigned long newpp,
- unsigned long va, int large, int local)
+ unsigned long va, int psize, int local)
{
hpte_t hpte;
- unsigned long avpn = va >> 23;
+ unsigned long want_v;
iSeries_hlock(slot);
HvCallHpt_get(&hpte, slot);
- if ((HPTE_V_AVPN_VAL(hpte.v) == avpn) && (hpte.v & HPTE_V_VALID)) {
+ want_v = hpte_encode_v(va, MMU_PAGE_4K);
+
+ if (HPTE_V_COMPARE(hpte.v, want_v) && (hpte.v & HPTE_V_VALID)) {
/*
* Hypervisor expects bits as NPPP, which is
* different from how they are mapped in our PP.
@@ -210,14 +204,17 @@ static long iSeries_hpte_find(unsigned long vpn)
*
* No need to lock here because we should be the only user.
*/
-static void iSeries_hpte_updateboltedpp(unsigned long newpp, unsigned long ea)
+static void iSeries_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
+ int psize)
{
unsigned long vsid,va,vpn;
long slot;
+ BUG_ON(psize != MMU_PAGE_4K);
+
vsid = get_kernel_vsid(ea);
va = (vsid << 28) | (ea & 0x0fffffff);
- vpn = va >> PAGE_SHIFT;
+ vpn = va >> HW_PAGE_SHIFT;
slot = iSeries_hpte_find(vpn);
if (slot == -1)
panic("updateboltedpp: Could not find page to bolt\n");
@@ -225,7 +222,7 @@ static void iSeries_hpte_updateboltedpp(unsigned long newpp, unsigned long ea)
}
static void iSeries_hpte_invalidate(unsigned long slot, unsigned long va,
- int large, int local)
+ int psize, int local)
{
unsigned long hpte_v;
unsigned long avpn = va >> 23;
diff --git a/arch/powerpc/platforms/iseries/hvlog.c b/arch/powerpc/platforms/iseries/hvlog.c
index 62ec7347968..f476d71194f 100644
--- a/arch/powerpc/platforms/iseries/hvlog.c
+++ b/arch/powerpc/platforms/iseries/hvlog.c
@@ -22,7 +22,7 @@ void HvCall_writeLogBuffer(const void *buffer, u64 len)
while (len) {
hv_buf.addr = cur;
- left_this_page = ((cur & PAGE_MASK) + PAGE_SIZE) - cur;
+ left_this_page = ((cur & HW_PAGE_MASK) + HW_PAGE_SIZE) - cur;
if (left_this_page > len)
left_this_page = len;
hv_buf.len = left_this_page;
@@ -30,6 +30,6 @@ void HvCall_writeLogBuffer(const void *buffer, u64 len)
HvCall2(HvCallBaseWriteLogBuffer,
virt_to_abs(&hv_buf),
left_this_page);
- cur = (cur & PAGE_MASK) + PAGE_SIZE;
+ cur = (cur & HW_PAGE_MASK) + HW_PAGE_SIZE;
}
}
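
HvCall_writeLogBuffer chops the buffer at 4K hardware-page boundaries because each hypervisor call takes one physically contiguous hardware page at a time; switching to HW_PAGE_MASK/HW_PAGE_SIZE keeps that true when the kernel page size is 64K. The arithmetic, in a standalone sketch:

#include <stdio.h>
#include <stdint.h>

#define HW_PAGE_SIZE 4096ull
#define HW_PAGE_MASK (~(HW_PAGE_SIZE - 1))

int main(void)
{
        uint64_t cur = 0x1000ff0;   /* 16 bytes short of a page boundary */
        uint64_t len = 100;

        while (len) {
                /* bytes remaining in the hardware page containing cur */
                uint64_t left_this_page =
                        ((cur & HW_PAGE_MASK) + HW_PAGE_SIZE) - cur;

                if (left_this_page > len)
                        left_this_page = len;
                printf("chunk at %#llx, %llu bytes\n",
                       (unsigned long long)cur,
                       (unsigned long long)left_this_page);
                len -= left_this_page;
                cur  = (cur & HW_PAGE_MASK) + HW_PAGE_SIZE;
        }
        return 0;
}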
diff --git a/arch/powerpc/platforms/iseries/iommu.c b/arch/powerpc/platforms/iseries/iommu.c
index 1a6845b5c5a..bf081b34582 100644
--- a/arch/powerpc/platforms/iseries/iommu.c
+++ b/arch/powerpc/platforms/iseries/iommu.c
@@ -43,9 +43,12 @@ static void tce_build_iSeries(struct iommu_table *tbl, long index, long npages,
u64 rc;
union tce_entry tce;
+ index <<= TCE_PAGE_FACTOR;
+ npages <<= TCE_PAGE_FACTOR;
+
while (npages--) {
tce.te_word = 0;
- tce.te_bits.tb_rpn = virt_to_abs(uaddr) >> PAGE_SHIFT;
+ tce.te_bits.tb_rpn = virt_to_abs(uaddr) >> TCE_SHIFT;
if (tbl->it_type == TCE_VB) {
/* Virtual Bus */
@@ -66,7 +69,7 @@ static void tce_build_iSeries(struct iommu_table *tbl, long index, long npages,
panic("PCI_DMA: HvCallXm_setTce failed, Rc: 0x%lx\n",
rc);
index++;
- uaddr += PAGE_SIZE;
+ uaddr += TCE_PAGE_SIZE;
}
}
@@ -74,6 +77,9 @@ static void tce_free_iSeries(struct iommu_table *tbl, long index, long npages)
{
u64 rc;
+ npages <<= TCE_PAGE_FACTOR;
+ index <<= TCE_PAGE_FACTOR;
+
while (npages--) {
rc = HvCallXm_setTce((u64)tbl->it_index, (u64)index, 0);
if (rc)
@@ -83,27 +89,6 @@ static void tce_free_iSeries(struct iommu_table *tbl, long index, long npages)
}
}
-#ifdef CONFIG_PCI
-/*
- * This function compares the known tables to find an iommu_table
- * that has already been built for hardware TCEs.
- */
-static struct iommu_table *iommu_table_find(struct iommu_table * tbl)
-{
- struct pci_dn *pdn;
-
- list_for_each_entry(pdn, &iSeries_Global_Device_List, Device_List) {
- struct iommu_table *it = pdn->iommu_table;
- if ((it != NULL) &&
- (it->it_type == TCE_PCI) &&
- (it->it_offset == tbl->it_offset) &&
- (it->it_index == tbl->it_index) &&
- (it->it_size == tbl->it_size))
- return it;
- }
- return NULL;
-}
-
/*
* Call Hv with the architected data structure to get TCE table info.
* info. Put the returned data into the Linux representation of the
@@ -113,8 +98,10 @@ static struct iommu_table *iommu_table_find(struct iommu_table * tbl)
* 2. TCE table per Bus.
* 3. TCE Table per IOA.
*/
-static void iommu_table_getparms(struct pci_dn *pdn,
- struct iommu_table* tbl)
+void iommu_table_getparms_iSeries(unsigned long busno,
+ unsigned char slotno,
+ unsigned char virtbus,
+ struct iommu_table* tbl)
{
struct iommu_table_cb *parms;
@@ -124,9 +111,9 @@ static void iommu_table_getparms(struct pci_dn *pdn,
memset(parms, 0, sizeof(*parms));
- parms->itc_busno = pdn->busno;
- parms->itc_slotno = pdn->LogicalSlot;
- parms->itc_virtbus = 0;
+ parms->itc_busno = busno;
+ parms->itc_slotno = slotno;
+ parms->itc_virtbus = virtbus;
HvCallXm_getTceTableParms(iseries_hv_addr(parms));
@@ -134,17 +121,40 @@ static void iommu_table_getparms(struct pci_dn *pdn,
panic("PCI_DMA: parms->size is zero, parms is 0x%p", parms);
/* itc_size is in pages worth of table, it_size is in # of entries */
- tbl->it_size = (parms->itc_size * PAGE_SIZE) / sizeof(union tce_entry);
+ tbl->it_size = ((parms->itc_size * TCE_PAGE_SIZE) /
+ sizeof(union tce_entry)) >> TCE_PAGE_FACTOR;
tbl->it_busno = parms->itc_busno;
- tbl->it_offset = parms->itc_offset;
+ tbl->it_offset = parms->itc_offset >> TCE_PAGE_FACTOR;
tbl->it_index = parms->itc_index;
tbl->it_blocksize = 1;
- tbl->it_type = TCE_PCI;
+ tbl->it_type = virtbus ? TCE_VB : TCE_PCI;
kfree(parms);
}
+#ifdef CONFIG_PCI
+/*
+ * This function compares the known tables to find an iommu_table
+ * that has already been built for hardware TCEs.
+ */
+static struct iommu_table *iommu_table_find(struct iommu_table * tbl)
+{
+ struct pci_dn *pdn;
+
+ list_for_each_entry(pdn, &iSeries_Global_Device_List, Device_List) {
+ struct iommu_table *it = pdn->iommu_table;
+ if ((it != NULL) &&
+ (it->it_type == TCE_PCI) &&
+ (it->it_offset == tbl->it_offset) &&
+ (it->it_index == tbl->it_index) &&
+ (it->it_size == tbl->it_size))
+ return it;
+ }
+ return NULL;
+}
+
+
void iommu_devnode_init_iSeries(struct device_node *dn)
{
struct iommu_table *tbl;
@@ -152,7 +162,7 @@ void iommu_devnode_init_iSeries(struct device_node *dn)
tbl = kmalloc(sizeof(struct iommu_table), GFP_KERNEL);
- iommu_table_getparms(pdn, tbl);
+ iommu_table_getparms_iSeries(pdn->busno, pdn->LogicalSlot, 0, tbl);
/* Look for existing tce table */
pdn->iommu_table = iommu_table_find(tbl);
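
In the iSeries TCE code above, index and npages arrive in kernel-page units, so with a kernel page larger than the 4K TCE page each one covers several TCEs; both are scaled up by TCE_PAGE_FACTOR before the per-TCE loop, and table sizes are scaled back down. A hedged sketch of the conversion (factor value assumed for a 64K kernel page):

#include <stdio.h>

#define TCE_SHIFT        12                       /* 4K TCE pages              */
#define PAGE_SHIFT       16                       /* 64K kernel pages, example */
#define TCE_PAGE_FACTOR  (PAGE_SHIFT - TCE_SHIFT)

int main(void)
{
        long index = 3, npages = 2;               /* in kernel-page units */

        index  <<= TCE_PAGE_FACTOR;               /* first 4K TCE slot          */
        npages <<= TCE_PAGE_FACTOR;               /* number of 4K TCEs to write */
        printf("TCE slots %ld..%ld\n", index, index + npages - 1);
        return 0;
}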
diff --git a/arch/powerpc/platforms/iseries/irq.c b/arch/powerpc/platforms/iseries/irq.c
index c1135912cc0..a06603d84a4 100644
--- a/arch/powerpc/platforms/iseries/irq.c
+++ b/arch/powerpc/platforms/iseries/irq.c
@@ -35,7 +35,6 @@
#include <linux/irq.h>
#include <linux/spinlock.h>
-#include <asm/ppcdebug.h>
#include <asm/iseries/hv_types.h>
#include <asm/iseries/hv_lp_event.h>
#include <asm/iseries/hv_call_xm.h>
@@ -227,8 +226,6 @@ static void iSeries_enable_IRQ(unsigned int irq)
/* Unmask secondary INTA */
mask = 0x80000000;
HvCallPci_unmaskInterrupts(bus, subBus, deviceId, mask);
- PPCDBG(PPCDBG_BUSWALK, "iSeries_enable_IRQ 0x%02X.%02X.%02X 0x%04X\n",
- bus, subBus, deviceId, irq);
}
/* This is called by iSeries_activate_IRQs */
@@ -310,8 +307,6 @@ static void iSeries_disable_IRQ(unsigned int irq)
/* Mask secondary INTA */
mask = 0x80000000;
HvCallPci_maskInterrupts(bus, subBus, deviceId, mask);
- PPCDBG(PPCDBG_BUSWALK, "iSeries_disable_IRQ 0x%02X.%02X.%02X 0x%04X\n",
- bus, subBus, deviceId, irq);
}
/*
diff --git a/arch/powerpc/platforms/iseries/pci.c b/arch/powerpc/platforms/iseries/pci.c
index 7d7d5884343..4b75131773a 100644
--- a/arch/powerpc/platforms/iseries/pci.c
+++ b/arch/powerpc/platforms/iseries/pci.c
@@ -32,7 +32,6 @@
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
-#include <asm/ppcdebug.h>
#include <asm/iommu.h>
#include <asm/abs_addr.h>
@@ -207,10 +206,6 @@ static struct device_node *build_device_node(HvBusNumber Bus,
struct device_node *node;
struct pci_dn *pdn;
- PPCDBG(PPCDBG_BUSWALK,
- "-build_device_node 0x%02X.%02X.%02X Function: %02X\n",
- Bus, SubBus, AgentId, Function);
-
node = kmalloc(sizeof(struct device_node), GFP_KERNEL);
if (node == NULL)
return NULL;
@@ -243,8 +238,6 @@ unsigned long __init find_and_init_phbs(void)
struct pci_controller *phb;
HvBusNumber bus;
- PPCDBG(PPCDBG_BUSWALK, "find_and_init_phbs Entry\n");
-
/* Check all possible buses. */
for (bus = 0; bus < 256; bus++) {
int ret = HvCallXm_testBus(bus);
@@ -261,9 +254,6 @@ unsigned long __init find_and_init_phbs(void)
phb->last_busno = bus;
phb->ops = &iSeries_pci_ops;
- PPCDBG(PPCDBG_BUSWALK, "PCI:Create iSeries pci_controller(%p), Bus: %04X\n",
- phb, bus);
-
/* Find and connect the devices. */
scan_PHB_slots(phb);
}
@@ -285,11 +275,9 @@ unsigned long __init find_and_init_phbs(void)
*/
void iSeries_pcibios_init(void)
{
- PPCDBG(PPCDBG_BUSWALK, "iSeries_pcibios_init Entry.\n");
iomm_table_initialize();
find_and_init_phbs();
io_page_mask = -1;
- PPCDBG(PPCDBG_BUSWALK, "iSeries_pcibios_init Exit.\n");
}
/*
@@ -301,8 +289,6 @@ void __init iSeries_pci_final_fixup(void)
struct device_node *node;
int DeviceCount = 0;
- PPCDBG(PPCDBG_BUSWALK, "iSeries_pcibios_fixup Entry.\n");
-
/* Fix up at the device node and pci_dev relationship */
mf_display_src(0xC9000100);
@@ -316,9 +302,6 @@ void __init iSeries_pci_final_fixup(void)
++DeviceCount;
pdev->sysdata = (void *)node;
PCI_DN(node)->pcidev = pdev;
- PPCDBG(PPCDBG_BUSWALK,
- "pdev 0x%p <==> DevNode 0x%p\n",
- pdev, node);
allocate_device_bars(pdev);
iSeries_Device_Information(pdev, DeviceCount);
iommu_devnode_init_iSeries(node);
@@ -333,13 +316,10 @@ void __init iSeries_pci_final_fixup(void)
void pcibios_fixup_bus(struct pci_bus *PciBus)
{
- PPCDBG(PPCDBG_BUSWALK, "iSeries_pcibios_fixup_bus(0x%04X) Entry.\n",
- PciBus->number);
}
void pcibios_fixup_resources(struct pci_dev *pdev)
{
- PPCDBG(PPCDBG_BUSWALK, "fixup_resources pdev %p\n", pdev);
}
/*
@@ -401,9 +381,6 @@ static void scan_EADS_bridge(HvBusNumber bus, HvSubBusNumber SubBus,
printk("found device at bus %d idsel %d func %d (AgentId %x)\n",
bus, IdSel, Function, AgentId);
/* Connect EADs: 0x18.00.12 = 0x00 */
- PPCDBG(PPCDBG_BUSWALK,
- "PCI:Connect EADs: 0x%02X.%02X.%02X\n",
- bus, SubBus, AgentId);
HvRc = HvCallPci_getBusUnitInfo(bus, SubBus, AgentId,
iseries_hv_addr(BridgeInfo),
sizeof(struct HvCallPci_BridgeInfo));
@@ -414,14 +391,6 @@ static void scan_EADS_bridge(HvBusNumber bus, HvSubBusNumber SubBus,
BridgeInfo->maxAgents,
BridgeInfo->maxSubBusNumber,
BridgeInfo->logicalSlotNumber);
- PPCDBG(PPCDBG_BUSWALK,
- "PCI: BridgeInfo, Type:0x%02X, SubBus:0x%02X, MaxAgents:0x%02X, MaxSubBus: 0x%02X, LSlot: 0x%02X\n",
- BridgeInfo->busUnitInfo.deviceType,
- BridgeInfo->subBusNumber,
- BridgeInfo->maxAgents,
- BridgeInfo->maxSubBusNumber,
- BridgeInfo->logicalSlotNumber);
-
if (BridgeInfo->busUnitInfo.deviceType ==
HvCallPci_BridgeDevice) {
/* Scan_Bridge_Slot...: 0x18.00.12 */
@@ -454,9 +423,6 @@ static int scan_bridge_slot(HvBusNumber Bus,
/* iSeries_allocate_IRQ.: 0x18.00.12(0xA3) */
Irq = iSeries_allocate_IRQ(Bus, 0, EADsIdSel);
- PPCDBG(PPCDBG_BUSWALK,
- "PCI:- allocate and assign IRQ 0x%02X.%02X.%02X = 0x%02X\n",
- Bus, 0, EADsIdSel, Irq);
/*
* Connect all functions of any device found.
@@ -482,9 +448,6 @@ static int scan_bridge_slot(HvBusNumber Bus,
printk("read vendor ID: %x\n", VendorId);
/* FoundDevice: 0x18.28.10 = 0x12AE */
- PPCDBG(PPCDBG_BUSWALK,
- "PCI:- FoundDevice: 0x%02X.%02X.%02X = 0x%04X, irq %d\n",
- Bus, SubBus, AgentId, VendorId, Irq);
HvRc = HvCallPci_configStore8(Bus, SubBus, AgentId,
PCI_INTERRUPT_LINE, Irq);
if (HvRc != 0)
diff --git a/arch/powerpc/platforms/iseries/setup.c b/arch/powerpc/platforms/iseries/setup.c
index fda712b4216..d3e4bf756c8 100644
--- a/arch/powerpc/platforms/iseries/setup.c
+++ b/arch/powerpc/platforms/iseries/setup.c
@@ -71,8 +71,6 @@ extern void hvlog(char *fmt, ...);
#endif
/* Function Prototypes */
-extern void ppcdbg_initialize(void);
-
static void build_iSeries_Memory_Map(void);
static void iseries_shared_idle(void);
static void iseries_dedicated_idle(void);
@@ -309,8 +307,6 @@ static void __init iSeries_init_early(void)
ppc64_firmware_features = FW_FEATURE_ISERIES;
- ppcdbg_initialize();
-
ppc64_interrupt_controller = IC_ISERIES;
#if defined(CONFIG_BLK_DEV_INITRD)
@@ -320,11 +316,11 @@ static void __init iSeries_init_early(void)
*/
if (naca.xRamDisk) {
initrd_start = (unsigned long)__va(naca.xRamDisk);
- initrd_end = initrd_start + naca.xRamDiskSize * PAGE_SIZE;
+ initrd_end = initrd_start + naca.xRamDiskSize * HW_PAGE_SIZE;
initrd_below_start_ok = 1; // ramdisk in kernel space
ROOT_DEV = Root_RAM0;
- if (((rd_size * 1024) / PAGE_SIZE) < naca.xRamDiskSize)
- rd_size = (naca.xRamDiskSize * PAGE_SIZE) / 1024;
+ if (((rd_size * 1024) / HW_PAGE_SIZE) < naca.xRamDiskSize)
+ rd_size = (naca.xRamDiskSize * HW_PAGE_SIZE) / 1024;
} else
#endif /* CONFIG_BLK_DEV_INITRD */
{
@@ -470,13 +466,14 @@ static void __init build_iSeries_Memory_Map(void)
*/
hptFirstChunk = (u32)addr_to_chunk(HvCallHpt_getHptAddress());
hptSizePages = (u32)HvCallHpt_getHptPages();
- hptSizeChunks = hptSizePages >> (MSCHUNKS_CHUNK_SHIFT - PAGE_SHIFT);
+ hptSizeChunks = hptSizePages >>
+ (MSCHUNKS_CHUNK_SHIFT - HW_PAGE_SHIFT);
hptLastChunk = hptFirstChunk + hptSizeChunks - 1;
printk("HPT absolute addr = %016lx, size = %dK\n",
chunk_to_addr(hptFirstChunk), hptSizeChunks * 256);
- ppc64_pft_size = __ilog2(hptSizePages * PAGE_SIZE);
+ ppc64_pft_size = __ilog2(hptSizePages * HW_PAGE_SIZE);
/*
* The actual hashed page table is in the hypervisor,
@@ -629,7 +626,7 @@ static void __init iSeries_fixup_klimit(void)
*/
if (naca.xRamDisk)
klimit = KERNELBASE + (u64)naca.xRamDisk +
- (naca.xRamDiskSize * PAGE_SIZE);
+ (naca.xRamDiskSize * HW_PAGE_SIZE);
else {
/*
* No ram disk was included - check and see if there
diff --git a/arch/powerpc/platforms/iseries/smp.c b/arch/powerpc/platforms/iseries/smp.c
index 3336bad6772..fcb094ec6ae 100644
--- a/arch/powerpc/platforms/iseries/smp.c
+++ b/arch/powerpc/platforms/iseries/smp.c
@@ -40,7 +40,6 @@
#include <asm/paca.h>
#include <asm/iseries/hv_call.h>
#include <asm/time.h>
-#include <asm/ppcdebug.h>
#include <asm/machdep.h>
#include <asm/cputable.h>
#include <asm/system.h>
diff --git a/arch/powerpc/platforms/iseries/vio.c b/arch/powerpc/platforms/iseries/vio.c
index c27a66876c2..384360ee06e 100644
--- a/arch/powerpc/platforms/iseries/vio.c
+++ b/arch/powerpc/platforms/iseries/vio.c
@@ -30,41 +30,14 @@ static struct iommu_table vio_iommu_table;
static void __init iommu_vio_init(void)
{
- struct iommu_table *t;
- struct iommu_table_cb cb;
- unsigned long cbp;
- unsigned long itc_entries;
+ iommu_table_getparms_iSeries(255, 0, 0xff, &veth_iommu_table);
+ veth_iommu_table.it_size /= 2;
+ vio_iommu_table = veth_iommu_table;
+ vio_iommu_table.it_offset += veth_iommu_table.it_size;
- cb.itc_busno = 255; /* Bus 255 is the virtual bus */
- cb.itc_virtbus = 0xff; /* Ask for virtual bus */
-
- cbp = virt_to_abs(&cb);
- HvCallXm_getTceTableParms(cbp);
-
- itc_entries = cb.itc_size * PAGE_SIZE / sizeof(union tce_entry);
- veth_iommu_table.it_size = itc_entries / 2;
- veth_iommu_table.it_busno = cb.itc_busno;
- veth_iommu_table.it_offset = cb.itc_offset;
- veth_iommu_table.it_index = cb.itc_index;
- veth_iommu_table.it_type = TCE_VB;
- veth_iommu_table.it_blocksize = 1;
-
- t = iommu_init_table(&veth_iommu_table);
-
- if (!t)
+ if (!iommu_init_table(&veth_iommu_table))
printk("Virtual Bus VETH TCE table failed.\n");
-
- vio_iommu_table.it_size = itc_entries - veth_iommu_table.it_size;
- vio_iommu_table.it_busno = cb.itc_busno;
- vio_iommu_table.it_offset = cb.itc_offset +
- veth_iommu_table.it_size;
- vio_iommu_table.it_index = cb.itc_index;
- vio_iommu_table.it_type = TCE_VB;
- vio_iommu_table.it_blocksize = 1;
-
- t = iommu_init_table(&vio_iommu_table);
-
- if (!t)
+ if (!iommu_init_table(&vio_iommu_table))
printk("Virtual Bus VIO TCE table failed.\n");
}
diff --git a/arch/powerpc/platforms/iseries/viopath.c b/arch/powerpc/platforms/iseries/viopath.c
index fe97bfbf746..84267269559 100644
--- a/arch/powerpc/platforms/iseries/viopath.c
+++ b/arch/powerpc/platforms/iseries/viopath.c
@@ -68,7 +68,8 @@ static DEFINE_SPINLOCK(statuslock);
* For each kind of event we allocate a buffer that is
* guaranteed not to cross a page boundary
*/
-static unsigned char event_buffer[VIO_MAX_SUBTYPES * 256] __page_aligned;
+static unsigned char event_buffer[VIO_MAX_SUBTYPES * 256]
+ __attribute__((__aligned__(4096)));
static atomic_t event_buffer_available[VIO_MAX_SUBTYPES];
static int event_buffer_initialised;
@@ -116,12 +117,12 @@ static int proc_viopath_show(struct seq_file *m, void *v)
HvLpEvent_Rc hvrc;
DECLARE_MUTEX_LOCKED(Semaphore);
- buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ buf = kmalloc(HW_PAGE_SIZE, GFP_KERNEL);
if (!buf)
return 0;
- memset(buf, 0, PAGE_SIZE);
+ memset(buf, 0, HW_PAGE_SIZE);
- handle = dma_map_single(iSeries_vio_dev, buf, PAGE_SIZE,
+ handle = dma_map_single(iSeries_vio_dev, buf, HW_PAGE_SIZE,
DMA_FROM_DEVICE);
hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
@@ -131,7 +132,7 @@ static int proc_viopath_show(struct seq_file *m, void *v)
viopath_sourceinst(viopath_hostLp),
viopath_targetinst(viopath_hostLp),
(u64)(unsigned long)&Semaphore, VIOVERSION << 16,
- ((u64)handle) << 32, PAGE_SIZE, 0, 0);
+ ((u64)handle) << 32, HW_PAGE_SIZE, 0, 0);
if (hvrc != HvLpEvent_Rc_Good)
printk(VIOPATH_KERN_WARN "hv error on op %d\n", (int)hvrc);
@@ -140,7 +141,7 @@ static int proc_viopath_show(struct seq_file *m, void *v)
vlanMap = HvLpConfig_getVirtualLanIndexMap();
- buf[PAGE_SIZE-1] = '\0';
+ buf[HW_PAGE_SIZE-1] = '\0';
seq_printf(m, "%s", buf);
seq_printf(m, "AVAILABLE_VETH=%x\n", vlanMap);
seq_printf(m, "SRLNBR=%c%c%c%c%c%c%c\n",
@@ -152,7 +153,8 @@ static int proc_viopath_show(struct seq_file *m, void *v)
e2a(xItExtVpdPanel.systemSerial[4]),
e2a(xItExtVpdPanel.systemSerial[5]));
- dma_unmap_single(iSeries_vio_dev, handle, PAGE_SIZE, DMA_FROM_DEVICE);
+ dma_unmap_single(iSeries_vio_dev, handle, HW_PAGE_SIZE,
+ DMA_FROM_DEVICE);
kfree(buf);
return 0;
diff --git a/arch/powerpc/platforms/powermac/Makefile b/arch/powerpc/platforms/powermac/Makefile
index 4369676f1d5..c9df44fcf57 100644
--- a/arch/powerpc/platforms/powermac/Makefile
+++ b/arch/powerpc/platforms/powermac/Makefile
@@ -1,7 +1,8 @@
obj-y += pic.o setup.o time.o feature.o pci.o \
sleep.o low_i2c.o cache.o
obj-$(CONFIG_PMAC_BACKLIGHT) += backlight.o
-obj-$(CONFIG_CPU_FREQ_PMAC) += cpufreq.o
+obj-$(CONFIG_CPU_FREQ_PMAC) += cpufreq_32.o
+obj-$(CONFIG_CPU_FREQ_PMAC64) += cpufreq_64.o
obj-$(CONFIG_NVRAM) += nvram.o
# ppc64 pmac doesn't define CONFIG_NVRAM but needs nvram stuff
obj-$(CONFIG_PPC64) += nvram.o
diff --git a/arch/powerpc/platforms/powermac/cpufreq.c b/arch/powerpc/platforms/powermac/cpufreq_32.c
index c47f8b69725..56fd4e05fed 100644
--- a/arch/powerpc/platforms/powermac/cpufreq.c
+++ b/arch/powerpc/platforms/powermac/cpufreq_32.c
@@ -397,18 +397,16 @@ static int pmac_cpufreq_target( struct cpufreq_policy *policy,
unsigned int relation)
{
unsigned int newstate = 0;
+ int rc;
if (cpufreq_frequency_table_target(policy, pmac_cpu_freqs,
target_freq, relation, &newstate))
return -EINVAL;
- return do_set_cpu_speed(newstate, 1);
-}
+ rc = do_set_cpu_speed(newstate, 1);
-unsigned int pmac_get_one_cpufreq(int i)
-{
- /* Supports only one CPU for now */
- return (i == 0) ? cur_freq : 0;
+ ppc_proc_freq = cur_freq * 1000ul;
+ return rc;
}
static int pmac_cpufreq_cpu_init(struct cpufreq_policy *policy)
@@ -474,6 +472,8 @@ static int pmac_cpufreq_resume(struct cpufreq_policy *policy)
do_set_cpu_speed(sleep_freq == low_freq ?
CPUFREQ_LOW : CPUFREQ_HIGH, 0);
+ ppc_proc_freq = cur_freq * 1000ul;
+
no_schedule = 0;
return 0;
}
@@ -547,7 +547,7 @@ static int pmac_cpufreq_init_MacRISC3(struct device_node *cpunode)
*/
if (low_freq < 98000000)
low_freq = 101000000;
-
+
/* Convert those to CPU core clocks */
low_freq = (low_freq * (*ratio)) / 2000;
hi_freq = (hi_freq * (*ratio)) / 2000;
@@ -714,6 +714,7 @@ out:
pmac_cpu_freqs[CPUFREQ_LOW].frequency = low_freq;
pmac_cpu_freqs[CPUFREQ_HIGH].frequency = hi_freq;
+ ppc_proc_freq = cur_freq * 1000ul;
printk(KERN_INFO "Registering PowerMac CPU frequency driver\n");
printk(KERN_INFO "Low: %d Mhz, High: %d Mhz, Boot: %d Mhz\n",
diff --git a/arch/powerpc/platforms/powermac/cpufreq_64.c b/arch/powerpc/platforms/powermac/cpufreq_64.c
new file mode 100644
index 00000000000..39150342c6f
--- /dev/null
+++ b/arch/powerpc/platforms/powermac/cpufreq_64.c
@@ -0,0 +1,323 @@
+/*
+ * Copyright (C) 2002 - 2005 Benjamin Herrenschmidt <benh@kernel.crashing.org>
+ * and Markus Demleitner <msdemlei@cl.uni-heidelberg.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This driver adds basic cpufreq support for SMU & 970FX based G5 Macs,
+ * that is iMac G5 and latest single CPU desktop.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/cpufreq.h>
+#include <linux/init.h>
+#include <linux/completion.h>
+#include <asm/prom.h>
+#include <asm/machdep.h>
+#include <asm/irq.h>
+#include <asm/sections.h>
+#include <asm/cputable.h>
+#include <asm/time.h>
+#include <asm/smu.h>
+
+#undef DEBUG
+
+#ifdef DEBUG
+#define DBG(fmt...) printk(fmt)
+#else
+#define DBG(fmt...)
+#endif
+
+/* see 970FX user manual */
+
+#define SCOM_PCR 0x0aa001 /* PCR scom addr */
+
+#define PCR_HILO_SELECT 0x80000000U /* 1 = PCR, 0 = PCRH */
+#define PCR_SPEED_FULL 0x00000000U /* 1:1 speed value */
+#define PCR_SPEED_HALF 0x00020000U /* 1:2 speed value */
+#define PCR_SPEED_QUARTER 0x00040000U /* 1:4 speed value */
+#define PCR_SPEED_MASK 0x000e0000U /* speed mask */
+#define PCR_SPEED_SHIFT 17
+#define PCR_FREQ_REQ_VALID 0x00010000U /* freq request valid */
+#define PCR_VOLT_REQ_VALID 0x00008000U /* volt request valid */
+#define PCR_TARGET_TIME_MASK 0x00006000U /* target time */
+#define PCR_STATLAT_MASK 0x00001f00U /* STATLAT value */
+#define PCR_SNOOPLAT_MASK 0x000000f0U /* SNOOPLAT value */
+#define PCR_SNOOPACC_MASK 0x0000000fU /* SNOOPACC value */
+
+#define SCOM_PSR 0x408001 /* PSR scom addr */
+/* warning: PSR is a 64-bit register */
+#define PSR_CMD_RECEIVED 0x2000000000000000U /* command received */
+#define PSR_CMD_COMPLETED 0x1000000000000000U /* command completed */
+#define PSR_CUR_SPEED_MASK 0x0300000000000000U /* current speed */
+#define PSR_CUR_SPEED_SHIFT (56)
+
+/*
+ * The G5 only supports two frequencies (Quarter speed is not supported)
+ */
+#define CPUFREQ_HIGH 0
+#define CPUFREQ_LOW 1
+
+static struct cpufreq_frequency_table g5_cpu_freqs[] = {
+ {CPUFREQ_HIGH, 0},
+ {CPUFREQ_LOW, 0},
+ {0, CPUFREQ_TABLE_END},
+};
+
+static struct freq_attr* g5_cpu_freqs_attr[] = {
+ &cpufreq_freq_attr_scaling_available_freqs,
+ NULL,
+};
+
+/* Power mode data is an array of the 32 bits PCR values to use for
+ * the various frequencies, retrieved from the device-tree
+ */
+static u32 *g5_pmode_data;
+static int g5_pmode_max;
+static int g5_pmode_cur;
+
+static DECLARE_MUTEX(g5_switch_mutex);
+
+
+static struct smu_sdbp_fvt *g5_fvt_table; /* table of op. points */
+static int g5_fvt_count; /* number of op. points */
+static int g5_fvt_cur; /* current op. point */
+
+/* ----------------- real hardware interface */
+
+static void g5_switch_volt(int speed_mode)
+{
+ struct smu_simple_cmd cmd;
+
+ DECLARE_COMPLETION(comp);
+ smu_queue_simple(&cmd, SMU_CMD_POWER_COMMAND, 8, smu_done_complete,
+ &comp, 'V', 'S', 'L', 'E', 'W',
+ 0xff, g5_fvt_cur+1, speed_mode);
+ wait_for_completion(&comp);
+}
+
+static int g5_switch_freq(int speed_mode)
+{
+ struct cpufreq_freqs freqs;
+ int to;
+
+ if (g5_pmode_cur == speed_mode)
+ return 0;
+
+ down(&g5_switch_mutex);
+
+ freqs.old = g5_cpu_freqs[g5_pmode_cur].frequency;
+ freqs.new = g5_cpu_freqs[speed_mode].frequency;
+ freqs.cpu = 0;
+
+ cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+
+ /* If frequency is going up, first ramp up the voltage */
+ if (speed_mode < g5_pmode_cur)
+ g5_switch_volt(speed_mode);
+
+ /* Clear PCR high */
+ scom970_write(SCOM_PCR, 0);
+ /* Clear PCR low */
+ scom970_write(SCOM_PCR, PCR_HILO_SELECT | 0);
+ /* Set PCR low */
+ scom970_write(SCOM_PCR, PCR_HILO_SELECT |
+ g5_pmode_data[speed_mode]);
+
+ /* Wait for completion */
+ for (to = 0; to < 10; to++) {
+ unsigned long psr = scom970_read(SCOM_PSR);
+
+ if ((psr & PSR_CMD_RECEIVED) == 0 &&
+ (((psr >> PSR_CUR_SPEED_SHIFT) ^
+ (g5_pmode_data[speed_mode] >> PCR_SPEED_SHIFT)) & 0x3)
+ == 0)
+ break;
+ if (psr & PSR_CMD_COMPLETED)
+ break;
+ udelay(100);
+ }
+
+ /* If frequency is going down, last ramp the voltage */
+ if (speed_mode > g5_pmode_cur)
+ g5_switch_volt(speed_mode);
+
+ g5_pmode_cur = speed_mode;
+ ppc_proc_freq = g5_cpu_freqs[speed_mode].frequency * 1000ul;
+
+ cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+
+ up(&g5_switch_mutex);
+
+ return 0;
+}
+
+static int g5_query_freq(void)
+{
+ unsigned long psr = scom970_read(SCOM_PSR);
+ int i;
+
+ for (i = 0; i <= g5_pmode_max; i++)
+ if ((((psr >> PSR_CUR_SPEED_SHIFT) ^
+ (g5_pmode_data[i] >> PCR_SPEED_SHIFT)) & 0x3) == 0)
+ break;
+ return i;
+}
+
+/* ----------------- cpufreq bookkeeping */
+
+static int g5_cpufreq_verify(struct cpufreq_policy *policy)
+{
+ return cpufreq_frequency_table_verify(policy, g5_cpu_freqs);
+}
+
+static int g5_cpufreq_target(struct cpufreq_policy *policy,
+ unsigned int target_freq, unsigned int relation)
+{
+ unsigned int newstate = 0;
+
+ if (cpufreq_frequency_table_target(policy, g5_cpu_freqs,
+ target_freq, relation, &newstate))
+ return -EINVAL;
+
+ return g5_switch_freq(newstate);
+}
+
+static unsigned int g5_cpufreq_get_speed(unsigned int cpu)
+{
+ return g5_cpu_freqs[g5_pmode_cur].frequency;
+}
+
+static int g5_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
+ if (policy->cpu != 0)
+ return -ENODEV;
+
+ policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
+ policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
+ policy->cur = g5_cpu_freqs[g5_query_freq()].frequency;
+ cpufreq_frequency_table_get_attr(g5_cpu_freqs, policy->cpu);
+
+ return cpufreq_frequency_table_cpuinfo(policy,
+ g5_cpu_freqs);
+}
+
+
+static struct cpufreq_driver g5_cpufreq_driver = {
+ .name = "powermac",
+ .owner = THIS_MODULE,
+ .flags = CPUFREQ_CONST_LOOPS,
+ .init = g5_cpufreq_cpu_init,
+ .verify = g5_cpufreq_verify,
+ .target = g5_cpufreq_target,
+ .get = g5_cpufreq_get_speed,
+ .attr = g5_cpu_freqs_attr,
+};
+
+
+static int __init g5_cpufreq_init(void)
+{
+ struct device_node *cpunode;
+ unsigned int psize, ssize;
+ struct smu_sdbp_header *shdr;
+ unsigned long max_freq;
+ u32 *valp;
+ int rc = -ENODEV;
+
+ /* Look for CPU and SMU nodes */
+ cpunode = of_find_node_by_type(NULL, "cpu");
+ if (!cpunode) {
+ DBG("No CPU node !\n");
+ return -ENODEV;
+ }
+
+ /* Check 970FX for now */
+ valp = (u32 *)get_property(cpunode, "cpu-version", NULL);
+ if (!valp) {
+ DBG("No cpu-version property !\n");
+ goto bail_noprops;
+ }
+ if (((*valp) >> 16) != 0x3c) {
+ DBG("Wrong CPU version: %08x\n", *valp);
+ goto bail_noprops;
+ }
+
+ /* Look for the powertune data in the device-tree */
+ g5_pmode_data = (u32 *)get_property(cpunode, "power-mode-data",&psize);
+ if (!g5_pmode_data) {
+ DBG("No power-mode-data !\n");
+ goto bail_noprops;
+ }
+ g5_pmode_max = psize / sizeof(u32) - 1;
+
+ /* Look for the FVT table */
+ shdr = smu_get_sdb_partition(SMU_SDB_FVT_ID, NULL);
+ if (!shdr)
+ goto bail_noprops;
+ g5_fvt_table = (struct smu_sdbp_fvt *)&shdr[1];
+ ssize = (shdr->len * sizeof(u32)) - sizeof(struct smu_sdbp_header);
+ g5_fvt_count = ssize / sizeof(struct smu_sdbp_fvt);
+ g5_fvt_cur = 0;
+
+ /* Sanity checking */
+ if (g5_fvt_count < 1 || g5_pmode_max < 1)
+ goto bail_noprops;
+
+ /*
+ * From what I see, clock-frequency is always the maximal frequency.
+ * The current driver can not slew sysclk yet, so we really only deal
+ * with powertune steps for now. We also only implement full freq and
+ * half freq in this version. So far, I haven't yet seen a machine
+ * supporting anything else.
+ */
+ valp = (u32 *)get_property(cpunode, "clock-frequency", NULL);
+ if (!valp)
+ return -ENODEV;
+ max_freq = (*valp)/1000;
+ g5_cpu_freqs[0].frequency = max_freq;
+ g5_cpu_freqs[1].frequency = max_freq/2;
+
+ /* Check current frequency */
+ g5_pmode_cur = g5_query_freq();
+ if (g5_pmode_cur > 1)
+ /* We don't support anything but 1:1 and 1:2, fixup ... */
+ g5_pmode_cur = 1;
+
+ /* Force apply current frequency to make sure everything is in
+ * sync (voltage is right for example). Firmware may leave us with
+ * a strange setting ...
+ */
+ g5_switch_freq(g5_pmode_cur);
+
+ printk(KERN_INFO "Registering G5 CPU frequency driver\n");
+ printk(KERN_INFO "Low: %d Mhz, High: %d Mhz, Cur: %d MHz\n",
+ g5_cpu_freqs[1].frequency/1000,
+ g5_cpu_freqs[0].frequency/1000,
+ g5_cpu_freqs[g5_pmode_cur].frequency/1000);
+
+ rc = cpufreq_register_driver(&g5_cpufreq_driver);
+
+ /* We keep the CPU node on hold... hopefully, Apple G5 don't have
+ * hotplug CPU with a dynamic device-tree ...
+ */
+ return rc;
+
+ bail_noprops:
+ of_node_put(cpunode);
+
+ return rc;
+}
+
+module_init(g5_cpufreq_init);
+
+
+MODULE_LICENSE("GPL");
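
g5_switch_freq in the new driver follows the usual DVFS ordering rule: raise the voltage before raising the clock, and only lower the voltage after the clock has been lowered. An outline of that ordering with the hardware calls stubbed out (this is a sketch of the rule, not the driver's code):

#include <stdio.h>

enum { SPEED_FULL = 0, SPEED_HALF = 1 };   /* lower index = faster, as in the table */

static int cur_speed = SPEED_HALF;

static void set_voltage(int speed) { printf("voltage for mode %d\n", speed); }
static void set_clock(int speed)   { printf("clock for mode %d\n", speed);   }

static void switch_speed(int new_speed)
{
        if (new_speed == cur_speed)
                return;
        if (new_speed < cur_speed)          /* going faster: volts first */
                set_voltage(new_speed);
        set_clock(new_speed);
        if (new_speed > cur_speed)          /* going slower: volts last  */
                set_voltage(new_speed);
        cur_speed = new_speed;
}

int main(void)
{
        switch_speed(SPEED_FULL);
        switch_speed(SPEED_HALF);
        return 0;
}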
diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c
index 80b58c1ec41..7acb0546671 100644
--- a/arch/powerpc/platforms/powermac/setup.c
+++ b/arch/powerpc/platforms/powermac/setup.c
@@ -193,18 +193,6 @@ static void pmac_show_cpuinfo(struct seq_file *m)
pmac_newworld ? "NewWorld" : "OldWorld");
}
-static void pmac_show_percpuinfo(struct seq_file *m, int i)
-{
-#ifdef CONFIG_CPU_FREQ_PMAC
- extern unsigned int pmac_get_one_cpufreq(int i);
- unsigned int freq = pmac_get_one_cpufreq(i);
- if (freq != 0) {
- seq_printf(m, "clock\t\t: %dMHz\n", freq/1000);
- return;
- }
-#endif /* CONFIG_CPU_FREQ_PMAC */
-}
-
#ifndef CONFIG_ADB_CUDA
int find_via_cuda(void)
{
@@ -767,7 +755,6 @@ struct machdep_calls __initdata pmac_md = {
.setup_arch = pmac_setup_arch,
.init_early = pmac_init_early,
.show_cpuinfo = pmac_show_cpuinfo,
- .show_percpuinfo = pmac_show_percpuinfo,
.init_IRQ = pmac_pic_init,
.get_irq = mpic_get_irq, /* changed later */
.pcibios_fixup = pmac_pcibios_fixup,
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index 513e2723149..fcc50bfd43f 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -37,7 +37,6 @@
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/rtas.h>
-#include <asm/ppcdebug.h>
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
@@ -47,6 +46,7 @@
#include <asm/firmware.h>
#include <asm/tce.h>
#include <asm/ppc-pci.h>
+#include <asm/udbg.h>
#include "plpar_wrappers.h"
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index e384a5a9179..a50e5f3f396 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -19,7 +19,7 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#define DEBUG
+#undef DEBUG_LOW
#include <linux/config.h>
#include <linux/kernel.h>
@@ -31,20 +31,21 @@
#include <asm/machdep.h>
#include <asm/abs_addr.h>
#include <asm/mmu_context.h>
-#include <asm/ppcdebug.h>
#include <asm/iommu.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/prom.h>
#include <asm/abs_addr.h>
#include <asm/cputable.h>
+#include <asm/udbg.h>
+#include <asm/smp.h>
#include "plpar_wrappers.h"
-#ifdef DEBUG
-#define DBG(fmt...) udbg_printf(fmt)
+#ifdef DEBUG_LOW
+#define DBG_LOW(fmt...) do { udbg_printf(fmt); } while(0)
#else
-#define DBG(fmt...)
+#define DBG_LOW(fmt...) do { } while(0)
#endif
/* in pSeries_hvCall.S */
@@ -276,8 +277,9 @@ void vpa_init(int cpu)
}
long pSeries_lpar_hpte_insert(unsigned long hpte_group,
- unsigned long va, unsigned long prpn,
- unsigned long vflags, unsigned long rflags)
+ unsigned long va, unsigned long pa,
+ unsigned long rflags, unsigned long vflags,
+ int psize)
{
unsigned long lpar_rc;
unsigned long flags;
@@ -285,11 +287,28 @@ long pSeries_lpar_hpte_insert(unsigned long hpte_group,
unsigned long hpte_v, hpte_r;
unsigned long dummy0, dummy1;
- hpte_v = ((va >> 23) << HPTE_V_AVPN_SHIFT) | vflags | HPTE_V_VALID;
- if (vflags & HPTE_V_LARGE)
- hpte_v &= ~(1UL << HPTE_V_AVPN_SHIFT);
-
- hpte_r = (prpn << HPTE_R_RPN_SHIFT) | rflags;
+ if (!(vflags & HPTE_V_BOLTED))
+ DBG_LOW("hpte_insert(group=%lx, va=%016lx, pa=%016lx, "
+ "rflags=%lx, vflags=%lx, psize=%d)\n",
+ hpte_group, va, pa, rflags, vflags, psize);
+
+ hpte_v = hpte_encode_v(va, psize) | vflags | HPTE_V_VALID;
+ hpte_r = hpte_encode_r(pa, psize) | rflags;
+
+ if (!(vflags & HPTE_V_BOLTED))
+ DBG_LOW(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r);
+
+#if 1
+ {
+ int i;
+ for (i=0;i<8;i++) {
+ unsigned long w0, w1;
+ plpar_pte_read(0, hpte_group, &w0, &w1);
+ BUG_ON (HPTE_V_COMPARE(hpte_v, w0)
+ && (w0 & HPTE_V_VALID));
+ }
+ }
+#endif
/* Now fill in the actual HPTE */
/* Set CEC cookie to 0 */
@@ -299,23 +318,30 @@ long pSeries_lpar_hpte_insert(unsigned long hpte_group,
/* Exact = 0 */
flags = 0;
- /* XXX why is this here? - Anton */
+ /* Make pHyp happy */
if (rflags & (_PAGE_GUARDED|_PAGE_NO_CACHE))
hpte_r &= ~_PAGE_COHERENT;
lpar_rc = plpar_hcall(H_ENTER, flags, hpte_group, hpte_v,
hpte_r, &slot, &dummy0, &dummy1);
-
- if (unlikely(lpar_rc == H_PTEG_Full))
+ if (unlikely(lpar_rc == H_PTEG_Full)) {
+ if (!(vflags & HPTE_V_BOLTED))
+ DBG_LOW(" full\n");
return -1;
+ }
/*
* Since we try and ioremap PHBs we don't own, the pte insert
* will fail. However we must catch the failure in hash_page
* or we will loop forever, so return -2 in this case.
*/
- if (unlikely(lpar_rc != H_Success))
+ if (unlikely(lpar_rc != H_Success)) {
+ if (!(vflags & HPTE_V_BOLTED))
+ DBG_LOW(" lpar err %d\n", lpar_rc);
return -2;
+ }
+ if (!(vflags & HPTE_V_BOLTED))
+ DBG_LOW(" -> slot: %d\n", slot & 7);
/* Because of iSeries, we have to pass down the secondary
* bucket bit here as well
@@ -340,10 +366,8 @@ static long pSeries_lpar_hpte_remove(unsigned long hpte_group)
/* don't remove a bolted entry */
lpar_rc = plpar_pte_remove(H_ANDCOND, hpte_group + slot_offset,
(0x1UL << 4), &dummy1, &dummy2);
-
if (lpar_rc == H_Success)
return i;
-
BUG_ON(lpar_rc != H_Not_Found);
slot_offset++;
@@ -371,20 +395,28 @@ static void pSeries_lpar_hptab_clear(void)
* We can probably optimize here and assume the high bits of newpp are
* already zero. For now I am paranoid.
*/
-static long pSeries_lpar_hpte_updatepp(unsigned long slot, unsigned long newpp,
- unsigned long va, int large, int local)
+static long pSeries_lpar_hpte_updatepp(unsigned long slot,
+ unsigned long newpp,
+ unsigned long va,
+ int psize, int local)
{
unsigned long lpar_rc;
unsigned long flags = (newpp & 7) | H_AVPN;
- unsigned long avpn = va >> 23;
+ unsigned long want_v;
- if (large)
- avpn &= ~0x1UL;
+ want_v = hpte_encode_v(va, psize);
- lpar_rc = plpar_pte_protect(flags, slot, (avpn << 7));
+ DBG_LOW(" update: avpnv=%016lx, hash=%016lx, f=%x, psize: %d ... ",
+ want_v & HPTE_V_AVPN, slot, flags, psize);
- if (lpar_rc == H_Not_Found)
+ lpar_rc = plpar_pte_protect(flags, slot, want_v & HPTE_V_AVPN);
+
+ if (lpar_rc == H_Not_Found) {
+ DBG_LOW("not found !\n");
return -1;
+ }
+
+ DBG_LOW("ok\n");
BUG_ON(lpar_rc != H_Success);
@@ -410,21 +442,22 @@ static unsigned long pSeries_lpar_hpte_getword0(unsigned long slot)
return dword0;
}
-static long pSeries_lpar_hpte_find(unsigned long vpn)
+static long pSeries_lpar_hpte_find(unsigned long va, int psize)
{
unsigned long hash;
unsigned long i, j;
long slot;
- unsigned long hpte_v;
+ unsigned long want_v, hpte_v;
- hash = hpt_hash(vpn, 0);
+ hash = hpt_hash(va, mmu_psize_defs[psize].shift);
+ want_v = hpte_encode_v(va, psize);
for (j = 0; j < 2; j++) {
slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
for (i = 0; i < HPTES_PER_GROUP; i++) {
hpte_v = pSeries_lpar_hpte_getword0(slot);
- if ((HPTE_V_AVPN_VAL(hpte_v) == (vpn >> 11))
+ if (HPTE_V_COMPARE(hpte_v, want_v)
&& (hpte_v & HPTE_V_VALID)
&& (!!(hpte_v & HPTE_V_SECONDARY) == j)) {
/* HPTE matches */
@@ -441,17 +474,15 @@ static long pSeries_lpar_hpte_find(unsigned long vpn)
}
static void pSeries_lpar_hpte_updateboltedpp(unsigned long newpp,
- unsigned long ea)
+ unsigned long ea,
+ int psize)
{
- unsigned long lpar_rc;
- unsigned long vsid, va, vpn, flags;
- long slot;
+ unsigned long lpar_rc, slot, vsid, va, flags;
vsid = get_kernel_vsid(ea);
va = (vsid << 28) | (ea & 0x0fffffff);
- vpn = va >> PAGE_SHIFT;
- slot = pSeries_lpar_hpte_find(vpn);
+ slot = pSeries_lpar_hpte_find(va, psize);
BUG_ON(slot == -1);
flags = newpp & 7;
@@ -461,18 +492,18 @@ static void pSeries_lpar_hpte_updateboltedpp(unsigned long newpp,
}
static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long va,
- int large, int local)
+ int psize, int local)
{
- unsigned long avpn = va >> 23;
+ unsigned long want_v;
unsigned long lpar_rc;
unsigned long dummy1, dummy2;
- if (large)
- avpn &= ~0x1UL;
-
- lpar_rc = plpar_pte_remove(H_AVPN, slot, (avpn << 7), &dummy1,
- &dummy2);
+ DBG_LOW(" inval : slot=%lx, va=%016lx, psize: %d, local: %d",
+ slot, va, psize, local);
+ want_v = hpte_encode_v(va, psize);
+ lpar_rc = plpar_pte_remove(H_AVPN, slot, want_v & HPTE_V_AVPN,
+ &dummy1, &dummy2);
if (lpar_rc == H_Not_Found)
return;
@@ -494,7 +525,8 @@ void pSeries_lpar_flush_hash_range(unsigned long number, int local)
spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags);
for (i = 0; i < number; i++)
- flush_hash_page(batch->vaddr[i], batch->pte[i], local);
+ flush_hash_page(batch->vaddr[i], batch->pte[i],
+ batch->psize, local);
if (lock_tlbie)
spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags);
diff --git a/arch/powerpc/platforms/pseries/plpar_wrappers.h b/arch/powerpc/platforms/pseries/plpar_wrappers.h
index 382f8c5b0e7..3bd1b3e0600 100644
--- a/arch/powerpc/platforms/pseries/plpar_wrappers.h
+++ b/arch/powerpc/platforms/pseries/plpar_wrappers.h
@@ -107,14 +107,4 @@ static inline long plpar_put_term_char(unsigned long termno, unsigned long len,
lbuf[1]);
}
-static inline long plpar_set_xdabr(unsigned long address, unsigned long flags)
-{
- return plpar_hcall_norets(H_SET_XDABR, address, flags);
-}
-
-static inline long plpar_set_dabr(unsigned long val)
-{
- return plpar_hcall_norets(H_SET_DABR, val);
-}
-
#endif /* _PSERIES_PLPAR_WRAPPERS_H */
diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
index 6562ff4b0a8..fbd214d68b0 100644
--- a/arch/powerpc/platforms/pseries/ras.c
+++ b/arch/powerpc/platforms/pseries/ras.c
@@ -48,7 +48,7 @@
#include <asm/ptrace.h>
#include <asm/machdep.h>
#include <asm/rtas.h>
-#include <asm/ppcdebug.h>
+#include <asm/udbg.h>
static unsigned char ras_log_buf[RTAS_ERROR_LOG_MAX];
static DEFINE_SPINLOCK(ras_log_buf_lock);
diff --git a/arch/powerpc/platforms/pseries/reconfig.c b/arch/powerpc/platforms/pseries/reconfig.c
index 58c61219d08..d7d40033945 100644
--- a/arch/powerpc/platforms/pseries/reconfig.c
+++ b/arch/powerpc/platforms/pseries/reconfig.c
@@ -286,10 +286,8 @@ static struct property *new_property(const char *name, const int length,
return new;
cleanup:
- if (new->name)
- kfree(new->name);
- if (new->value)
- kfree(new->value);
+ kfree(new->name);
+ kfree(new->value);
kfree(new);
return NULL;
}
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 65bee939eec..e78c3936884 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -65,6 +65,7 @@
#include <asm/ppc-pci.h>
#include <asm/i8259.h>
#include <asm/udbg.h>
+#include <asm/smp.h>
#include "plpar_wrappers.h"
@@ -353,14 +354,15 @@ static void pSeries_mach_cpu_die(void)
static int pseries_set_dabr(unsigned long dabr)
{
- if (firmware_has_feature(FW_FEATURE_XDABR)) {
- /* We want to catch accesses from kernel and userspace */
- return plpar_set_xdabr(dabr, H_DABRX_KERNEL | H_DABRX_USER);
- }
-
- return plpar_set_dabr(dabr);
+ return plpar_hcall_norets(H_SET_DABR, dabr);
}
+static int pseries_set_xdabr(unsigned long dabr)
+{
+ /* We want to catch accesses from kernel and userspace */
+ return plpar_hcall_norets(H_SET_XDABR, dabr,
+ H_DABRX_KERNEL | H_DABRX_USER);
+}
/*
* Early initialization. Relocation is on but do not reference unbolted pages
@@ -396,8 +398,10 @@ static void __init pSeries_init_early(void)
DBG("Hello World !\n");
}
- if (firmware_has_feature(FW_FEATURE_XDABR | FW_FEATURE_DABR))
+ if (firmware_has_feature(FW_FEATURE_DABR))
ppc_md.set_dabr = pseries_set_dabr;
+ else if (firmware_has_feature(FW_FEATURE_XDABR))
+ ppc_md.set_dabr = pseries_set_xdabr;
iommu_init_early_pSeries();
diff --git a/arch/powerpc/sysdev/i8259.c b/arch/powerpc/sysdev/i8259.c
index 90bce6e0c19..b7ac32fdd77 100644
--- a/arch/powerpc/sysdev/i8259.c
+++ b/arch/powerpc/sysdev/i8259.c
@@ -207,6 +207,9 @@ void __init i8259_init(unsigned long intack_addr, int offset)
spin_unlock_irqrestore(&i8259_lock, flags);
+ for (i = 0; i < NUM_ISA_INTERRUPTS; ++i)
+ irq_desc[offset + i].handler = &i8259_pic;
+
/* reserve our resources */
setup_irq(offset + 2, &i8259_irqaction);
request_resource(&ioport_resource, &pic1_iores);
@@ -216,6 +219,4 @@ void __init i8259_init(unsigned long intack_addr, int offset)
if (intack_addr != 0)
pci_intack = ioremap(intack_addr, 1);
- for (i = 0; i < NUM_ISA_INTERRUPTS; ++i)
- irq_desc[offset + i].handler = &i8259_pic;
}
diff --git a/arch/powerpc/sysdev/u3_iommu.c b/arch/powerpc/sysdev/u3_iommu.c
index 607722178c1..543d6590981 100644
--- a/arch/powerpc/sysdev/u3_iommu.c
+++ b/arch/powerpc/sysdev/u3_iommu.c
@@ -37,7 +37,6 @@
#include <linux/vmalloc.h>
#include <asm/io.h>
#include <asm/prom.h>
-#include <asm/ppcdebug.h>
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
diff --git a/arch/ppc/4xx_io/serial_sicc.c b/arch/ppc/4xx_io/serial_sicc.c
index e95c48d5757..84d96b857e4 100644
--- a/arch/ppc/4xx_io/serial_sicc.c
+++ b/arch/ppc/4xx_io/serial_sicc.c
@@ -1145,8 +1145,8 @@ static int set_serial_info(struct SICC_info *info,
info->flags = ((state->flags & ~ASYNC_INTERNAL_FLAGS) |
(info->flags & ASYNC_INTERNAL_FLAGS));
state->custom_divisor = new_serial.custom_divisor;
- state->close_delay = new_serial.close_delay * HZ / 100;
- state->closing_wait = new_serial.closing_wait * HZ / 100;
+ state->close_delay = msecs_to_jiffies(10 * new_serial.close_delay);
+ state->closing_wait = msecs_to_jiffies(10 * new_serial.closing_wait);
info->tty->low_latency = (info->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
port->fifosize = new_serial.xmit_fifo_size;
@@ -1465,10 +1465,8 @@ static void siccuart_close(struct tty_struct *tty, struct file *filp)
info->event = 0;
info->tty = NULL;
if (info->blocked_open) {
- if (info->state->close_delay) {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(info->state->close_delay);
- }
+ if (info->state->close_delay)
+ schedule_timeout_interruptible(info->state->close_delay);
wake_up_interruptible(&info->open_wait);
}
info->flags &= ~(ASYNC_NORMAL_ACTIVE|ASYNC_CLOSING);
@@ -1496,7 +1494,7 @@ static void siccuart_wait_until_sent(struct tty_struct *tty, int timeout)
* Note: we have to use pretty tight timings here to satisfy
* the NIST-PCTS.
*/
- char_time = (info->timeout - HZ/50) / info->port->fifosize;
+ char_time = (info->timeout - msecs_to_jiffies(20)) / info->port->fifosize;
char_time = char_time / 5;
if (char_time == 0)
char_time = 1;
@@ -1521,8 +1519,7 @@ static void siccuart_wait_until_sent(struct tty_struct *tty, int timeout)
tty->index, jiffies,
expire, char_time);
while ((readb(info->port->uart_base + BL_SICC_LSR) & _LSR_TX_ALL) != _LSR_TX_ALL) {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(char_time);
+ schedule_timeout_interruptible(char_time);
if (signal_pending(current))
break;
if (timeout && time_after(jiffies, expire))
@@ -1773,7 +1770,7 @@ int __init siccuart_init(void)
for (i = 0; i < SERIAL_SICC_NR; i++) {
struct SICC_state *state = sicc_state + i;
state->line = i;
- state->close_delay = 5 * HZ / 10;
+ state->close_delay = msecs_to_jiffies(500);
state->closing_wait = 30 * HZ;
spin_lock_init(&state->sicc_lock);
}
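
close_delay and closing_wait arrive from user space in hundredths of a second, so the conversions above multiply by 10 ms and use msecs_to_jiffies() rather than open-coding "* HZ / 100". A standalone check of the equivalence, with msecs_to_jiffies() modelled locally for a fixed HZ (the kernel version also rounds up, which the old form did not):

#include <stdio.h>

#define HZ 250

static unsigned long msecs_to_jiffies(unsigned int ms)
{
	return (ms * HZ + 999) / 1000;	/* round up to the next tick */
}

int main(void)
{
	unsigned int close_delay_cs = 50;	/* 0.5 s, in 1/100ths of a second */

	unsigned long old = close_delay_cs * HZ / 100;		/* truncates */
	unsigned long new = msecs_to_jiffies(10 * close_delay_cs);

	printf("old=%lu jiffies, new=%lu jiffies\n", old, new);
	return 0;
}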
diff --git a/arch/ppc/8260_io/fcc_enet.c b/arch/ppc/8260_io/fcc_enet.c
index 2086c6ad114..4edeede9ccf 100644
--- a/arch/ppc/8260_io/fcc_enet.c
+++ b/arch/ppc/8260_io/fcc_enet.c
@@ -1309,8 +1309,7 @@ static void mii_dm9161_wait(uint mii_reg, struct net_device *dev)
/* Davicom takes a bit to come up after a reset,
* so wait here for a bit */
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(timeout);
+ schedule_timeout_uninterruptible(timeout);
}
static phy_info_t phy_info_dm9161 = {
diff --git a/arch/ppc/8xx_io/cs4218_tdm.c b/arch/ppc/8xx_io/cs4218_tdm.c
index 532caa388dc..49eb2a7e65c 100644
--- a/arch/ppc/8xx_io/cs4218_tdm.c
+++ b/arch/ppc/8xx_io/cs4218_tdm.c
@@ -1013,8 +1013,7 @@ static void CS_IrqCleanup(void)
*/
cpm_free_handler(CPMVEC_SMC2);
- if (beep_buf)
- kfree(beep_buf);
+ kfree(beep_buf);
kd_mksound = orig_mksound;
}
#endif /* MODULE */
diff --git a/arch/ppc/Kconfig b/arch/ppc/Kconfig
index 114b90fdea2..8fa51b0a32d 100644
--- a/arch/ppc/Kconfig
+++ b/arch/ppc/Kconfig
@@ -746,6 +746,16 @@ config MPC834x
bool
default y if MPC834x_SYS
+config CPM1
+ bool
+ depends on 8xx
+ default y
+ help
+ The CPM1 (Communications Processor Module) is a coprocessor on
+ embedded CPUs made by Motorola. Selecting this option means that
+ you wish to build a kernel for a machine with a CPM1 coprocessor
+ on it (8xx, 827x, 8560).
+
config CPM2
bool
depends on 8260 || MPC8560 || MPC8555
@@ -1247,6 +1257,14 @@ source "drivers/pci/Kconfig"
source "drivers/pcmcia/Kconfig"
+config RAPIDIO
+ bool "RapidIO support" if MPC8540 || MPC8560
+ help
+ If you say Y here, the kernel will include drivers and
+ infrastructure code to support RapidIO interconnect devices.
+
+source "drivers/rapidio/Kconfig"
+
endmenu
menu "Advanced setup"
diff --git a/arch/ppc/boot/simple/Makefile b/arch/ppc/boot/simple/Makefile
index b7bd8f61a4a..82df88b01bb 100644
--- a/arch/ppc/boot/simple/Makefile
+++ b/arch/ppc/boot/simple/Makefile
@@ -67,6 +67,12 @@ zimageinitrd-$(CONFIG_BAMBOO) := zImage.initrd-TREE
entrypoint-$(CONFIG_BAMBOO) := 0x01000000
extra.o-$(CONFIG_BAMBOO) := pibs.o
+ zimage-$(CONFIG_BUBINGA) := zImage-TREE
+zimageinitrd-$(CONFIG_BUBINGA) := zImage.initrd-TREE
+ end-$(CONFIG_BUBINGA) := bubinga
+ entrypoint-$(CONFIG_BUBINGA) := 0x01000000
+ extra.o-$(CONFIG_BUBINGA) := openbios.o
+
zimage-$(CONFIG_EBONY) := zImage-TREE
zimageinitrd-$(CONFIG_EBONY) := zImage.initrd-TREE
end-$(CONFIG_EBONY) := ebony
@@ -79,12 +85,30 @@ zimageinitrd-$(CONFIG_LUAN) := zImage.initrd-TREE
entrypoint-$(CONFIG_LUAN) := 0x01000000
extra.o-$(CONFIG_LUAN) := pibs.o
+ zimage-$(CONFIG_YUCCA) := zImage-TREE
+zimageinitrd-$(CONFIG_YUCCA) := zImage.initrd-TREE
+ end-$(CONFIG_YUCCA) := yucca
+ entrypoint-$(CONFIG_YUCCA) := 0x01000000
+ extra.o-$(CONFIG_YUCCA) := pibs.o
+
zimage-$(CONFIG_OCOTEA) := zImage-TREE
zimageinitrd-$(CONFIG_OCOTEA) := zImage.initrd-TREE
end-$(CONFIG_OCOTEA) := ocotea
entrypoint-$(CONFIG_OCOTEA) := 0x01000000
extra.o-$(CONFIG_OCOTEA) := pibs.o
+ zimage-$(CONFIG_SYCAMORE) := zImage-TREE
+zimageinitrd-$(CONFIG_SYCAMORE) := zImage.initrd-TREE
+ end-$(CONFIG_SYCAMORE) := sycamore
+ entrypoint-$(CONFIG_SYCAMORE) := 0x01000000
+ extra.o-$(CONFIG_SYCAMORE) := openbios.o
+
+ zimage-$(CONFIG_WALNUT) := zImage-TREE
+zimageinitrd-$(CONFIG_WALNUT) := zImage.initrd-TREE
+ end-$(CONFIG_WALNUT) := walnut
+ entrypoint-$(CONFIG_WALNUT) := 0x01000000
+ extra.o-$(CONFIG_WALNUT) := openbios.o
+
extra.o-$(CONFIG_EV64260) := misc-ev64260.o
end-$(CONFIG_EV64260) := ev64260
cacheflag-$(CONFIG_EV64260) := -include $(clear_L2_L3)
@@ -162,7 +186,8 @@ OBJCOPY_ARGS := -O elf32-powerpc
# head.o and relocate.o must be at the start.
boot-y := head.o relocate.o $(extra.o-y) $(misc-y)
-boot-$(CONFIG_40x) += embed_config.o
+boot-$(CONFIG_REDWOOD_5) += embed_config.o
+boot-$(CONFIG_REDWOOD_6) += embed_config.o
boot-$(CONFIG_8xx) += embed_config.o
boot-$(CONFIG_8260) += embed_config.o
boot-$(CONFIG_BSEIP) += iic.o
diff --git a/arch/ppc/boot/simple/misc.c b/arch/ppc/boot/simple/misc.c
index e02de5b467a..f415d6c6236 100644
--- a/arch/ppc/boot/simple/misc.c
+++ b/arch/ppc/boot/simple/misc.c
@@ -23,7 +23,7 @@
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/bootinfo.h>
-#ifdef CONFIG_44x
+#ifdef CONFIG_4xx
#include <asm/ibm4xx.h>
#endif
#include <asm/reg.h>
@@ -88,6 +88,14 @@ get_mem_size(void)
return 0;
}
+#if defined(CONFIG_40x)
+#define PPC4xx_EMAC0_MR0 EMAC0_BASE
+#endif
+
+#if defined(CONFIG_44x) && defined(PPC44x_EMAC0_MR0)
+#define PPC4xx_EMAC0_MR0 PPC44x_EMAC0_MR0
+#endif
+
struct bi_record *
decompress_kernel(unsigned long load_addr, int num_words, unsigned long cksum)
{
@@ -103,13 +111,13 @@ decompress_kernel(unsigned long load_addr, int num_words, unsigned long cksum)
com_port = serial_init(0, NULL);
#endif
-#if defined(CONFIG_44x) && defined(PPC44x_EMAC0_MR0)
+#if defined(PPC4xx_EMAC0_MR0)
/* Reset MAL */
mtdcr(DCRN_MALCR(DCRN_MAL_BASE), MALCR_MMSR);
/* Wait for reset */
while (mfdcr(DCRN_MALCR(DCRN_MAL_BASE)) & MALCR_MMSR) {};
/* Reset EMAC */
- *(volatile unsigned long *)PPC44x_EMAC0_MR0 = 0x20000000;
+ *(volatile unsigned long *)PPC4xx_EMAC0_MR0 = 0x20000000;
__asm__ __volatile__("eieio");
#endif
@@ -164,7 +172,9 @@ decompress_kernel(unsigned long load_addr, int num_words, unsigned long cksum)
puts(" "); puthex((unsigned long)(&__ramdisk_end));puts("\n");
}
+#ifndef CONFIG_40x /* don't overwrite the 40x image located at 0x00400000! */
avail_ram = (char *)0x00400000;
+#endif
end_avail = (char *)0x00800000;
puts("avail ram: "); puthex((unsigned long)avail_ram); puts(" ");
puthex((unsigned long)end_avail); puts("\n");
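
The misc.c changes fold the 40x and 44x EMAC reset paths together by defining a family-neutral PPC4xx_EMAC0_MR0 from whichever platform constant the headers provide. A compile-time sketch of that fallback-macro pattern (the CONFIG_44x define and register value below are placeholders, not the board's real settings):

#include <stdio.h>

#define CONFIG_44x
#define PPC44x_EMAC0_MR0 0xE0000800UL	/* would come from the board header */

#if defined(CONFIG_40x)
#define PPC4xx_EMAC0_MR0 EMAC0_BASE
#elif defined(CONFIG_44x) && defined(PPC44x_EMAC0_MR0)
#define PPC4xx_EMAC0_MR0 PPC44x_EMAC0_MR0
#endif

int main(void)
{
#ifdef PPC4xx_EMAC0_MR0
	/* One reset sequence can now be written against the neutral name */
	printf("EMAC0 MR0 at %#lx\n", (unsigned long)PPC4xx_EMAC0_MR0);
#else
	printf("no EMAC0 reset needed on this platform\n");
#endif
	return 0;
}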
diff --git a/arch/ppc/boot/simple/openbios.c b/arch/ppc/boot/simple/openbios.c
index c732b6d70cf..81f11d8b30a 100644
--- a/arch/ppc/boot/simple/openbios.c
+++ b/arch/ppc/boot/simple/openbios.c
@@ -1,19 +1,43 @@
/*
* arch/ppc/boot/simple/openbios.c
*
- * 2005 (c) SYSGO AG - g.jaeger@sysgo.com
+ * Copyright (c) 2005 DENX Software Engineering
+ * Stefan Roese <sr@denx.de>
+ *
+ * Based on original work by
+ * 2005 (c) SYSGO AG - g.jaeger@sysgo.com
+ *
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without
* any warranty of any kind, whether express or implied.
*
- * Derived from arch/ppc/boot/simple/pibs.c (from MontaVista)
*/
#include <linux/types.h>
#include <linux/config.h>
#include <linux/string.h>
#include <asm/ppcboot.h>
-#include <platforms/4xx/ebony.h>
+#include <asm/ibm4xx.h>
+#include <asm/reg.h>
+#ifdef CONFIG_40x
+#include <asm/io.h>
+#endif
+
+#if defined(CONFIG_BUBINGA)
+#define BOARD_INFO_VECTOR 0xFFF80B50 /* openbios 1.19 moved this vector down - armin */
+#else
+#define BOARD_INFO_VECTOR 0xFFFE0B50
+#endif
+
+#ifdef CONFIG_40x
+/* Supply a default Ethernet address for those eval boards that don't
+ * ship with one. This is an address from the MBX board I have, so
+ * it is unlikely you will find it on your network.
+ */
+static ushort def_enet_addr[] = { 0x0800, 0x3e26, 0x1559 };
+
+extern unsigned long timebase_period_ns;
+#endif /* CONFIG_40x */
extern unsigned long decompress_kernel(unsigned long load_addr, int num_words,
unsigned long cksum);
@@ -23,15 +47,85 @@ extern unsigned long decompress_kernel(unsigned long load_addr, int num_words,
bd_t hold_resid_buf __attribute__ ((__section__ (".data.boot")));
bd_t *hold_residual = &hold_resid_buf;
+typedef struct openbios_board_info {
+ unsigned char bi_s_version[4]; /* Version of this structure */
+ unsigned char bi_r_version[30]; /* Version of the IBM ROM */
+ unsigned int bi_memsize; /* DRAM installed, in bytes */
+#ifdef CONFIG_405EP
+ unsigned char bi_enetaddr[2][6]; /* Local Ethernet MAC address */
+#else /* CONFIG_405EP */
+ unsigned char bi_enetaddr[6]; /* Local Ethernet MAC address */
+#endif /* CONFIG_405EP */
+ unsigned char bi_pci_enetaddr[6]; /* PCI Ethernet MAC address */
+ unsigned int bi_intfreq; /* Processor speed, in Hz */
+ unsigned int bi_busfreq; /* PLB Bus speed, in Hz */
+ unsigned int bi_pci_busfreq; /* PCI Bus speed, in Hz */
+#ifdef CONFIG_405EP
+ unsigned int bi_opb_busfreq; /* OPB Bus speed, in Hz */
+ unsigned int bi_pllouta_freq; /* PLL OUTA speed, in Hz */
+#endif /* CONFIG_405EP */
+} openbios_bd_t;
+
void *
load_kernel(unsigned long load_addr, int num_words, unsigned long cksum,
void *ign1, void *ign2)
{
- decompress_kernel(load_addr, num_words, cksum);
+#ifdef CONFIG_40x
+ openbios_bd_t *openbios_bd = NULL;
+ openbios_bd_t *(*get_board_info)(void) =
+ (openbios_bd_t *(*)(void))(*(unsigned long *)BOARD_INFO_VECTOR);
+
+ /*
+ * On 40x platforms we not only need the MAC-addresses, but also the
+ * clocks and memsize. Now try to get all values using the OpenBIOS
+ * "get_board_info()" callback.
+ */
+ if ((openbios_bd = get_board_info()) != NULL) {
+ /*
+ * Copy bd_info from OpenBIOS struct into U-Boot struct
+ * used by kernel
+ */
+ hold_residual->bi_memsize = openbios_bd->bi_memsize;
+ hold_residual->bi_intfreq = openbios_bd->bi_intfreq;
+ hold_residual->bi_busfreq = openbios_bd->bi_busfreq;
+ hold_residual->bi_pci_busfreq = openbios_bd->bi_pci_busfreq;
+ memcpy(hold_residual->bi_pci_enetaddr, openbios_bd->bi_pci_enetaddr, 6);
+#ifdef CONFIG_405EP
+ memcpy(hold_residual->bi_enetaddr, openbios_bd->bi_enetaddr[0], 6);
+ memcpy(hold_residual->bi_enet1addr, openbios_bd->bi_enetaddr[1], 6);
+ hold_residual->bi_opbfreq = openbios_bd->bi_opb_busfreq;
+ hold_residual->bi_procfreq = openbios_bd->bi_pllouta_freq;
+#else /* CONFIG_405EP */
+ memcpy(hold_residual->bi_enetaddr, openbios_bd->bi_enetaddr, 6);
+#endif /* CONFIG_405EP */
+ } else {
+ /* Hmmm...better try to stuff some defaults.
+ */
+ hold_residual->bi_memsize = 16 * 1024 * 1024;
+ hold_residual->bi_intfreq = 200000000;
+ hold_residual->bi_busfreq = 100000000;
+ hold_residual->bi_pci_busfreq = 66666666;
+
+ /*
+ * Only supply one mac-address in this fallback
+ */
+ memcpy(hold_residual->bi_enetaddr, (void *)def_enet_addr, 6);
+#ifdef CONFIG_405EP
+ hold_residual->bi_opbfreq = 50000000;
+ hold_residual->bi_procfreq = 200000000;
+#endif /* CONFIG_405EP */
+ }
+ timebase_period_ns = 1000000000 / hold_residual->bi_intfreq;
+#endif /* CONFIG_40x */
+
+#ifdef CONFIG_440GP
/* simply copy the MAC addresses */
- memcpy(hold_residual->bi_enetaddr, (char *)EBONY_OPENBIOS_MAC_BASE, 6);
- memcpy(hold_residual->bi_enet1addr, (char *)(EBONY_OPENBIOS_MAC_BASE+EBONY_OPENBIOS_MAC_OFFSET), 6);
+ memcpy(hold_residual->bi_enetaddr, (char *)OPENBIOS_MAC_BASE, 6);
+ memcpy(hold_residual->bi_enet1addr, (char *)(OPENBIOS_MAC_BASE+OPENBIOS_MAC_OFFSET), 6);
+#endif /* CONFIG_440GP */
+
+ decompress_kernel(load_addr, num_words, cksum);
return (void *)hold_residual;
}
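
load_kernel() above fetches board data by calling through a word stored at a fixed vector address and falls back to hard-coded defaults when the callback returns NULL. A user-space model of that pattern, with the vector replaced by an ordinary function pointer and made-up board values:

#include <stdio.h>

struct fw_board_info {
	unsigned int memsize;
	unsigned int intfreq;
	unsigned char enetaddr[6];
};

typedef struct fw_board_info *(*board_info_fn)(void);

static struct fw_board_info fw_bd = {
	.memsize  = 64 << 20,
	.intfreq  = 200000000,
	.enetaddr = { 0x08, 0x00, 0x3e, 0x26, 0x15, 0x59 },
};

/* Firmware callback; a real OpenBIOS may return NULL */
static struct fw_board_info *get_board_info(void)
{
	return &fw_bd;
}

/* Stand-in for the function-pointer word kept at BOARD_INFO_VECTOR */
static board_info_fn board_info_vector = get_board_info;

int main(void)
{
	struct fw_board_info *bd = board_info_vector();
	unsigned int memsize, intfreq;

	if (bd) {
		memsize = bd->memsize;	/* copy what the firmware reports */
		intfreq = bd->intfreq;
	} else {
		memsize = 16 << 20;	/* otherwise stuff safe defaults */
		intfreq = 200000000;
	}
	printf("mem=%u MB, cpu=%u Hz\n", memsize >> 20, intfreq);
	return 0;
}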
diff --git a/arch/ppc/configs/ev64360_defconfig b/arch/ppc/configs/ev64360_defconfig
index de9bbb791db..d471e578dcb 100644
--- a/arch/ppc/configs/ev64360_defconfig
+++ b/arch/ppc/configs/ev64360_defconfig
@@ -1,17 +1,17 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.13-rc5
-# Fri Aug 5 15:18:23 2005
+# Linux kernel version: 2.6.14
+# Fri Oct 28 19:15:34 2005
#
CONFIG_MMU=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_RWSEM_XCHGADD_ALGORITHM=y
CONFIG_GENERIC_CALIBRATE_DELAY=y
-CONFIG_HAVE_DEC_LOCK=y
CONFIG_PPC=y
CONFIG_PPC32=y
CONFIG_GENERIC_NVRAM=y
CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
+CONFIG_ARCH_MAY_HAVE_PC_FDC=y
#
# Code maturity level options
@@ -26,6 +26,7 @@ CONFIG_INIT_ENV_ARG_LIMIT=32
# General setup
#
CONFIG_LOCALVERSION=""
+CONFIG_LOCALVERSION_AUTO=y
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
@@ -35,6 +36,7 @@ CONFIG_SYSCTL=y
CONFIG_HOTPLUG=y
CONFIG_KOBJECT_UEVENT=y
# CONFIG_IKCONFIG is not set
+CONFIG_INITRAMFS_SOURCE=""
# CONFIG_EMBEDDED is not set
CONFIG_KALLSYMS=y
# CONFIG_KALLSYMS_EXTRA_PASS is not set
@@ -74,7 +76,7 @@ CONFIG_TAU=y
# CONFIG_TAU_AVERAGE is not set
# CONFIG_KEXEC is not set
# CONFIG_CPU_FREQ is not set
-# CONFIG_PM is not set
+# CONFIG_WANT_EARLY_SERIAL is not set
CONFIG_PPC_STD_MMU=y
CONFIG_NOT_COHERENT_CACHE=y
@@ -86,22 +88,18 @@ CONFIG_NOT_COHERENT_CACHE=y
# CONFIG_KATANA is not set
# CONFIG_WILLOW is not set
# CONFIG_CPCI690 is not set
-# CONFIG_PCORE is not set
# CONFIG_POWERPMC250 is not set
# CONFIG_CHESTNUT is not set
# CONFIG_SPRUCE is not set
# CONFIG_HDPU is not set
# CONFIG_EV64260 is not set
# CONFIG_LOPEC is not set
-# CONFIG_MCPN765 is not set
# CONFIG_MVME5100 is not set
# CONFIG_PPLUS is not set
# CONFIG_PRPMC750 is not set
# CONFIG_PRPMC800 is not set
# CONFIG_SANDPOINT is not set
# CONFIG_RADSTONE_PPC7D is not set
-# CONFIG_ADIR is not set
-# CONFIG_K2 is not set
# CONFIG_PAL4 is not set
# CONFIG_GEMINI is not set
# CONFIG_EST8260 is not set
@@ -138,10 +136,13 @@ CONFIG_FLATMEM_MANUAL=y
# CONFIG_SPARSEMEM_MANUAL is not set
CONFIG_FLATMEM=y
CONFIG_FLAT_NODE_MEM_MAP=y
+# CONFIG_SPARSEMEM_STATIC is not set
CONFIG_BINFMT_ELF=y
CONFIG_BINFMT_MISC=y
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="console=ttyMM0,115200 root=/dev/mtdblock1 rw rootfstype=jffs2"
+# CONFIG_PM is not set
+# CONFIG_SOFTWARE_SUSPEND is not set
CONFIG_SECCOMP=y
CONFIG_ISA_DMA_API=y
@@ -152,7 +153,6 @@ CONFIG_GENERIC_ISA_DMA=y
CONFIG_PCI=y
CONFIG_PCI_DOMAINS=y
# CONFIG_PCI_LEGACY_PROC is not set
-# CONFIG_PCI_NAMES is not set
#
# PCCARD (PCMCIA/CardBus) support
@@ -206,14 +206,19 @@ CONFIG_SYN_COOKIES=y
# CONFIG_INET_ESP is not set
# CONFIG_INET_IPCOMP is not set
# CONFIG_INET_TUNNEL is not set
-CONFIG_IP_TCPDIAG=y
-# CONFIG_IP_TCPDIAG_IPV6 is not set
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
# CONFIG_TCP_CONG_ADVANCED is not set
CONFIG_TCP_CONG_BIC=y
# CONFIG_IPV6 is not set
# CONFIG_NETFILTER is not set
#
+# DCCP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_DCCP is not set
+
+#
# SCTP Configuration (EXPERIMENTAL)
#
# CONFIG_IP_SCTP is not set
@@ -239,6 +244,7 @@ CONFIG_TCP_CONG_BIC=y
# CONFIG_HAMRADIO is not set
# CONFIG_IRDA is not set
# CONFIG_BT is not set
+# CONFIG_IEEE80211 is not set
#
# Device Drivers
@@ -252,6 +258,11 @@ CONFIG_PREVENT_FIRMWARE_BUILD=y
# CONFIG_FW_LOADER is not set
#
+# Connector - unified userspace <-> kernelspace linker
+#
+# CONFIG_CONNECTOR is not set
+
+#
# Memory Technology Devices (MTD)
#
CONFIG_MTD=y
@@ -358,7 +369,6 @@ CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_COUNT=16
CONFIG_BLK_DEV_RAM_SIZE=32768
CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE=""
# CONFIG_LBD is not set
# CONFIG_CDROM_PKTCDVD is not set
@@ -379,6 +389,7 @@ CONFIG_IOSCHED_CFQ=y
#
# SCSI device support
#
+# CONFIG_RAID_ATTRS is not set
# CONFIG_SCSI is not set
#
@@ -420,6 +431,10 @@ CONFIG_NETDEVICES=y
# CONFIG_ARCNET is not set
#
+# PHY device support
+#
+
+#
# Ethernet (10 or 100Mbit)
#
# CONFIG_NET_ETHERNET is not set
@@ -434,6 +449,7 @@ CONFIG_NETDEVICES=y
# CONFIG_HAMACHI is not set
# CONFIG_YELLOWFIN is not set
# CONFIG_R8169 is not set
+# CONFIG_SIS190 is not set
# CONFIG_SKGE is not set
# CONFIG_SK98LIN is not set
# CONFIG_TIGON3 is not set
@@ -446,6 +462,7 @@ CONFIG_MV643XX_ETH_0=y
#
# Ethernet (10000 Mbit)
#
+# CONFIG_CHELSIO_T1 is not set
# CONFIG_IXGB is not set
# CONFIG_S2IO is not set
@@ -547,7 +564,20 @@ CONFIG_LEGACY_PTY_COUNT=256
#
# Watchdog Cards
#
-# CONFIG_WATCHDOG is not set
+CONFIG_WATCHDOG=y
+# CONFIG_WATCHDOG_NOWAYOUT is not set
+
+#
+# Watchdog Device Drivers
+#
+# CONFIG_SOFT_WATCHDOG is not set
+CONFIG_MV64X60_WDT=y
+
+#
+# PCI-based Watchdog Cards
+#
+# CONFIG_PCIPCWATCHDOG is not set
+# CONFIG_WDTPCI is not set
# CONFIG_NVRAM is not set
CONFIG_GEN_RTC=y
# CONFIG_GEN_RTC_X is not set
@@ -571,7 +601,6 @@ CONFIG_GEN_RTC=y
# I2C support
#
# CONFIG_I2C is not set
-# CONFIG_I2C_SENSOR is not set
#
# Dallas's 1-wire bus
@@ -582,6 +611,7 @@ CONFIG_GEN_RTC=y
# Hardware Monitoring support
#
CONFIG_HWMON=y
+# CONFIG_HWMON_VID is not set
# CONFIG_HWMON_DEBUG_CHIP is not set
#
@@ -589,6 +619,10 @@ CONFIG_HWMON=y
#
#
+# Multimedia Capabilities Port drivers
+#
+
+#
# Multimedia devices
#
# CONFIG_VIDEO_DEV is not set
@@ -651,10 +685,6 @@ CONFIG_EXT2_FS=y
# CONFIG_REISERFS_FS is not set
# CONFIG_JFS_FS is not set
# CONFIG_FS_POSIX_ACL is not set
-
-#
-# XFS support
-#
# CONFIG_XFS_FS is not set
# CONFIG_MINIX_FS is not set
# CONFIG_ROMFS_FS is not set
@@ -663,6 +693,7 @@ CONFIG_INOTIFY=y
CONFIG_DNOTIFY=y
# CONFIG_AUTOFS_FS is not set
# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
#
# CD-ROM/DVD Filesystems
@@ -683,11 +714,10 @@ CONFIG_DNOTIFY=y
CONFIG_PROC_FS=y
CONFIG_PROC_KCORE=y
CONFIG_SYSFS=y
-# CONFIG_DEVPTS_FS_XATTR is not set
CONFIG_TMPFS=y
-# CONFIG_TMPFS_XATTR is not set
# CONFIG_HUGETLB_PAGE is not set
CONFIG_RAMFS=y
+# CONFIG_RELAYFS_FS is not set
#
# Miscellaneous filesystems
@@ -735,6 +765,7 @@ CONFIG_SUNRPC=y
# CONFIG_NCP_FS is not set
# CONFIG_CODA_FS is not set
# CONFIG_AFS_FS is not set
+# CONFIG_9P_FS is not set
#
# Partition Types
@@ -751,6 +782,7 @@ CONFIG_MSDOS_PARTITION=y
# Library routines
#
# CONFIG_CRC_CCITT is not set
+# CONFIG_CRC16 is not set
CONFIG_CRC32=y
# CONFIG_LIBCRC32C is not set
CONFIG_ZLIB_INFLATE=y
@@ -767,6 +799,7 @@ CONFIG_ZLIB_DEFLATE=y
# CONFIG_PRINTK_TIME is not set
# CONFIG_DEBUG_KERNEL is not set
CONFIG_LOG_BUF_SHIFT=14
+# CONFIG_SERIAL_TEXT_DEBUG is not set
#
# Security options
diff --git a/arch/ppc/configs/stx_gp3_defconfig b/arch/ppc/configs/stx_gp3_defconfig
index 66dae836765..3fedc43e44a 100644
--- a/arch/ppc/configs/stx_gp3_defconfig
+++ b/arch/ppc/configs/stx_gp3_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.11-rc2
-# Wed Jan 26 14:32:58 2005
+# Linux kernel version: 2.6.12-rc4
+# Tue May 24 18:11:04 2005
#
CONFIG_MMU=y
CONFIG_GENERIC_HARDIRQS=y
@@ -11,6 +11,7 @@ CONFIG_HAVE_DEC_LOCK=y
CONFIG_PPC=y
CONFIG_PPC32=y
CONFIG_GENERIC_NVRAM=y
+CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
#
# Code maturity level options
@@ -18,6 +19,7 @@ CONFIG_GENERIC_NVRAM=y
CONFIG_EXPERIMENTAL=y
CONFIG_CLEAN_COMPILE=y
CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
#
# General setup
@@ -29,7 +31,6 @@ CONFIG_SYSVIPC=y
# CONFIG_BSD_PROCESS_ACCT is not set
CONFIG_SYSCTL=y
# CONFIG_AUDIT is not set
-CONFIG_LOG_BUF_SHIFT=14
CONFIG_HOTPLUG=y
CONFIG_KOBJECT_UEVENT=y
# CONFIG_IKCONFIG is not set
@@ -37,6 +38,9 @@ CONFIG_EMBEDDED=y
CONFIG_KALLSYMS=y
# CONFIG_KALLSYMS_ALL is not set
# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_BASE_FULL=y
CONFIG_FUTEX=y
CONFIG_EPOLL=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
@@ -46,6 +50,7 @@ CONFIG_CC_ALIGN_LABELS=0
CONFIG_CC_ALIGN_LOOPS=0
CONFIG_CC_ALIGN_JUMPS=0
# CONFIG_TINY_SHMEM is not set
+CONFIG_BASE_SMALL=0
#
# Loadable module support
@@ -69,9 +74,11 @@ CONFIG_KMOD=y
CONFIG_E500=y
CONFIG_BOOKE=y
CONFIG_FSL_BOOKE=y
+# CONFIG_PHYS_64BIT is not set
# CONFIG_SPE is not set
CONFIG_MATH_EMULATION=y
# CONFIG_CPU_FREQ is not set
+# CONFIG_PM is not set
CONFIG_85xx=y
CONFIG_PPC_INDIRECT_PCI_BE=y
@@ -96,6 +103,7 @@ CONFIG_HIGHMEM=y
CONFIG_BINFMT_ELF=y
CONFIG_BINFMT_MISC=m
# CONFIG_CMDLINE_BOOL is not set
+CONFIG_ISA_DMA_API=y
#
# Bus options
@@ -104,15 +112,15 @@ CONFIG_PCI=y
CONFIG_PCI_DOMAINS=y
# CONFIG_PCI_LEGACY_PROC is not set
# CONFIG_PCI_NAMES is not set
+# CONFIG_PCI_DEBUG is not set
#
# PCCARD (PCMCIA/CardBus) support
#
# CONFIG_PCCARD is not set
-
-#
-# PC-card bridges
-#
+CONFIG_RAPIDIO=y
+CONFIG_RAPIDIO_8_BIT_TRANSPORT=y
+CONFIG_RAPIDIO_DISC_TIMEOUT=30
#
# Advanced setup
@@ -152,7 +160,7 @@ CONFIG_PARPORT=m
CONFIG_PARPORT_PC=m
# CONFIG_PARPORT_PC_FIFO is not set
# CONFIG_PARPORT_PC_SUPERIO is not set
-# CONFIG_PARPORT_OTHER is not set
+# CONFIG_PARPORT_GSC is not set
# CONFIG_PARPORT_1284 is not set
#
@@ -264,7 +272,6 @@ CONFIG_SCSI_CONSTANTS=y
# CONFIG_SCSI_BUSLOGIC is not set
# CONFIG_SCSI_DMX3191D is not set
# CONFIG_SCSI_EATA is not set
-# CONFIG_SCSI_EATA_PIO is not set
# CONFIG_SCSI_FUTURE_DOMAIN is not set
# CONFIG_SCSI_GDTH is not set
# CONFIG_SCSI_IPS is not set
@@ -274,7 +281,6 @@ CONFIG_SCSI_CONSTANTS=y
# CONFIG_SCSI_IMM is not set
# CONFIG_SCSI_SYM53C8XX_2 is not set
# CONFIG_SCSI_IPR is not set
-# CONFIG_SCSI_QLOGIC_ISP is not set
# CONFIG_SCSI_QLOGIC_FC is not set
# CONFIG_SCSI_QLOGIC_1280 is not set
CONFIG_SCSI_QLA2XXX=m
@@ -283,6 +289,7 @@ CONFIG_SCSI_QLA2XXX=m
# CONFIG_SCSI_QLA2300 is not set
# CONFIG_SCSI_QLA2322 is not set
# CONFIG_SCSI_QLA6312 is not set
+# CONFIG_SCSI_LPFC is not set
# CONFIG_SCSI_DC395x is not set
# CONFIG_SCSI_DC390T is not set
# CONFIG_SCSI_NSP32 is not set
@@ -322,7 +329,6 @@ CONFIG_NET=y
#
CONFIG_PACKET=y
# CONFIG_PACKET_MMAP is not set
-# CONFIG_NETLINK_DEV is not set
CONFIG_UNIX=y
# CONFIG_NET_KEY is not set
CONFIG_INET=y
@@ -431,7 +437,7 @@ CONFIG_IP_NF_NAT_FTP=m
#
# Network testing
#
-# CONFIG_NET_PKTGEN is not set
+CONFIG_NET_PKTGEN=y
# CONFIG_NETPOLL is not set
# CONFIG_NET_POLL_CONTROLLER is not set
# CONFIG_HAMRADIO is not set
@@ -499,6 +505,7 @@ CONFIG_GFAR_NAPI=y
# Wan interfaces
#
# CONFIG_WAN is not set
+CONFIG_RIONET=y
# CONFIG_FDDI is not set
# CONFIG_HIPPI is not set
# CONFIG_PLIP is not set
@@ -536,20 +543,6 @@ CONFIG_INPUT_EVDEV=m
# CONFIG_INPUT_EVBUG is not set
#
-# Input I/O drivers
-#
-# CONFIG_GAMEPORT is not set
-CONFIG_SOUND_GAMEPORT=y
-CONFIG_SERIO=y
-CONFIG_SERIO_I8042=y
-CONFIG_SERIO_SERPORT=y
-# CONFIG_SERIO_CT82C710 is not set
-# CONFIG_SERIO_PARKBD is not set
-# CONFIG_SERIO_PCIPS2 is not set
-CONFIG_SERIO_LIBPS2=y
-# CONFIG_SERIO_RAW is not set
-
-#
# Input Device Drivers
#
CONFIG_INPUT_KEYBOARD=y
@@ -567,6 +560,19 @@ CONFIG_MOUSE_PS2=y
# CONFIG_INPUT_MISC is not set
#
+# Hardware I/O ports
+#
+CONFIG_SERIO=y
+CONFIG_SERIO_I8042=y
+CONFIG_SERIO_SERPORT=y
+# CONFIG_SERIO_PARKBD is not set
+# CONFIG_SERIO_PCIPS2 is not set
+CONFIG_SERIO_LIBPS2=y
+# CONFIG_SERIO_RAW is not set
+# CONFIG_GAMEPORT is not set
+CONFIG_SOUND_GAMEPORT=y
+
+#
# Character devices
#
# CONFIG_VT is not set
@@ -590,6 +596,7 @@ CONFIG_SERIAL_CPM_SCC2=y
# CONFIG_SERIAL_CPM_SCC4 is not set
# CONFIG_SERIAL_CPM_SMC1 is not set
# CONFIG_SERIAL_CPM_SMC2 is not set
+# CONFIG_SERIAL_JSM is not set
CONFIG_UNIX98_PTYS=y
CONFIG_LEGACY_PTYS=y
CONFIG_LEGACY_PTY_COUNT=256
@@ -626,6 +633,11 @@ CONFIG_DRM=m
# CONFIG_RAW_DRIVER is not set
#
+# TPM devices
+#
+# CONFIG_TCG_TPM is not set
+
+#
# I2C support
#
CONFIG_I2C=m
@@ -648,12 +660,12 @@ CONFIG_I2C_ALGOBIT=m
# CONFIG_I2C_AMD8111 is not set
# CONFIG_I2C_I801 is not set
# CONFIG_I2C_I810 is not set
+# CONFIG_I2C_PIIX4 is not set
# CONFIG_I2C_ISA is not set
# CONFIG_I2C_MPC is not set
# CONFIG_I2C_NFORCE2 is not set
# CONFIG_I2C_PARPORT is not set
# CONFIG_I2C_PARPORT_LIGHT is not set
-# CONFIG_I2C_PIIX4 is not set
# CONFIG_I2C_PROSAVAGE is not set
# CONFIG_I2C_SAVAGE4 is not set
# CONFIG_SCx200_ACB is not set
@@ -677,7 +689,9 @@ CONFIG_I2C_ALGOBIT=m
# CONFIG_SENSORS_ASB100 is not set
# CONFIG_SENSORS_DS1621 is not set
# CONFIG_SENSORS_FSCHER is not set
+# CONFIG_SENSORS_FSCPOS is not set
# CONFIG_SENSORS_GL518SM is not set
+# CONFIG_SENSORS_GL520SM is not set
# CONFIG_SENSORS_IT87 is not set
# CONFIG_SENSORS_LM63 is not set
# CONFIG_SENSORS_LM75 is not set
@@ -688,9 +702,11 @@ CONFIG_I2C_ALGOBIT=m
# CONFIG_SENSORS_LM85 is not set
# CONFIG_SENSORS_LM87 is not set
# CONFIG_SENSORS_LM90 is not set
+# CONFIG_SENSORS_LM92 is not set
# CONFIG_SENSORS_MAX1619 is not set
# CONFIG_SENSORS_PC87360 is not set
# CONFIG_SENSORS_SMSC47B397 is not set
+# CONFIG_SENSORS_SIS5595 is not set
# CONFIG_SENSORS_SMSC47M1 is not set
# CONFIG_SENSORS_VIA686A is not set
# CONFIG_SENSORS_W83781D is not set
@@ -700,10 +716,12 @@ CONFIG_I2C_ALGOBIT=m
#
# Other I2C Chip support
#
+# CONFIG_SENSORS_DS1337 is not set
# CONFIG_SENSORS_EEPROM is not set
# CONFIG_SENSORS_PCF8574 is not set
# CONFIG_SENSORS_PCF8591 is not set
# CONFIG_SENSORS_RTC8564 is not set
+# CONFIG_SENSORS_M41T00 is not set
# CONFIG_I2C_DEBUG_CORE is not set
# CONFIG_I2C_DEBUG_ALGO is not set
# CONFIG_I2C_DEBUG_BUS is not set
@@ -732,7 +750,6 @@ CONFIG_I2C_ALGOBIT=m
# Graphics support
#
# CONFIG_FB is not set
-# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
#
# Sound
@@ -752,13 +769,9 @@ CONFIG_SOUND=m
#
# USB support
#
-# CONFIG_USB is not set
CONFIG_USB_ARCH_HAS_HCD=y
CONFIG_USB_ARCH_HAS_OHCI=y
-
-#
-# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' may also be needed; see USB_STORAGE Help for more information
-#
+# CONFIG_USB is not set
#
# USB Gadget Support
@@ -789,6 +802,10 @@ CONFIG_JBD_DEBUG=y
CONFIG_FS_MBCACHE=y
# CONFIG_REISERFS_FS is not set
# CONFIG_JFS_FS is not set
+
+#
+# XFS support
+#
# CONFIG_XFS_FS is not set
# CONFIG_MINIX_FS is not set
# CONFIG_ROMFS_FS is not set
@@ -859,7 +876,6 @@ CONFIG_NFS_V3=y
CONFIG_ROOT_NFS=y
CONFIG_LOCKD=y
CONFIG_LOCKD_V4=y
-# CONFIG_EXPORTFS is not set
CONFIG_SUNRPC=y
# CONFIG_RPCSEC_GSS_KRB5 is not set
# CONFIG_RPCSEC_GSS_SPKM3 is not set
@@ -942,8 +958,10 @@ CONFIG_ZLIB_INFLATE=m
#
# Kernel hacking
#
+# CONFIG_PRINTK_TIME is not set
CONFIG_DEBUG_KERNEL=y
# CONFIG_MAGIC_SYSRQ is not set
+CONFIG_LOG_BUF_SHIFT=14
# CONFIG_SCHEDSTATS is not set
# CONFIG_DEBUG_SLAB is not set
# CONFIG_DEBUG_SPINLOCK is not set
diff --git a/arch/ppc/kernel/Makefile b/arch/ppc/kernel/Makefile
index c610ca933a2..76a55a438f2 100644
--- a/arch/ppc/kernel/Makefile
+++ b/arch/ppc/kernel/Makefile
@@ -22,6 +22,7 @@ obj-$(CONFIG_POWER4) += cpu_setup_power4.o
obj-$(CONFIG_MODULES) += module.o ppc_ksyms.o
obj-$(CONFIG_NOT_COHERENT_CACHE) += dma-mapping.o
obj-$(CONFIG_PCI) += pci.o
+obj-$(CONFIG_RAPIDIO) += rio.o
obj-$(CONFIG_KGDB) += ppc-stub.o
obj-$(CONFIG_SMP) += smp.o smp-tbsync.o
obj-$(CONFIG_TAU) += temp.o
diff --git a/arch/ppc/kernel/head_44x.S b/arch/ppc/kernel/head_44x.S
index 8b49679fad5..677c571aa27 100644
--- a/arch/ppc/kernel/head_44x.S
+++ b/arch/ppc/kernel/head_44x.S
@@ -190,8 +190,8 @@ skpinv: addi r4,r4,1 /* Increment */
/* xlat fields */
lis r4,UART0_PHYS_IO_BASE@h /* RPN depends on SoC */
-#ifndef CONFIG_440EP
- ori r4,r4,0x0001 /* ERPN is 1 for second 4GB page */
+#ifdef UART0_PHYS_ERPN
+ ori r4,r4,UART0_PHYS_ERPN /* Add ERPN if above 4GB */
#endif
/* attrib fields */
diff --git a/arch/ppc/kernel/misc.S b/arch/ppc/kernel/misc.S
index 3056ede2424..ae6af29938a 100644
--- a/arch/ppc/kernel/misc.S
+++ b/arch/ppc/kernel/misc.S
@@ -25,6 +25,11 @@
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
+#ifdef CONFIG_8xx
+#define ISYNC_8xx isync
+#else
+#define ISYNC_8xx
+#endif
.text
.align 5
@@ -800,8 +805,18 @@ _GLOBAL(_insb)
subi r4,r4,1
blelr-
00: lbz r5,0(r3)
- eieio
- stbu r5,1(r4)
+01: eieio
+02: stbu r5,1(r4)
+ ISYNC_8xx
+ .section .fixup,"ax"
+03: blr
+ .text
+ .section __ex_table, "a"
+ .align 2
+ .long 00b, 03b
+ .long 01b, 03b
+ .long 02b, 03b
+ .text
bdnz 00b
blr
@@ -811,8 +826,18 @@ _GLOBAL(_outsb)
subi r4,r4,1
blelr-
00: lbzu r5,1(r4)
- stb r5,0(r3)
- eieio
+01: stb r5,0(r3)
+02: eieio
+ ISYNC_8xx
+ .section .fixup,"ax"
+03: blr
+ .text
+ .section __ex_table, "a"
+ .align 2
+ .long 00b, 03b
+ .long 01b, 03b
+ .long 02b, 03b
+ .text
bdnz 00b
blr
@@ -822,8 +847,18 @@ _GLOBAL(_insw)
subi r4,r4,2
blelr-
00: lhbrx r5,0,r3
- eieio
- sthu r5,2(r4)
+01: eieio
+02: sthu r5,2(r4)
+ ISYNC_8xx
+ .section .fixup,"ax"
+03: blr
+ .text
+ .section __ex_table, "a"
+ .align 2
+ .long 00b, 03b
+ .long 01b, 03b
+ .long 02b, 03b
+ .text
bdnz 00b
blr
@@ -833,8 +868,18 @@ _GLOBAL(_outsw)
subi r4,r4,2
blelr-
00: lhzu r5,2(r4)
- eieio
- sthbrx r5,0,r3
+01: eieio
+02: sthbrx r5,0,r3
+ ISYNC_8xx
+ .section .fixup,"ax"
+03: blr
+ .text
+ .section __ex_table, "a"
+ .align 2
+ .long 00b, 03b
+ .long 01b, 03b
+ .long 02b, 03b
+ .text
bdnz 00b
blr
@@ -844,8 +889,18 @@ _GLOBAL(_insl)
subi r4,r4,4
blelr-
00: lwbrx r5,0,r3
- eieio
- stwu r5,4(r4)
+01: eieio
+02: stwu r5,4(r4)
+ ISYNC_8xx
+ .section .fixup,"ax"
+03: blr
+ .text
+ .section __ex_table, "a"
+ .align 2
+ .long 00b, 03b
+ .long 01b, 03b
+ .long 02b, 03b
+ .text
bdnz 00b
blr
@@ -855,8 +910,18 @@ _GLOBAL(_outsl)
subi r4,r4,4
blelr-
00: lwzu r5,4(r4)
- stwbrx r5,0,r3
- eieio
+01: stwbrx r5,0,r3
+02: eieio
+ ISYNC_8xx
+ .section .fixup,"ax"
+03: blr
+ .text
+ .section __ex_table, "a"
+ .align 2
+ .long 00b, 03b
+ .long 01b, 03b
+ .long 02b, 03b
+ .text
bdnz 00b
blr
@@ -867,8 +932,18 @@ _GLOBAL(_insw_ns)
subi r4,r4,2
blelr-
00: lhz r5,0(r3)
- eieio
- sthu r5,2(r4)
+01: eieio
+02: sthu r5,2(r4)
+ ISYNC_8xx
+ .section .fixup,"ax"
+03: blr
+ .text
+ .section __ex_table, "a"
+ .align 2
+ .long 00b, 03b
+ .long 01b, 03b
+ .long 02b, 03b
+ .text
bdnz 00b
blr
@@ -879,8 +954,18 @@ _GLOBAL(_outsw_ns)
subi r4,r4,2
blelr-
00: lhzu r5,2(r4)
- sth r5,0(r3)
- eieio
+01: sth r5,0(r3)
+02: eieio
+ ISYNC_8xx
+ .section .fixup,"ax"
+03: blr
+ .text
+ .section __ex_table, "a"
+ .align 2
+ .long 00b, 03b
+ .long 01b, 03b
+ .long 02b, 03b
+ .text
bdnz 00b
blr
@@ -891,8 +976,18 @@ _GLOBAL(_insl_ns)
subi r4,r4,4
blelr-
00: lwz r5,0(r3)
- eieio
- stwu r5,4(r4)
+01: eieio
+02: stwu r5,4(r4)
+ ISYNC_8xx
+ .section .fixup,"ax"
+03: blr
+ .text
+ .section __ex_table, "a"
+ .align 2
+ .long 00b, 03b
+ .long 01b, 03b
+ .long 02b, 03b
+ .text
bdnz 00b
blr
@@ -903,8 +998,18 @@ _GLOBAL(_outsl_ns)
subi r4,r4,4
blelr-
00: lwzu r5,4(r4)
- stw r5,0(r3)
- eieio
+01: stw r5,0(r3)
+02: eieio
+ ISYNC_8xx
+ .section .fixup,"ax"
+03: blr
+ .text
+ .section __ex_table, "a"
+ .align 2
+ .long 00b, 03b
+ .long 01b, 03b
+ .long 02b, 03b
+ .text
bdnz 00b
blr
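
Each I/O string routine above now carries __ex_table entries pairing the load/store/eieio addresses with a common fixup (the "03: blr" label), so a machine check raised inside the loop resumes cleanly instead of oopsing. A conceptual C model of how such a table is consulted (the addresses are illustrative; real entries hold instruction addresses resolved by the linker):

#include <stdio.h>

struct exception_table_entry {
	unsigned long insn;	/* address of an instruction that may fault */
	unsigned long fixup;	/* address to resume execution at */
};

static const struct exception_table_entry ex_table[] = {
	{ 0x1000, 0x2000 },	/* 00: lbz   -> 03: blr */
	{ 0x1004, 0x2000 },	/* 01: eieio -> 03: blr */
	{ 0x1008, 0x2000 },	/* 02: stbu  -> 03: blr */
};

static unsigned long search_exception_table(unsigned long pc)
{
	unsigned int i;

	for (i = 0; i < sizeof(ex_table) / sizeof(ex_table[0]); i++)
		if (ex_table[i].insn == pc)
			return ex_table[i].fixup;
	return 0;	/* no fixup registered: the fault is fatal */
}

int main(void)
{
	printf("fixup for pc 0x1004: %#lx\n", search_exception_table(0x1004));
	return 0;
}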
diff --git a/arch/ppc/kernel/rio.c b/arch/ppc/kernel/rio.c
new file mode 100644
index 00000000000..29487fedfc7
--- /dev/null
+++ b/arch/ppc/kernel/rio.c
@@ -0,0 +1,52 @@
+/*
+ * RapidIO PPC32 support
+ *
+ * Copyright 2005 MontaVista Software, Inc.
+ * Matt Porter <mporter@kernel.crashing.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/rio.h>
+
+#include <asm/rio.h>
+
+/**
+ * platform_rio_init - Do platform specific RIO init
+ *
+ * Any platform-specific initialization of RapidIO
+ * hardware is done here as well as registration
+ * of any active master ports in the system.
+ */
+void __attribute__ ((weak))
+ platform_rio_init(void)
+{
+ printk(KERN_WARNING "RIO: No platform_rio_init() present\n");
+}
+
+/**
+ * ppc_rio_init - Do PPC32 RIO init
+ *
+ * Calls platform-specific RIO init code and then calls
+ * rio_init_mports() to initialize any master ports that
+ * have been registered with the RIO subsystem.
+ */
+static int __init ppc_rio_init(void)
+{
+ printk(KERN_INFO "RIO: RapidIO init\n");
+
+ /* Platform specific initialization */
+ platform_rio_init();
+
+ /* Enumerate all registered ports */
+ rio_init_mports();
+
+ return 0;
+}
+
+subsys_initcall(ppc_rio_init);
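
platform_rio_init() above is declared weak so boards without RapidIO glue still link against the generic code, while any board file providing its own non-weak definition replaces the stub at link time. A single-file illustration of the weak-default pattern (GCC attribute syntax, as used throughout the kernel):

#include <stdio.h>

void __attribute__((weak)) platform_rio_init(void)
{
	printf("RIO: no platform_rio_init() present\n");
}

int main(void)
{
	/* With only this file linked, the weak stub runs; linking in a
	 * board file that defines platform_rio_init() overrides it. */
	platform_rio_init();
	return 0;
}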
diff --git a/arch/ppc/kernel/traps.c b/arch/ppc/kernel/traps.c
index 16adde6b429..9dbc4d28fa2 100644
--- a/arch/ppc/kernel/traps.c
+++ b/arch/ppc/kernel/traps.c
@@ -49,7 +49,7 @@ extern int xmon_sstep(struct pt_regs *regs);
extern int xmon_iabr_match(struct pt_regs *regs);
extern int xmon_dabr_match(struct pt_regs *regs);
-void (*debugger)(struct pt_regs *regs) = xmon;
+int (*debugger)(struct pt_regs *regs) = xmon;
int (*debugger_bpt)(struct pt_regs *regs) = xmon_bpt;
int (*debugger_sstep)(struct pt_regs *regs) = xmon_sstep;
int (*debugger_iabr_match)(struct pt_regs *regs) = xmon_iabr_match;
@@ -57,7 +57,7 @@ int (*debugger_dabr_match)(struct pt_regs *regs) = xmon_dabr_match;
void (*debugger_fault_handler)(struct pt_regs *regs);
#else
#ifdef CONFIG_KGDB
-void (*debugger)(struct pt_regs *regs);
+int (*debugger)(struct pt_regs *regs);
int (*debugger_bpt)(struct pt_regs *regs);
int (*debugger_sstep)(struct pt_regs *regs);
int (*debugger_iabr_match)(struct pt_regs *regs);
@@ -159,7 +159,7 @@ void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
*/
static inline int check_io_access(struct pt_regs *regs)
{
-#ifdef CONFIG_PPC_PMAC
+#if defined CONFIG_PPC_PMAC || defined CONFIG_8xx
unsigned long msr = regs->msr;
const struct exception_table_entry *entry;
unsigned int *nip = (unsigned int *)regs->nip;
@@ -178,7 +178,11 @@ static inline int check_io_access(struct pt_regs *regs)
nip -= 2;
else if (*nip == 0x4c00012c) /* isync */
--nip;
- if (*nip == 0x7c0004ac || (*nip >> 26) == 3) {
+ /* eieio from I/O string functions */
+ else if ((*nip) == 0x7c0006ac || *(nip+1) == 0x7c0006ac)
+ nip += 2;
+ if (*nip == 0x7c0004ac || (*nip >> 26) == 3 ||
+ (*(nip+1) >> 26) == 3) {
/* sync or twi */
unsigned int rb;
diff --git a/arch/ppc/platforms/4xx/Kconfig b/arch/ppc/platforms/4xx/Kconfig
index 76f4476cab4..d8837911bbc 100644
--- a/arch/ppc/platforms/4xx/Kconfig
+++ b/arch/ppc/platforms/4xx/Kconfig
@@ -82,6 +82,12 @@ config LUAN
help
This option enables support for the IBM PPC440SP evaluation board.
+config YUCCA
+ bool "Yucca"
+ select WANT_EARLY_SERIAL
+ help
+ This option enables support for the AMCC PPC440SPe evaluation board.
+
config OCOTEA
bool "Ocotea"
select WANT_EARLY_SERIAL
@@ -124,9 +130,14 @@ config 440SP
depends on LUAN
default y
+config 440SPE
+ bool
+ depends on YUCCA
+ default y
+
config 440
bool
- depends on 440GP || 440SP || 440EP
+ depends on 440GP || 440SP || 440SPE || 440EP
default y
config 440A
@@ -158,7 +169,7 @@ config BOOKE
config IBM_OCP
bool
- depends on ASH || BAMBOO || BUBINGA || CPCI405 || EBONY || EP405 || LUAN || OCOTEA || REDWOOD_5 || REDWOOD_6 || SYCAMORE || WALNUT
+ depends on ASH || BAMBOO || BUBINGA || CPCI405 || EBONY || EP405 || LUAN || YUCCA || OCOTEA || REDWOOD_5 || REDWOOD_6 || SYCAMORE || WALNUT
default y
config XILINX_OCP
@@ -168,7 +179,7 @@ config XILINX_OCP
config IBM_EMAC4
bool
- depends on 440GX || 440SP
+ depends on 440GX || 440SP || 440SPE
default y
config BIOS_FIXUP
@@ -214,7 +225,7 @@ config EMBEDDEDBOOT
config IBM_OPENBIOS
bool
- depends on ASH || BUBINGA || REDWOOD_5 || REDWOOD_6 || SYCAMORE || WALNUT
+ depends on ASH || REDWOOD_5 || REDWOOD_6
default y
config PPC4xx_DMA
diff --git a/arch/ppc/platforms/4xx/Makefile b/arch/ppc/platforms/4xx/Makefile
index 1dd6d7fd6a9..c9bb6117095 100644
--- a/arch/ppc/platforms/4xx/Makefile
+++ b/arch/ppc/platforms/4xx/Makefile
@@ -7,6 +7,7 @@ obj-$(CONFIG_EBONY) += ebony.o
obj-$(CONFIG_EP405) += ep405.o
obj-$(CONFIG_BUBINGA) += bubinga.o
obj-$(CONFIG_LUAN) += luan.o
+obj-$(CONFIG_YUCCA) += yucca.o
obj-$(CONFIG_OCOTEA) += ocotea.o
obj-$(CONFIG_REDWOOD_5) += redwood5.o
obj-$(CONFIG_REDWOOD_6) += redwood6.o
@@ -22,6 +23,7 @@ obj-$(CONFIG_440EP) += ibm440ep.o
obj-$(CONFIG_440GP) += ibm440gp.o
obj-$(CONFIG_440GX) += ibm440gx.o
obj-$(CONFIG_440SP) += ibm440sp.o
+obj-$(CONFIG_440SPE) += ppc440spe.o
obj-$(CONFIG_405EP) += ibm405ep.o
obj-$(CONFIG_405GPR) += ibm405gpr.o
obj-$(CONFIG_VIRTEX_II_PRO) += virtex-ii_pro.o
diff --git a/arch/ppc/platforms/4xx/bubinga.c b/arch/ppc/platforms/4xx/bubinga.c
index 3678abf8631..8110f55668c 100644
--- a/arch/ppc/platforms/4xx/bubinga.c
+++ b/arch/ppc/platforms/4xx/bubinga.c
@@ -89,7 +89,7 @@ bubinga_early_serial_map(void)
* by 16.
*/
uart_div = (mfdcr(DCRN_CPC0_UCR_BASE) & DCRN_CPC0_UCR_U0DIV);
- uart_clock = __res.bi_pllouta_freq / uart_div;
+ uart_clock = __res.bi_procfreq / uart_div;
/* Setup serial port access */
memset(&port, 0, sizeof(port));
diff --git a/arch/ppc/platforms/4xx/bubinga.h b/arch/ppc/platforms/4xx/bubinga.h
index b1df856f8e2..b5380cfaf5c 100644
--- a/arch/ppc/platforms/4xx/bubinga.h
+++ b/arch/ppc/platforms/4xx/bubinga.h
@@ -1,52 +1,34 @@
/*
- * Support for IBM PPC 405EP evaluation board (Bubinga).
+ * arch/ppc/platforms/4xx/bubinga.h
*
- * Author: SAW (IBM), derived from walnut.h.
- * Maintained by MontaVista Software <source@mvista.com>
+ * Bubinga board definitions
+ *
+ * Copyright (c) 2005 DENX Software Engineering
+ * Stefan Roese <sr@denx.de>
+ *
+ * Based on original work by
+ * SAW (IBM)
+ * 2003 (c) MontaVista Software Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
*
- * 2003 (c) MontaVista Softare Inc. This file is licensed under the
- * terms of the GNU General Public License version 2. This program is
- * licensed "as is" without any warranty of any kind, whether express
- * or implied.
*/
#ifdef __KERNEL__
#ifndef __BUBINGA_H__
#define __BUBINGA_H__
-/* 405EP */
+#include <linux/config.h>
#include <platforms/4xx/ibm405ep.h>
-
-#ifndef __ASSEMBLY__
-/*
- * Data structure defining board information maintained by the boot
- * ROM on IBM's evaluation board. An effort has been made to
- * keep the field names consistent with the 8xx 'bd_t' board info
- * structures.
- */
-
-typedef struct board_info {
- unsigned char bi_s_version[4]; /* Version of this structure */
- unsigned char bi_r_version[30]; /* Version of the IBM ROM */
- unsigned int bi_memsize; /* DRAM installed, in bytes */
- unsigned char bi_enetaddr[2][6]; /* Local Ethernet MAC address */
- unsigned char bi_pci_enetaddr[6]; /* PCI Ethernet MAC address */
- unsigned int bi_intfreq; /* Processor speed, in Hz */
- unsigned int bi_busfreq; /* PLB Bus speed, in Hz */
- unsigned int bi_pci_busfreq; /* PCI Bus speed, in Hz */
- unsigned int bi_opb_busfreq; /* OPB Bus speed, in Hz */
- unsigned int bi_pllouta_freq; /* PLL OUTA speed, in Hz */
-} bd_t;
-
-/* Some 4xx parts use a different timebase frequency from the internal clock.
-*/
-#define bi_tbfreq bi_intfreq
-
+#include <asm/ppcboot.h>
/* Memory map for the Bubinga board.
* Generic 4xx plus RTC.
*/
-extern void *bubinga_rtc_base;
#define BUBINGA_RTC_PADDR ((uint)0xf0000000)
#define BUBINGA_RTC_VADDR BUBINGA_RTC_PADDR
#define BUBINGA_RTC_SIZE ((uint)8*1024)
@@ -58,12 +40,18 @@ extern void *bubinga_rtc_base;
* for typical configurations at various CPU speeds.
* The base baud is calculated as (FWDA / EXT UART DIV / 16)
*/
-#define BASE_BAUD 0
+#define BASE_BAUD 0
-#define BUBINGA_FPGA_BASE 0xF0300000
+/* Flash */
+#define PPC40x_FPGA_BASE 0xF0300000
+#define PPC40x_FPGA_REG_OFFS 1 /* offset to flash map reg */
+#define PPC40x_FLASH_ONBD_N(x) (x & 0x02)
+#define PPC40x_FLASH_SRAM_SEL(x) (x & 0x01)
+#define PPC40x_FLASH_LOW 0xFFF00000
+#define PPC40x_FLASH_HIGH 0xFFF80000
+#define PPC40x_FLASH_SIZE 0x80000
-#define PPC4xx_MACHINE_NAME "IBM Bubinga"
+#define PPC4xx_MACHINE_NAME "IBM Bubinga"
-#endif /* !__ASSEMBLY__ */
#endif /* __BUBINGA_H__ */
#endif /* __KERNEL__ */
diff --git a/arch/ppc/platforms/4xx/ebony.h b/arch/ppc/platforms/4xx/ebony.h
index d08faa46a0a..b91ad4272df 100644
--- a/arch/ppc/platforms/4xx/ebony.h
+++ b/arch/ppc/platforms/4xx/ebony.h
@@ -24,8 +24,8 @@
#define PPC44x_EMAC0_MR0 0xE0000800
/* Where to find the MAC info */
-#define EBONY_OPENBIOS_MAC_BASE 0xfffffe0c
-#define EBONY_OPENBIOS_MAC_OFFSET 0x0c
+#define OPENBIOS_MAC_BASE 0xfffffe0c
+#define OPENBIOS_MAC_OFFSET 0x0c
/* Default clock rates for Rev. B and Rev. C silicon */
#define EBONY_440GP_RB_SYSCLK 33000000
diff --git a/arch/ppc/platforms/4xx/ppc440spe.c b/arch/ppc/platforms/4xx/ppc440spe.c
new file mode 100644
index 00000000000..6139a0b3393
--- /dev/null
+++ b/arch/ppc/platforms/4xx/ppc440spe.c
@@ -0,0 +1,148 @@
+/*
+ * arch/ppc/platforms/4xx/ppc440spe.c
+ *
+ * PPC440SPe I/O descriptions
+ *
+ * Roland Dreier <rolandd@cisco.com>
+ * Copyright (c) 2005 Cisco Systems. All rights reserved.
+ *
+ * Matt Porter <mporter@kernel.crashing.org>
+ * Copyright 2002-2005 MontaVista Software Inc.
+ *
+ * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
+ * Copyright (c) 2003, 2004 Zultys Technologies
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <platforms/4xx/ppc440spe.h>
+#include <asm/ocp.h>
+#include <asm/ppc4xx_pic.h>
+
+static struct ocp_func_emac_data ppc440spe_emac0_def = {
+ .rgmii_idx = -1, /* No RGMII */
+ .rgmii_mux = -1, /* No RGMII */
+ .zmii_idx = -1, /* No ZMII */
+ .zmii_mux = -1, /* No ZMII */
+ .mal_idx = 0, /* MAL device index */
+ .mal_rx_chan = 0, /* MAL rx channel number */
+ .mal_tx_chan = 0, /* MAL tx channel number */
+ .wol_irq = 61, /* WOL interrupt number */
+ .mdio_idx = -1, /* No shared MDIO */
+ .tah_idx = -1, /* No TAH */
+};
+OCP_SYSFS_EMAC_DATA()
+
+static struct ocp_func_mal_data ppc440spe_mal0_def = {
+ .num_tx_chans = 1, /* Number of TX channels */
+ .num_rx_chans = 1, /* Number of RX channels */
+ .txeob_irq = 38, /* TX End Of Buffer IRQ */
+ .rxeob_irq = 39, /* RX End Of Buffer IRQ */
+ .txde_irq = 34, /* TX Descriptor Error IRQ */
+ .rxde_irq = 35, /* RX Descriptor Error IRQ */
+ .serr_irq = 33, /* MAL System Error IRQ */
+ .dcr_base = DCRN_MAL_BASE /* MAL0_CFG DCR number */
+};
+OCP_SYSFS_MAL_DATA()
+
+static struct ocp_func_iic_data ppc440spe_iic0_def = {
+ .fast_mode = 0, /* Use standard mode (100 kHz) */
+};
+
+static struct ocp_func_iic_data ppc440spe_iic1_def = {
+ .fast_mode = 0, /* Use standard mode (100 kHz) */
+};
+OCP_SYSFS_IIC_DATA()
+
+struct ocp_def core_ocp[] = {
+ { .vendor = OCP_VENDOR_IBM,
+ .function = OCP_FUNC_16550,
+ .index = 0,
+ .paddr = PPC440SPE_UART0_ADDR,
+ .irq = UART0_INT,
+ .pm = IBM_CPM_UART0,
+ },
+ { .vendor = OCP_VENDOR_IBM,
+ .function = OCP_FUNC_16550,
+ .index = 1,
+ .paddr = PPC440SPE_UART1_ADDR,
+ .irq = UART1_INT,
+ .pm = IBM_CPM_UART1,
+ },
+ { .vendor = OCP_VENDOR_IBM,
+ .function = OCP_FUNC_16550,
+ .index = 2,
+ .paddr = PPC440SPE_UART2_ADDR,
+ .irq = UART2_INT,
+ .pm = IBM_CPM_UART2,
+ },
+ { .vendor = OCP_VENDOR_IBM,
+ .function = OCP_FUNC_IIC,
+ .index = 0,
+ .paddr = 0x00000004f0000400ULL,
+ .irq = 2,
+ .pm = IBM_CPM_IIC0,
+ .additions = &ppc440spe_iic0_def,
+ .show = &ocp_show_iic_data
+ },
+ { .vendor = OCP_VENDOR_IBM,
+ .function = OCP_FUNC_IIC,
+ .index = 1,
+ .paddr = 0x00000004f0000500ULL,
+ .irq = 3,
+ .pm = IBM_CPM_IIC1,
+ .additions = &ppc440spe_iic1_def,
+ .show = &ocp_show_iic_data
+ },
+ { .vendor = OCP_VENDOR_IBM,
+ .function = OCP_FUNC_GPIO,
+ .index = 0,
+ .paddr = 0x00000004f0000700ULL,
+ .irq = OCP_IRQ_NA,
+ .pm = IBM_CPM_GPIO0,
+ },
+ { .vendor = OCP_VENDOR_IBM,
+ .function = OCP_FUNC_MAL,
+ .paddr = OCP_PADDR_NA,
+ .irq = OCP_IRQ_NA,
+ .pm = OCP_CPM_NA,
+ .additions = &ppc440spe_mal0_def,
+ .show = &ocp_show_mal_data,
+ },
+ { .vendor = OCP_VENDOR_IBM,
+ .function = OCP_FUNC_EMAC,
+ .index = 0,
+ .paddr = 0x00000004f0000800ULL,
+ .irq = 60,
+ .pm = OCP_CPM_NA,
+ .additions = &ppc440spe_emac0_def,
+ .show = &ocp_show_emac_data,
+ },
+ { .vendor = OCP_VENDOR_INVALID
+ }
+};
+
+/* Polarity and triggering settings for internal interrupt sources */
+struct ppc4xx_uic_settings ppc4xx_core_uic_cfg[] __initdata = {
+ { .polarity = 0xffffffff,
+ .triggering = 0x010f0004,
+ .ext_irq_mask = 0x00000000,
+ },
+ { .polarity = 0xffffffff,
+ .triggering = 0x001f8040,
+ .ext_irq_mask = 0x00007c30, /* IRQ6 - IRQ7, IRQ8 - IRQ12 */
+ },
+ { .polarity = 0xffffffff,
+ .triggering = 0x00000000,
+ .ext_irq_mask = 0x000000fc, /* IRQ0 - IRQ5 */
+ },
+ { .polarity = 0xffffffff,
+ .triggering = 0x00000000,
+ .ext_irq_mask = 0x00000000,
+ },
+};
diff --git a/arch/ppc/platforms/4xx/ppc440spe.h b/arch/ppc/platforms/4xx/ppc440spe.h
new file mode 100644
index 00000000000..2216846973b
--- /dev/null
+++ b/arch/ppc/platforms/4xx/ppc440spe.h
@@ -0,0 +1,66 @@
+/*
+ * arch/ppc/platforms/4xx/ppc440spe.h
+ *
+ * PPC440SPe definitions
+ *
+ * Roland Dreier <rolandd@cisco.com>
+ * Copyright (c) 2005 Cisco Systems. All rights reserved.
+ *
+ * Matt Porter <mporter@kernel.crashing.org>
+ * Copyright 2004-2005 MontaVista Software, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifdef __KERNEL__
+#ifndef __PPC_PLATFORMS_PPC440SPE_H
+#define __PPC_PLATFORMS_PPC440SPE_H
+
+#include <linux/config.h>
+
+#include <asm/ibm44x.h>
+
+/* UART */
+#define PPC440SPE_UART0_ADDR 0x00000004f0000200ULL
+#define PPC440SPE_UART1_ADDR 0x00000004f0000300ULL
+#define PPC440SPE_UART2_ADDR 0x00000004f0000600ULL
+#define UART0_INT 0
+#define UART1_INT 1
+#define UART2_INT 37
+
+/* Clock and Power Management */
+#define IBM_CPM_IIC0 0x80000000 /* IIC interface */
+#define IBM_CPM_IIC1 0x40000000 /* IIC interface */
+#define IBM_CPM_PCI 0x20000000 /* PCI bridge */
+#define IBM_CPM_CPU 0x02000000 /* processor core */
+#define IBM_CPM_DMA 0x01000000 /* DMA controller */
+#define IBM_CPM_BGO 0x00800000 /* PLB to OPB bus arbiter */
+#define IBM_CPM_BGI 0x00400000 /* OPB to PLB bridge */
+#define IBM_CPM_EBC 0x00200000 /* External Bus Controller */
+#define IBM_CPM_EBM 0x00100000 /* Ext Bus Master Interface */
+#define IBM_CPM_DMC 0x00080000 /* SDRAM peripheral controller */
+#define IBM_CPM_PLB 0x00040000 /* PLB bus arbiter */
+#define IBM_CPM_SRAM 0x00020000 /* SRAM memory controller */
+#define IBM_CPM_PPM 0x00002000 /* PLB Performance Monitor */
+#define IBM_CPM_UIC1 0x00001000 /* Universal Interrupt Controller */
+#define IBM_CPM_GPIO0 0x00000800 /* General Purpose IO (??) */
+#define IBM_CPM_GPT 0x00000400 /* General Purpose Timers */
+#define IBM_CPM_UART0 0x00000200 /* serial port 0 */
+#define IBM_CPM_UART1 0x00000100 /* serial port 1 */
+#define IBM_CPM_UART2 0x00000100 /* serial port 2 */
+#define IBM_CPM_UIC0 0x00000080 /* Universal Interrupt Controller */
+#define IBM_CPM_TMRCLK 0x00000040 /* CPU timers */
+#define IBM_CPM_EMAC0 0x00000020 /* EMAC 0 */
+
+#define DFLT_IBM4xx_PM ~(IBM_CPM_UIC | IBM_CPM_UIC1 | IBM_CPM_CPU \
+ | IBM_CPM_EBC | IBM_CPM_SRAM | IBM_CPM_BGO \
+ | IBM_CPM_EBM | IBM_CPM_PLB | IBM_CPM_OPB \
+ | IBM_CPM_TMRCLK | IBM_CPM_DMA | IBM_CPM_PCI \
+ | IBM_CPM_TAHOE0 | IBM_CPM_TAHOE1 \
+ | IBM_CPM_EMAC0 | IBM_CPM_EMAC1 \
+ | IBM_CPM_EMAC2 | IBM_CPM_EMAC3 )
+#endif /* __PPC_PLATFORMS_PPC440SPE_H */
+#endif /* __KERNEL__ */
diff --git a/arch/ppc/platforms/4xx/sycamore.c b/arch/ppc/platforms/4xx/sycamore.c
index d8019eec470..281b4a2ffb9 100644
--- a/arch/ppc/platforms/4xx/sycamore.c
+++ b/arch/ppc/platforms/4xx/sycamore.c
@@ -88,9 +88,6 @@ ppc405_map_irq(struct pci_dev *dev, unsigned char idsel, unsigned char pin)
void __init
sycamore_setup_arch(void)
{
-#define SYCAMORE_PS2_BASE 0xF0100000
-#define SYCAMORE_FPGA_BASE 0xF0300000
-
void *fpga_brdc;
unsigned char fpga_brdc_data;
void *fpga_enable;
@@ -100,7 +97,7 @@ sycamore_setup_arch(void)
ppc4xx_setup_arch();
- ibm_ocp_set_emac(0, 1);
+ ibm_ocp_set_emac(0, 0);
kb_data = ioremap(SYCAMORE_PS2_BASE, 8);
if (!kb_data) {
@@ -111,7 +108,7 @@ sycamore_setup_arch(void)
kb_cs = kb_data + 1;
- fpga_status = ioremap(SYCAMORE_FPGA_BASE, 8);
+ fpga_status = ioremap(PPC40x_FPGA_BASE, 8);
if (!fpga_status) {
printk(KERN_CRIT
"sycamore_setup_arch() fpga_status ioremap failed\n");
diff --git a/arch/ppc/platforms/4xx/sycamore.h b/arch/ppc/platforms/4xx/sycamore.h
index 3e7b4e2c8c5..1cd6c824fd6 100644
--- a/arch/ppc/platforms/4xx/sycamore.h
+++ b/arch/ppc/platforms/4xx/sycamore.h
@@ -1,67 +1,52 @@
/*
* arch/ppc/platforms/4xx/sycamore.h
*
- * Macros, definitions, and data structures specific to the IBM PowerPC
- * 405GPr "Sycamore" evaluation board.
+ * Sycamore board definitions
*
- * Author: Armin Kuster <akuster@mvista.com>
+ * Copyright (c) 2005 DENX Software Engineering
+ * Stefan Roese <sr@denx.de>
+ *
+ * Based on original work by
+ * Armin Kuster <akuster@mvista.com>
+ * 2000 (c) MontaVista, Software, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
*
- * 2000 (c) MontaVista, Software, Inc. This file is licensed under
- * the terms of the GNU General Public License version 2. This program
- * is licensed "as is" without any warranty of any kind, whether express
- * or implied.
*/
#ifdef __KERNEL__
#ifndef __ASM_SYCAMORE_H__
#define __ASM_SYCAMORE_H__
+#include <linux/config.h>
#include <platforms/4xx/ibm405gpr.h>
+#include <asm/ppcboot.h>
-#ifndef __ASSEMBLY__
-/*
- * Data structure defining board information maintained by the boot
- * ROM on IBM's "Sycamore" evaluation board. An effort has been made to
- * keep the field names consistent with the 8xx 'bd_t' board info
- * structures.
- */
-
-typedef struct board_info {
- unsigned char bi_s_version[4]; /* Version of this structure */
- unsigned char bi_r_version[30]; /* Version of the IBM ROM */
- unsigned int bi_memsize; /* DRAM installed, in bytes */
- unsigned char bi_enetaddr[6]; /* Local Ethernet MAC address */
- unsigned char bi_pci_enetaddr[6]; /* PCI Ethernet MAC address */
- unsigned int bi_intfreq; /* Processor speed, in Hz */
- unsigned int bi_busfreq; /* PLB Bus speed, in Hz */
- unsigned int bi_pci_busfreq; /* PCI Bus speed, in Hz */
-} bd_t;
-
-/* Some 4xx parts use a different timebase frequency from the internal clock.
-*/
-#define bi_tbfreq bi_intfreq
-
-
-/* Memory map for the IBM "Sycamore" 405GP evaluation board.
+/* Memory map for the IBM "Sycamore" 405GPr evaluation board.
* Generic 4xx plus RTC.
*/
-extern void *sycamore_rtc_base;
#define SYCAMORE_RTC_PADDR ((uint)0xf0000000)
#define SYCAMORE_RTC_VADDR SYCAMORE_RTC_PADDR
-#define SYCAMORE_RTC_SIZE ((uint)8*1024)
+#define SYCAMORE_RTC_SIZE ((uint)8*1024)
-#ifdef CONFIG_PPC405GP_INTERNAL_CLOCK
-#define BASE_BAUD 201600
-#else
#define BASE_BAUD 691200
-#endif
-#define SYCAMORE_PS2_BASE 0xF0100000
-#define SYCAMORE_FPGA_BASE 0xF0300000
+#define SYCAMORE_PS2_BASE 0xF0100000
+
+/* Flash */
+#define PPC40x_FPGA_BASE 0xF0300000
+#define PPC40x_FPGA_REG_OFFS 5 /* offset to flash map reg */
+#define PPC40x_FLASH_ONBD_N(x) (x & 0x02)
+#define PPC40x_FLASH_SRAM_SEL(x) (x & 0x01)
+#define PPC40x_FLASH_LOW 0xFFF00000
+#define PPC40x_FLASH_HIGH 0xFFF80000
+#define PPC40x_FLASH_SIZE 0x80000
#define PPC4xx_MACHINE_NAME "IBM Sycamore"
-#endif /* !__ASSEMBLY__ */
#endif /* __ASM_SYCAMORE_H__ */
#endif /* __KERNEL__ */
diff --git a/arch/ppc/platforms/4xx/walnut.c b/arch/ppc/platforms/4xx/walnut.c
index a33eda4b748..74cb33182d9 100644
--- a/arch/ppc/platforms/4xx/walnut.c
+++ b/arch/ppc/platforms/4xx/walnut.c
@@ -90,7 +90,7 @@ walnut_setup_arch(void)
kb_cs = kb_data + 1;
- fpga_status = ioremap(WALNUT_FPGA_BASE, 8);
+ fpga_status = ioremap(PPC40x_FPGA_BASE, 8);
if (!fpga_status) {
printk(KERN_CRIT
"walnut_setup_arch() fpga_status ioremap failed\n");
diff --git a/arch/ppc/platforms/4xx/walnut.h b/arch/ppc/platforms/4xx/walnut.h
index 04cfbf3696b..dcf2691698c 100644
--- a/arch/ppc/platforms/4xx/walnut.h
+++ b/arch/ppc/platforms/4xx/walnut.h
@@ -1,72 +1,55 @@
/*
* arch/ppc/platforms/4xx/walnut.h
*
- * Macros, definitions, and data structures specific to the IBM PowerPC
- * 405GP "Walnut" evaluation board.
+ * Walnut board definitions
*
- * Authors: Grant Erickson <grant@lcse.umn.edu>, Frank Rowand
- * <frank_rowand@mvista.com>, Debbie Chu <debbie_chu@mvista.com> or
- * source@mvista.com
+ * Copyright (c) 2005 DENX Software Engineering
+ * Stefan Roese <sr@denx.de>
*
- * Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu>
+ * Based on original work by
+ * Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu>
+ * Frank Rowand <frank_rowand@mvista.com>
+ * Debbie Chu <debbie_chu@mvista.com>
+ * 2000 (c) MontaVista, Software, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
*
- * 2000 (c) MontaVista, Software, Inc. This file is licensed under
- * the terms of the GNU General Public License version 2. This program
- * is licensed "as is" without any warranty of any kind, whether express
- * or implied.
*/
#ifdef __KERNEL__
#ifndef __ASM_WALNUT_H__
#define __ASM_WALNUT_H__
-/* We have a 405GP core */
+#include <linux/config.h>
#include <platforms/4xx/ibm405gp.h>
-
-#ifndef __ASSEMBLY__
-/*
- * Data structure defining board information maintained by the boot
- * ROM on IBM's "Walnut" evaluation board. An effort has been made to
- * keep the field names consistent with the 8xx 'bd_t' board info
- * structures.
- */
-
-typedef struct board_info {
- unsigned char bi_s_version[4]; /* Version of this structure */
- unsigned char bi_r_version[30]; /* Version of the IBM ROM */
- unsigned int bi_memsize; /* DRAM installed, in bytes */
- unsigned char bi_enetaddr[6]; /* Local Ethernet MAC address */
- unsigned char bi_pci_enetaddr[6]; /* PCI Ethernet MAC address */
- unsigned int bi_intfreq; /* Processor speed, in Hz */
- unsigned int bi_busfreq; /* PLB Bus speed, in Hz */
- unsigned int bi_pci_busfreq; /* PCI Bus speed, in Hz */
-} bd_t;
-
-/* Some 4xx parts use a different timebase frequency from the internal clock.
-*/
-#define bi_tbfreq bi_intfreq
-
+#include <asm/ppcboot.h>
/* Memory map for the IBM "Walnut" 405GP evaluation board.
* Generic 4xx plus RTC.
*/
-extern void *walnut_rtc_base;
#define WALNUT_RTC_PADDR ((uint)0xf0000000)
#define WALNUT_RTC_VADDR WALNUT_RTC_PADDR
#define WALNUT_RTC_SIZE ((uint)8*1024)
-#ifdef CONFIG_PPC405GP_INTERNAL_CLOCK
-#define BASE_BAUD 201600
-#else
#define BASE_BAUD 691200
-#endif
#define WALNUT_PS2_BASE 0xF0100000
-#define WALNUT_FPGA_BASE 0xF0300000
+
+/* Flash */
+#define PPC40x_FPGA_BASE 0xF0300000
+#define PPC40x_FPGA_REG_OFFS 5 /* offset to flash map reg */
+#define PPC40x_FLASH_ONBD_N(x) (x & 0x02)
+#define PPC40x_FLASH_SRAM_SEL(x) (x & 0x01)
+#define PPC40x_FLASH_LOW 0xFFF00000
+#define PPC40x_FLASH_HIGH 0xFFF80000
+#define PPC40x_FLASH_SIZE 0x80000
+#define WALNUT_FPGA_BASE PPC40x_FPGA_BASE
#define PPC4xx_MACHINE_NAME "IBM Walnut"
-#endif /* !__ASSEMBLY__ */
#endif /* __ASM_WALNUT_H__ */
#endif /* __KERNEL__ */
diff --git a/arch/ppc/platforms/4xx/yucca.c b/arch/ppc/platforms/4xx/yucca.c
new file mode 100644
index 00000000000..e60f4bd437e
--- /dev/null
+++ b/arch/ppc/platforms/4xx/yucca.c
@@ -0,0 +1,395 @@
+/*
+ * arch/ppc/platforms/4xx/yucca.c
+ *
+ * Yucca board specific routines
+ *
+ * Roland Dreier <rolandd@cisco.com> (based on luan.c by Matt Porter)
+ *
+ * Copyright 2004-2005 MontaVista Software Inc.
+ * Copyright (c) 2005 Cisco Systems. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/config.h>
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/reboot.h>
+#include <linux/pci.h>
+#include <linux/kdev_t.h>
+#include <linux/types.h>
+#include <linux/major.h>
+#include <linux/blkdev.h>
+#include <linux/console.h>
+#include <linux/delay.h>
+#include <linux/ide.h>
+#include <linux/initrd.h>
+#include <linux/seq_file.h>
+#include <linux/root_dev.h>
+#include <linux/tty.h>
+#include <linux/serial.h>
+#include <linux/serial_core.h>
+
+#include <asm/system.h>
+#include <asm/pgtable.h>
+#include <asm/page.h>
+#include <asm/dma.h>
+#include <asm/io.h>
+#include <asm/machdep.h>
+#include <asm/ocp.h>
+#include <asm/pci-bridge.h>
+#include <asm/time.h>
+#include <asm/todc.h>
+#include <asm/bootinfo.h>
+#include <asm/ppc4xx_pic.h>
+#include <asm/ppcboot.h>
+
+#include <syslib/ibm44x_common.h>
+#include <syslib/ibm440gx_common.h>
+#include <syslib/ibm440sp_common.h>
+#include <syslib/ppc440spe_pcie.h>
+
+extern bd_t __res;
+
+static struct ibm44x_clocks clocks __initdata;
+
+static void __init
+yucca_calibrate_decr(void)
+{
+ unsigned int freq;
+
+ if (mfspr(SPRN_CCR1) & CCR1_TCS)
+ freq = YUCCA_TMR_CLK;
+ else
+ freq = clocks.cpu;
+
+ ibm44x_calibrate_decr(freq);
+}
+
+static int
+yucca_show_cpuinfo(struct seq_file *m)
+{
+ seq_printf(m, "vendor\t\t: AMCC\n");
+ seq_printf(m, "machine\t\t: PPC440SPe EVB (Yucca)\n");
+
+ return 0;
+}
+
+static enum {
+ HOSE_UNKNOWN,
+ HOSE_PCIX,
+ HOSE_PCIE0,
+ HOSE_PCIE1,
+ HOSE_PCIE2
+} hose_type[4];
+
+static inline int
+yucca_map_irq(struct pci_dev *dev, unsigned char idsel, unsigned char pin)
+{
+ struct pci_controller *hose = pci_bus_to_hose(dev->bus->number);
+
+ if (hose_type[hose->index] == HOSE_PCIX) {
+ static char pci_irq_table[][4] =
+ /*
+ * PCI IDSEL/INTPIN->INTLINE
+ * A B C D
+ */
+ {
+ { 81, -1, -1, -1 }, /* IDSEL 1 - PCIX0 Slot 0 */
+ };
+ const long min_idsel = 1, max_idsel = 1, irqs_per_slot = 4;
+ return PCI_IRQ_TABLE_LOOKUP;
+ } else if (hose_type[hose->index] == HOSE_PCIE0) {
+ static char pci_irq_table[][4] =
+ /*
+ * PCI IDSEL/INTPIN->INTLINE
+ * A B C D
+ */
+ {
+ { 96, 97, 98, 99 },
+ };
+ const long min_idsel = 1, max_idsel = 1, irqs_per_slot = 4;
+ return PCI_IRQ_TABLE_LOOKUP;
+ } else if (hose_type[hose->index] == HOSE_PCIE1) {
+ static char pci_irq_table[][4] =
+ /*
+ * PCI IDSEL/INTPIN->INTLINE
+ * A B C D
+ */
+ {
+ { 100, 101, 102, 103 },
+ };
+ const long min_idsel = 1, max_idsel = 1, irqs_per_slot = 4;
+ return PCI_IRQ_TABLE_LOOKUP;
+ } else if (hose_type[hose->index] == HOSE_PCIE2) {
+ static char pci_irq_table[][4] =
+ /*
+ * PCI IDSEL/INTPIN->INTLINE
+ * A B C D
+ */
+ {
+ { 104, 105, 106, 107 },
+ };
+ const long min_idsel = 1, max_idsel = 1, irqs_per_slot = 4;
+ return PCI_IRQ_TABLE_LOOKUP;
+ }
+ return -1;
+}
+
+static void __init yucca_set_emacdata(void)
+{
+ struct ocp_def *def;
+ struct ocp_func_emac_data *emacdata;
+
+ /* Set phy_map, phy_mode, and mac_addr for the EMAC */
+ def = ocp_get_one_device(OCP_VENDOR_IBM, OCP_FUNC_EMAC, 0);
+ emacdata = def->additions;
+ emacdata->phy_map = 0x00000001; /* Skip 0x00 */
+ emacdata->phy_mode = PHY_MODE_GMII;
+ memcpy(emacdata->mac_addr, __res.bi_enetaddr, 6);
+}
+
+static int __init yucca_pcie_card_present(int port)
+{
+ void __iomem *pcie_fpga_base;
+ u16 reg;
+
+ pcie_fpga_base = ioremap64(YUCCA_FPGA_REG_BASE, YUCCA_FPGA_REG_SIZE);
+ reg = in_be16(pcie_fpga_base + FPGA_REG1C);
+ iounmap(pcie_fpga_base);
+
+ switch(port) {
+ case 0: return !(reg & FPGA_REG1C_PE0_PRSNT);
+ case 1: return !(reg & FPGA_REG1C_PE1_PRSNT);
+ case 2: return !(reg & FPGA_REG1C_PE2_PRSNT);
+ default: return 0;
+ }
+}
+
+/*
+ * For the given slot, set rootpoint mode, send power to the slot,
+ * turn on the green LED and turn off the yellow LED, enable the clock
+ * and turn off reset.
+ */
+static void __init yucca_setup_pcie_fpga_rootpoint(int port)
+{
+ void __iomem *pcie_reg_fpga_base;
+ u16 power, clock, green_led, yellow_led, reset_off, rootpoint, endpoint;
+
+ pcie_reg_fpga_base = ioremap64(YUCCA_FPGA_REG_BASE, YUCCA_FPGA_REG_SIZE);
+
+ switch(port) {
+ case 0:
+ rootpoint = FPGA_REG1C_PE0_ROOTPOINT;
+ endpoint = 0;
+ power = FPGA_REG1A_PE0_PWRON;
+ green_led = FPGA_REG1A_PE0_GLED;
+ clock = FPGA_REG1A_PE0_REFCLK_ENABLE;
+ yellow_led = FPGA_REG1A_PE0_YLED;
+ reset_off = FPGA_REG1C_PE0_PERST;
+ break;
+ case 1:
+ rootpoint = 0;
+ endpoint = FPGA_REG1C_PE1_ENDPOINT;
+ power = FPGA_REG1A_PE1_PWRON;
+ green_led = FPGA_REG1A_PE1_GLED;
+ clock = FPGA_REG1A_PE1_REFCLK_ENABLE;
+ yellow_led = FPGA_REG1A_PE1_YLED;
+ reset_off = FPGA_REG1C_PE1_PERST;
+ break;
+ case 2:
+ rootpoint = 0;
+ endpoint = FPGA_REG1C_PE2_ENDPOINT;
+ power = FPGA_REG1A_PE2_PWRON;
+ green_led = FPGA_REG1A_PE2_GLED;
+ clock = FPGA_REG1A_PE2_REFCLK_ENABLE;
+ yellow_led = FPGA_REG1A_PE2_YLED;
+ reset_off = FPGA_REG1C_PE2_PERST;
+ break;
+
+ default:
+ return;
+ }
+
+ out_be16(pcie_reg_fpga_base + FPGA_REG1A,
+ ~(power | clock | green_led) &
+ (yellow_led | in_be16(pcie_reg_fpga_base + FPGA_REG1A)));
+ out_be16(pcie_reg_fpga_base + FPGA_REG1C,
+ ~(endpoint | reset_off) &
+ (rootpoint | in_be16(pcie_reg_fpga_base + FPGA_REG1C)));
+
+ /*
+ * Leave device in reset for a while after powering on the
+ * slot to give it a chance to initialize.
+ */
+ mdelay(250);
+
+ out_be16(pcie_reg_fpga_base + FPGA_REG1C,
+ reset_off | in_be16(pcie_reg_fpga_base + FPGA_REG1C));
+
+ iounmap(pcie_reg_fpga_base);
+}
+
+static void __init
+yucca_setup_hoses(void)
+{
+ struct pci_controller *hose;
+ char name[20];
+ int i;
+
+ if (0 && ppc440spe_init_pcie()) {
+ printk(KERN_WARNING "PPC440SPe PCI Express initialization failed\n");
+ return;
+ }
+
+ for (i = 0; i <= 2; ++i) {
+ if (!yucca_pcie_card_present(i))
+ continue;
+
+ printk(KERN_INFO "PCIE%d: card present\n", i);
+ yucca_setup_pcie_fpga_rootpoint(i);
+ if (ppc440spe_init_pcie_rootport(i)) {
+ printk(KERN_WARNING "PCIE%d: initialization failed\n", i);
+ continue;
+ }
+
+ hose = pcibios_alloc_controller();
+ if (!hose)
+ return;
+
+ sprintf(name, "PCIE%d host bridge", i);
+ pci_init_resource(&hose->io_resource,
+ YUCCA_PCIX_LOWER_IO,
+ YUCCA_PCIX_UPPER_IO,
+ IORESOURCE_IO,
+ name);
+
+ hose->mem_space.start = YUCCA_PCIE_LOWER_MEM +
+ i * YUCCA_PCIE_MEM_SIZE;
+ hose->mem_space.end = hose->mem_space.start +
+ YUCCA_PCIE_MEM_SIZE - 1;
+
+ pci_init_resource(&hose->mem_resources[0],
+ hose->mem_space.start,
+ hose->mem_space.end,
+ IORESOURCE_MEM,
+ name);
+
+ hose->first_busno = 0;
+ hose->last_busno = 15;
+ hose_type[hose->index] = HOSE_PCIE0 + i;
+
+ ppc440spe_setup_pcie(hose, i);
+ hose->last_busno = pciauto_bus_scan(hose, hose->first_busno);
+ }
+
+ ppc_md.pci_swizzle = common_swizzle;
+ ppc_md.pci_map_irq = yucca_map_irq;
+}
+
+TODC_ALLOC();
+
+static void __init
+yucca_early_serial_map(void)
+{
+ struct uart_port port;
+
+ /* Setup ioremapped serial port access */
+ memset(&port, 0, sizeof(port));
+ port.membase = ioremap64(PPC440SPE_UART0_ADDR, 8);
+ port.irq = UART0_INT;
+ port.uartclk = clocks.uart0;
+ port.regshift = 0;
+ port.iotype = SERIAL_IO_MEM;
+ port.flags = ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST;
+ port.line = 0;
+
+ if (early_serial_setup(&port) != 0) {
+ printk("Early serial init of port 0 failed\n");
+ }
+
+ port.membase = ioremap64(PPC440SPE_UART1_ADDR, 8);
+ port.irq = UART1_INT;
+ port.uartclk = clocks.uart1;
+ port.line = 1;
+
+ if (early_serial_setup(&port) != 0) {
+ printk("Early serial init of port 1 failed\n");
+ }
+
+ port.membase = ioremap64(PPC440SPE_UART2_ADDR, 8);
+ port.irq = UART2_INT;
+ port.uartclk = BASE_BAUD;
+ port.line = 2;
+
+ if (early_serial_setup(&port) != 0) {
+ printk("Early serial init of port 2 failed\n");
+ }
+}
+
+static void __init
+yucca_setup_arch(void)
+{
+ yucca_set_emacdata();
+
+#if !defined(CONFIG_BDI_SWITCH)
+ /*
+ * The Abatron BDI JTAG debugger does not tolerate others
+ * mucking with the debug registers.
+ */
+ mtspr(SPRN_DBCR0, (DBCR0_TDE | DBCR0_IDM));
+#endif
+
+ /*
+ * Determine various clocks.
+ * To be completely correct we should get SysClk
+ * from FPGA, because it can be changed by on-board switches
+ * --ebs
+ */
+ /* 440GX and 440SPe clocking is the same - rd */
+ ibm440gx_get_clocks(&clocks, 33333333, 6 * 1843200);
+ ocp_sys_info.opb_bus_freq = clocks.opb;
+
+ /* init to some ~sane value until calibrate_delay() runs */
+ loops_per_jiffy = 50000000/HZ;
+
+ /* Setup PCIXn host bridges */
+ yucca_setup_hoses();
+
+#ifdef CONFIG_BLK_DEV_INITRD
+ if (initrd_start)
+ ROOT_DEV = Root_RAM0;
+ else
+#endif
+#ifdef CONFIG_ROOT_NFS
+ ROOT_DEV = Root_NFS;
+#else
+ ROOT_DEV = Root_HDA1;
+#endif
+
+ yucca_early_serial_map();
+
+ /* Identify the system */
+ printk("Yucca port (Roland Dreier <rolandd@cisco.com>)\n");
+}
+
+void __init platform_init(unsigned long r3, unsigned long r4,
+ unsigned long r5, unsigned long r6, unsigned long r7)
+{
+ ibm44x_platform_init(r3, r4, r5, r6, r7);
+
+ ppc_md.setup_arch = yucca_setup_arch;
+ ppc_md.show_cpuinfo = yucca_show_cpuinfo;
+ ppc_md.find_end_of_memory = ibm440sp_find_end_of_memory;
+ ppc_md.get_irq = NULL; /* Set in ppc4xx_pic_init() */
+
+ ppc_md.calibrate_decr = yucca_calibrate_decr;
+#ifdef CONFIG_KGDB
+ ppc_md.early_serial_map = yucca_early_serial_map;
+#endif
+}
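
The per-hose tables in yucca_map_irq() above follow the usual arch/ppc swizzle idiom: each table is indexed by IDSEL (relative to min_idsel) and by the interrupt pin, and PCI_IRQ_TABLE_LOOKUP does the bounds-checked lookup. As a rough sketch only (the real macro lives in <asm-ppc/pci-bridge.h>), the lookup behaves like:

	/*
	 * Illustrative approximation of PCI_IRQ_TABLE_LOOKUP; idsel, pin,
	 * min_idsel, max_idsel, irqs_per_slot and pci_irq_table are the
	 * local names visible at each point of use above.
	 */
	({
		long irq = -1;
		if (idsel >= min_idsel && idsel <= max_idsel &&
		    pin <= irqs_per_slot)
			irq = pci_irq_table[idsel - min_idsel][pin - 1];
		irq;
	})

so a one-row table such as { 96, 97, 98, 99 } simply maps INTA..INTD of the single device behind that root port to IRQs 96..99.
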
diff --git a/arch/ppc/platforms/4xx/yucca.h b/arch/ppc/platforms/4xx/yucca.h
new file mode 100644
index 00000000000..01a4afea151
--- /dev/null
+++ b/arch/ppc/platforms/4xx/yucca.h
@@ -0,0 +1,111 @@
+/*
+ * arch/ppc/platforms/4xx/yucca.h
+ *
+ * Yucca board definitions
+ *
+ * Roland Dreier <rolandd@cisco.com> (based on luan.h by Matt Porter)
+ *
+ * Copyright 2004-2005 MontaVista Software Inc.
+ * Copyright (c) 2005 Cisco Systems. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#ifdef __KERNEL__
+#ifndef __ASM_YUCCA_H__
+#define __ASM_YUCCA_H__
+
+#include <linux/config.h>
+#include <platforms/4xx/ppc440spe.h>
+
+/* F/W TLB mapping used in bootloader glue to reset EMAC */
+#define PPC44x_EMAC0_MR0 0xa0000800
+
+/* Location of MAC addresses in PIBS image */
+#define PIBS_FLASH_BASE 0xffe00000
+#define PIBS_MAC_BASE (PIBS_FLASH_BASE+0x1b0400)
+
+/* External timer clock frequency */
+#define YUCCA_TMR_CLK 25000000
+
+/*
+ * FPGA registers
+ */
+#define YUCCA_FPGA_REG_BASE 0x00000004e2000000ULL
+#define YUCCA_FPGA_REG_SIZE 0x24
+
+#define FPGA_REG1A 0x1a
+
+#define FPGA_REG1A_PE0_GLED 0x8000
+#define FPGA_REG1A_PE1_GLED 0x4000
+#define FPGA_REG1A_PE2_GLED 0x2000
+#define FPGA_REG1A_PE0_YLED 0x1000
+#define FPGA_REG1A_PE1_YLED 0x0800
+#define FPGA_REG1A_PE2_YLED 0x0400
+#define FPGA_REG1A_PE0_PWRON 0x0200
+#define FPGA_REG1A_PE1_PWRON 0x0100
+#define FPGA_REG1A_PE2_PWRON 0x0080
+#define FPGA_REG1A_PE0_REFCLK_ENABLE 0x0040
+#define FPGA_REG1A_PE1_REFCLK_ENABLE 0x0020
+#define FPGA_REG1A_PE2_REFCLK_ENABLE 0x0010
+#define FPGA_REG1A_PE_SPREAD0 0x0008
+#define FPGA_REG1A_PE_SPREAD1 0x0004
+#define FPGA_REG1A_PE_SELSOURCE_0 0x0002
+#define FPGA_REG1A_PE_SELSOURCE_1 0x0001
+
+#define FPGA_REG1C 0x1c
+
+#define FPGA_REG1C_PE0_ROOTPOINT 0x8000
+#define FPGA_REG1C_PE1_ENDPOINT 0x4000
+#define FPGA_REG1C_PE2_ENDPOINT 0x2000
+#define FPGA_REG1C_PE0_PRSNT 0x1000
+#define FPGA_REG1C_PE1_PRSNT 0x0800
+#define FPGA_REG1C_PE2_PRSNT 0x0400
+#define FPGA_REG1C_PE0_WAKE 0x0080
+#define FPGA_REG1C_PE1_WAKE 0x0040
+#define FPGA_REG1C_PE2_WAKE 0x0020
+#define FPGA_REG1C_PE0_PERST 0x0010
+#define FPGA_REG1C_PE1_PERST 0x0008
+#define FPGA_REG1C_PE2_PERST 0x0004
+
+/*
+ * Serial port defines
+ */
+#define RS_TABLE_SIZE 3
+
+/* PIBS defined UART mappings, used before early_serial_setup */
+#define UART0_IO_BASE 0xa0000200
+#define UART1_IO_BASE 0xa0000300
+#define UART2_IO_BASE 0xa0000600
+
+#define BASE_BAUD 11059200
+#define STD_UART_OP(num) \
+ { 0, BASE_BAUD, 0, UART##num##_INT, \
+ (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST), \
+ iomem_base: (void*)UART##num##_IO_BASE, \
+ io_type: SERIAL_IO_MEM},
+
+#define SERIAL_PORT_DFNS \
+ STD_UART_OP(0) \
+ STD_UART_OP(1) \
+ STD_UART_OP(2)
+
+/* PCI support */
+#define YUCCA_PCIX_LOWER_IO 0x00000000
+#define YUCCA_PCIX_UPPER_IO 0x0000ffff
+#define YUCCA_PCIX_LOWER_MEM 0x80000000
+#define YUCCA_PCIX_UPPER_MEM 0x8fffffff
+#define YUCCA_PCIE_LOWER_MEM 0x90000000
+#define YUCCA_PCIE_MEM_SIZE 0x10000000
+
+#define YUCCA_PCIX_MEM_SIZE 0x10000000
+#define YUCCA_PCIX_MEM_OFFSET 0x00000000
+#define YUCCA_PCIE_MEM_SIZE 0x10000000
+#define YUCCA_PCIE_MEM_OFFSET 0x00000000
+
+#endif /* __ASM_YUCCA_H__ */
+#endif /* __KERNEL__ */
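
A note on the SERIAL_PORT_DFNS entries above: the `iomem_base:' and `io_type:' initializers use the old GCC designated-initializer extension still common in 2.6-era board headers. The ISO C99 spelling of the same two fields would be:

	/* C99 equivalent of the GCC `field:' syntax in STD_UART_OP() */
	.iomem_base = (void *)UART0_IO_BASE,
	.io_type    = SERIAL_IO_MEM,

with the leading positional fields (baud base, IRQ, flags) unchanged.
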
diff --git a/arch/ppc/platforms/85xx/mpc85xx_ads_common.c b/arch/ppc/platforms/85xx/mpc85xx_ads_common.c
index bd3ac013675..16ad092d8a0 100644
--- a/arch/ppc/platforms/85xx/mpc85xx_ads_common.c
+++ b/arch/ppc/platforms/85xx/mpc85xx_ads_common.c
@@ -45,6 +45,8 @@
#include <mm/mmu_decl.h>
+#include <syslib/ppc85xx_rio.h>
+
#include <platforms/85xx/mpc85xx_ads_common.h>
#ifndef CONFIG_PCI
@@ -189,3 +191,11 @@ mpc85xx_exclude_device(u_char bus, u_char devfn)
}
#endif /* CONFIG_PCI */
+
+#ifdef CONFIG_RAPIDIO
+void platform_rio_init(void)
+{
+ /* 512MB RIO LAW at 0xc0000000 */
+ mpc85xx_rio_setup(0xc0000000, 0x20000000);
+}
+#endif /* CONFIG_RAPIDIO */
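
For reference, the window programmed here is 0x20000000 bytes = 2^29 = 512 MiB starting at 0xc0000000, which matches the comment; the STx GP3 board below programs an identical RapidIO local access window.
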
diff --git a/arch/ppc/platforms/85xx/stx_gp3.c b/arch/ppc/platforms/85xx/stx_gp3.c
index 1e1b85f8193..15ce9d07063 100644
--- a/arch/ppc/platforms/85xx/stx_gp3.c
+++ b/arch/ppc/platforms/85xx/stx_gp3.c
@@ -37,6 +37,7 @@
#include <linux/module.h>
#include <linux/fsl_devices.h>
#include <linux/interrupt.h>
+#include <linux/rio.h>
#include <asm/system.h>
#include <asm/pgtable.h>
@@ -57,6 +58,7 @@
#include <syslib/cpm2_pic.h>
#include <syslib/ppc85xx_common.h>
+#include <syslib/ppc85xx_rio.h>
unsigned char __res[sizeof(bd_t)];
@@ -273,6 +275,18 @@ int mpc85xx_exclude_device(u_char bus, u_char devfn)
}
#endif /* CONFIG_PCI */
+#ifdef CONFIG_RAPIDIO
+void
+platform_rio_init(void)
+{
+ /*
+ * The STx firmware configures the RapidIO Local Access Window
+ * at 0xc0000000 with a size of 512MB.
+ */
+ mpc85xx_rio_setup(0xc0000000, 0x20000000);
+}
+#endif /* CONFIG_RAPIDIO */
+
void __init
platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
unsigned long r6, unsigned long r7)
diff --git a/arch/ppc/platforms/ev64360.c b/arch/ppc/platforms/ev64360.c
index b1324564456..b9d844f88c2 100644
--- a/arch/ppc/platforms/ev64360.c
+++ b/arch/ppc/platforms/ev64360.c
@@ -52,6 +52,8 @@ static u32 ev64360_bus_frequency;
unsigned char __res[sizeof(bd_t)];
+TODC_ALLOC();
+
static int __init
ev64360_map_irq(struct pci_dev *dev, unsigned char idsel, unsigned char pin)
{
@@ -182,6 +184,9 @@ ev64360_setup_peripherals(void)
EV64360_RTC_WINDOW_BASE, EV64360_RTC_WINDOW_SIZE, 0);
bh.ci->enable_window_32bit(&bh, MV64x60_CPU2DEV_1_WIN);
+ TODC_INIT(TODC_TYPE_DS1501, 0, 0,
+ ioremap(EV64360_RTC_WINDOW_BASE, EV64360_RTC_WINDOW_SIZE), 8);
+
mv64x60_set_32bit_window(&bh, MV64x60_CPU2SRAM_WIN,
EV64360_INTERNAL_SRAM_BASE, MV64360_SRAM_SIZE, 0);
bh.ci->enable_window_32bit(&bh, MV64x60_CPU2SRAM_WIN);
@@ -496,6 +501,13 @@ platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
ppc_md.power_off = ev64360_power_off;
ppc_md.halt = ev64360_halt;
ppc_md.find_end_of_memory = ev64360_find_end_of_memory;
+ ppc_md.init = NULL;
+
+ ppc_md.time_init = todc_time_init;
+ ppc_md.set_rtc_time = todc_set_rtc_time;
+ ppc_md.get_rtc_time = todc_get_rtc_time;
+ ppc_md.nvram_read_val = todc_direct_read_val;
+ ppc_md.nvram_write_val = todc_direct_write_val;
ppc_md.calibrate_decr = ev64360_calibrate_decr;
#if defined(CONFIG_SERIAL_TEXT_DEBUG) && defined(CONFIG_SERIAL_MPSC_CONSOLE)
diff --git a/arch/ppc/syslib/Makefile b/arch/ppc/syslib/Makefile
index b4ef15b45c4..5bd33baac24 100644
--- a/arch/ppc/syslib/Makefile
+++ b/arch/ppc/syslib/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_440EP) += ibm440gx_common.o
obj-$(CONFIG_440GP) += ibm440gp_common.o
obj-$(CONFIG_440GX) += ibm440gx_common.o
obj-$(CONFIG_440SP) += ibm440gx_common.o ibm440sp_common.o
+obj-$(CONFIG_440SPE) += ibm440gx_common.o ibm440sp_common.o ppc440spe_pcie.o
ifeq ($(CONFIG_4xx),y)
ifeq ($(CONFIG_VIRTEX_II_PRO),y)
obj-$(CONFIG_40x) += xilinx_pic.o
@@ -32,6 +33,7 @@ obj-$(CONFIG_PPC4xx_DMA) += ppc4xx_dma.o
obj-$(CONFIG_PPC4xx_EDMA) += ppc4xx_sgdma.o
ifeq ($(CONFIG_40x),y)
obj-$(CONFIG_PCI) += pci_auto.o ppc405_pci.o
+obj-$(CONFIG_RAPIDIO) += ppc85xx_rio.o
endif
endif
obj-$(CONFIG_8xx) += m8xx_setup.o ppc8xx_pic.o $(wdt-mpc8xx-y) \
@@ -46,12 +48,14 @@ obj-$(CONFIG_BAMBOO) += pci_auto.o todc_time.o
obj-$(CONFIG_CPCI690) += todc_time.o pci_auto.o
obj-$(CONFIG_EBONY) += pci_auto.o todc_time.o
obj-$(CONFIG_EV64260) += todc_time.o pci_auto.o
+obj-$(CONFIG_EV64360) += todc_time.o
obj-$(CONFIG_CHESTNUT) += mv64360_pic.o pci_auto.o
obj-$(CONFIG_GEMINI) += open_pic.o
obj-$(CONFIG_GT64260) += gt64260_pic.o
obj-$(CONFIG_LOPEC) += pci_auto.o todc_time.o
obj-$(CONFIG_HDPU) += pci_auto.o
obj-$(CONFIG_LUAN) += pci_auto.o todc_time.o
+obj-$(CONFIG_YUCCA) += pci_auto.o todc_time.o
obj-$(CONFIG_KATANA) += pci_auto.o
obj-$(CONFIG_MV64360) += mv64360_pic.o
obj-$(CONFIG_MV64X60) += mv64x60.o mv64x60_win.o
diff --git a/arch/ppc/syslib/ibm440sp_common.c b/arch/ppc/syslib/ibm440sp_common.c
index 417d4cff77a..cdafda127d8 100644
--- a/arch/ppc/syslib/ibm440sp_common.c
+++ b/arch/ppc/syslib/ibm440sp_common.c
@@ -1,7 +1,7 @@
/*
* arch/ppc/syslib/ibm440sp_common.c
*
- * PPC440SP system library
+ * PPC440SP/PPC440SPe system library
*
* Matt Porter <mporter@kernel.crashing.org>
* Copyright 2002-2005 MontaVista Software Inc.
@@ -35,7 +35,7 @@ unsigned long __init ibm440sp_find_end_of_memory(void)
u32 mem_size = 0;
/* Read two bank sizes and sum */
- for (i=0; i<2; i++)
+ for (i=0; i< MQ0_NUM_BANKS; i++)
switch (mfdcr(DCRN_MQ0_BS0BAS + i) & MQ0_CONFIG_SIZE_MASK) {
case MQ0_CONFIG_SIZE_8M:
mem_size += PPC44x_MEM_SIZE_8M;
diff --git a/arch/ppc/syslib/ibm44x_common.c b/arch/ppc/syslib/ibm44x_common.c
index 5152c8e4134..71db11d2215 100644
--- a/arch/ppc/syslib/ibm44x_common.c
+++ b/arch/ppc/syslib/ibm44x_common.c
@@ -20,6 +20,7 @@
#include <linux/types.h>
#include <linux/serial.h>
#include <linux/module.h>
+#include <linux/initrd.h>
#include <asm/ibm44x.h>
#include <asm/mmu.h>
@@ -214,9 +215,20 @@ void __init ibm44x_platform_init(unsigned long r3, unsigned long r4, unsigned lo
/* Called from machine_check_exception */
void platform_machine_check(struct pt_regs *regs)
{
+#if defined(CONFIG_440SP) || defined(CONFIG_440SPE)
+ printk("PLB0: BEAR=0x%08x%08x ACR= 0x%08x BESR= 0x%08x%08x\n",
+ mfdcr(DCRN_PLB0_BEARH), mfdcr(DCRN_PLB0_BEARL),
+ mfdcr(DCRN_PLB0_ACR), mfdcr(DCRN_PLB0_BESRH),
+ mfdcr(DCRN_PLB0_BESRL));
+ printk("PLB1: BEAR=0x%08x%08x ACR= 0x%08x BESR= 0x%08x%08x\n",
+ mfdcr(DCRN_PLB1_BEARH), mfdcr(DCRN_PLB1_BEARL),
+ mfdcr(DCRN_PLB1_ACR), mfdcr(DCRN_PLB1_BESRH),
+ mfdcr(DCRN_PLB1_BESRL));
+#else
printk("PLB0: BEAR=0x%08x%08x ACR= 0x%08x BESR= 0x%08x\n",
mfdcr(DCRN_PLB0_BEARH), mfdcr(DCRN_PLB0_BEARL),
mfdcr(DCRN_PLB0_ACR), mfdcr(DCRN_PLB0_BESR));
+#endif
printk("POB0: BEAR=0x%08x%08x BESR0=0x%08x BESR1=0x%08x\n",
mfdcr(DCRN_POB0_BEARH), mfdcr(DCRN_POB0_BEARL),
mfdcr(DCRN_POB0_BESR0), mfdcr(DCRN_POB0_BESR1));
diff --git a/arch/ppc/syslib/m8xx_wdt.c b/arch/ppc/syslib/m8xx_wdt.c
index c5ac5ce5d7d..a21632d37e5 100644
--- a/arch/ppc/syslib/m8xx_wdt.c
+++ b/arch/ppc/syslib/m8xx_wdt.c
@@ -14,6 +14,7 @@
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/sched.h>
+#include <asm/io.h>
#include <asm/8xx_immap.h>
#include <syslib/m8xx_wdt.h>
@@ -29,8 +30,8 @@ void m8xx_wdt_reset(void)
{
volatile immap_t *imap = (volatile immap_t *)IMAP_ADDR;
- out_be16(imap->im_siu_conf.sc_swsr, 0x556c); /* write magic1 */
- out_be16(imap->im_siu_conf.sc_swsr, 0xaa39); /* write magic2 */
+ out_be16(&imap->im_siu_conf.sc_swsr, 0x556c); /* write magic1 */
+ out_be16(&imap->im_siu_conf.sc_swsr, 0xaa39); /* write magic2 */
}
static irqreturn_t m8xx_wdt_interrupt(int irq, void *dev, struct pt_regs *regs)
@@ -39,7 +40,7 @@ static irqreturn_t m8xx_wdt_interrupt(int irq, void *dev, struct pt_regs *regs)
m8xx_wdt_reset();
- out_be16(imap->im_sit.sit_piscr, in_be16(imap->im_sit.sit_piscr | PISCR_PS)); /* clear irq */
+ out_be16(&imap->im_sit.sit_piscr, in_be16(&imap->im_sit.sit_piscr) | PISCR_PS); /* clear irq */
return IRQ_HANDLED;
}
@@ -51,7 +52,7 @@ void __init m8xx_wdt_handler_install(bd_t * binfo)
u32 sypcr;
u32 pitrtclk;
- sypcr = in_be32(imap->im_siu_conf.sc_sypcr);
+ sypcr = in_be32(&imap->im_siu_conf.sc_sypcr);
if (!(sypcr & 0x04)) {
printk(KERN_NOTICE "m8xx_wdt: wdt disabled (SYPCR: 0x%08X)\n",
@@ -87,9 +88,9 @@ void __init m8xx_wdt_handler_install(bd_t * binfo)
else
pitc = pitrtclk * wdt_timeout / binfo->bi_intfreq / 2;
- out_be32(imap->im_sit.sit_pitc, pitc << 16);
+ out_be32(&imap->im_sit.sit_pitc, pitc << 16);
- out_be16(imap->im_sit.sit_piscr, (mk_int_int_mask(PIT_INTERRUPT) << 8) | PISCR_PIE | PISCR_PTE);
+ out_be16(&imap->im_sit.sit_piscr, (mk_int_int_mask(PIT_INTERRUPT) << 8) | PISCR_PIE | PISCR_PTE);
if (setup_irq(PIT_INTERRUPT, &m8xx_wdt_irqaction))
panic("m8xx_wdt: error setting up the watchdog irq!");
diff --git a/arch/ppc/syslib/ppc405_pci.c b/arch/ppc/syslib/ppc405_pci.c
index 81c83bf98df..d6d838b16da 100644
--- a/arch/ppc/syslib/ppc405_pci.c
+++ b/arch/ppc/syslib/ppc405_pci.c
@@ -89,13 +89,6 @@ ppc4xx_find_bridges(void)
isa_mem_base = 0;
pci_dram_offset = 0;
-#if (PSR_PCI_ARBIT_EN > 1)
- /* Check if running in slave mode */
- if ((mfdcr(DCRN_CHPSR) & PSR_PCI_ARBIT_EN) == 0) {
- printk("Running as PCI slave, kernel PCI disabled !\n");
- return;
- }
-#endif
/* Setup PCI32 hose */
hose_a = pcibios_alloc_controller();
if (!hose_a)
diff --git a/arch/ppc/syslib/ppc440spe_pcie.c b/arch/ppc/syslib/ppc440spe_pcie.c
new file mode 100644
index 00000000000..1509fc1ddfb
--- /dev/null
+++ b/arch/ppc/syslib/ppc440spe_pcie.c
@@ -0,0 +1,442 @@
+/*
+ * Copyright (c) 2005 Cisco Systems. All rights reserved.
+ * Roland Dreier <rolandd@cisco.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+
+#include <asm/reg.h>
+#include <asm/io.h>
+#include <asm/ibm44x.h>
+
+#include "ppc440spe_pcie.h"
+
+static int
+pcie_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
+ int len, u32 *val)
+{
+ struct pci_controller *hose = bus->sysdata;
+
+ if (PCI_SLOT(devfn) != 1)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ offset += devfn << 12;
+
+ /*
+ * Note: the caller has already checked that offset is
+ * suitably aligned and that len is 1, 2 or 4.
+ */
+ switch (len) {
+ case 1:
+ *val = in_8(hose->cfg_data + offset);
+ break;
+ case 2:
+ *val = in_le16(hose->cfg_data + offset);
+ break;
+ default:
+ *val = in_le32(hose->cfg_data + offset);
+ break;
+ }
+
+ if (0) printk("%s: read %x(%d) @ %x\n", __func__, *val, len, offset);
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int
+pcie_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
+ int len, u32 val)
+{
+ struct pci_controller *hose = bus->sysdata;
+
+ if (PCI_SLOT(devfn) != 1)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ offset += devfn << 12;
+
+ switch (len) {
+ case 1:
+ out_8(hose->cfg_data + offset, val);
+ break;
+ case 2:
+ out_le16(hose->cfg_data + offset, val);
+ break;
+ default:
+ out_le32(hose->cfg_data + offset, val);
+ break;
+ }
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static struct pci_ops pcie_pci_ops =
+{
+ .read = pcie_read_config,
+ .write = pcie_write_config
+};
+
+enum {
+ PTYPE_ENDPOINT = 0x0,
+ PTYPE_LEGACY_ENDPOINT = 0x1,
+ PTYPE_ROOT_PORT = 0x4,
+
+ LNKW_X1 = 0x1,
+ LNKW_X4 = 0x4,
+ LNKW_X8 = 0x8
+};
+
+static void check_error(void)
+{
+ u32 valPE0, valPE1, valPE2;
+
+ /* SDR0_PEGPLLLCT1 reset */
+ if (!(valPE0 = SDR_READ(PESDR0_PLLLCT1) & 0x01000000)) {
+ printk(KERN_INFO "PCIE: SDR0_PEGPLLLCT1 reset error 0x%8x\n", valPE0);
+ }
+
+ valPE0 = SDR_READ(PESDR0_RCSSET);
+ valPE1 = SDR_READ(PESDR1_RCSSET);
+ valPE2 = SDR_READ(PESDR2_RCSSET);
+
+ /* SDR0_PExRCSSET rstgu */
+ if ( !(valPE0 & 0x01000000) ||
+ !(valPE1 & 0x01000000) ||
+ !(valPE2 & 0x01000000)) {
+ printk(KERN_INFO "PCIE: SDR0_PExRCSSET rstgu error\n");
+ }
+
+ /* SDR0_PExRCSSET rstdl */
+ if ( !(valPE0 & 0x00010000) ||
+ !(valPE1 & 0x00010000) ||
+ !(valPE2 & 0x00010000)) {
+ printk(KERN_INFO "PCIE: SDR0_PExRCSSET rstdl error\n");
+ }
+
+ /* SDR0_PExRCSSET rstpyn */
+ if ( (valPE0 & 0x00001000) ||
+ (valPE1 & 0x00001000) ||
+ (valPE2 & 0x00001000)) {
+ printk(KERN_INFO "PCIE: SDR0_PExRCSSET rstpyn error\n");
+ }
+
+ /* SDR0_PExRCSSET hldplb */
+ if ( (valPE0 & 0x10000000) ||
+ (valPE1 & 0x10000000) ||
+ (valPE2 & 0x10000000)) {
+ printk(KERN_INFO "PCIE: SDR0_PExRCSSET hldplb error\n");
+ }
+
+ /* SDR0_PExRCSSET rdy */
+ if ( (valPE0 & 0x00100000) ||
+ (valPE1 & 0x00100000) ||
+ (valPE2 & 0x00100000)) {
+ printk(KERN_INFO "PCIE: SDR0_PExRCSSET rdy error\n");
+ }
+
+ /* SDR0_PExRCSSET shutdown */
+ if ( (valPE0 & 0x00000100) ||
+ (valPE1 & 0x00000100) ||
+ (valPE2 & 0x00000100)) {
+ printk(KERN_INFO "PCIE: SDR0_PExRCSSET shutdown error\n");
+ }
+}
+
+/*
+ * Initialize PCI Express core as described in User Manual section 27.12.1
+ */
+int ppc440spe_init_pcie(void)
+{
+ /* Set PLL clock receiver to LVPECL */
+ SDR_WRITE(PESDR0_PLLLCT1, SDR_READ(PESDR0_PLLLCT1) | 1 << 28);
+
+ check_error();
+
+ printk(KERN_INFO "PCIE initialization OK\n");
+
+ if (!(SDR_READ(PESDR0_PLLLCT2) & 0x10000))
+ printk(KERN_INFO "PESDR_PLLCT2 resistance calibration failed (0x%08x)\n",
+ SDR_READ(PESDR0_PLLLCT2));
+
+ /* De-assert reset of PCIe PLL, wait for lock */
+ SDR_WRITE(PESDR0_PLLLCT1, SDR_READ(PESDR0_PLLLCT1) & ~(1 << 24));
+ udelay(3);
+
+ return 0;
+}
+
+int ppc440spe_init_pcie_rootport(int port)
+{
+ static int core_init;
+ void __iomem *utl_base;
+ u32 val = 0;
+ int i;
+
+ if (!core_init) {
+ ++core_init;
+ i = ppc440spe_init_pcie();
+ if (i)
+ return i;
+ }
+
+ /*
+ * Initialize various parts of the PCI Express core for our port:
+ *
+ * - Set as a root port and enable max width
+ * (PXIE0 -> X8, PCIE1 and PCIE2 -> X4).
+ * - Set up UTL configuration.
+ * - Increase SERDES drive strength to levels suggested by AMCC.
+ * - De-assert RSTPYN, RSTDL and RSTGU.
+ */
+ switch (port) {
+ case 0:
+ SDR_WRITE(PESDR0_DLPSET, PTYPE_ROOT_PORT << 20 | LNKW_X8 << 12);
+
+ SDR_WRITE(PESDR0_UTLSET1, 0x21222222);
+ SDR_WRITE(PESDR0_UTLSET2, 0x11000000);
+
+ SDR_WRITE(PESDR0_HSSL0SET1, 0x35000000);
+ SDR_WRITE(PESDR0_HSSL1SET1, 0x35000000);
+ SDR_WRITE(PESDR0_HSSL2SET1, 0x35000000);
+ SDR_WRITE(PESDR0_HSSL3SET1, 0x35000000);
+ SDR_WRITE(PESDR0_HSSL4SET1, 0x35000000);
+ SDR_WRITE(PESDR0_HSSL5SET1, 0x35000000);
+ SDR_WRITE(PESDR0_HSSL6SET1, 0x35000000);
+ SDR_WRITE(PESDR0_HSSL7SET1, 0x35000000);
+
+ SDR_WRITE(PESDR0_RCSSET,
+ (SDR_READ(PESDR0_RCSSET) & ~(1 << 24 | 1 << 16)) | 1 << 12);
+ break;
+
+ case 1:
+ SDR_WRITE(PESDR1_DLPSET, PTYPE_ROOT_PORT << 20 | LNKW_X4 << 12);
+
+ SDR_WRITE(PESDR1_UTLSET1, 0x21222222);
+ SDR_WRITE(PESDR1_UTLSET2, 0x11000000);
+
+ SDR_WRITE(PESDR1_HSSL0SET1, 0x35000000);
+ SDR_WRITE(PESDR1_HSSL1SET1, 0x35000000);
+ SDR_WRITE(PESDR1_HSSL2SET1, 0x35000000);
+ SDR_WRITE(PESDR1_HSSL3SET1, 0x35000000);
+
+ SDR_WRITE(PESDR1_RCSSET,
+ (SDR_READ(PESDR1_RCSSET) & ~(1 << 24 | 1 << 16)) | 1 << 12);
+ break;
+
+ case 2:
+ SDR_WRITE(PESDR2_DLPSET, PTYPE_ROOT_PORT << 20 | LNKW_X4 << 12);
+
+ SDR_WRITE(PESDR2_UTLSET1, 0x21222222);
+ SDR_WRITE(PESDR2_UTLSET2, 0x11000000);
+
+ SDR_WRITE(PESDR2_HSSL0SET1, 0x35000000);
+ SDR_WRITE(PESDR2_HSSL1SET1, 0x35000000);
+ SDR_WRITE(PESDR2_HSSL2SET1, 0x35000000);
+ SDR_WRITE(PESDR2_HSSL3SET1, 0x35000000);
+
+ SDR_WRITE(PESDR2_RCSSET,
+ (SDR_READ(PESDR2_RCSSET) & ~(1 << 24 | 1 << 16)) | 1 << 12);
+ break;
+ }
+
+ mdelay(1000);
+
+ switch (port) {
+ case 0: val = SDR_READ(PESDR0_RCSSTS); break;
+ case 1: val = SDR_READ(PESDR1_RCSSTS); break;
+ case 2: val = SDR_READ(PESDR2_RCSSTS); break;
+ }
+
+ if (!(val & (1 << 20)))
+ printk(KERN_INFO "PCIE%d: PGRST inactive\n", port);
+ else
+ printk(KERN_WARNING "PGRST for PCIE%d failed %08x\n", port, val);
+
+ switch (port) {
+ case 0: printk(KERN_INFO "PCIE0: LOOP %08x\n", SDR_READ(PESDR0_LOOP)); break;
+ case 1: printk(KERN_INFO "PCIE1: LOOP %08x\n", SDR_READ(PESDR1_LOOP)); break;
+ case 2: printk(KERN_INFO "PCIE2: LOOP %08x\n", SDR_READ(PESDR2_LOOP)); break;
+ }
+
+ /*
+ * Map UTL registers at 0xc_1000_0n00
+ */
+ switch (port) {
+ case 0:
+ mtdcr(DCRN_PEGPL_REGBAH(PCIE0), 0x0000000c);
+ mtdcr(DCRN_PEGPL_REGBAL(PCIE0), 0x10000000);
+ mtdcr(DCRN_PEGPL_REGMSK(PCIE0), 0x00007001);
+ mtdcr(DCRN_PEGPL_SPECIAL(PCIE0), 0x68782800);
+ break;
+
+ case 1:
+ mtdcr(DCRN_PEGPL_REGBAH(PCIE1), 0x0000000c);
+ mtdcr(DCRN_PEGPL_REGBAL(PCIE1), 0x10001000);
+ mtdcr(DCRN_PEGPL_REGMSK(PCIE1), 0x00007001);
+ mtdcr(DCRN_PEGPL_SPECIAL(PCIE1), 0x68782800);
+ break;
+
+ case 2:
+ mtdcr(DCRN_PEGPL_REGBAH(PCIE2), 0x0000000c);
+ mtdcr(DCRN_PEGPL_REGBAL(PCIE2), 0x10002000);
+ mtdcr(DCRN_PEGPL_REGMSK(PCIE2), 0x00007001);
+ mtdcr(DCRN_PEGPL_SPECIAL(PCIE2), 0x68782800);
+ }
+
+ utl_base = ioremap64(0xc10000000ull + 0x1000 * port, 0x100);
+
+ /*
+ * Set buffer allocations and then assert VRB and TXE.
+ */
+ out_be32(utl_base + PEUTL_OUTTR, 0x08000000);
+ out_be32(utl_base + PEUTL_INTR, 0x02000000);
+ out_be32(utl_base + PEUTL_OPDBSZ, 0x10000000);
+ out_be32(utl_base + PEUTL_PBBSZ, 0x53000000);
+ out_be32(utl_base + PEUTL_IPHBSZ, 0x08000000);
+ out_be32(utl_base + PEUTL_IPDBSZ, 0x10000000);
+ out_be32(utl_base + PEUTL_RCIRQEN, 0x00f00000);
+ out_be32(utl_base + PEUTL_PCTL, 0x80800066);
+
+ iounmap(utl_base);
+
+ /*
+ * We map PCI Express configuration access into the 512MB regions
+ * PCIE0: 0xc_4000_0000
+ * PCIE1: 0xc_8000_0000
+ * PCIE2: 0xc_c000_0000
+ */
+ switch (port) {
+ case 0:
+ mtdcr(DCRN_PEGPL_CFGBAH(PCIE0), 0x0000000c);
+ mtdcr(DCRN_PEGPL_CFGBAL(PCIE0), 0x40000000);
+ mtdcr(DCRN_PEGPL_CFGMSK(PCIE0), 0xe0000001); /* 512MB region, valid */
+ break;
+
+ case 1:
+ mtdcr(DCRN_PEGPL_CFGBAH(PCIE1), 0x0000000c);
+ mtdcr(DCRN_PEGPL_CFGBAL(PCIE1), 0x80000000);
+ mtdcr(DCRN_PEGPL_CFGMSK(PCIE1), 0xe0000001); /* 512MB region, valid */
+ break;
+
+ case 2:
+ mtdcr(DCRN_PEGPL_CFGBAH(PCIE2), 0x0000000c);
+ mtdcr(DCRN_PEGPL_CFGBAL(PCIE2), 0xc0000000);
+ mtdcr(DCRN_PEGPL_CFGMSK(PCIE2), 0xe0000001); /* 512MB region, valid */
+ break;
+ }
+
+ /*
+ * Check for VC0 active and assert RDY.
+ */
+ switch (port) {
+ case 0:
+ if (!(SDR_READ(PESDR0_RCSSTS) & (1 << 16)))
+ printk(KERN_WARNING "PCIE0: VC0 not active\n");
+ SDR_WRITE(PESDR0_RCSSET, SDR_READ(PESDR0_RCSSET) | 1 << 20);
+ break;
+ case 1:
+ if (!(SDR_READ(PESDR1_RCSSTS) & (1 << 16)))
+ printk(KERN_WARNING "PCIE0: VC0 not active\n");
+ SDR_WRITE(PESDR1_RCSSET, SDR_READ(PESDR1_RCSSET) | 1 << 20);
+ break;
+ case 2:
+ if (!(SDR_READ(PESDR2_RCSSTS) & (1 << 16)))
+ printk(KERN_WARNING "PCIE0: VC0 not active\n");
+ SDR_WRITE(PESDR2_RCSSET, SDR_READ(PESDR2_RCSSET) | 1 << 20);
+ break;
+ }
+
+#if 0
+ /* Dump all config regs */
+ for (i = 0x300; i <= 0x320; ++i)
+ printk("[%04x] 0x%08x\n", i, SDR_READ(i));
+ for (i = 0x340; i <= 0x353; ++i)
+ printk("[%04x] 0x%08x\n", i, SDR_READ(i));
+ for (i = 0x370; i <= 0x383; ++i)
+ printk("[%04x] 0x%08x\n", i, SDR_READ(i));
+ for (i = 0x3a0; i <= 0x3a2; ++i)
+ printk("[%04x] 0x%08x\n", i, SDR_READ(i));
+ for (i = 0x3c0; i <= 0x3c3; ++i)
+ printk("[%04x] 0x%08x\n", i, SDR_READ(i));
+#endif
+
+ mdelay(100);
+
+ return 0;
+}
+
+void ppc440spe_setup_pcie(struct pci_controller *hose, int port)
+{
+ void __iomem *mbase;
+
+ /*
+ * Map 16MB, which is enough for 4 bits of bus #
+ */
+ hose->cfg_data = ioremap64(0xc40000000ull + port * 0x40000000,
+ 1 << 24);
+ hose->ops = &pcie_pci_ops;
+
+ /*
+ * Set bus numbers on our root port
+ */
+ mbase = ioremap64(0xc50000000ull + port * 0x40000000, 4096);
+ out_8(mbase + PCI_PRIMARY_BUS, 0);
+ out_8(mbase + PCI_SECONDARY_BUS, 0);
+
+ /*
+ * Set up outbound translation to hose->mem_space from PLB
+ * addresses at an offset of 0xd_0000_0000. We set the low
+ * bits of the mask to 11 to turn off splitting into 8
+ * subregions and to enable the outbound translation.
+ */
+ out_le32(mbase + PECFG_POM0LAH, 0);
+ out_le32(mbase + PECFG_POM0LAL, hose->mem_space.start);
+
+ switch (port) {
+ case 0:
+ mtdcr(DCRN_PEGPL_OMR1BAH(PCIE0), 0x0000000d);
+ mtdcr(DCRN_PEGPL_OMR1BAL(PCIE0), hose->mem_space.start);
+ mtdcr(DCRN_PEGPL_OMR1MSKH(PCIE0), 0x7fffffff);
+ mtdcr(DCRN_PEGPL_OMR1MSKL(PCIE0),
+ ~(hose->mem_space.end - hose->mem_space.start) | 3);
+ break;
+ case 1:
+ mtdcr(DCRN_PEGPL_OMR1BAH(PCIE1), 0x0000000d);
+ mtdcr(DCRN_PEGPL_OMR1BAL(PCIE1), hose->mem_space.start);
+ mtdcr(DCRN_PEGPL_OMR1MSKH(PCIE1), 0x7fffffff);
+ mtdcr(DCRN_PEGPL_OMR1MSKL(PCIE1),
+ ~(hose->mem_space.end - hose->mem_space.start) | 3);
+
+ break;
+ case 2:
+ mtdcr(DCRN_PEGPL_OMR1BAH(PCIE2), 0x0000000d);
+ mtdcr(DCRN_PEGPL_OMR1BAL(PCIE2), hose->mem_space.start);
+ mtdcr(DCRN_PEGPL_OMR1MSKH(PCIE2), 0x7fffffff);
+ mtdcr(DCRN_PEGPL_OMR1MSKL(PCIE2),
+ ~(hose->mem_space.end - hose->mem_space.start) | 3);
+ break;
+ }
+
+ /* Set up 16GB inbound memory window at 0 */
+ out_le32(mbase + PCI_BASE_ADDRESS_0, 0);
+ out_le32(mbase + PCI_BASE_ADDRESS_1, 0);
+ out_le32(mbase + PECFG_BAR0HMPA, 0x7fffffc);
+ out_le32(mbase + PECFG_BAR0LMPA, 0);
+ out_le32(mbase + PECFG_PIM0LAL, 0);
+ out_le32(mbase + PECFG_PIM0LAH, 0);
+ out_le32(mbase + PECFG_PIMEN, 0x1);
+
+ /* Enable I/O, Mem, and Busmaster cycles */
+ out_le16(mbase + PCI_COMMAND,
+ in_le16(mbase + PCI_COMMAND) |
+ PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
+
+ iounmap(mbase);
+}
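
The SDR_READ()/SDR_WRITE() accessors used throughout this file reach the 440SPe System DCRs indirectly through the address/data pair declared in the header added next (DCRN_SDR0_CFGADDR/DCRN_SDR0_CFGDATA). A minimal sketch of that indirection, assuming the usual mtdcr()/mfdcr() primitives (the real macros live in the 44x headers):

	#define SDR_READ(offset) ({			\
		mtdcr(DCRN_SDR0_CFGADDR, (offset));	\
		mfdcr(DCRN_SDR0_CFGDATA); })

	#define SDR_WRITE(offset, data) do {		\
		mtdcr(DCRN_SDR0_CFGADDR, (offset));	\
		mtdcr(DCRN_SDR0_CFGDATA, (data));	\
	} while (0)

Every PESDRn_* constant in the header is therefore an offset written to the address DCR before the data DCR is read or written.
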
diff --git a/arch/ppc/syslib/ppc440spe_pcie.h b/arch/ppc/syslib/ppc440spe_pcie.h
new file mode 100644
index 00000000000..55b765ad327
--- /dev/null
+++ b/arch/ppc/syslib/ppc440spe_pcie.h
@@ -0,0 +1,149 @@
+/*
+ * Copyright (c) 2005 Cisco Systems. All rights reserved.
+ * Roland Dreier <rolandd@cisco.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __PPC_SYSLIB_PPC440SPE_PCIE_H
+#define __PPC_SYSLIB_PPC440SPE_PCIE_H
+
+#define DCRN_SDR0_CFGADDR 0x00e
+#define DCRN_SDR0_CFGDATA 0x00f
+
+#define DCRN_PCIE0_BASE 0x100
+#define DCRN_PCIE1_BASE 0x120
+#define DCRN_PCIE2_BASE 0x140
+#define PCIE0 DCRN_PCIE0_BASE
+#define PCIE1 DCRN_PCIE1_BASE
+#define PCIE2 DCRN_PCIE2_BASE
+
+#define DCRN_PEGPL_CFGBAH(base) (base + 0x00)
+#define DCRN_PEGPL_CFGBAL(base) (base + 0x01)
+#define DCRN_PEGPL_CFGMSK(base) (base + 0x02)
+#define DCRN_PEGPL_MSGBAH(base) (base + 0x03)
+#define DCRN_PEGPL_MSGBAL(base) (base + 0x04)
+#define DCRN_PEGPL_MSGMSK(base) (base + 0x05)
+#define DCRN_PEGPL_OMR1BAH(base) (base + 0x06)
+#define DCRN_PEGPL_OMR1BAL(base) (base + 0x07)
+#define DCRN_PEGPL_OMR1MSKH(base) (base + 0x08)
+#define DCRN_PEGPL_OMR1MSKL(base) (base + 0x09)
+#define DCRN_PEGPL_REGBAH(base) (base + 0x12)
+#define DCRN_PEGPL_REGBAL(base) (base + 0x13)
+#define DCRN_PEGPL_REGMSK(base) (base + 0x14)
+#define DCRN_PEGPL_SPECIAL(base) (base + 0x15)
+
+/*
+ * System DCRs (SDRs)
+ */
+#define PESDR0_PLLLCT1 0x03a0
+#define PESDR0_PLLLCT2 0x03a1
+#define PESDR0_PLLLCT3 0x03a2
+
+#define PESDR0_UTLSET1 0x0300
+#define PESDR0_UTLSET2 0x0301
+#define PESDR0_DLPSET 0x0302
+#define PESDR0_LOOP 0x0303
+#define PESDR0_RCSSET 0x0304
+#define PESDR0_RCSSTS 0x0305
+#define PESDR0_HSSL0SET1 0x0306
+#define PESDR0_HSSL0SET2 0x0307
+#define PESDR0_HSSL0STS 0x0308
+#define PESDR0_HSSL1SET1 0x0309
+#define PESDR0_HSSL1SET2 0x030a
+#define PESDR0_HSSL1STS 0x030b
+#define PESDR0_HSSL2SET1 0x030c
+#define PESDR0_HSSL2SET2 0x030d
+#define PESDR0_HSSL2STS 0x030e
+#define PESDR0_HSSL3SET1 0x030f
+#define PESDR0_HSSL3SET2 0x0310
+#define PESDR0_HSSL3STS 0x0311
+#define PESDR0_HSSL4SET1 0x0312
+#define PESDR0_HSSL4SET2 0x0313
+#define PESDR0_HSSL4STS 0x0314
+#define PESDR0_HSSL5SET1 0x0315
+#define PESDR0_HSSL5SET2 0x0316
+#define PESDR0_HSSL5STS 0x0317
+#define PESDR0_HSSL6SET1 0x0318
+#define PESDR0_HSSL6SET2 0x0319
+#define PESDR0_HSSL6STS 0x031a
+#define PESDR0_HSSL7SET1 0x031b
+#define PESDR0_HSSL7SET2 0x031c
+#define PESDR0_HSSL7STS 0x031d
+#define PESDR0_HSSCTLSET 0x031e
+#define PESDR0_LANE_ABCD 0x031f
+#define PESDR0_LANE_EFGH 0x0320
+
+#define PESDR1_UTLSET1 0x0340
+#define PESDR1_UTLSET2 0x0341
+#define PESDR1_DLPSET 0x0342
+#define PESDR1_LOOP 0x0343
+#define PESDR1_RCSSET 0x0344
+#define PESDR1_RCSSTS 0x0345
+#define PESDR1_HSSL0SET1 0x0346
+#define PESDR1_HSSL0SET2 0x0347
+#define PESDR1_HSSL0STS 0x0348
+#define PESDR1_HSSL1SET1 0x0349
+#define PESDR1_HSSL1SET2 0x034a
+#define PESDR1_HSSL1STS 0x034b
+#define PESDR1_HSSL2SET1 0x034c
+#define PESDR1_HSSL2SET2 0x034d
+#define PESDR1_HSSL2STS 0x034e
+#define PESDR1_HSSL3SET1 0x034f
+#define PESDR1_HSSL3SET2 0x0350
+#define PESDR1_HSSL3STS 0x0351
+#define PESDR1_HSSCTLSET 0x0352
+#define PESDR1_LANE_ABCD 0x0353
+
+#define PESDR2_UTLSET1 0x0370
+#define PESDR2_UTLSET2 0x0371
+#define PESDR2_DLPSET 0x0372
+#define PESDR2_LOOP 0x0373
+#define PESDR2_RCSSET 0x0374
+#define PESDR2_RCSSTS 0x0375
+#define PESDR2_HSSL0SET1 0x0376
+#define PESDR2_HSSL0SET2 0x0377
+#define PESDR2_HSSL0STS 0x0378
+#define PESDR2_HSSL1SET1 0x0379
+#define PESDR2_HSSL1SET2 0x037a
+#define PESDR2_HSSL1STS 0x037b
+#define PESDR2_HSSL2SET1 0x037c
+#define PESDR2_HSSL2SET2 0x037d
+#define PESDR2_HSSL2STS 0x037e
+#define PESDR2_HSSL3SET1 0x037f
+#define PESDR2_HSSL3SET2 0x0380
+#define PESDR2_HSSL3STS 0x0381
+#define PESDR2_HSSCTLSET 0x0382
+#define PESDR2_LANE_ABCD 0x0383
+
+/*
+ * UTL register offsets
+ */
+#define PEUTL_PBBSZ 0x20
+#define PEUTL_OPDBSZ 0x68
+#define PEUTL_IPHBSZ 0x70
+#define PEUTL_IPDBSZ 0x78
+#define PEUTL_OUTTR 0x90
+#define PEUTL_INTR 0x98
+#define PEUTL_PCTL 0xa0
+#define PEUTL_RCIRQEN 0xb8
+
+/*
+ * Config space register offsets
+ */
+#define PECFG_BAR0LMPA 0x210
+#define PECFG_BAR0HMPA 0x214
+#define PECFG_PIMEN 0x33c
+#define PECFG_PIM0LAL 0x340
+#define PECFG_PIM0LAH 0x344
+#define PECFG_POM0LAL 0x380
+#define PECFG_POM0LAH 0x384
+
+int ppc440spe_init_pcie(void);
+int ppc440spe_init_pcie_rootport(int port);
+void ppc440spe_setup_pcie(struct pci_controller *hose, int port);
+
+#endif /* __PPC_SYSLIB_PPC440SPE_PCIE_H */
diff --git a/arch/ppc/syslib/ppc4xx_pic.c b/arch/ppc/syslib/ppc4xx_pic.c
index 0b435633a0d..aa4165144ec 100644
--- a/arch/ppc/syslib/ppc4xx_pic.c
+++ b/arch/ppc/syslib/ppc4xx_pic.c
@@ -38,6 +38,7 @@ extern unsigned char ppc4xx_uic_ext_irq_cfg[] __attribute__ ((weak));
#define IRQ_MASK_UICx(irq) (1 << (31 - ((irq) & 0x1f)))
#define IRQ_MASK_UIC1(irq) IRQ_MASK_UICx(irq)
#define IRQ_MASK_UIC2(irq) IRQ_MASK_UICx(irq)
+#define IRQ_MASK_UIC3(irq) IRQ_MASK_UICx(irq)
#define UIC_HANDLERS(n) \
static void ppc4xx_uic##n##_enable(unsigned int irq) \
@@ -88,7 +89,38 @@ static void ppc4xx_uic##n##_end(unsigned int irq) \
.end = ppc4xx_uic##n##_end, \
} \
-#if NR_UICS == 3
+#if NR_UICS == 4
+#define ACK_UIC0_PARENT
+#define ACK_UIC1_PARENT mtdcr(DCRN_UIC_SR(UIC0), UIC0_UIC1NC);
+#define ACK_UIC2_PARENT mtdcr(DCRN_UIC_SR(UIC0), UIC0_UIC2NC);
+#define ACK_UIC3_PARENT mtdcr(DCRN_UIC_SR(UIC0), UIC0_UIC3NC);
+UIC_HANDLERS(0);
+UIC_HANDLERS(1);
+UIC_HANDLERS(2);
+UIC_HANDLERS(3);
+
+static int ppc4xx_pic_get_irq(struct pt_regs *regs)
+{
+ u32 uic0 = mfdcr(DCRN_UIC_MSR(UIC0));
+ if (uic0 & UIC0_UIC1NC)
+ return 64 - ffs(mfdcr(DCRN_UIC_MSR(UIC1)));
+ else if (uic0 & UIC0_UIC2NC)
+ return 96 - ffs(mfdcr(DCRN_UIC_MSR(UIC2)));
+ else if (uic0 & UIC0_UIC3NC)
+ return 128 - ffs(mfdcr(DCRN_UIC_MSR(UIC3)));
+ else
+ return uic0 ? 32 - ffs(uic0) : -1;
+}
+
+static void __init ppc4xx_pic_impl_init(void)
+{
+ /* Enable cascade interrupts in UIC0 */
+ ppc_cached_irq_mask[0] |= UIC0_UIC1NC | UIC0_UIC2NC | UIC0_UIC3NC;
+ mtdcr(DCRN_UIC_SR(UIC0), UIC0_UIC1NC | UIC0_UIC2NC | UIC0_UIC3NC);
+ mtdcr(DCRN_UIC_ER(UIC0), ppc_cached_irq_mask[0]);
+}
+
+#elif NR_UICS == 3
#define ACK_UIC0_PARENT mtdcr(DCRN_UIC_SR(UICB), UICB_UIC0NC);
#define ACK_UIC1_PARENT mtdcr(DCRN_UIC_SR(UICB), UICB_UIC1NC);
#define ACK_UIC2_PARENT mtdcr(DCRN_UIC_SR(UICB), UICB_UIC2NC);
@@ -170,6 +202,9 @@ static struct ppc4xx_uic_impl {
{ .decl = DECLARE_UIC(1), .base = UIC1 },
#if NR_UICS > 2
{ .decl = DECLARE_UIC(2), .base = UIC2 },
+#if NR_UICS > 3
+ { .decl = DECLARE_UIC(3), .base = UIC3 },
+#endif
#endif
#endif
};
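
The NR_UICS == 4 cascade decode added above relies on the UIC numbering its sources from the most significant bit down (source 0 = MSB, as the IRQ_MASK_UICx() macro shows), while ffs() counts 1..32 from the least significant bit. A worked example for the UIC1 branch:

	/*
	 * UIC1 source n sets bit (31 - n) of its MSR (LSB = bit 0), so
	 * ffs(msr) == 32 - n and 64 - ffs(msr) == 32 + n: UIC1 covers
	 * Linux IRQs 32..63, UIC2 64..95 and UIC3 96..127.  When several
	 * sources are pending, the lowest set bit (the highest-numbered
	 * source) is the one returned.
	 */

This is consistent with the IRQ numbers used by yucca_map_irq() earlier in this patch, where 96..107 are UIC3 sources.
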
diff --git a/arch/ppc/syslib/ppc85xx_rio.c b/arch/ppc/syslib/ppc85xx_rio.c
new file mode 100644
index 00000000000..297f3b54917
--- /dev/null
+++ b/arch/ppc/syslib/ppc85xx_rio.c
@@ -0,0 +1,938 @@
+/*
+ * MPC85xx RapidIO support
+ *
+ * Copyright 2005 MontaVista Software, Inc.
+ * Matt Porter <mporter@kernel.crashing.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/config.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/rio.h>
+#include <linux/rio_drv.h>
+
+#include <asm/io.h>
+
+#define RIO_REGS_BASE (CCSRBAR + 0xc0000)
+#define RIO_ATMU_REGS_OFFSET 0x10c00
+#define RIO_MSG_REGS_OFFSET 0x11000
+#define RIO_MAINT_WIN_SIZE 0x400000
+#define RIO_DBELL_WIN_SIZE 0x1000
+
+#define RIO_MSG_OMR_MUI 0x00000002
+#define RIO_MSG_OSR_TE 0x00000080
+#define RIO_MSG_OSR_QOI 0x00000020
+#define RIO_MSG_OSR_QFI 0x00000010
+#define RIO_MSG_OSR_MUB 0x00000004
+#define RIO_MSG_OSR_EOMI 0x00000002
+#define RIO_MSG_OSR_QEI 0x00000001
+
+#define RIO_MSG_IMR_MI 0x00000002
+#define RIO_MSG_ISR_TE 0x00000080
+#define RIO_MSG_ISR_QFI 0x00000010
+#define RIO_MSG_ISR_DIQI 0x00000001
+
+#define RIO_MSG_DESC_SIZE 32
+#define RIO_MSG_BUFFER_SIZE 4096
+#define RIO_MIN_TX_RING_SIZE 2
+#define RIO_MAX_TX_RING_SIZE 2048
+#define RIO_MIN_RX_RING_SIZE 2
+#define RIO_MAX_RX_RING_SIZE 2048
+
+#define DOORBELL_DMR_DI 0x00000002
+#define DOORBELL_DSR_TE 0x00000080
+#define DOORBELL_DSR_QFI 0x00000010
+#define DOORBELL_DSR_DIQI 0x00000001
+#define DOORBELL_TID_OFFSET 0x03
+#define DOORBELL_SID_OFFSET 0x05
+#define DOORBELL_INFO_OFFSET 0x06
+
+#define DOORBELL_MESSAGE_SIZE 0x08
+#define DBELL_SID(x) (*(u8 *)(x + DOORBELL_SID_OFFSET))
+#define DBELL_TID(x) (*(u8 *)(x + DOORBELL_TID_OFFSET))
+#define DBELL_INF(x) (*(u16 *)(x + DOORBELL_INFO_OFFSET))
+
+#define is_power_of_2(x) (((x) & ((x) - 1)) == 0)
+
+struct rio_atmu_regs {
+ u32 rowtar;
+ u32 pad1;
+ u32 rowbar;
+ u32 pad2;
+ u32 rowar;
+ u32 pad3[3];
+};
+
+struct rio_msg_regs {
+ u32 omr;
+ u32 osr;
+ u32 pad1;
+ u32 odqdpar;
+ u32 pad2;
+ u32 osar;
+ u32 odpr;
+ u32 odatr;
+ u32 odcr;
+ u32 pad3;
+ u32 odqepar;
+ u32 pad4[13];
+ u32 imr;
+ u32 isr;
+ u32 pad5;
+ u32 ifqdpar;
+ u32 pad6;
+ u32 ifqepar;
+ u32 pad7[250];
+ u32 dmr;
+ u32 dsr;
+ u32 pad8;
+ u32 dqdpar;
+ u32 pad9;
+ u32 dqepar;
+ u32 pad10[26];
+ u32 pwmr;
+ u32 pwsr;
+ u32 pad11;
+ u32 pwqbar;
+};
+
+struct rio_tx_desc {
+ u32 res1;
+ u32 saddr;
+ u32 dport;
+ u32 dattr;
+ u32 res2;
+ u32 res3;
+ u32 dwcnt;
+ u32 res4;
+};
+
+static u32 regs_win;
+static struct rio_atmu_regs *atmu_regs;
+static struct rio_atmu_regs *maint_atmu_regs;
+static struct rio_atmu_regs *dbell_atmu_regs;
+static u32 dbell_win;
+static u32 maint_win;
+static struct rio_msg_regs *msg_regs;
+
+static struct rio_dbell_ring {
+ void *virt;
+ dma_addr_t phys;
+} dbell_ring;
+
+static struct rio_msg_tx_ring {
+ void *virt;
+ dma_addr_t phys;
+ void *virt_buffer[RIO_MAX_TX_RING_SIZE];
+ dma_addr_t phys_buffer[RIO_MAX_TX_RING_SIZE];
+ int tx_slot;
+ int size;
+ void *dev_id;
+} msg_tx_ring;
+
+static struct rio_msg_rx_ring {
+ void *virt;
+ dma_addr_t phys;
+ void *virt_buffer[RIO_MAX_RX_RING_SIZE];
+ int rx_slot;
+ int size;
+ void *dev_id;
+} msg_rx_ring;
+
+/**
+ * mpc85xx_rio_doorbell_send - Send a MPC85xx doorbell message
+ * @index: ID of RapidIO interface
+ * @destid: Destination ID of target device
+ * @data: 16-bit info field of RapidIO doorbell message
+ *
+ * Sends a MPC85xx doorbell message. Returns %0 on success or
+ * %-EINVAL on failure.
+ */
+static int mpc85xx_rio_doorbell_send(int index, u16 destid, u16 data)
+{
+ pr_debug("mpc85xx_doorbell_send: index %d destid %4.4x data %4.4x\n",
+ index, destid, data);
+ out_be32((void *)&dbell_atmu_regs->rowtar, destid << 22);
+ out_be16((void *)(dbell_win), data);
+
+ return 0;
+}
+
+/**
+ * mpc85xx_local_config_read - Generate a MPC85xx local config space read
+ * @index: ID of RapidIO interface
+ * @offset: Offset into configuration space
+ * @len: Length (in bytes) of the maintenance transaction
+ * @data: Value to be read into
+ *
+ * Generates a MPC85xx local configuration space read. Returns %0 on
+ * success or %-EINVAL on failure.
+ */
+static int mpc85xx_local_config_read(int index, u32 offset, int len, u32 * data)
+{
+ pr_debug("mpc85xx_local_config_read: index %d offset %8.8x\n", index,
+ offset);
+ *data = in_be32((void *)(regs_win + offset));
+
+ return 0;
+}
+
+/**
+ * mpc85xx_local_config_write - Generate a MPC85xx local config space write
+ * @index: ID of RapidIO interface
+ * @offset: Offset into configuration space
+ * @len: Length (in bytes) of the maintenance transaction
+ * @data: Value to be written
+ *
+ * Generates a MPC85xx local configuration space write. Returns %0 on
+ * success or %-EINVAL on failure.
+ */
+static int mpc85xx_local_config_write(int index, u32 offset, int len, u32 data)
+{
+ pr_debug
+ ("mpc85xx_local_config_write: index %d offset %8.8x data %8.8x\n",
+ index, offset, data);
+ out_be32((void *)(regs_win + offset), data);
+
+ return 0;
+}
+
+/**
+ * mpc85xx_rio_config_read - Generate a MPC85xx read maintenance transaction
+ * @index: ID of RapdiIO interface
+ * @destid: Destination ID of transaction
+ * @hopcount: Number of hops to target device
+ * @offset: Offset into configuration space
+ * @len: Length (in bytes) of the maintenance transaction
+ * @val: Location to be read into
+ *
+ * Generates a MPC85xx read maintenance transaction. Returns %0 on
+ * success or %-EINVAL on failure.
+ */
+static int
+mpc85xx_rio_config_read(int index, u16 destid, u8 hopcount, u32 offset, int len,
+ u32 * val)
+{
+ u8 *data;
+
+ pr_debug
+ ("mpc85xx_rio_config_read: index %d destid %d hopcount %d offset %8.8x len %d\n",
+ index, destid, hopcount, offset, len);
+ out_be32((void *)&maint_atmu_regs->rowtar,
+ (destid << 22) | (hopcount << 12) | ((offset & ~0x3) >> 9));
+
+ data = (u8 *) maint_win + offset;
+ switch (len) {
+ case 1:
+ *val = in_8((u8 *) data);
+ break;
+ case 2:
+ *val = in_be16((u16 *) data);
+ break;
+ default:
+ *val = in_be32((u32 *) data);
+ break;
+ }
+
+ return 0;
+}
+
+/**
+ * mpc85xx_rio_config_write - Generate a MPC85xx write maintenance transaction
+ * @index: ID of RapdiIO interface
+ * @destid: Destination ID of transaction
+ * @hopcount: Number of hops to target device
+ * @offset: Offset into configuration space
+ * @len: Length (in bytes) of the maintenance transaction
+ * @val: Value to be written
+ *
+ * Generates an MPC85xx write maintenance transaction. Returns %0 on
+ * success or %-EINVAL on failure.
+ */
+static int
+mpc85xx_rio_config_write(int index, u16 destid, u8 hopcount, u32 offset,
+ int len, u32 val)
+{
+ u8 *data;
+ pr_debug
+ ("mpc85xx_rio_config_write: index %d destid %d hopcount %d offset %8.8x len %d val %8.8x\n",
+ index, destid, hopcount, offset, len, val);
+ out_be32((void *)&maint_atmu_regs->rowtar,
+ (destid << 22) | (hopcount << 12) | ((offset & ~0x3) >> 9));
+
+ data = (u8 *) maint_win + offset;
+ switch (len) {
+ case 1:
+ out_8((u8 *) data, val);
+ break;
+ case 2:
+ out_be16((u16 *) data, val);
+ break;
+ default:
+ out_be32((u32 *) data, val);
+ break;
+ }
+
+ return 0;
+}
+
+/**
+ * rio_hw_add_outb_message - Add message to the MPC85xx outbound message queue
+ * @mport: Master port with outbound message queue
+ * @rdev: Target of outbound message
+ * @mbox: Outbound mailbox
+ * @buffer: Message to add to outbound queue
+ * @len: Length of message
+ *
+ * Adds the @buffer message to the MPC85xx outbound message queue. Returns
+ * %0 on success or %-EINVAL on failure.
+ */
+int
+rio_hw_add_outb_message(struct rio_mport *mport, struct rio_dev *rdev, int mbox,
+ void *buffer, size_t len)
+{
+ u32 omr;
+ struct rio_tx_desc *desc =
+ (struct rio_tx_desc *)msg_tx_ring.virt + msg_tx_ring.tx_slot;
+ int ret = 0;
+
+ pr_debug
+ ("RIO: rio_hw_add_outb_message(): destid %4.4x mbox %d buffer %8.8x len %8.8x\n",
+ rdev->destid, mbox, (int)buffer, len);
+
+ if ((len < 8) || (len > RIO_MAX_MSG_SIZE)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Copy and clear rest of buffer */
+ memcpy(msg_tx_ring.virt_buffer[msg_tx_ring.tx_slot], buffer, len);
+ if (len < (RIO_MAX_MSG_SIZE - 4))
+ memset((void *)((u32) msg_tx_ring.
+ virt_buffer[msg_tx_ring.tx_slot] + len), 0,
+ RIO_MAX_MSG_SIZE - len);
+
+ /* Set mbox field for message */
+ desc->dport = mbox & 0x3;
+
+ /* Enable EOMI interrupt, set priority, and set destid */
+ desc->dattr = 0x28000000 | (rdev->destid << 2);
+
+ /* Set transfer size aligned to next power of 2 (in double words) */
+ desc->dwcnt = is_power_of_2(len) ? len : 1 << get_bitmask_order(len);
+
+ /* Set snooping and source buffer address */
+ desc->saddr = 0x00000004 | msg_tx_ring.phys_buffer[msg_tx_ring.tx_slot];
+
+ /* Increment enqueue pointer */
+ omr = in_be32((void *)&msg_regs->omr);
+ out_be32((void *)&msg_regs->omr, omr | RIO_MSG_OMR_MUI);
+
+ /* Go to next descriptor */
+ if (++msg_tx_ring.tx_slot == msg_tx_ring.size)
+ msg_tx_ring.tx_slot = 0;
+
+ out:
+ return ret;
+}
+
+EXPORT_SYMBOL_GPL(rio_hw_add_outb_message);
+
+/**
+ * mpc85xx_rio_tx_handler - MPC85xx outbound message interrupt handler
+ * @irq: Linux interrupt number
+ * @dev_instance: Pointer to interrupt-specific data
+ * @regs: Register context
+ *
+ * Handles outbound message interrupts. Executes a registered outbound
+ * mailbox event handler and acks the interrupt occurrence.
+ */
+static irqreturn_t
+mpc85xx_rio_tx_handler(int irq, void *dev_instance, struct pt_regs *regs)
+{
+ int osr;
+ struct rio_mport *port = (struct rio_mport *)dev_instance;
+
+ osr = in_be32((void *)&msg_regs->osr);
+
+ if (osr & RIO_MSG_OSR_TE) {
+ pr_info("RIO: outbound message transmission error\n");
+ out_be32((void *)&msg_regs->osr, RIO_MSG_OSR_TE);
+ goto out;
+ }
+
+ if (osr & RIO_MSG_OSR_QOI) {
+ pr_info("RIO: outbound message queue overflow\n");
+ out_be32((void *)&msg_regs->osr, RIO_MSG_OSR_QOI);
+ goto out;
+ }
+
+ if (osr & RIO_MSG_OSR_EOMI) {
+ u32 dqp = in_be32((void *)&msg_regs->odqdpar);
+ int slot = (dqp - msg_tx_ring.phys) >> 5;
+ port->outb_msg[0].mcback(port, msg_tx_ring.dev_id, -1, slot);
+
+ /* Ack the end-of-message interrupt */
+ out_be32((void *)&msg_regs->osr, RIO_MSG_OSR_EOMI);
+ }
+
+ out:
+ return IRQ_HANDLED;
+}
+
+/**
+ * rio_open_outb_mbox - Initialize MPC85xx outbound mailbox
+ * @mport: Master port implementing the outbound message unit
+ * @dev_id: Device specific pointer to pass on event
+ * @mbox: Mailbox to open
+ * @entries: Number of entries in the outbound mailbox ring
+ *
+ * Initializes the buffer ring, requests the outbound message interrupt,
+ * and enables the outbound message unit. Returns %0 on success and
+ * %-EINVAL or %-ENOMEM on failure.
+ */
+int rio_open_outb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries)
+{
+ int i, j, rc = 0;
+
+ if ((entries < RIO_MIN_TX_RING_SIZE) ||
+ (entries > RIO_MAX_TX_RING_SIZE) || (!is_power_of_2(entries))) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ /* Initialize shadow copy ring */
+ msg_tx_ring.dev_id = dev_id;
+ msg_tx_ring.size = entries;
+
+ for (i = 0; i < msg_tx_ring.size; i++) {
+ if (!
+ (msg_tx_ring.virt_buffer[i] =
+ dma_alloc_coherent(NULL, RIO_MSG_BUFFER_SIZE,
+ &msg_tx_ring.phys_buffer[i],
+ GFP_KERNEL))) {
+ rc = -ENOMEM;
+ for (j = 0; j < msg_tx_ring.size; j++)
+ if (msg_tx_ring.virt_buffer[j])
+ dma_free_coherent(NULL,
+ RIO_MSG_BUFFER_SIZE,
+ msg_tx_ring.
+ virt_buffer[j],
+ msg_tx_ring.
+ phys_buffer[j]);
+ goto out;
+ }
+ }
+
+ /* Initialize outbound message descriptor ring */
+ if (!(msg_tx_ring.virt = dma_alloc_coherent(NULL,
+ msg_tx_ring.size *
+ RIO_MSG_DESC_SIZE,
+ &msg_tx_ring.phys,
+ GFP_KERNEL))) {
+ rc = -ENOMEM;
+ goto out_dma;
+ }
+ memset(msg_tx_ring.virt, 0, msg_tx_ring.size * RIO_MSG_DESC_SIZE);
+ msg_tx_ring.tx_slot = 0;
+
+ /* Point dequeue/enqueue pointers at first entry in ring */
+ out_be32((void *)&msg_regs->odqdpar, msg_tx_ring.phys);
+ out_be32((void *)&msg_regs->odqepar, msg_tx_ring.phys);
+
+ /* Configure for snooping */
+ out_be32((void *)&msg_regs->osar, 0x00000004);
+
+ /* Clear interrupt status */
+ out_be32((void *)&msg_regs->osr, 0x000000b3);
+
+ /* Hook up outbound message handler */
+ if ((rc =
+ request_irq(MPC85xx_IRQ_RIO_TX, mpc85xx_rio_tx_handler, 0,
+ "msg_tx", (void *)mport)) < 0)
+ goto out_irq;
+
+ /*
+ * Configure outbound message unit
+ * Snooping
+ * Interrupts (all enabled, except QEIE)
+ * Chaining mode
+ * Disable
+ */
+ out_be32((void *)&msg_regs->omr, 0x00100220);
+
+ /* Set number of entries */
+ out_be32((void *)&msg_regs->omr,
+ in_be32((void *)&msg_regs->omr) |
+ ((get_bitmask_order(entries) - 2) << 12));
+
+ /* Now enable the unit */
+ out_be32((void *)&msg_regs->omr, in_be32((void *)&msg_regs->omr) | 0x1);
+
+ out:
+ return rc;
+
+ out_irq:
+ dma_free_coherent(NULL, msg_tx_ring.size * RIO_MSG_DESC_SIZE,
+ msg_tx_ring.virt, msg_tx_ring.phys);
+
+ out_dma:
+ for (i = 0; i < msg_tx_ring.size; i++)
+ dma_free_coherent(NULL, RIO_MSG_BUFFER_SIZE,
+ msg_tx_ring.virt_buffer[i],
+ msg_tx_ring.phys_buffer[i]);
+
+ return rc;
+}
+
+/**
+ * rio_close_outb_mbox - Shut down MPC85xx outbound mailbox
+ * @mport: Master port implementing the outbound message unit
+ * @mbox: Mailbox to close
+ *
+ * Disables the outbound message unit, frees all buffers, and
+ * frees the outbound message interrupt.
+ */
+void rio_close_outb_mbox(struct rio_mport *mport, int mbox)
+{
+ /* Disable outbound message unit */
+ out_be32((void *)&msg_regs->omr, 0);
+
+ /* Free ring */
+ dma_free_coherent(NULL, msg_tx_ring.size * RIO_MSG_DESC_SIZE,
+ msg_tx_ring.virt, msg_tx_ring.phys);
+
+ /* Free interrupt */
+ free_irq(MPC85xx_IRQ_RIO_TX, (void *)mport);
+}
+
+/**
+ * mpc85xx_rio_rx_handler - MPC85xx inbound message interrupt handler
+ * @irq: Linux interrupt number
+ * @dev_instance: Pointer to interrupt-specific data
+ * @regs: Register context
+ *
+ * Handles inbound message interrupts. Executes a registered inbound
+ * mailbox event handler and acks the interrupt occurrence.
+ */
+static irqreturn_t
+mpc85xx_rio_rx_handler(int irq, void *dev_instance, struct pt_regs *regs)
+{
+ int isr;
+ struct rio_mport *port = (struct rio_mport *)dev_instance;
+
+ isr = in_be32((void *)&msg_regs->isr);
+
+ if (isr & RIO_MSG_ISR_TE) {
+ pr_info("RIO: inbound message reception error\n");
+ out_be32((void *)&msg_regs->isr, RIO_MSG_ISR_TE);
+ goto out;
+ }
+
+ /* XXX Need to check/dispatch until queue empty */
+ if (isr & RIO_MSG_ISR_DIQI) {
+ /*
+ * We implement *only* mailbox 0, but can receive messages
+ * for any mailbox/letter to that mailbox destination. So,
+ * make the callback with an unknown/invalid mailbox number
+ * argument.
+ */
+ port->inb_msg[0].mcback(port, msg_rx_ring.dev_id, -1, -1);
+
+ /* Ack the queueing interrupt */
+ out_be32((void *)&msg_regs->isr, RIO_MSG_ISR_DIQI);
+ }
+
+ out:
+ return IRQ_HANDLED;
+}
+
+/**
+ * rio_open_inb_mbox - Initialize MPC85xx inbound mailbox
+ * @mport: Master port implementing the inbound message unit
+ * @dev_id: Device specific pointer to pass on event
+ * @mbox: Mailbox to open
+ * @entries: Number of entries in the inbound mailbox ring
+ *
+ * Initializes the buffer ring, requests the inbound message interrupt,
+ * and enables the inbound message unit. Returns %0 on success
+ * and %-EINVAL or %-ENOMEM on failure.
+ */
+int rio_open_inb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries)
+{
+ int i, rc = 0;
+
+ if ((entries < RIO_MIN_RX_RING_SIZE) ||
+ (entries > RIO_MAX_RX_RING_SIZE) || (!is_power_of_2(entries))) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ /* Initialize client buffer ring */
+ msg_rx_ring.dev_id = dev_id;
+ msg_rx_ring.size = entries;
+ msg_rx_ring.rx_slot = 0;
+ for (i = 0; i < msg_rx_ring.size; i++)
+ msg_rx_ring.virt_buffer[i] = NULL;
+
+ /* Initialize inbound message ring */
+ if (!(msg_rx_ring.virt = dma_alloc_coherent(NULL,
+ msg_rx_ring.size *
+ RIO_MAX_MSG_SIZE,
+ &msg_rx_ring.phys,
+ GFP_KERNEL))) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ /* Point dequeue/enqueue pointers at first entry in ring */
+ out_be32((void *)&msg_regs->ifqdpar, (u32) msg_rx_ring.phys);
+ out_be32((void *)&msg_regs->ifqepar, (u32) msg_rx_ring.phys);
+
+ /* Clear interrupt status */
+ out_be32((void *)&msg_regs->isr, 0x00000091);
+
+ /* Hook up inbound message handler */
+ if ((rc =
+ request_irq(MPC85xx_IRQ_RIO_RX, mpc85xx_rio_rx_handler, 0,
+ "msg_rx", (void *)mport)) < 0) {
+ /* Free the inbound buffer ring allocated above */
+ dma_free_coherent(NULL, msg_rx_ring.size * RIO_MAX_MSG_SIZE,
+ msg_rx_ring.virt, msg_rx_ring.phys);
+ goto out;
+ }
+
+ /*
+ * Configure inbound message unit:
+ * Snooping
+ * 4KB max message size
+ * Unmask all interrupt sources
+ * Disable
+ */
+ out_be32((void *)&msg_regs->imr, 0x001b0060);
+
+ /* Set number of queue entries */
+ out_be32((void *)&msg_regs->imr,
+ in_be32((void *)&msg_regs->imr) |
+ ((get_bitmask_order(entries) - 2) << 12));
+
+ /* Now enable the unit */
+ out_be32((void *)&msg_regs->imr, in_be32((void *)&msg_regs->imr) | 0x1);
+
+ out:
+ return rc;
+}
+
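
As a usage note (not part of this patch): clients reach this function through the generic rio_request_inb_mbox() wrapper in the RapidIO core rather than calling it directly. A minimal sketch follows; the mailbox number, ring size, and names are illustrative, and the callback body is sketched after rio_hw_get_inb_message() below.

	/* Sketch only; needs <linux/rio.h> and <linux/rio_drv.h>. The ring size
	 * must be a power of two within the driver's RX ring limits. */
	static void example_inb_msg_event(struct rio_mport *mport, void *dev_id,
					  int mbox, int slot);

	static int example_setup_rx(struct rio_mport *mport, void *dev_id)
	{
		return rio_request_inb_mbox(mport, dev_id, 0, 128,
					    example_inb_msg_event);
	}
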
+/**
+ * rio_close_inb_mbox - Shut down MPC85xx inbound mailbox
+ * @mport: Master port implementing the inbound message unit
+ * @mbox: Mailbox to close
+ *
+ * Disables the inbound message unit, frees all buffers, and
+ * frees the inbound message interrupt.
+ */
+void rio_close_inb_mbox(struct rio_mport *mport, int mbox)
+{
+ /* Disable inbound message unit */
+ out_be32((void *)&msg_regs->imr, 0);
+
+ /* Free ring */
+ dma_free_coherent(NULL, msg_rx_ring.size * RIO_MAX_MSG_SIZE,
+ msg_rx_ring.virt, msg_rx_ring.phys);
+
+ /* Free interrupt */
+ free_irq(MPC85xx_IRQ_RIO_RX, (void *)mport);
+}
+
+/**
+ * rio_hw_add_inb_buffer - Add buffer to the MPC85xx inbound message queue
+ * @mport: Master port implementing the inbound message unit
+ * @mbox: Inbound mailbox number
+ * @buf: Buffer to add to inbound queue
+ *
+ * Adds the @buf buffer to the MPC85xx inbound message queue. Returns
+ * %0 on success or %-EINVAL on failure.
+ */
+int rio_hw_add_inb_buffer(struct rio_mport *mport, int mbox, void *buf)
+{
+ int rc = 0;
+
+ pr_debug("RIO: rio_hw_add_inb_buffer(), msg_rx_ring.rx_slot %d\n",
+ msg_rx_ring.rx_slot);
+
+ if (msg_rx_ring.virt_buffer[msg_rx_ring.rx_slot]) {
+ printk(KERN_ERR
+ "RIO: error adding inbound buffer %d, buffer exists\n",
+ msg_rx_ring.rx_slot);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ msg_rx_ring.virt_buffer[msg_rx_ring.rx_slot] = buf;
+ if (++msg_rx_ring.rx_slot == msg_rx_ring.size)
+ msg_rx_ring.rx_slot = 0;
+
+ out:
+ return rc;
+}
+
+EXPORT_SYMBOL_GPL(rio_hw_add_inb_buffer);
+
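
For illustration only: a client normally pre-fills the ring through the rio_add_inb_buffer() wrapper, and each buffer should be at least RIO_MAX_MSG_SIZE bytes because the fetch path below copies that much. The buffer count here is an assumption.

	/* Sketch only; needs <linux/rio_drv.h> and <linux/slab.h> */
	static int example_fill_rx_ring(struct rio_mport *mport)
	{
		void *buf;
		int i;

		for (i = 0; i < 128; i++) {
			buf = kmalloc(RIO_MAX_MSG_SIZE, GFP_KERNEL);
			if (!buf)
				return -ENOMEM;
			if (rio_add_inb_buffer(mport, 0, buf)) {
				kfree(buf);	/* slot already occupied */
				break;
			}
		}
		return 0;
	}
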
+/**
+ * rio_hw_get_inb_message - Fetch inbound message from the MPC85xx message unit
+ * @mport: Master port implementing the inbound message unit
+ * @mbox: Inbound mailbox number
+ *
+ * Gets the next available inbound message from the inbound message queue.
+ * A pointer to the message is returned on success or NULL on failure.
+ */
+void *rio_hw_get_inb_message(struct rio_mport *mport, int mbox)
+{
+ u32 imr;
+ u32 phys_buf, virt_buf;
+ void *buf = NULL;
+ int buf_idx;
+
+ phys_buf = in_be32((void *)&msg_regs->ifqdpar);
+
+ /* If no more messages, then bail out */
+ if (phys_buf == in_be32((void *)&msg_regs->ifqepar))
+ goto out2;
+
+ virt_buf = (u32) msg_rx_ring.virt + (phys_buf - msg_rx_ring.phys);
+ buf_idx = (phys_buf - msg_rx_ring.phys) / RIO_MAX_MSG_SIZE;
+ buf = msg_rx_ring.virt_buffer[buf_idx];
+
+ if (!buf) {
+ printk(KERN_ERR
+ "RIO: inbound message copy failed, no buffers\n");
+ goto out1;
+ }
+
+ /* Copy max message size, caller is expected to allocate that big */
+ memcpy(buf, (void *)virt_buf, RIO_MAX_MSG_SIZE);
+
+ /* Clear the available buffer */
+ msg_rx_ring.virt_buffer[buf_idx] = NULL;
+
+ out1:
+ imr = in_be32((void *)&msg_regs->imr);
+ out_be32((void *)&msg_regs->imr, imr | RIO_MSG_IMR_MI);
+
+ out2:
+ return buf;
+}
+
+EXPORT_SYMBOL_GPL(rio_hw_get_inb_message);
+
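
A minimal sketch of the receive callback referenced earlier, assuming the client recycles each buffer in place; because the interrupt handler above fires once per queueing interrupt, the callback drains the queue until NULL.

	static void example_inb_msg_event(struct rio_mport *mport, void *dev_id,
					  int mbox, int slot)
	{
		void *msg;

		/* mbox and slot arrive as -1 from mpc85xx_rio_rx_handler() */
		while ((msg = rio_get_inb_message(mport, 0)) != NULL) {
			/* ... process the message ... */
			rio_add_inb_buffer(mport, 0, msg);	/* recycle */
		}
	}
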
+/**
+ * mpc85xx_rio_dbell_handler - MPC85xx doorbell interrupt handler
+ * @irq: Linux interrupt number
+ * @dev_instance: Pointer to interrupt-specific data
+ * @regs: Register context
+ *
+ * Handles doorbell interrupts. Parses a list of registered
+ * doorbell event handlers and executes a matching event handler.
+ */
+static irqreturn_t
+mpc85xx_rio_dbell_handler(int irq, void *dev_instance, struct pt_regs *regs)
+{
+ int dsr;
+ struct rio_mport *port = (struct rio_mport *)dev_instance;
+
+ dsr = in_be32((void *)&msg_regs->dsr);
+
+ if (dsr & DOORBELL_DSR_TE) {
+ pr_info("RIO: doorbell reception error\n");
+ out_be32((void *)&msg_regs->dsr, DOORBELL_DSR_TE);
+ goto out;
+ }
+
+ if (dsr & DOORBELL_DSR_QFI) {
+ pr_info("RIO: doorbell queue full\n");
+ out_be32((void *)&msg_regs->dsr, DOORBELL_DSR_QFI);
+ goto out;
+ }
+
+ /* XXX Need to check/dispatch until queue empty */
+ if (dsr & DOORBELL_DSR_DIQI) {
+ u32 dmsg =
+ (u32) dbell_ring.virt +
+ (in_be32((void *)&msg_regs->dqdpar) & 0xfff);
+ u32 dmr;
+ struct rio_dbell *dbell;
+ int found = 0;
+
+ pr_debug
+ ("RIO: processing doorbell, sid %2.2x tid %2.2x info %4.4x\n",
+ DBELL_SID(dmsg), DBELL_TID(dmsg), DBELL_INF(dmsg));
+
+ list_for_each_entry(dbell, &port->dbells, node) {
+ if ((dbell->res->start <= DBELL_INF(dmsg)) &&
+ (dbell->res->end >= DBELL_INF(dmsg))) {
+ found = 1;
+ break;
+ }
+ }
+ if (found) {
+ dbell->dinb(port, dbell->dev_id, DBELL_SID(dmsg), DBELL_TID(dmsg),
+ DBELL_INF(dmsg));
+ } else {
+ pr_debug
+ ("RIO: spurious doorbell, sid %2.2x tid %2.2x info %4.4x\n",
+ DBELL_SID(dmsg), DBELL_TID(dmsg), DBELL_INF(dmsg));
+ }
+ dmr = in_be32((void *)&msg_regs->dmr);
+ out_be32((void *)&msg_regs->dmr, dmr | DOORBELL_DMR_DI);
+ out_be32((void *)&msg_regs->dsr, DOORBELL_DSR_DIQI);
+ }
+
+ out:
+ return IRQ_HANDLED;
+}
+
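
For context, a hedged sketch of how a client hooks into this dispatch path, assuming the rio_request_inb_dbell() helper from the RapidIO core; the info range 0x10-0x1f and the names are illustrative.

	static void example_dbell_event(struct rio_mport *mport, void *dev_id,
					u16 src, u16 dst, u16 info)
	{
		/* info was matched against dbell->res->start/end above */
	}

	static int example_setup_dbell(struct rio_mport *mport, void *dev_id)
	{
		return rio_request_inb_dbell(mport, dev_id, 0x10, 0x1f,
					     example_dbell_event);
	}
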
+/**
+ * mpc85xx_rio_doorbell_init - MPC85xx doorbell interface init
+ * @mport: Master port implementing the inbound doorbell unit
+ *
+ * Initializes doorbell unit hardware and inbound DMA buffer
+ * ring. Called from mpc85xx_rio_setup(). Returns %0 on success
+ * or %-ENOMEM on failure.
+ */
+static int mpc85xx_rio_doorbell_init(struct rio_mport *mport)
+{
+ int rc = 0;
+
+ /* Map outbound doorbell window immediately after maintenance window */
+ if (!(dbell_win =
+ (u32) ioremap(mport->iores.start + RIO_MAINT_WIN_SIZE,
+ RIO_DBELL_WIN_SIZE))) {
+ printk(KERN_ERR
+ "RIO: unable to map outbound doorbell window\n");
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ /* Initialize inbound doorbells */
+ if (!(dbell_ring.virt = dma_alloc_coherent(NULL,
+ 512 * DOORBELL_MESSAGE_SIZE,
+ &dbell_ring.phys,
+ GFP_KERNEL))) {
+ printk(KERN_ERR "RIO: unable allocate inbound doorbell ring\n");
+ rc = -ENOMEM;
+ iounmap((void *)dbell_win);
+ goto out;
+ }
+
+ /* Point dequeue/enqueue pointers at first entry in ring */
+ out_be32((void *)&msg_regs->dqdpar, (u32) dbell_ring.phys);
+ out_be32((void *)&msg_regs->dqepar, (u32) dbell_ring.phys);
+
+ /* Clear interrupt status */
+ out_be32((void *)&msg_regs->dsr, 0x00000091);
+
+ /* Hook up doorbell handler */
+ if ((rc =
+ request_irq(MPC85xx_IRQ_RIO_BELL, mpc85xx_rio_dbell_handler, 0,
+ "dbell_rx", (void *)mport) < 0)) {
+ iounmap((void *)dbell_win);
+ dma_free_coherent(NULL, 512 * DOORBELL_MESSAGE_SIZE,
+ dbell_ring.virt, dbell_ring.phys);
+ printk(KERN_ERR
+ "MPC85xx RIO: unable to request inbound doorbell irq");
+ goto out;
+ }
+
+ /* Configure doorbells for snooping, 512 entries, and enable */
+ out_be32((void *)&msg_regs->dmr, 0x00108161);
+
+ out:
+ return rc;
+}
+
+static char *cmdline = NULL;
+
+static int mpc85xx_rio_get_hdid(int index)
+{
+ /* XXX Need to parse multiple entries in some format */
+ if (!cmdline)
+ return -1;
+
+ return simple_strtol(cmdline, NULL, 0);
+}
+
+static int mpc85xx_rio_get_cmdline(char *s)
+{
+ if (!s)
+ return 0;
+
+ cmdline = s;
+ return 1;
+}
+
+__setup("riohdid=", mpc85xx_rio_get_cmdline);
+
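
Usage note: the host device ID is read from the kernel command line and parsed with simple_strtol(..., 0), so decimal or 0x-prefixed hex both work; if the option is absent the port reports -1 (not a host). For example:

	riohdid=4
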
+/**
+ * mpc85xx_rio_setup - Setup MPC85xx RapidIO interface
+ * @law_start: Starting physical address of RapidIO LAW
+ * @law_size: Size of RapidIO LAW
+ *
+ * Initializes MPC85xx RapidIO hardware interface, configures
+ * master port with system-specific info, and registers the
+ * master port with the RapidIO subsystem.
+ */
+void mpc85xx_rio_setup(int law_start, int law_size)
+{
+ struct rio_ops *ops;
+ struct rio_mport *port;
+
+ ops = kmalloc(sizeof(struct rio_ops), GFP_KERNEL);
+ ops->lcread = mpc85xx_local_config_read;
+ ops->lcwrite = mpc85xx_local_config_write;
+ ops->cread = mpc85xx_rio_config_read;
+ ops->cwrite = mpc85xx_rio_config_write;
+ ops->dsend = mpc85xx_rio_doorbell_send;
+
+ port = kmalloc(sizeof(struct rio_mport), GFP_KERNEL);
+ port->id = 0;
+ port->index = 0;
+ INIT_LIST_HEAD(&port->dbells);
+ port->iores.start = law_start;
+ port->iores.end = law_start + law_size;
+ port->iores.flags = IORESOURCE_MEM;
+
+ rio_init_dbell_res(&port->riores[RIO_DOORBELL_RESOURCE], 0, 0xffff);
+ rio_init_mbox_res(&port->riores[RIO_INB_MBOX_RESOURCE], 0, 0);
+ rio_init_mbox_res(&port->riores[RIO_OUTB_MBOX_RESOURCE], 0, 0);
+ strcpy(port->name, "RIO0 mport");
+
+ port->ops = ops;
+ port->host_deviceid = mpc85xx_rio_get_hdid(port->id);
+
+ rio_register_mport(port);
+
+ regs_win = (u32) ioremap(RIO_REGS_BASE, 0x20000);
+ atmu_regs = (struct rio_atmu_regs *)(regs_win + RIO_ATMU_REGS_OFFSET);
+ maint_atmu_regs = atmu_regs + 1;
+ dbell_atmu_regs = atmu_regs + 2;
+ msg_regs = (struct rio_msg_regs *)(regs_win + RIO_MSG_REGS_OFFSET);
+
+ /* Configure maintenance transaction window */
+ out_be32((void *)&maint_atmu_regs->rowbar, 0x000c0000);
+ out_be32((void *)&maint_atmu_regs->rowar, 0x80077015);
+
+ maint_win = (u32) ioremap(law_start, RIO_MAINT_WIN_SIZE);
+
+ /* Configure outbound doorbell window */
+ out_be32((void *)&dbell_atmu_regs->rowbar, 0x000c0400);
+ out_be32((void *)&dbell_atmu_regs->rowar, 0x8004200b);
+ mpc85xx_rio_doorbell_init(port);
+}
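
A minimal sketch of how a board file would call this, assuming hypothetical LAW constants that must match the local access window the platform programs for RapidIO:

	#include "ppc85xx_rio.h"

	#define BOARD_RIO_LAW_START	0xc0000000	/* assumed board values */
	#define BOARD_RIO_LAW_SIZE	0x10000000

	static void __init example_board_rio_init(void)
	{
		mpc85xx_rio_setup(BOARD_RIO_LAW_START, BOARD_RIO_LAW_SIZE);
	}
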
diff --git a/arch/ppc/syslib/ppc85xx_rio.h b/arch/ppc/syslib/ppc85xx_rio.h
new file mode 100644
index 00000000000..c0827a2c3ee
--- /dev/null
+++ b/arch/ppc/syslib/ppc85xx_rio.h
@@ -0,0 +1,21 @@
+/*
+ * MPC85xx RapidIO definitions
+ *
+ * Copyright 2005 MontaVista Software, Inc.
+ * Matt Porter <mporter@kernel.crashing.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __PPC_SYSLIB_PPC85XX_RIO_H
+#define __PPC_SYSLIB_PPC85XX_RIO_H
+
+#include <linux/config.h>
+#include <linux/init.h>
+
+extern void mpc85xx_rio_setup(int law_start, int law_size);
+
+#endif /* __PPC_SYSLIB_PPC85XX_RIO_H */
diff --git a/arch/ppc/syslib/ppc_sys.c b/arch/ppc/syslib/ppc_sys.c
index 62ee86e8071..603f0119081 100644
--- a/arch/ppc/syslib/ppc_sys.c
+++ b/arch/ppc/syslib/ppc_sys.c
@@ -14,6 +14,7 @@
* option) any later version.
*/
+#include <linux/string.h>
#include <asm/ppc_sys.h>
int (*ppc_sys_device_fixup) (struct platform_device * pdev);
diff --git a/arch/ppc/syslib/prom.c b/arch/ppc/syslib/prom.c
index 278da6ee62e..03b1fc9b950 100644
--- a/arch/ppc/syslib/prom.c
+++ b/arch/ppc/syslib/prom.c
@@ -1165,7 +1165,7 @@ get_property(struct device_node *np, const char *name, int *lenp)
/*
* Add a property to a node
*/
-void
+int
prom_add_property(struct device_node* np, struct property* prop)
{
struct property **next = &np->properties;
@@ -1174,6 +1174,8 @@ prom_add_property(struct device_node* np, struct property* prop)
while (*next)
next = &(*next)->next;
*next = prop;
+
+ return 0;
}
/* I quickly hacked that one, check against spec ! */
@@ -1335,10 +1337,8 @@ release_OF_resource(struct device_node* node, int index)
if (!res)
return -ENODEV;
- if (res->name) {
- kfree(res->name);
- res->name = NULL;
- }
+ kfree(res->name);
+ res->name = NULL;
release_resource(res);
kfree(res);
diff --git a/arch/ppc/xmon/xmon.c b/arch/ppc/xmon/xmon.c
index 66bfaa3211a..2b483b4f160 100644
--- a/arch/ppc/xmon/xmon.c
+++ b/arch/ppc/xmon/xmon.c
@@ -220,8 +220,7 @@ static void get_tb(unsigned *p)
p[1] = lo;
}
-void
-xmon(struct pt_regs *excp)
+int xmon(struct pt_regs *excp)
{
struct pt_regs regs;
int msr, cmd;
@@ -290,6 +289,8 @@ xmon(struct pt_regs *excp)
#endif /* CONFIG_SMP */
set_msr(msr); /* restore interrupt enable */
get_tb(start_tb[smp_processor_id()]);
+
+ return cmd != 'X';
}
irqreturn_t
diff --git a/arch/ppc64/Kconfig b/arch/ppc64/Kconfig
index b987164fca4..29552348e58 100644
--- a/arch/ppc64/Kconfig
+++ b/arch/ppc64/Kconfig
@@ -47,11 +47,16 @@ config ARCH_MAY_HAVE_PC_FDC
bool
default y
+config PPC_STD_MMU
+ bool
+ default y
+
# We optimistically allocate largepages from the VM, so make the limit
# large enough (16MB). This badly named config option is actually
# max order + 1
config FORCE_MAX_ZONEORDER
int
+ default "9" if PPC_64K_PAGES
default "13"
source "init/Kconfig"
@@ -169,6 +174,16 @@ config KEXEC
support. As of this writing the exact hardware interface is
strongly in flux, so no good recommendation can be made.
+source "drivers/cpufreq/Kconfig"
+
+config CPU_FREQ_PMAC64
+ bool "Support for some Apple G5s"
+ depends on CPU_FREQ && PMAC_SMU && PPC64
+ select CPU_FREQ_TABLE
+ help
+ This adds support for frequency switching on Apple iMac G5,
+ and some of the more recent desktop G5 machines as well.
+
config IBMVIO
depends on PPC_PSERIES || PPC_ISERIES
bool
@@ -294,6 +309,15 @@ config NODES_SPAN_OTHER_NODES
def_bool y
depends on NEED_MULTIPLE_NODES
+config PPC_64K_PAGES
+ bool "64k page size"
+ help
+ This option changes the kernel logical page size to 64k. On machines
+ without processor support for 64k pages, the kernel will simulate
+ them by loading each individual 4k page on demand transparently,
+ while on hardware with such support, it will be used to map
+ normal application pages.
+
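
With this option selected, the FORCE_MAX_ZONEORDER change earlier in this patch also drops its default from 13 to 9, so a resulting configuration fragment would look roughly like:

	CONFIG_PPC_64K_PAGES=y
	CONFIG_FORCE_MAX_ZONEORDER=9
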
config SCHED_SMT
bool "SMT (Hyperthreading) scheduler support"
depends on SMP
diff --git a/arch/ppc64/Kconfig.debug b/arch/ppc64/Kconfig.debug
index f16a5030527..b258c9314a1 100644
--- a/arch/ppc64/Kconfig.debug
+++ b/arch/ppc64/Kconfig.debug
@@ -55,10 +55,6 @@ config XMON_DEFAULT
xmon is normally disabled unless booted with 'xmon=on'.
Use 'xmon=off' to disable xmon init during runtime.
-config PPCDBG
- bool "Include PPCDBG realtime debugging"
- depends on DEBUG_KERNEL
-
config IRQSTACKS
bool "Use separate kernel stacks when processing interrupts"
help
diff --git a/arch/ppc64/boot/main.c b/arch/ppc64/boot/main.c
index c1dc876bcca..e0dde24a72c 100644
--- a/arch/ppc64/boot/main.c
+++ b/arch/ppc64/boot/main.c
@@ -203,8 +203,15 @@ void start(unsigned long a1, unsigned long a2, void *promptr, void *sp)
if (elf64ph->p_type == PT_LOAD && elf64ph->p_offset != 0)
break;
}
- vmlinux.size = (unsigned long)elf64ph->p_filesz;
- vmlinux.memsize = (unsigned long)elf64ph->p_memsz;
+ vmlinux.size = (unsigned long)elf64ph->p_filesz +
+ (unsigned long)elf64ph->p_offset;
+ /* We need to claim the memsize plus the file offset since gzip
+ * will expand the header (file offset), then the kernel, then
+ * possible rubbish we don't care about. But the kernel bss must
+ * be claimed (it will be zeroed by the kernel itself)
+ */
+ vmlinux.memsize = (unsigned long)elf64ph->p_memsz +
+ (unsigned long)elf64ph->p_offset;
printf("Allocating 0x%lx bytes for kernel ...\n\r", vmlinux.memsize);
vmlinux.addr = try_claim(vmlinux.memsize);
if (vmlinux.addr == 0) {
diff --git a/arch/ppc64/kernel/asm-offsets.c b/arch/ppc64/kernel/asm-offsets.c
index 504dee836d2..bce9065da6c 100644
--- a/arch/ppc64/kernel/asm-offsets.c
+++ b/arch/ppc64/kernel/asm-offsets.c
@@ -93,6 +93,9 @@ int main(void)
DEFINE(PACASLBCACHE, offsetof(struct paca_struct, slb_cache));
DEFINE(PACASLBCACHEPTR, offsetof(struct paca_struct, slb_cache_ptr));
DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id));
+#ifdef CONFIG_PPC_64K_PAGES
+ DEFINE(PACAPGDIR, offsetof(struct paca_struct, pgdir));
+#endif
#ifdef CONFIG_HUGETLB_PAGE
DEFINE(PACALOWHTLBAREAS, offsetof(struct paca_struct, context.low_htlb_areas));
DEFINE(PACAHIGHHTLBAREAS, offsetof(struct paca_struct, context.high_htlb_areas));
diff --git a/arch/ppc64/kernel/head.S b/arch/ppc64/kernel/head.S
index db1cf397be2..9e8050ea122 100644
--- a/arch/ppc64/kernel/head.S
+++ b/arch/ppc64/kernel/head.S
@@ -195,11 +195,11 @@ exception_marker:
#define EX_R12 24
#define EX_R13 32
#define EX_SRR0 40
-#define EX_R3 40 /* SLB miss saves R3, but not SRR0 */
#define EX_DAR 48
-#define EX_LR 48 /* SLB miss saves LR, but not DAR */
#define EX_DSISR 56
#define EX_CCR 60
+#define EX_R3 64
+#define EX_LR 72
#define EXCEPTION_PROLOG_PSERIES(area, label) \
mfspr r13,SPRN_SPRG3; /* get paca address into r13 */ \
@@ -419,17 +419,22 @@ data_access_slb_pSeries:
mtspr SPRN_SPRG1,r13
RUNLATCH_ON(r13)
mfspr r13,SPRN_SPRG3 /* get paca address into r13 */
+ std r3,PACA_EXSLB+EX_R3(r13)
+ mfspr r3,SPRN_DAR
std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */
+ mfcr r9
+#ifdef __DISABLED__
+ /* Keep that around for when we re-implement dynamic VSIDs */
+ cmpdi r3,0
+ bge slb_miss_user_pseries
+#endif /* __DISABLED__ */
std r10,PACA_EXSLB+EX_R10(r13)
std r11,PACA_EXSLB+EX_R11(r13)
std r12,PACA_EXSLB+EX_R12(r13)
- std r3,PACA_EXSLB+EX_R3(r13)
- mfspr r9,SPRN_SPRG1
- std r9,PACA_EXSLB+EX_R13(r13)
- mfcr r9
+ mfspr r10,SPRN_SPRG1
+ std r10,PACA_EXSLB+EX_R13(r13)
mfspr r12,SPRN_SRR1 /* and SRR1 */
- mfspr r3,SPRN_DAR
- b .do_slb_miss /* Rel. branch works in real mode */
+ b .slb_miss_realmode /* Rel. branch works in real mode */
STD_EXCEPTION_PSERIES(0x400, instruction_access)
@@ -440,17 +445,22 @@ instruction_access_slb_pSeries:
mtspr SPRN_SPRG1,r13
RUNLATCH_ON(r13)
mfspr r13,SPRN_SPRG3 /* get paca address into r13 */
+ std r3,PACA_EXSLB+EX_R3(r13)
+ mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */
std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */
+ mfcr r9
+#ifdef __DISABLED__
+ /* Keep that around for when we re-implement dynamic VSIDs */
+ cmpdi r3,0
+ bge slb_miss_user_pseries
+#endif /* __DISABLED__ */
std r10,PACA_EXSLB+EX_R10(r13)
std r11,PACA_EXSLB+EX_R11(r13)
std r12,PACA_EXSLB+EX_R12(r13)
- std r3,PACA_EXSLB+EX_R3(r13)
- mfspr r9,SPRN_SPRG1
- std r9,PACA_EXSLB+EX_R13(r13)
- mfcr r9
+ mfspr r10,SPRN_SPRG1
+ std r10,PACA_EXSLB+EX_R13(r13)
mfspr r12,SPRN_SRR1 /* and SRR1 */
- mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */
- b .do_slb_miss /* Rel. branch works in real mode */
+ b .slb_miss_realmode /* Rel. branch works in real mode */
STD_EXCEPTION_PSERIES(0x500, hardware_interrupt)
STD_EXCEPTION_PSERIES(0x600, alignment)
@@ -509,6 +519,38 @@ _GLOBAL(do_stab_bolted_pSeries)
EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted)
/*
+ * We have some room here; we use it to put
+ * the pSeries SLB miss user trampoline code so it's reasonably
+ * far away from slb_miss_user_common to avoid problems with rfid
+ *
+ * This is used for when the SLB miss handler has to go virtual,
+ * which doesn't happen at the moment but will once we re-implement
+ * dynamic VSIDs for shared page tables
+ */
+#ifdef __DISABLED__
+slb_miss_user_pseries:
+ std r10,PACA_EXGEN+EX_R10(r13)
+ std r11,PACA_EXGEN+EX_R11(r13)
+ std r12,PACA_EXGEN+EX_R12(r13)
+ mfspr r10,SPRG1
+ ld r11,PACA_EXSLB+EX_R9(r13)
+ ld r12,PACA_EXSLB+EX_R3(r13)
+ std r10,PACA_EXGEN+EX_R13(r13)
+ std r11,PACA_EXGEN+EX_R9(r13)
+ std r12,PACA_EXGEN+EX_R3(r13)
+ clrrdi r12,r13,32
+ mfmsr r10
+ mfspr r11,SRR0 /* save SRR0 */
+ ori r12,r12,slb_miss_user_common@l /* virt addr of handler */
+ ori r10,r10,MSR_IR|MSR_DR|MSR_RI
+ mtspr SRR0,r12
+ mfspr r12,SRR1 /* and SRR1 */
+ mtspr SRR1,r10
+ rfid
+ b . /* prevent spec. execution */
+#endif /* __DISABLED__ */
+
+/*
* Vectors for the FWNMI option. Share common code.
*/
.globl system_reset_fwnmi
@@ -559,22 +601,59 @@ END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
.globl data_access_slb_iSeries
data_access_slb_iSeries:
mtspr SPRN_SPRG1,r13 /* save r13 */
- EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
+ mfspr r13,SPRN_SPRG3 /* get paca address into r13 */
std r3,PACA_EXSLB+EX_R3(r13)
- ld r12,PACALPPACA+LPPACASRR1(r13)
mfspr r3,SPRN_DAR
- b .do_slb_miss
+ std r9,PACA_EXSLB+EX_R9(r13)
+ mfcr r9
+#ifdef __DISABLED__
+ cmpdi r3,0
+ bge slb_miss_user_iseries
+#endif
+ std r10,PACA_EXSLB+EX_R10(r13)
+ std r11,PACA_EXSLB+EX_R11(r13)
+ std r12,PACA_EXSLB+EX_R12(r13)
+ mfspr r10,SPRN_SPRG1
+ std r10,PACA_EXSLB+EX_R13(r13)
+ ld r12,PACALPPACA+LPPACASRR1(r13);
+ b .slb_miss_realmode
STD_EXCEPTION_ISERIES(0x400, instruction_access, PACA_EXGEN)
.globl instruction_access_slb_iSeries
instruction_access_slb_iSeries:
mtspr SPRN_SPRG1,r13 /* save r13 */
- EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB)
+ mfspr r13,SPRN_SPRG3 /* get paca address into r13 */
std r3,PACA_EXSLB+EX_R3(r13)
- ld r12,PACALPPACA+LPPACASRR1(r13)
- ld r3,PACALPPACA+LPPACASRR0(r13)
- b .do_slb_miss
+ ld r3,PACALPPACA+LPPACASRR0(r13) /* get SRR0 value */
+ std r9,PACA_EXSLB+EX_R9(r13)
+ mfcr r9
+#ifdef __DISABLED__
+ cmpdi r3,0
+ bge .slb_miss_user_iseries
+#endif
+ std r10,PACA_EXSLB+EX_R10(r13)
+ std r11,PACA_EXSLB+EX_R11(r13)
+ std r12,PACA_EXSLB+EX_R12(r13)
+ mfspr r10,SPRN_SPRG1
+ std r10,PACA_EXSLB+EX_R13(r13)
+ ld r12,PACALPPACA+LPPACASRR1(r13);
+ b .slb_miss_realmode
+
+#ifdef __DISABLED__
+slb_miss_user_iseries:
+ std r10,PACA_EXGEN+EX_R10(r13)
+ std r11,PACA_EXGEN+EX_R11(r13)
+ std r12,PACA_EXGEN+EX_R12(r13)
+ mfspr r10,SPRG1
+ ld r11,PACA_EXSLB+EX_R9(r13)
+ ld r12,PACA_EXSLB+EX_R3(r13)
+ std r10,PACA_EXGEN+EX_R13(r13)
+ std r11,PACA_EXGEN+EX_R9(r13)
+ std r12,PACA_EXGEN+EX_R3(r13)
+ EXCEPTION_PROLOG_ISERIES_2
+ b slb_miss_user_common
+#endif
MASKABLE_EXCEPTION_ISERIES(0x500, hardware_interrupt)
STD_EXCEPTION_ISERIES(0x600, alignment, PACA_EXGEN)
@@ -809,6 +888,126 @@ instruction_access_common:
li r5,0x400
b .do_hash_page /* Try to handle as hpte fault */
+/*
+ * Here is the common SLB miss user handler. It is used when going to virtual
+ * mode for SLB misses; it is currently not used.
+ */
+#ifdef __DISABLED__
+ .align 7
+ .globl slb_miss_user_common
+slb_miss_user_common:
+ mflr r10
+ std r3,PACA_EXGEN+EX_DAR(r13)
+ stw r9,PACA_EXGEN+EX_CCR(r13)
+ std r10,PACA_EXGEN+EX_LR(r13)
+ std r11,PACA_EXGEN+EX_SRR0(r13)
+ bl .slb_allocate_user
+
+ ld r10,PACA_EXGEN+EX_LR(r13)
+ ld r3,PACA_EXGEN+EX_R3(r13)
+ lwz r9,PACA_EXGEN+EX_CCR(r13)
+ ld r11,PACA_EXGEN+EX_SRR0(r13)
+ mtlr r10
+ beq- slb_miss_fault
+
+ andi. r10,r12,MSR_RI /* check for unrecoverable exception */
+ beq- unrecov_user_slb
+ mfmsr r10
+
+.machine push
+.machine "power4"
+ mtcrf 0x80,r9
+.machine pop
+
+ clrrdi r10,r10,2 /* clear RI before setting SRR0/1 */
+ mtmsrd r10,1
+
+ mtspr SRR0,r11
+ mtspr SRR1,r12
+
+ ld r9,PACA_EXGEN+EX_R9(r13)
+ ld r10,PACA_EXGEN+EX_R10(r13)
+ ld r11,PACA_EXGEN+EX_R11(r13)
+ ld r12,PACA_EXGEN+EX_R12(r13)
+ ld r13,PACA_EXGEN+EX_R13(r13)
+ rfid
+ b .
+
+slb_miss_fault:
+ EXCEPTION_PROLOG_COMMON(0x380, PACA_EXGEN)
+ ld r4,PACA_EXGEN+EX_DAR(r13)
+ li r5,0
+ std r4,_DAR(r1)
+ std r5,_DSISR(r1)
+ b .handle_page_fault
+
+unrecov_user_slb:
+ EXCEPTION_PROLOG_COMMON(0x4200, PACA_EXGEN)
+ DISABLE_INTS
+ bl .save_nvgprs
+1: addi r3,r1,STACK_FRAME_OVERHEAD
+ bl .unrecoverable_exception
+ b 1b
+
+#endif /* __DISABLED__ */
+
+
+/*
+ * r13 points to the PACA, r9 contains the saved CR,
+ * r12 contains the saved SRR1, SRR0 is still ready for return
+ * r3 has the faulting address
+ * r9 - r13 are saved in paca->exslb.
+ * r3 is saved in paca->slb_r3
+ * We assume we aren't going to take any exceptions during this procedure.
+ */
+_GLOBAL(slb_miss_realmode)
+ mflr r10
+
+ stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
+ std r10,PACA_EXSLB+EX_LR(r13) /* save LR */
+
+ bl .slb_allocate_realmode
+
+ /* All done -- return from exception. */
+
+ ld r10,PACA_EXSLB+EX_LR(r13)
+ ld r3,PACA_EXSLB+EX_R3(r13)
+ lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
+#ifdef CONFIG_PPC_ISERIES
+ ld r11,PACALPPACA+LPPACASRR0(r13) /* get SRR0 value */
+#endif /* CONFIG_PPC_ISERIES */
+
+ mtlr r10
+
+ andi. r10,r12,MSR_RI /* check for unrecoverable exception */
+ beq- unrecov_slb
+
+.machine push
+.machine "power4"
+ mtcrf 0x80,r9
+ mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */
+.machine pop
+
+#ifdef CONFIG_PPC_ISERIES
+ mtspr SPRN_SRR0,r11
+ mtspr SPRN_SRR1,r12
+#endif /* CONFIG_PPC_ISERIES */
+ ld r9,PACA_EXSLB+EX_R9(r13)
+ ld r10,PACA_EXSLB+EX_R10(r13)
+ ld r11,PACA_EXSLB+EX_R11(r13)
+ ld r12,PACA_EXSLB+EX_R12(r13)
+ ld r13,PACA_EXSLB+EX_R13(r13)
+ rfid
+ b . /* prevent speculative execution */
+
+unrecov_slb:
+ EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
+ DISABLE_INTS
+ bl .save_nvgprs
+1: addi r3,r1,STACK_FRAME_OVERHEAD
+ bl .unrecoverable_exception
+ b 1b
+
.align 7
.globl hardware_interrupt_common
.globl hardware_interrupt_entry
@@ -1139,62 +1338,6 @@ _GLOBAL(do_stab_bolted)
b . /* prevent speculative execution */
/*
- * r13 points to the PACA, r9 contains the saved CR,
- * r11 and r12 contain the saved SRR0 and SRR1.
- * r3 has the faulting address
- * r9 - r13 are saved in paca->exslb.
- * r3 is saved in paca->slb_r3
- * We assume we aren't going to take any exceptions during this procedure.
- */
-_GLOBAL(do_slb_miss)
- mflr r10
-
- stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
- std r10,PACA_EXSLB+EX_LR(r13) /* save LR */
-
- bl .slb_allocate /* handle it */
-
- /* All done -- return from exception. */
-
- ld r10,PACA_EXSLB+EX_LR(r13)
- ld r3,PACA_EXSLB+EX_R3(r13)
- lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
-#ifdef CONFIG_PPC_ISERIES
- ld r11,PACALPPACA+LPPACASRR0(r13) /* get SRR0 value */
-#endif /* CONFIG_PPC_ISERIES */
-
- mtlr r10
-
- andi. r10,r12,MSR_RI /* check for unrecoverable exception */
- beq- unrecov_slb
-
-.machine push
-.machine "power4"
- mtcrf 0x80,r9
- mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */
-.machine pop
-
-#ifdef CONFIG_PPC_ISERIES
- mtspr SPRN_SRR0,r11
- mtspr SPRN_SRR1,r12
-#endif /* CONFIG_PPC_ISERIES */
- ld r9,PACA_EXSLB+EX_R9(r13)
- ld r10,PACA_EXSLB+EX_R10(r13)
- ld r11,PACA_EXSLB+EX_R11(r13)
- ld r12,PACA_EXSLB+EX_R12(r13)
- ld r13,PACA_EXSLB+EX_R13(r13)
- rfid
- b . /* prevent speculative execution */
-
-unrecov_slb:
- EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
- DISABLE_INTS
- bl .save_nvgprs
-1: addi r3,r1,STACK_FRAME_OVERHEAD
- bl .unrecoverable_exception
- b 1b
-
-/*
* Space for CPU0's segment table.
*
* On iSeries, the hypervisor must fill in at least one entry before
@@ -1569,7 +1712,10 @@ _GLOBAL(__secondary_start)
#endif
/* Initialize the first segment table (or SLB) entry */
ld r3,PACASTABVIRT(r13) /* get addr of segment table */
+BEGIN_FTR_SECTION
bl .stab_initialize
+END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
+ bl .slb_initialize
/* Initialize the kernel stack. Just a repeat for iSeries. */
LOADADDR(r3,current_set)
diff --git a/arch/ppc64/kernel/idle.c b/arch/ppc64/kernel/idle.c
index 8abd2ad9283..8fec2746980 100644
--- a/arch/ppc64/kernel/idle.c
+++ b/arch/ppc64/kernel/idle.c
@@ -28,6 +28,7 @@
#include <asm/time.h>
#include <asm/systemcfg.h>
#include <asm/machdep.h>
+#include <asm/smp.h>
extern void power4_idle(void);
diff --git a/arch/ppc64/kernel/kprobes.c b/arch/ppc64/kernel/kprobes.c
index ed876a5178a..511af54e623 100644
--- a/arch/ppc64/kernel/kprobes.c
+++ b/arch/ppc64/kernel/kprobes.c
@@ -30,19 +30,14 @@
#include <linux/config.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h>
-#include <linux/spinlock.h>
#include <linux/preempt.h>
#include <asm/cacheflush.h>
#include <asm/kdebug.h>
#include <asm/sstep.h>
static DECLARE_MUTEX(kprobe_mutex);
-
-static struct kprobe *current_kprobe;
-static unsigned long kprobe_status, kprobe_saved_msr;
-static struct kprobe *kprobe_prev;
-static unsigned long kprobe_status_prev, kprobe_saved_msr_prev;
-static struct pt_regs jprobe_saved_regs;
+DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
+DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
@@ -108,20 +103,28 @@ static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
regs->nip = (unsigned long)p->ainsn.insn;
}
-static inline void save_previous_kprobe(void)
+static inline void save_previous_kprobe(struct kprobe_ctlblk *kcb)
+{
+ kcb->prev_kprobe.kp = kprobe_running();
+ kcb->prev_kprobe.status = kcb->kprobe_status;
+ kcb->prev_kprobe.saved_msr = kcb->kprobe_saved_msr;
+}
+
+static inline void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
- kprobe_prev = current_kprobe;
- kprobe_status_prev = kprobe_status;
- kprobe_saved_msr_prev = kprobe_saved_msr;
+ __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
+ kcb->kprobe_status = kcb->prev_kprobe.status;
+ kcb->kprobe_saved_msr = kcb->prev_kprobe.saved_msr;
}
-static inline void restore_previous_kprobe(void)
+static inline void set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
+ struct kprobe_ctlblk *kcb)
{
- current_kprobe = kprobe_prev;
- kprobe_status = kprobe_status_prev;
- kprobe_saved_msr = kprobe_saved_msr_prev;
+ __get_cpu_var(current_kprobe) = p;
+ kcb->kprobe_saved_msr = regs->msr;
}
+/* Called with kretprobe_lock held */
void __kprobes arch_prepare_kretprobe(struct kretprobe *rp,
struct pt_regs *regs)
{
@@ -145,19 +148,24 @@ static inline int kprobe_handler(struct pt_regs *regs)
struct kprobe *p;
int ret = 0;
unsigned int *addr = (unsigned int *)regs->nip;
+ struct kprobe_ctlblk *kcb;
+
+ /*
+ * We don't want to be preempted for the entire
+ * duration of kprobe processing
+ */
+ preempt_disable();
+ kcb = get_kprobe_ctlblk();
/* Check we're not actually recursing */
if (kprobe_running()) {
- /* We *are* holding lock here, so this is safe.
- Disarm the probe we just hit, and ignore it. */
p = get_kprobe(addr);
if (p) {
kprobe_opcode_t insn = *p->ainsn.insn;
- if (kprobe_status == KPROBE_HIT_SS &&
+ if (kcb->kprobe_status == KPROBE_HIT_SS &&
is_trap(insn)) {
regs->msr &= ~MSR_SE;
- regs->msr |= kprobe_saved_msr;
- unlock_kprobes();
+ regs->msr |= kcb->kprobe_saved_msr;
goto no_kprobe;
}
/* We have reentered the kprobe_handler(), since
@@ -166,27 +174,24 @@ static inline int kprobe_handler(struct pt_regs *regs)
* just single step on the instruction of the new probe
* without calling any user handlers.
*/
- save_previous_kprobe();
- current_kprobe = p;
- kprobe_saved_msr = regs->msr;
+ save_previous_kprobe(kcb);
+ set_current_kprobe(p, regs, kcb);
+ kcb->kprobe_saved_msr = regs->msr;
p->nmissed++;
prepare_singlestep(p, regs);
- kprobe_status = KPROBE_REENTER;
+ kcb->kprobe_status = KPROBE_REENTER;
return 1;
} else {
- p = current_kprobe;
+ p = __get_cpu_var(current_kprobe);
if (p->break_handler && p->break_handler(p, regs)) {
goto ss_probe;
}
}
- /* If it's not ours, can't be delete race, (we hold lock). */
goto no_kprobe;
}
- lock_kprobes();
p = get_kprobe(addr);
if (!p) {
- unlock_kprobes();
if (*addr != BREAKPOINT_INSTRUCTION) {
/*
* PowerPC has multiple variants of the "trap"
@@ -209,24 +214,19 @@ static inline int kprobe_handler(struct pt_regs *regs)
goto no_kprobe;
}
- kprobe_status = KPROBE_HIT_ACTIVE;
- current_kprobe = p;
- kprobe_saved_msr = regs->msr;
+ kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+ set_current_kprobe(p, regs, kcb);
if (p->pre_handler && p->pre_handler(p, regs))
/* handler has already set things up, so skip ss setup */
return 1;
ss_probe:
prepare_singlestep(p, regs);
- kprobe_status = KPROBE_HIT_SS;
- /*
- * This preempt_disable() matches the preempt_enable_no_resched()
- * in post_kprobe_handler().
- */
- preempt_disable();
+ kcb->kprobe_status = KPROBE_HIT_SS;
return 1;
no_kprobe:
+ preempt_enable_no_resched();
return ret;
}
@@ -251,9 +251,10 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
struct kretprobe_instance *ri = NULL;
struct hlist_head *head;
struct hlist_node *node, *tmp;
- unsigned long orig_ret_address = 0;
+ unsigned long flags, orig_ret_address = 0;
unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline;
+ spin_lock_irqsave(&kretprobe_lock, flags);
head = kretprobe_inst_table_head(current);
/*
@@ -292,12 +293,14 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
BUG_ON(!orig_ret_address || (orig_ret_address == trampoline_address));
regs->nip = orig_ret_address;
- unlock_kprobes();
+ reset_current_kprobe();
+ spin_unlock_irqrestore(&kretprobe_lock, flags);
+ preempt_enable_no_resched();
/*
* By returning a non-zero value, we are telling
- * kprobe_handler() that we have handled unlocking
- * and re-enabling preemption.
+ * kprobe_handler() that we don't want the post_handler
+ * to run (and have re-enabled preemption)
*/
return 1;
}
@@ -323,23 +326,26 @@ static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
static inline int post_kprobe_handler(struct pt_regs *regs)
{
- if (!kprobe_running())
+ struct kprobe *cur = kprobe_running();
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+ if (!cur)
return 0;
- if ((kprobe_status != KPROBE_REENTER) && current_kprobe->post_handler) {
- kprobe_status = KPROBE_HIT_SSDONE;
- current_kprobe->post_handler(current_kprobe, regs, 0);
+ if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
+ kcb->kprobe_status = KPROBE_HIT_SSDONE;
+ cur->post_handler(cur, regs, 0);
}
- resume_execution(current_kprobe, regs);
- regs->msr |= kprobe_saved_msr;
+ resume_execution(cur, regs);
+ regs->msr |= kcb->kprobe_saved_msr;
/*Restore back the original saved kprobes variables and continue. */
- if (kprobe_status == KPROBE_REENTER) {
- restore_previous_kprobe();
+ if (kcb->kprobe_status == KPROBE_REENTER) {
+ restore_previous_kprobe(kcb);
goto out;
}
- unlock_kprobes();
+ reset_current_kprobe();
out:
preempt_enable_no_resched();
@@ -354,19 +360,20 @@ out:
return 1;
}
-/* Interrupts disabled, kprobe_lock held. */
static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
- if (current_kprobe->fault_handler
- && current_kprobe->fault_handler(current_kprobe, regs, trapnr))
+ struct kprobe *cur = kprobe_running();
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+ if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
return 1;
- if (kprobe_status & KPROBE_HIT_SS) {
- resume_execution(current_kprobe, regs);
+ if (kcb->kprobe_status & KPROBE_HIT_SS) {
+ resume_execution(cur, regs);
regs->msr &= ~MSR_SE;
- regs->msr |= kprobe_saved_msr;
+ regs->msr |= kcb->kprobe_saved_msr;
- unlock_kprobes();
+ reset_current_kprobe();
preempt_enable_no_resched();
}
return 0;
@@ -381,11 +388,6 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
struct die_args *args = (struct die_args *)data;
int ret = NOTIFY_DONE;
- /*
- * Interrupts are not disabled here. We need to disable
- * preemption, because kprobe_running() uses smp_processor_id().
- */
- preempt_disable();
switch (val) {
case DIE_BPT:
if (kprobe_handler(args->regs))
@@ -396,22 +398,25 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
ret = NOTIFY_STOP;
break;
case DIE_PAGE_FAULT:
+ /* kprobe_running() needs smp_processor_id() */
+ preempt_disable();
if (kprobe_running() &&
kprobe_fault_handler(args->regs, args->trapnr))
ret = NOTIFY_STOP;
+ preempt_enable();
break;
default:
break;
}
- preempt_enable_no_resched();
return ret;
}
int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
struct jprobe *jp = container_of(p, struct jprobe, kp);
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
- memcpy(&jprobe_saved_regs, regs, sizeof(struct pt_regs));
+ memcpy(&kcb->jprobe_saved_regs, regs, sizeof(struct pt_regs));
/* setup return addr to the jprobe handler routine */
regs->nip = (unsigned long)(((func_descr_t *)jp->entry)->entry);
@@ -431,12 +436,15 @@ void __kprobes jprobe_return_end(void)
int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
/*
* FIXME - we should ideally be validating that we got here 'cos
* of the "trap" in jprobe_return() above, before restoring the
* saved regs...
*/
- memcpy(regs, &jprobe_saved_regs, sizeof(struct pt_regs));
+ memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
+ preempt_enable_no_resched();
return 1;
}
diff --git a/arch/ppc64/kernel/lparcfg.c b/arch/ppc64/kernel/lparcfg.c
index e86155770bb..3e7b2f28ec8 100644
--- a/arch/ppc64/kernel/lparcfg.c
+++ b/arch/ppc64/kernel/lparcfg.c
@@ -599,9 +599,7 @@ int __init lparcfg_init(void)
void __exit lparcfg_cleanup(void)
{
if (proc_ppc64_lparcfg) {
- if (proc_ppc64_lparcfg->data) {
- kfree(proc_ppc64_lparcfg->data);
- }
+ kfree(proc_ppc64_lparcfg->data);
remove_proc_entry("lparcfg", proc_ppc64_lparcfg->parent);
}
}
diff --git a/arch/ppc64/kernel/machine_kexec.c b/arch/ppc64/kernel/machine_kexec.c
index ff8679f260f..07ea03598c0 100644
--- a/arch/ppc64/kernel/machine_kexec.c
+++ b/arch/ppc64/kernel/machine_kexec.c
@@ -24,6 +24,7 @@
#include <asm/mmu.h>
#include <asm/sections.h> /* _end */
#include <asm/prom.h>
+#include <asm/smp.h>
#define HASH_GROUP_SIZE 0x80 /* size of each hash group, asm/mmu.h */
diff --git a/arch/ppc64/kernel/misc.S b/arch/ppc64/kernel/misc.S
index 077507ffbab..914632ec587 100644
--- a/arch/ppc64/kernel/misc.S
+++ b/arch/ppc64/kernel/misc.S
@@ -560,7 +560,7 @@ _GLOBAL(real_readb)
isync
blr
- /*
+/*
* Do an IO access in real mode
*/
_GLOBAL(real_writeb)
@@ -593,6 +593,76 @@ _GLOBAL(real_writeb)
#endif /* defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE) */
/*
+ * SCOM access functions for 970 (FX only for now)
+ *
+ * unsigned long scom970_read(unsigned int address);
+ * void scom970_write(unsigned int address, unsigned long value);
+ *
+ * The address passed in is the 24-bit register address. This code
+ * is 970 specific and will not check the status bits, so you should
+ * know what you are doing.
+ */
+_GLOBAL(scom970_read)
+ /* interrupts off */
+ mfmsr r4
+ ori r0,r4,MSR_EE
+ xori r0,r0,MSR_EE
+ mtmsrd r0,1
+
+ /* rotate the 24-bit SCOM address 8 bits left and mask out its low 8 bits
+ * (including parity). On current CPUs they must be zeroed,
+ * and finally OR in the RW bit
+ */
+ rlwinm r3,r3,8,0,15
+ ori r3,r3,0x8000
+
+ /* do the actual scom read */
+ sync
+ mtspr SPRN_SCOMC,r3
+ isync
+ mfspr r3,SPRN_SCOMD
+ isync
+ mfspr r0,SPRN_SCOMC
+ isync
+
+ /* XXX: fixup result on some buggy 970's (ouch ! we lost a bit, bah
+ * that's the best we can do). Not implemented yet as we don't use
+ * the scom on any of the bogus CPUs yet, but may have to be done
+ * ultimately
+ */
+
+ /* restore interrupts */
+ mtmsrd r4,1
+ blr
+
+
+_GLOBAL(scom970_write)
+ /* interrupts off */
+ mfmsr r5
+ ori r0,r5,MSR_EE
+ xori r0,r0,MSR_EE
+ mtmsrd r0,1
+
+ /* rotate the 24-bit SCOM address 8 bits left and mask out its low 8 bits
+ * (including parity). On current CPUs they must be zeroed.
+ */
+
+ rlwinm r3,r3,8,0,15
+
+ sync
+ mtspr SPRN_SCOMD,r4 /* write data */
+ isync
+ mtspr SPRN_SCOMC,r3 /* write command */
+ isync
+ mfspr r3,SPRN_SCOMC
+ isync
+
+ /* restore interrupts */
+ mtmsrd r5,1
+ blr
+
+
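
A hedged usage sketch of the two helpers; the SCOM address 0x1234 and the bit being set are placeholders, and, as noted above, the status bits are not checked.

	unsigned long val;

	val = scom970_read(0x1234);		/* 24-bit SCOM address */
	scom970_write(0x1234, val | 0x1);	/* read-modify-write */
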
+/*
* Create a kernel thread
* kernel_thread(fn, arg, flags)
*/
diff --git a/arch/ppc64/kernel/pacaData.c b/arch/ppc64/kernel/pacaData.c
index 5e27e5a6a35..3133c72b28e 100644
--- a/arch/ppc64/kernel/pacaData.c
+++ b/arch/ppc64/kernel/pacaData.c
@@ -23,7 +23,7 @@
static union {
struct systemcfg data;
u8 page[PAGE_SIZE];
-} systemcfg_store __page_aligned;
+} systemcfg_store __attribute__((__section__(".data.page.aligned")));
struct systemcfg *systemcfg = &systemcfg_store.data;
EXPORT_SYMBOL(systemcfg);
diff --git a/arch/ppc64/kernel/pci.c b/arch/ppc64/kernel/pci.c
index 3d2106b022a..30247ff7497 100644
--- a/arch/ppc64/kernel/pci.c
+++ b/arch/ppc64/kernel/pci.c
@@ -295,8 +295,8 @@ static void pci_parse_of_addrs(struct device_node *node, struct pci_dev *dev)
}
}
-static struct pci_dev *of_create_pci_dev(struct device_node *node,
- struct pci_bus *bus, int devfn)
+struct pci_dev *of_create_pci_dev(struct device_node *node,
+ struct pci_bus *bus, int devfn)
{
struct pci_dev *dev;
const char *type;
@@ -354,10 +354,9 @@ static struct pci_dev *of_create_pci_dev(struct device_node *node,
return dev;
}
+EXPORT_SYMBOL(of_create_pci_dev);
-static void of_scan_pci_bridge(struct device_node *node, struct pci_dev *dev);
-
-static void __devinit of_scan_bus(struct device_node *node,
+void __devinit of_scan_bus(struct device_node *node,
struct pci_bus *bus)
{
struct device_node *child = NULL;
@@ -381,9 +380,10 @@ static void __devinit of_scan_bus(struct device_node *node,
do_bus_setup(bus);
}
+EXPORT_SYMBOL(of_scan_bus);
-static void __devinit of_scan_pci_bridge(struct device_node *node,
- struct pci_dev *dev)
+void __devinit of_scan_pci_bridge(struct device_node *node,
+ struct pci_dev *dev)
{
struct pci_bus *bus;
u32 *busrange, *ranges;
@@ -464,9 +464,10 @@ static void __devinit of_scan_pci_bridge(struct device_node *node,
else if (mode == PCI_PROBE_NORMAL)
pci_scan_child_bus(bus);
}
+EXPORT_SYMBOL(of_scan_pci_bridge);
#endif /* CONFIG_PPC_MULTIPLATFORM */
-static void __devinit scan_phb(struct pci_controller *hose)
+void __devinit scan_phb(struct pci_controller *hose)
{
struct pci_bus *bus;
struct device_node *node = hose->arch_data;
diff --git a/arch/ppc64/kernel/prom.c b/arch/ppc64/kernel/prom.c
index 97bfceb5353..3402fbee62c 100644
--- a/arch/ppc64/kernel/prom.c
+++ b/arch/ppc64/kernel/prom.c
@@ -31,6 +31,7 @@
#include <linux/initrd.h>
#include <linux/bitops.h>
#include <linux/module.h>
+#include <linux/module.h>
#include <asm/prom.h>
#include <asm/rtas.h>
@@ -46,7 +47,6 @@
#include <asm/pgtable.h>
#include <asm/pci.h>
#include <asm/iommu.h>
-#include <asm/ppcdebug.h>
#include <asm/btext.h>
#include <asm/sections.h>
#include <asm/machdep.h>
@@ -635,10 +635,10 @@ static inline char *find_flat_dt_string(u32 offset)
* used to extract the memory informations at boot before we can
* unflatten the tree
*/
-static int __init scan_flat_dt(int (*it)(unsigned long node,
- const char *uname, int depth,
- void *data),
- void *data)
+int __init of_scan_flat_dt(int (*it)(unsigned long node,
+ const char *uname, int depth,
+ void *data),
+ void *data)
{
unsigned long p = ((unsigned long)initial_boot_params) +
initial_boot_params->off_dt_struct;
@@ -695,8 +695,8 @@ static int __init scan_flat_dt(int (*it)(unsigned long node,
* This function can be used within scan_flattened_dt callback to get
* access to properties
*/
-static void* __init get_flat_dt_prop(unsigned long node, const char *name,
- unsigned long *size)
+void* __init of_get_flat_dt_prop(unsigned long node, const char *name,
+ unsigned long *size)
{
unsigned long p = node;
@@ -996,7 +996,7 @@ void __init unflatten_device_tree(void)
static int __init early_init_dt_scan_cpus(unsigned long node,
const char *uname, int depth, void *data)
{
- char *type = get_flat_dt_prop(node, "device_type", NULL);
+ char *type = of_get_flat_dt_prop(node, "device_type", NULL);
u32 *prop;
unsigned long size;
@@ -1004,17 +1004,6 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
if (type == NULL || strcmp(type, "cpu") != 0)
return 0;
- /* On LPAR, look for the first ibm,pft-size property for the hash table size
- */
- if (systemcfg->platform == PLATFORM_PSERIES_LPAR && ppc64_pft_size == 0) {
- u32 *pft_size;
- pft_size = (u32 *)get_flat_dt_prop(node, "ibm,pft-size", NULL);
- if (pft_size != NULL) {
- /* pft_size[0] is the NUMA CEC cookie */
- ppc64_pft_size = pft_size[1];
- }
- }
-
if (initial_boot_params && initial_boot_params->version >= 2) {
/* version 2 of the kexec param format adds the phys cpuid
* of booted proc.
@@ -1023,8 +1012,9 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
boot_cpuid = 0;
} else {
/* Check if it's the boot-cpu, set it's hw index in paca now */
- if (get_flat_dt_prop(node, "linux,boot-cpu", NULL) != NULL) {
- u32 *prop = get_flat_dt_prop(node, "reg", NULL);
+ if (of_get_flat_dt_prop(node, "linux,boot-cpu", NULL)
+ != NULL) {
+ u32 *prop = of_get_flat_dt_prop(node, "reg", NULL);
set_hard_smp_processor_id(0, prop == NULL ? 0 : *prop);
boot_cpuid_phys = get_hard_smp_processor_id(0);
}
@@ -1032,14 +1022,14 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
#ifdef CONFIG_ALTIVEC
/* Check if we have a VMX and eventually update CPU features */
- prop = (u32 *)get_flat_dt_prop(node, "ibm,vmx", NULL);
+ prop = (u32 *)of_get_flat_dt_prop(node, "ibm,vmx", NULL);
if (prop && (*prop) > 0) {
cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;
}
/* Same goes for Apple's "altivec" property */
- prop = (u32 *)get_flat_dt_prop(node, "altivec", NULL);
+ prop = (u32 *)of_get_flat_dt_prop(node, "altivec", NULL);
if (prop) {
cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;
@@ -1051,7 +1041,7 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
* this by looking at the size of the ibm,ppc-interrupt-server#s
* property
*/
- prop = (u32 *)get_flat_dt_prop(node, "ibm,ppc-interrupt-server#s",
+ prop = (u32 *)of_get_flat_dt_prop(node, "ibm,ppc-interrupt-server#s",
&size);
cur_cpu_spec->cpu_features &= ~CPU_FTR_SMT;
if (prop && ((size / sizeof(u32)) > 1))
@@ -1072,26 +1062,26 @@ static int __init early_init_dt_scan_chosen(unsigned long node,
return 0;
/* get platform type */
- prop = (u32 *)get_flat_dt_prop(node, "linux,platform", NULL);
+ prop = (u32 *)of_get_flat_dt_prop(node, "linux,platform", NULL);
if (prop == NULL)
return 0;
systemcfg->platform = *prop;
/* check if iommu is forced on or off */
- if (get_flat_dt_prop(node, "linux,iommu-off", NULL) != NULL)
+ if (of_get_flat_dt_prop(node, "linux,iommu-off", NULL) != NULL)
iommu_is_off = 1;
- if (get_flat_dt_prop(node, "linux,iommu-force-on", NULL) != NULL)
+ if (of_get_flat_dt_prop(node, "linux,iommu-force-on", NULL) != NULL)
iommu_force_on = 1;
- prop64 = (u64*)get_flat_dt_prop(node, "linux,memory-limit", NULL);
+ prop64 = (u64*)of_get_flat_dt_prop(node, "linux,memory-limit", NULL);
if (prop64)
memory_limit = *prop64;
- prop64 = (u64*)get_flat_dt_prop(node, "linux,tce-alloc-start", NULL);
+ prop64 = (u64*)of_get_flat_dt_prop(node, "linux,tce-alloc-start",NULL);
if (prop64)
tce_alloc_start = *prop64;
- prop64 = (u64*)get_flat_dt_prop(node, "linux,tce-alloc-end", NULL);
+ prop64 = (u64*)of_get_flat_dt_prop(node, "linux,tce-alloc-end", NULL);
if (prop64)
tce_alloc_end = *prop64;
@@ -1102,9 +1092,12 @@ static int __init early_init_dt_scan_chosen(unsigned long node,
{
u64 *basep, *entryp;
- basep = (u64*)get_flat_dt_prop(node, "linux,rtas-base", NULL);
- entryp = (u64*)get_flat_dt_prop(node, "linux,rtas-entry", NULL);
- prop = (u32*)get_flat_dt_prop(node, "linux,rtas-size", NULL);
+ basep = (u64*)of_get_flat_dt_prop(node,
+ "linux,rtas-base", NULL);
+ entryp = (u64*)of_get_flat_dt_prop(node,
+ "linux,rtas-entry", NULL);
+ prop = (u32*)of_get_flat_dt_prop(node,
+ "linux,rtas-size", NULL);
if (basep && entryp && prop) {
rtas.base = *basep;
rtas.entry = *entryp;
@@ -1125,11 +1118,11 @@ static int __init early_init_dt_scan_root(unsigned long node,
if (depth != 0)
return 0;
- prop = (u32 *)get_flat_dt_prop(node, "#size-cells", NULL);
+ prop = (u32 *)of_get_flat_dt_prop(node, "#size-cells", NULL);
dt_root_size_cells = (prop == NULL) ? 1 : *prop;
DBG("dt_root_size_cells = %x\n", dt_root_size_cells);
- prop = (u32 *)get_flat_dt_prop(node, "#address-cells", NULL);
+ prop = (u32 *)of_get_flat_dt_prop(node, "#address-cells", NULL);
dt_root_addr_cells = (prop == NULL) ? 2 : *prop;
DBG("dt_root_addr_cells = %x\n", dt_root_addr_cells);
@@ -1161,7 +1154,7 @@ static unsigned long __init dt_mem_next_cell(int s, cell_t **cellp)
static int __init early_init_dt_scan_memory(unsigned long node,
const char *uname, int depth, void *data)
{
- char *type = get_flat_dt_prop(node, "device_type", NULL);
+ char *type = of_get_flat_dt_prop(node, "device_type", NULL);
cell_t *reg, *endp;
unsigned long l;
@@ -1169,7 +1162,7 @@ static int __init early_init_dt_scan_memory(unsigned long node,
if (type == NULL || strcmp(type, "memory") != 0)
return 0;
- reg = (cell_t *)get_flat_dt_prop(node, "reg", &l);
+ reg = (cell_t *)of_get_flat_dt_prop(node, "reg", &l);
if (reg == NULL)
return 0;
@@ -1225,19 +1218,16 @@ void __init early_init_devtree(void *params)
/* Setup flat device-tree pointer */
initial_boot_params = params;
- /* By default, hash size is not set */
- ppc64_pft_size = 0;
-
/* Retreive various informations from the /chosen node of the
* device-tree, including the platform type, initrd location and
* size, TCE reserve, and more ...
*/
- scan_flat_dt(early_init_dt_scan_chosen, NULL);
+ of_scan_flat_dt(early_init_dt_scan_chosen, NULL);
/* Scan memory nodes and rebuild LMBs */
lmb_init();
- scan_flat_dt(early_init_dt_scan_root, NULL);
- scan_flat_dt(early_init_dt_scan_memory, NULL);
+ of_scan_flat_dt(early_init_dt_scan_root, NULL);
+ of_scan_flat_dt(early_init_dt_scan_memory, NULL);
lmb_enforce_memory_limit(memory_limit);
lmb_analyze();
systemcfg->physicalMemorySize = lmb_phys_mem_size();
@@ -1253,26 +1243,8 @@ void __init early_init_devtree(void *params)
/* Retreive hash table size from flattened tree plus other
* CPU related informations (altivec support, boot CPU ID, ...)
*/
- scan_flat_dt(early_init_dt_scan_cpus, NULL);
+ of_scan_flat_dt(early_init_dt_scan_cpus, NULL);
- /* If hash size wasn't obtained above, we calculate it now based on
- * the total RAM size
- */
- if (ppc64_pft_size == 0) {
- unsigned long rnd_mem_size, pteg_count;
-
- /* round mem_size up to next power of 2 */
- rnd_mem_size = 1UL << __ilog2(systemcfg->physicalMemorySize);
- if (rnd_mem_size < systemcfg->physicalMemorySize)
- rnd_mem_size <<= 1;
-
- /* # pages / 2 */
- pteg_count = max(rnd_mem_size >> (12 + 1), 1UL << 11);
-
- ppc64_pft_size = __ilog2(pteg_count << 7);
- }
-
- DBG("Hash pftSize: %x\n", (int)ppc64_pft_size);
DBG(" <- early_init_devtree()\n");
}
@@ -1894,17 +1866,32 @@ get_property(struct device_node *np, const char *name, int *lenp)
EXPORT_SYMBOL(get_property);
/*
- * Add a property to a node
+ * Add a property to a node.
*/
-void
+int
prom_add_property(struct device_node* np, struct property* prop)
{
- struct property **next = &np->properties;
+ struct property **next;
prop->next = NULL;
- while (*next)
+ write_lock(&devtree_lock);
+ next = &np->properties;
+ while (*next) {
+ if (strcmp(prop->name, (*next)->name) == 0) {
+ /* duplicate ! don't insert it */
+ write_unlock(&devtree_lock);
+ return -1;
+ }
next = &(*next)->next;
+ }
*next = prop;
+ write_unlock(&devtree_lock);
+
+ /* try to add to proc as well if it was initialized */
+ if (np->pde)
+ proc_device_tree_add_prop(np->pde, prop);
+
+ return 0;
}
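
A short sketch of the new calling convention with a hypothetical property; the return value now signals a duplicate instead of silently appending.

	/* np is a struct device_node * held by the caller */
	static u32 example_val = 1;
	static struct property example_prop = {
		.name	= "linux,example",
		.length	= sizeof(example_val),
		.value	= (unsigned char *)&example_val,
	};

	static void example_add_prop(struct device_node *np)
	{
		if (prom_add_property(np, &example_prop) != 0)
			printk(KERN_WARNING "linux,example: already present\n");
	}
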
#if 0
diff --git a/arch/ppc64/kernel/prom_init.c b/arch/ppc64/kernel/prom_init.c
index a4bbca6dbb8..e4c880dab99 100644
--- a/arch/ppc64/kernel/prom_init.c
+++ b/arch/ppc64/kernel/prom_init.c
@@ -44,7 +44,6 @@
#include <asm/pgtable.h>
#include <asm/pci.h>
#include <asm/iommu.h>
-#include <asm/ppcdebug.h>
#include <asm/btext.h>
#include <asm/sections.h>
#include <asm/machdep.h>
@@ -1825,7 +1824,7 @@ static void __init fixup_device_tree(void)
if (prom_getprop(u3, "device-rev", &u3_rev, sizeof(u3_rev))
== PROM_ERROR)
return;
- if (u3_rev != 0x35 && u3_rev != 0x37)
+ if (u3_rev < 0x35 || u3_rev > 0x39)
return;
/* does it need fixup ? */
if (prom_getproplen(i2c, "interrupts") > 0)
diff --git a/arch/ppc64/kernel/rtas_pci.c b/arch/ppc64/kernel/rtas_pci.c
index 3ad15c90fbb..3c3f19192fc 100644
--- a/arch/ppc64/kernel/rtas_pci.c
+++ b/arch/ppc64/kernel/rtas_pci.c
@@ -440,7 +440,6 @@ struct pci_controller * __devinit init_phb_dynamic(struct device_node *dn)
struct device_node *root = of_find_node_by_path("/");
unsigned int root_size_cells = 0;
struct pci_controller *phb;
- struct pci_bus *bus;
int primary;
root_size_cells = prom_n_size_cells(root);
@@ -456,10 +455,7 @@ struct pci_controller * __devinit init_phb_dynamic(struct device_node *dn)
of_node_put(root);
pci_devs_phb_init_dynamic(phb);
- phb->last_busno = 0xff;
- bus = pci_scan_bus(phb->first_busno, phb->ops, phb->arch_data);
- phb->bus = bus;
- phb->last_busno = bus->subordinate;
+ scan_phb(phb);
return phb;
}
diff --git a/arch/ppc64/kernel/scanlog.c b/arch/ppc64/kernel/scanlog.c
index 215bf890030..2edc947f7c4 100644
--- a/arch/ppc64/kernel/scanlog.c
+++ b/arch/ppc64/kernel/scanlog.c
@@ -225,8 +225,7 @@ int __init scanlog_init(void)
void __exit scanlog_cleanup(void)
{
if (proc_ppc64_scan_log_dump) {
- if (proc_ppc64_scan_log_dump->data)
- kfree(proc_ppc64_scan_log_dump->data);
+ kfree(proc_ppc64_scan_log_dump->data);
remove_proc_entry("scan-log-dump", proc_ppc64_scan_log_dump->parent);
}
}
diff --git a/arch/ppc64/kernel/sysfs.c b/arch/ppc64/kernel/sysfs.c
index 6654b350979..e99ec62c2c5 100644
--- a/arch/ppc64/kernel/sysfs.c
+++ b/arch/ppc64/kernel/sysfs.c
@@ -20,6 +20,7 @@
#include <asm/paca.h>
#include <asm/lppaca.h>
#include <asm/machdep.h>
+#include <asm/smp.h>
static DEFINE_PER_CPU(struct cpu, cpu_devices);
diff --git a/arch/ppc64/kernel/udbg.c b/arch/ppc64/kernel/udbg.c
index d49c3613c8e..0d878e72fc4 100644
--- a/arch/ppc64/kernel/udbg.c
+++ b/arch/ppc64/kernel/udbg.c
@@ -10,12 +10,10 @@
*/
#include <stdarg.h>
-#define WANT_PPCDBG_TAB /* Only defined here */
#include <linux/config.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/console.h>
-#include <asm/ppcdebug.h>
#include <asm/processor.h>
void (*udbg_putc)(unsigned char c);
@@ -89,59 +87,6 @@ void udbg_printf(const char *fmt, ...)
va_end(args);
}
-/* PPCDBG stuff */
-
-u64 ppc64_debug_switch;
-
-/* Special print used by PPCDBG() macro */
-void udbg_ppcdbg(unsigned long debug_flags, const char *fmt, ...)
-{
- unsigned long active_debugs = debug_flags & ppc64_debug_switch;
-
- if (active_debugs) {
- va_list ap;
- unsigned char buf[UDBG_BUFSIZE];
- unsigned long i, len = 0;
-
- for (i=0; i < PPCDBG_NUM_FLAGS; i++) {
- if (((1U << i) & active_debugs) &&
- trace_names[i]) {
- len += strlen(trace_names[i]);
- udbg_puts(trace_names[i]);
- break;
- }
- }
-
- snprintf(buf, UDBG_BUFSIZE, " [%s]: ", current->comm);
- len += strlen(buf);
- udbg_puts(buf);
-
- while (len < 18) {
- udbg_puts(" ");
- len++;
- }
-
- va_start(ap, fmt);
- vsnprintf(buf, UDBG_BUFSIZE, fmt, ap);
- udbg_puts(buf);
- va_end(ap);
- }
-}
-
-unsigned long udbg_ifdebug(unsigned long flags)
-{
- return (flags & ppc64_debug_switch);
-}
-
-/*
- * Initialize the PPCDBG state. Called before relocation has been enabled.
- */
-void __init ppcdbg_initialize(void)
-{
- ppc64_debug_switch = PPC_DEBUG_DEFAULT; /* | PPCDBG_BUSWALK | */
- /* PPCDBG_PHBINIT | PPCDBG_MM | PPCDBG_MMINIT | PPCDBG_TCEINIT | PPCDBG_TCE */;
-}
-
/*
* Early boot console based on udbg
*/
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index 98db30481d9..73a09a6ee6c 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -76,9 +76,7 @@ AFLAGS += $(aflags-y)
OBJCOPYFLAGS := -O binary
LDFLAGS_vmlinux := -e start
-head-$(CONFIG_ARCH_S390_31) += arch/$(ARCH)/kernel/head.o
-head-$(CONFIG_ARCH_S390X) += arch/$(ARCH)/kernel/head64.o
-head-y += arch/$(ARCH)/kernel/init_task.o
+head-y := arch/$(ARCH)/kernel/head.o arch/$(ARCH)/kernel/init_task.o
core-y += arch/$(ARCH)/mm/ arch/$(ARCH)/kernel/ arch/$(ARCH)/crypto/ \
arch/$(ARCH)/appldata/
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 8584dd82321..7434c32bc63 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -8,9 +8,7 @@ obj-y := bitmap.o traps.o time.o process.o \
setup.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \
semaphore.o s390_ext.o debug.o profile.o irq.o reipl_diag.o
-extra-$(CONFIG_ARCH_S390_31) += head.o
-extra-$(CONFIG_ARCH_S390X) += head64.o
-extra-y += init_task.o vmlinux.lds
+extra-y += head.o init_task.o vmlinux.lds
obj-$(CONFIG_MODULES) += s390_ksyms.o module.o
obj-$(CONFIG_SMP) += smp.o
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 9b30f4cf32c..27b07730b7b 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -288,7 +288,7 @@ sysc_sigpending:
bo BASED(sysc_restart)
tm __TI_flags+3(%r9),_TIF_SINGLE_STEP
bo BASED(sysc_singlestep)
- b BASED(sysc_leave) # out of here, do NOT recheck
+ b BASED(sysc_work_loop)
#
# _TIF_RESTART_SVC is set, set up registers and restart svc
@@ -645,7 +645,7 @@ io_sigpending:
l %r1,BASED(.Ldo_signal)
basr %r14,%r1 # call do_signal
stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts
- b BASED(io_leave) # out of here, do NOT recheck
+ b BASED(io_work_loop)
/*
* External interrupt handler routine
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 7b9b4a2ba1d..4eb71ffcf48 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -283,7 +283,7 @@ sysc_sigpending:
jo sysc_restart
tm __TI_flags+7(%r9),_TIF_SINGLE_STEP
jo sysc_singlestep
- j sysc_leave # out of here, do NOT recheck
+ j sysc_work_loop
#
# _TIF_RESTART_SVC is set, set up registers and restart svc
@@ -684,7 +684,7 @@ io_sigpending:
slgr %r3,%r3 # clear *oldset
brasl %r14,do_signal # call do_signal
stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts
- j sysc_leave # out of here, do NOT recheck
+ j io_work_loop
/*
* External interrupt handler routine
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index 039354d7234..d31a97c89f6 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -1,11 +1,12 @@
/*
* arch/s390/kernel/head.S
*
- * S390 version
- * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
- * Author(s): Hartmut Penner (hp@de.ibm.com),
- * Martin Schwidefsky (schwidefsky@de.ibm.com),
- * Rob van der Heij (rvdhei@iae.nl)
+ * (C) Copyright IBM Corp. 1999, 2005
+ *
+ * Author(s): Hartmut Penner <hp@de.ibm.com>
+ * Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Rob van der Heij <rvdhei@iae.nl>
+ * Heiko Carstens <heiko.carstens@de.ibm.com>
*
* There are 5 different IPL methods
* 1) load the image directly into ram at address 0 and do an PSW restart
@@ -19,12 +20,7 @@
* 5) direct call of start by the SALIPL loader
* We use the cpuid to distinguish between VM and native ipl
* params for kernel are pushed to 0x10400 (see setup.h)
-
- Changes:
- Okt 25 2000 <rvdheij@iae.nl>
- added code to skip HDR and EOF to allow SL tape IPL (5 retries)
- changed first CCW from rewind to backspace block
-
+ *
*/
#include <linux/config.h>
@@ -34,6 +30,12 @@
#include <asm/thread_info.h>
#include <asm/page.h>
+#ifdef CONFIG_ARCH_S390X
+#define ARCH_OFFSET 4
+#else
+#define ARCH_OFFSET 0
+#endif
+
#ifndef CONFIG_IPL
.org 0
.long 0x00080000,0x80000000+startup # Just a restart PSW
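The new ARCH_OFFSET constant exists because the shared IPL code stores 32-bit values into parameter-area fields that are laid out as 8-byte slots: on the big-endian 64-bit build the low-order word of such a slot sits at byte offset 4, while the 31-bit build keeps the value in the first word, hence ARCH_OFFSET of 4 and 0 respectively. A small stand-alone illustration of that layout (the slot value is made up):

#include <stdio.h>
#include <stdint.h>

/*
 * Build the big-endian byte image of an 8-byte parameter slot by hand and
 * show that a 32-bit value lives in the word at offset +4.  This is an
 * illustration of the layout, not kernel code.
 */
static void store_be64(uint8_t *slot, uint64_t v)
{
	for (int i = 0; i < 8; i++)
		slot[i] = (uint8_t)(v >> (8 * (7 - i)));
}

int main(void)
{
	uint8_t slot[8];
	uint32_t low;

	store_be64(slot, 0x1234);               /* e.g. an IPL device number */

	/* read back only the word at offset ARCH_OFFSET = 4 */
	low = ((uint32_t)slot[4] << 24) | ((uint32_t)slot[5] << 16) |
	      ((uint32_t)slot[6] << 8)  |  (uint32_t)slot[7];
	printf("low word at +4: 0x%x\n", low);  /* prints 0x1234 */
	return 0;
}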
@@ -201,7 +203,7 @@
ssch 0(%r3) # load chunk of 1600 bytes
bnz .Llderr
.Lwait4irq:
- mvc __LC_IO_NEW_PSW(8),.Lnewpsw # set up IO interrupt psw
+ mvc 0x78(8),.Lnewpsw # set up IO interrupt psw
lpsw .Lwaitpsw
.Lioint:
c %r1,0xb8 # compare subchannel number
@@ -265,13 +267,13 @@ iplstart:
la %r2,IPL_BS # load start address
bas %r14,.Lloader # load rest of ipl image
l %r12,.Lparm # pointer to parameter area
- st %r1,IPL_DEVICE-PARMAREA(%r12) # store ipl device number
+ st %r1,IPL_DEVICE+ARCH_OFFSET-PARMAREA(%r12) # save ipl device number
#
# load parameter file from ipl device
#
.Lagain1:
- l %r2,INITRD_START-PARMAREA(%r12) # use ramdisk location as temp
+ l %r2,INITRD_START+ARCH_OFFSET-PARMAREA(%r12) # ramdisk loc. is temp
bas %r14,.Lloader # load parameter file
ltr %r2,%r2 # got anything ?
bz .Lnopf
@@ -279,7 +281,7 @@ iplstart:
bnh .Lnotrunc
la %r2,895
.Lnotrunc:
- l %r4,INITRD_START-PARMAREA(%r12)
+ l %r4,INITRD_START+ARCH_OFFSET-PARMAREA(%r12)
clc 0(3,%r4),.L_hdr # if it is HDRx
bz .Lagain1 # skip dataset header
clc 0(3,%r4),.L_eof # if it is EOFx
@@ -322,14 +324,14 @@ iplstart:
# load ramdisk from ipl device
#
.Lagain2:
- l %r2,INITRD_START-PARMAREA(%r12) # load adr. of ramdisk
+ l %r2,INITRD_START+ARCH_OFFSET-PARMAREA(%r12) # addr of ramdisk
bas %r14,.Lloader # load ramdisk
- st %r2,INITRD_SIZE-PARMAREA(%r12) # store size of ramdisk
+ st %r2,INITRD_SIZE+ARCH_OFFSET-PARMAREA(%r12) # store size of ramdisk
ltr %r2,%r2
bnz .Lrdcont
- st %r2,INITRD_START-PARMAREA(%r12) # no ramdisk found, null it
+ st %r2,INITRD_START+ARCH_OFFSET-PARMAREA(%r12) # no ramdisk found
.Lrdcont:
- l %r2,INITRD_START-PARMAREA(%r12)
+ l %r2,INITRD_START+ARCH_OFFSET-PARMAREA(%r12)
clc 0(3,%r2),.L_hdr # skip HDRx and EOFx
bz .Lagain2
@@ -432,10 +434,10 @@ start:
la %r3,1(%r3)
.done:
l %r1,.memsize
- st %r3,0(%r1)
+ st %r3,ARCH_OFFSET(%r1)
slr %r0,%r0
- st %r0,INITRD_SIZE-PARMAREA(%r11)
- st %r0,INITRD_START-PARMAREA(%r11)
+ st %r0,INITRD_SIZE+ARCH_OFFSET-PARMAREA(%r11)
+ st %r0,INITRD_START+ARCH_OFFSET-PARMAREA(%r11)
j startup # continue with startup
.tbl: .long _ebcasc # translate table
.cmd: .long COMMAND_LINE # address of command line buffer
@@ -478,304 +480,23 @@ start:
.byte 0xf0,0xf1,0xf2,0xf3,0xf4,0xf5,0xf6,0xf7
.byte 0xf8,0xf9,0xfa,0xfb,0xfc,0xfd,0xfe,0xff
-#
-# startup-code at 0x10000, running in real mode
-# this is called either by the ipl loader or directly by PSW restart
-# or linload or SALIPL
-#
- .org 0x10000
-startup:basr %r13,0 # get base
-.LPG1: l %r1, .Lget_ipl_device_addr-.LPG1(%r13)
- basr %r14, %r1
- lctl %c0,%c15,.Lctl-.LPG1(%r13) # load control registers
- la %r12,_pstart-.LPG1(%r13) # pointer to parameter area
- # move IPL device to lowcore
- mvc __LC_IPLDEV(4),IPL_DEVICE-PARMAREA(%r12)
-
-#
-# clear bss memory
-#
- l %r2,.Lbss_bgn-.LPG1(%r13) # start of bss
- l %r3,.Lbss_end-.LPG1(%r13) # end of bss
- sr %r3,%r2 # length of bss
- sr %r4,%r4 #
- sr %r5,%r5 # set src,length and pad to zero
- sr %r0,%r0 #
- mvcle %r2,%r4,0 # clear mem
- jo .-4 # branch back, if not finish
-
- l %r2,.Lrcp-.LPG1(%r13) # Read SCP forced command word
-.Lservicecall:
- stosm .Lpmask-.LPG1(%r13),0x01 # authorize ext interrupts
-
- stctl %r0, %r0,.Lcr-.LPG1(%r13) # get cr0
- la %r1,0x200 # set bit 22
- o %r1,.Lcr-.LPG1(%r13) # or old cr0 with r1
- st %r1,.Lcr-.LPG1(%r13)
- lctl %r0, %r0,.Lcr-.LPG1(%r13) # load modified cr0
-
- mvc __LC_EXT_NEW_PSW(8),.Lpcext-.LPG1(%r13) # set postcall psw
- la %r1, .Lsclph-.LPG1(%r13)
- a %r1,__LC_EXT_NEW_PSW+4 # set handler
- st %r1,__LC_EXT_NEW_PSW+4
-
- la %r4,_pstart-.LPG1(%r13) # %r4 is our index for sccb stuff
- la %r1, .Lsccb-PARMAREA(%r4) # our sccb
- .insn rre,0xb2200000,%r2,%r1 # service call
- ipm %r1
- srl %r1,28 # get cc code
- xr %r3, %r3
- chi %r1,3
- be .Lfchunk-.LPG1(%r13) # leave
- chi %r1,2
- be .Lservicecall-.LPG1(%r13)
- lpsw .Lwaitsclp-.LPG1(%r13)
-.Lsclph:
- lh %r1,.Lsccbr-PARMAREA(%r4)
- chi %r1,0x10 # 0x0010 is the sucess code
- je .Lprocsccb # let's process the sccb
- chi %r1,0x1f0
- bne .Lfchunk-.LPG1(%r13) # unhandled error code
- c %r2, .Lrcp-.LPG1(%r13) # Did we try Read SCP forced
- bne .Lfchunk-.LPG1(%r13) # if no, give up
- l %r2, .Lrcp2-.LPG1(%r13) # try with Read SCP
- b .Lservicecall-.LPG1(%r13)
-.Lprocsccb:
- lhi %r1,0
- icm %r1,3,.Lscpincr1-PARMAREA(%r4) # use this one if != 0
- jnz .Lscnd
- lhi %r1,0x800 # otherwise report 2GB
-.Lscnd:
- lhi %r3,0x800 # limit reported memory size to 2GB
- cr %r1,%r3
- jl .Lno2gb
- lr %r1,%r3
-.Lno2gb:
- xr %r3,%r3 # same logic
- ic %r3,.Lscpa1-PARMAREA(%r4)
- chi %r3,0x00
- jne .Lcompmem
- l %r3,.Lscpa2-PARMAREA(%r13)
-.Lcompmem:
- mr %r2,%r1 # mem in MB on 128-bit
- l %r1,.Lonemb-.LPG1(%r13)
- mr %r2,%r1 # mem size in bytes in %r3
- b .Lfchunk-.LPG1(%r13)
-
- .align 4
-.Lget_ipl_device_addr:
- .long .Lget_ipl_device
-.Lpmask:
- .byte 0
-.align 8
-.Lpcext:.long 0x00080000,0x80000000
-.Lcr:
- .long 0x00 # place holder for cr0
-.Lwaitsclp:
- .long 0x020A0000
- .long .Lsclph
-.Lrcp:
- .int 0x00120001 # Read SCP forced code
-.Lrcp2:
- .int 0x00020001 # Read SCP code
-.Lonemb:
- .int 0x100000
-.Lfchunk:
-
-#
-# find memory chunks.
-#
- lr %r9,%r3 # end of mem
- mvc __LC_PGM_NEW_PSW(8),.Lpcmem-.LPG1(%r13)
- la %r1,1 # test in increments of 128KB
- sll %r1,17
- l %r3,.Lmchunk-.LPG1(%r13) # get pointer to memory_chunk array
- slr %r4,%r4 # set start of chunk to zero
- slr %r5,%r5 # set end of chunk to zero
- slr %r6,%r6 # set access code to zero
- la %r10, MEMORY_CHUNKS # number of chunks
-.Lloop:
- tprot 0(%r5),0 # test protection of first byte
- ipm %r7
- srl %r7,28
- clr %r6,%r7 # compare cc with last access code
- be .Lsame-.LPG1(%r13)
- b .Lchkmem-.LPG1(%r13)
-.Lsame:
- ar %r5,%r1 # add 128KB to end of chunk
- bno .Lloop-.LPG1(%r13) # r1 < 0x80000000 -> loop
-.Lchkmem: # > 2GB or tprot got a program check
- clr %r4,%r5 # chunk size > 0?
- be .Lchkloop-.LPG1(%r13)
- st %r4,0(%r3) # store start address of chunk
- lr %r0,%r5
- slr %r0,%r4
- st %r0,4(%r3) # store size of chunk
- st %r6,8(%r3) # store type of chunk
- la %r3,12(%r3)
- l %r4,.Lmemsize-.LPG1(%r13) # address of variable memory_size
- st %r5,0(%r4) # store last end to memory size
- ahi %r10,-1 # update chunk number
-.Lchkloop:
- lr %r6,%r7 # set access code to last cc
- # we got an exception or we're starting a new
- # chunk , we must check if we should
- # still try to find valid memory (if we detected
- # the amount of available storage), and if we
- # have chunks left
- xr %r0,%r0
- clr %r0,%r9 # did we detect memory?
- je .Ldonemem # if not, leave
- chi %r10,0 # do we have chunks left?
- je .Ldonemem
- alr %r5,%r1 # add 128KB to end of chunk
- lr %r4,%r5 # potential new chunk
- clr %r5,%r9 # should we go on?
- jl .Lloop
-.Ldonemem:
- l %r12,.Lmflags-.LPG1(%r13) # get address of machine_flags
-#
-# find out if we are running under VM
-#
- stidp __LC_CPUID # store cpuid
- tm __LC_CPUID,0xff # running under VM ?
- bno .Lnovm-.LPG1(%r13)
- oi 3(%r12),1 # set VM flag
-.Lnovm:
- lh %r0,__LC_CPUID+4 # get cpu version
- chi %r0,0x7490 # running on a P/390 ?
- bne .Lnop390-.LPG1(%r13)
- oi 3(%r12),4 # set P/390 flag
-.Lnop390:
-
-#
-# find out if we have an IEEE fpu
-#
- mvc __LC_PGM_NEW_PSW(8),.Lpcfpu-.LPG1(%r13)
- efpc %r0,0 # test IEEE extract fpc instruction
- oi 3(%r12),2 # set IEEE fpu flag
-.Lchkfpu:
-
-#
-# find out if we have the CSP instruction
-#
- mvc __LC_PGM_NEW_PSW(8),.Lpccsp-.LPG1(%r13)
- la %r0,0
- lr %r1,%r0
- la %r2,4
- csp %r0,%r2 # Test CSP instruction
- oi 3(%r12),8 # set CSP flag
-.Lchkcsp:
-
-#
-# find out if we have the MVPG instruction
-#
- mvc __LC_PGM_NEW_PSW(8),.Lpcmvpg-.LPG1(%r13)
- sr %r0,%r0
- la %r1,0
- la %r2,0
- mvpg %r1,%r2 # Test CSP instruction
- oi 3(%r12),16 # set MVPG flag
-.Lchkmvpg:
-
-#
-# find out if we have the IDTE instruction
-#
- mvc __LC_PGM_NEW_PSW(8),.Lpcidte-.LPG1(%r13)
- .long 0xb2b10000 # store facility list
- tm 0xc8,0x08 # check bit for clearing-by-ASCE
- bno .Lchkidte-.LPG1(%r13)
- lhi %r1,2094
- lhi %r2,0
- .long 0xb98e2001
- oi 3(%r12),0x80 # set IDTE flag
-.Lchkidte:
-
- lpsw .Lentry-.LPG1(13) # jump to _stext in primary-space,
- # virtual and never return ...
- .align 8
-.Lentry:.long 0x00080000,0x80000000 + _stext
-.Lctl: .long 0x04b50002 # cr0: various things
- .long 0 # cr1: primary space segment table
- .long .Lduct # cr2: dispatchable unit control table
- .long 0 # cr3: instruction authorization
- .long 0 # cr4: instruction authorization
- .long 0xffffffff # cr5: primary-aste origin
- .long 0 # cr6: I/O interrupts
- .long 0 # cr7: secondary space segment table
- .long 0 # cr8: access registers translation
- .long 0 # cr9: tracing off
- .long 0 # cr10: tracing off
- .long 0 # cr11: tracing off
- .long 0 # cr12: tracing off
- .long 0 # cr13: home space segment table
- .long 0xc0000000 # cr14: machine check handling off
- .long 0 # cr15: linkage stack operations
-.Lpcmem:.long 0x00080000,0x80000000 + .Lchkmem
-.Lpcfpu:.long 0x00080000,0x80000000 + .Lchkfpu
-.Lpccsp:.long 0x00080000,0x80000000 + .Lchkcsp
-.Lpcmvpg:.long 0x00080000,0x80000000 + .Lchkmvpg
-.Lpcidte:.long 0x00080000,0x80000000 + .Lchkidte
-.Lmemsize:.long memory_size
-.Lmchunk:.long memory_chunk
-.Lmflags:.long machine_flags
-.Lbss_bgn: .long __bss_start
-.Lbss_end: .long _end
-
- .org PARMAREA-64
-.Lduct: .long 0,0,0,0,0,0,0,0
- .long 0,0,0,0,0,0,0,0
-
-#
-# params at 10400 (setup.h)
-#
- .org PARMAREA
- .global _pstart
-_pstart:
- .long 0,0 # IPL_DEVICE
- .long 0,RAMDISK_ORIGIN # INITRD_START
- .long 0,RAMDISK_SIZE # INITRD_SIZE
-
- .org COMMAND_LINE
- .byte "root=/dev/ram0 ro"
- .byte 0
- .org 0x11000
-.Lsccb:
- .hword 0x1000 # length, one page
- .byte 0x00,0x00,0x00
- .byte 0x80 # variable response bit set
-.Lsccbr:
- .hword 0x00 # response code
-.Lscpincr1:
- .hword 0x00
-.Lscpa1:
- .byte 0x00
- .fill 89,1,0
-.Lscpa2:
- .int 0x00
-.Lscpincr2:
- .quad 0x00
- .fill 3984,1,0
- .org 0x12000
- .global _pend
-_pend:
-
+.macro GET_IPL_DEVICE
.Lget_ipl_device:
basr %r12,0
-.LPG2: l %r1,0xb8 # get sid
+.LGID: l %r1,0xb8 # get sid
sll %r1,15 # test if subchannel is enabled
srl %r1,31
ltr %r1,%r1
bz 0(%r14) # subchannel disabled
l %r1,0xb8
- la %r5,.Lipl_schib-.LPG2(%r12)
+ la %r5,.Lipl_schib-.LGID(%r12)
stsch 0(%r5) # get schib of subchannel
bnz 0(%r14) # schib not available
tm 5(%r5),0x01 # devno valid?
bno 0(%r14)
- la %r6,ipl_parameter_flags-.LPG2(%r12)
+ la %r6,ipl_parameter_flags-.LGID(%r12)
oi 3(%r6),0x01 # set flag
- la %r2,ipl_devno-.LPG2(%r12)
+ la %r2,ipl_devno-.LGID(%r12)
mvc 0(2,%r2),6(%r5) # store devno
tm 4(%r5),0x80 # qdio capable device?
bno 0(%r14)
@@ -816,46 +537,10 @@ ipl_parameter_flags:
.globl ipl_devno
ipl_devno:
.word 0
+.endm
-#ifdef CONFIG_SHARED_KERNEL
- .org 0x100000
+#ifdef CONFIG_ARCH_S390X
+#include "head64.S"
+#else
+#include "head31.S"
#endif
-
-#
-# startup-code, running in virtual mode
-#
- .globl _stext
-_stext: basr %r13,0 # get base
-.LPG3:
-#
-# Setup stack
-#
- l %r15,.Linittu-.LPG3(%r13)
- mvc __LC_CURRENT(4),__TI_task(%r15)
- ahi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union + THREAD_SIZE
- st %r15,__LC_KERNEL_STACK # set end of kernel stack
- ahi %r15,-96
- xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear backchain
-
-# check control registers
- stctl %c0,%c15,0(%r15)
- oi 2(%r15),0x40 # enable sigp emergency signal
- oi 0(%r15),0x10 # switch on low address protection
- lctl %c0,%c15,0(%r15)
-
-#
- lam 0,15,.Laregs-.LPG3(%r13) # load access regs needed by uaccess
- l %r14,.Lstart-.LPG3(%r13)
- basr %r14,%r14 # call start_kernel
-#
-# We returned from start_kernel ?!? PANIK
-#
- basr %r13,0
- lpsw .Ldw-.(%r13) # load disabled wait psw
-#
- .align 8
-.Ldw: .long 0x000a0000,0x00000000
-.Linittu: .long init_thread_union
-.Lstart: .long start_kernel
-.Laregs: .long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
-
diff --git a/arch/s390/kernel/head31.S b/arch/s390/kernel/head31.S
new file mode 100644
index 00000000000..2d3b089bfb8
--- /dev/null
+++ b/arch/s390/kernel/head31.S
@@ -0,0 +1,336 @@
+/*
+ * arch/s390/kernel/head31.S
+ *
+ * (C) Copyright IBM Corp. 2005
+ *
+ * Author(s): Hartmut Penner <hp@de.ibm.com>
+ * Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Rob van der Heij <rvdhei@iae.nl>
+ * Heiko Carstens <heiko.carstens@de.ibm.com>
+ *
+ */
+
+#
+# startup-code at 0x10000, running in absolute addressing mode
+# this is called either by the ipl loader or directly by PSW restart
+# or linload or SALIPL
+#
+ .org 0x10000
+startup:basr %r13,0 # get base
+.LPG1: l %r1, .Lget_ipl_device_addr-.LPG1(%r13)
+ basr %r14, %r1
+ lctl %c0,%c15,.Lctl-.LPG1(%r13) # load control registers
+ la %r12,_pstart-.LPG1(%r13) # pointer to parameter area
+ # move IPL device to lowcore
+ mvc __LC_IPLDEV(4),IPL_DEVICE-PARMAREA(%r12)
+
+#
+# clear bss memory
+#
+ l %r2,.Lbss_bgn-.LPG1(%r13) # start of bss
+ l %r3,.Lbss_end-.LPG1(%r13) # end of bss
+ sr %r3,%r2 # length of bss
+ sr %r4,%r4
+ sr %r5,%r5 # set src,length and pad to zero
+ sr %r0,%r0
+ mvcle %r2,%r4,0 # clear mem
+ jo .-4 # branch back, if not finish
+
+ l %r2,.Lrcp-.LPG1(%r13) # Read SCP forced command word
+.Lservicecall:
+ stosm .Lpmask-.LPG1(%r13),0x01 # authorize ext interrupts
+
+ stctl %r0, %r0,.Lcr-.LPG1(%r13) # get cr0
+ la %r1,0x200 # set bit 22
+ o %r1,.Lcr-.LPG1(%r13) # or old cr0 with r1
+ st %r1,.Lcr-.LPG1(%r13)
+ lctl %r0, %r0,.Lcr-.LPG1(%r13) # load modified cr0
+
+ mvc __LC_EXT_NEW_PSW(8),.Lpcext-.LPG1(%r13) # set postcall psw
+ la %r1, .Lsclph-.LPG1(%r13)
+ a %r1,__LC_EXT_NEW_PSW+4 # set handler
+ st %r1,__LC_EXT_NEW_PSW+4
+
+ la %r4,_pstart-.LPG1(%r13) # %r4 is our index for sccb stuff
+ la %r1, .Lsccb-PARMAREA(%r4) # our sccb
+ .insn rre,0xb2200000,%r2,%r1 # service call
+ ipm %r1
+ srl %r1,28 # get cc code
+ xr %r3, %r3
+ chi %r1,3
+ be .Lfchunk-.LPG1(%r13) # leave
+ chi %r1,2
+ be .Lservicecall-.LPG1(%r13)
+ lpsw .Lwaitsclp-.LPG1(%r13)
+.Lsclph:
+ lh %r1,.Lsccbr-PARMAREA(%r4)
+ chi %r1,0x10 # 0x0010 is the success code
+ je .Lprocsccb # let's process the sccb
+ chi %r1,0x1f0
+ bne .Lfchunk-.LPG1(%r13) # unhandled error code
+ c %r2, .Lrcp-.LPG1(%r13) # Did we try Read SCP forced
+ bne .Lfchunk-.LPG1(%r13) # if no, give up
+ l %r2, .Lrcp2-.LPG1(%r13) # try with Read SCP
+ b .Lservicecall-.LPG1(%r13)
+.Lprocsccb:
+ lhi %r1,0
+ icm %r1,3,.Lscpincr1-PARMAREA(%r4) # use this one if != 0
+ jnz .Lscnd
+ lhi %r1,0x800 # otherwise report 2GB
+.Lscnd:
+ lhi %r3,0x800 # limit reported memory size to 2GB
+ cr %r1,%r3
+ jl .Lno2gb
+ lr %r1,%r3
+.Lno2gb:
+ xr %r3,%r3 # same logic
+ ic %r3,.Lscpa1-PARMAREA(%r4)
+ chi %r3,0x00
+ jne .Lcompmem
+ l %r3,.Lscpa2-PARMAREA(%r13)
+.Lcompmem:
+ mr %r2,%r1 # mem in MB on 128-bit
+ l %r1,.Lonemb-.LPG1(%r13)
+ mr %r2,%r1 # mem size in bytes in %r3
+ b .Lfchunk-.LPG1(%r13)
+
+ .align 4
+.Lget_ipl_device_addr:
+ .long .Lget_ipl_device
+.Lpmask:
+ .byte 0
+.align 8
+.Lpcext:.long 0x00080000,0x80000000
+.Lcr:
+ .long 0x00 # place holder for cr0
+.Lwaitsclp:
+ .long 0x010a0000,0x80000000 + .Lsclph
+.Lrcp:
+ .int 0x00120001 # Read SCP forced code
+.Lrcp2:
+ .int 0x00020001 # Read SCP code
+.Lonemb:
+ .int 0x100000
+.Lfchunk:
+
+#
+# find memory chunks.
+#
+ lr %r9,%r3 # end of mem
+ mvc __LC_PGM_NEW_PSW(8),.Lpcmem-.LPG1(%r13)
+ la %r1,1 # test in increments of 128KB
+ sll %r1,17
+ l %r3,.Lmchunk-.LPG1(%r13) # get pointer to memory_chunk array
+ slr %r4,%r4 # set start of chunk to zero
+ slr %r5,%r5 # set end of chunk to zero
+ slr %r6,%r6 # set access code to zero
+ la %r10, MEMORY_CHUNKS # number of chunks
+.Lloop:
+ tprot 0(%r5),0 # test protection of first byte
+ ipm %r7
+ srl %r7,28
+ clr %r6,%r7 # compare cc with last access code
+ be .Lsame-.LPG1(%r13)
+ b .Lchkmem-.LPG1(%r13)
+.Lsame:
+ ar %r5,%r1 # add 128KB to end of chunk
+ bno .Lloop-.LPG1(%r13) # r1 < 0x80000000 -> loop
+.Lchkmem: # > 2GB or tprot got a program check
+ clr %r4,%r5 # chunk size > 0?
+ be .Lchkloop-.LPG1(%r13)
+ st %r4,0(%r3) # store start address of chunk
+ lr %r0,%r5
+ slr %r0,%r4
+ st %r0,4(%r3) # store size of chunk
+ st %r6,8(%r3) # store type of chunk
+ la %r3,12(%r3)
+ l %r4,.Lmemsize-.LPG1(%r13) # address of variable memory_size
+ st %r5,0(%r4) # store last end to memory size
+ ahi %r10,-1 # update chunk number
+.Lchkloop:
+ lr %r6,%r7 # set access code to last cc
+ # we got an exception or we're starting a new
+ # chunk , we must check if we should
+ # still try to find valid memory (if we detected
+ # the amount of available storage), and if we
+ # have chunks left
+ xr %r0,%r0
+ clr %r0,%r9 # did we detect memory?
+ je .Ldonemem # if not, leave
+ chi %r10,0 # do we have chunks left?
+ je .Ldonemem
+ alr %r5,%r1 # add 128KB to end of chunk
+ lr %r4,%r5 # potential new chunk
+ clr %r5,%r9 # should we go on?
+ jl .Lloop
+.Ldonemem:
+ l %r12,.Lmflags-.LPG1(%r13) # get address of machine_flags
+#
+# find out if we are running under VM
+#
+ stidp __LC_CPUID # store cpuid
+ tm __LC_CPUID,0xff # running under VM ?
+ bno .Lnovm-.LPG1(%r13)
+ oi 3(%r12),1 # set VM flag
+.Lnovm:
+ lh %r0,__LC_CPUID+4 # get cpu version
+ chi %r0,0x7490 # running on a P/390 ?
+ bne .Lnop390-.LPG1(%r13)
+ oi 3(%r12),4 # set P/390 flag
+.Lnop390:
+
+#
+# find out if we have an IEEE fpu
+#
+ mvc __LC_PGM_NEW_PSW(8),.Lpcfpu-.LPG1(%r13)
+ efpc %r0,0 # test IEEE extract fpc instruction
+ oi 3(%r12),2 # set IEEE fpu flag
+.Lchkfpu:
+
+#
+# find out if we have the CSP instruction
+#
+ mvc __LC_PGM_NEW_PSW(8),.Lpccsp-.LPG1(%r13)
+ la %r0,0
+ lr %r1,%r0
+ la %r2,4
+ csp %r0,%r2 # Test CSP instruction
+ oi 3(%r12),8 # set CSP flag
+.Lchkcsp:
+
+#
+# find out if we have the MVPG instruction
+#
+ mvc __LC_PGM_NEW_PSW(8),.Lpcmvpg-.LPG1(%r13)
+ sr %r0,%r0
+ la %r1,0
+ la %r2,0
+ mvpg %r1,%r2 # Test MVPG instruction
+ oi 3(%r12),16 # set MVPG flag
+.Lchkmvpg:
+
+#
+# find out if we have the IDTE instruction
+#
+ mvc __LC_PGM_NEW_PSW(8),.Lpcidte-.LPG1(%r13)
+ .long 0xb2b10000 # store facility list
+ tm 0xc8,0x08 # check bit for clearing-by-ASCE
+ bno .Lchkidte-.LPG1(%r13)
+ lhi %r1,2094
+ lhi %r2,0
+ .long 0xb98e2001
+ oi 3(%r12),0x80 # set IDTE flag
+.Lchkidte:
+
+ lpsw .Lentry-.LPG1(13) # jump to _stext in primary-space,
+ # virtual and never return ...
+ .align 8
+.Lentry:.long 0x00080000,0x80000000 + _stext
+.Lctl: .long 0x04b50002 # cr0: various things
+ .long 0 # cr1: primary space segment table
+ .long .Lduct # cr2: dispatchable unit control table
+ .long 0 # cr3: instruction authorization
+ .long 0 # cr4: instruction authorization
+ .long 0xffffffff # cr5: primary-aste origin
+ .long 0 # cr6: I/O interrupts
+ .long 0 # cr7: secondary space segment table
+ .long 0 # cr8: access registers translation
+ .long 0 # cr9: tracing off
+ .long 0 # cr10: tracing off
+ .long 0 # cr11: tracing off
+ .long 0 # cr12: tracing off
+ .long 0 # cr13: home space segment table
+ .long 0xc0000000 # cr14: machine check handling off
+ .long 0 # cr15: linkage stack operations
+.Lpcmem:.long 0x00080000,0x80000000 + .Lchkmem
+.Lpcfpu:.long 0x00080000,0x80000000 + .Lchkfpu
+.Lpccsp:.long 0x00080000,0x80000000 + .Lchkcsp
+.Lpcmvpg:.long 0x00080000,0x80000000 + .Lchkmvpg
+.Lpcidte:.long 0x00080000,0x80000000 + .Lchkidte
+.Lmemsize:.long memory_size
+.Lmchunk:.long memory_chunk
+.Lmflags:.long machine_flags
+.Lbss_bgn: .long __bss_start
+.Lbss_end: .long _end
+
+ .org PARMAREA-64
+.Lduct: .long 0,0,0,0,0,0,0,0
+ .long 0,0,0,0,0,0,0,0
+
+#
+# params at 10400 (setup.h)
+#
+ .org PARMAREA
+ .global _pstart
+_pstart:
+ .long 0,0 # IPL_DEVICE
+ .long 0,RAMDISK_ORIGIN # INITRD_START
+ .long 0,RAMDISK_SIZE # INITRD_SIZE
+
+ .org COMMAND_LINE
+ .byte "root=/dev/ram0 ro"
+ .byte 0
+ .org 0x11000
+.Lsccb:
+ .hword 0x1000 # length, one page
+ .byte 0x00,0x00,0x00
+ .byte 0x80 # variable response bit set
+.Lsccbr:
+ .hword 0x00 # response code
+.Lscpincr1:
+ .hword 0x00
+.Lscpa1:
+ .byte 0x00
+ .fill 89,1,0
+.Lscpa2:
+ .int 0x00
+.Lscpincr2:
+ .quad 0x00
+ .fill 3984,1,0
+ .org 0x12000
+ .global _pend
+_pend:
+
+ GET_IPL_DEVICE
+
+#ifdef CONFIG_SHARED_KERNEL
+ .org 0x100000
+#endif
+
+#
+# startup-code, running in virtual mode
+#
+ .globl _stext
+_stext: basr %r13,0 # get base
+.LPG3:
+#
+# Setup stack
+#
+ l %r15,.Linittu-.LPG3(%r13)
+ mvc __LC_CURRENT(4),__TI_task(%r15)
+ ahi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union+THREAD_SIZE
+ st %r15,__LC_KERNEL_STACK # set end of kernel stack
+ ahi %r15,-96
+ xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear backchain
+
+# check control registers
+ stctl %c0,%c15,0(%r15)
+ oi 2(%r15),0x40 # enable sigp emergency signal
+ oi 0(%r15),0x10 # switch on low address protection
+ lctl %c0,%c15,0(%r15)
+
+#
+ lam 0,15,.Laregs-.LPG3(%r13) # load access regs needed by uaccess
+ l %r14,.Lstart-.LPG3(%r13)
+ basr %r14,%r14 # call start_kernel
+#
+# We returned from start_kernel ?!? PANIK
+#
+ basr %r13,0
+ lpsw .Ldw-.(%r13) # load disabled wait psw
+#
+ .align 8
+.Ldw: .long 0x000a0000,0x00000000
+.Linittu:.long init_thread_union
+.Lstart:.long start_kernel
+.Laregs:.long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
index 193aafa72f5..f08c06f45d5 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -1,482 +1,17 @@
/*
- * arch/s390/kernel/head.S
+ * arch/s390/kernel/head64.S
*
- * S390 version
- * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
- * Author(s): Hartmut Penner (hp@de.ibm.com),
- * Martin Schwidefsky (schwidefsky@de.ibm.com),
- * Rob van der Heij (rvdhei@iae.nl)
+ * (C) Copyright IBM Corp. 1999,2005
+ *
+ * Author(s): Hartmut Penner <hp@de.ibm.com>
+ * Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Rob van der Heij <rvdhei@iae.nl>
+ * Heiko Carstens <heiko.carstens@de.ibm.com>
*
- * There are 5 different IPL methods
- * 1) load the image directly into ram at address 0 and do an PSW restart
- * 2) linload will load the image from address 0x10000 to memory 0x10000
- * and start the code thru LPSW 0x0008000080010000 (VM only, deprecated)
- * 3) generate the tape ipl header, store the generated image on a tape
- * and ipl from it
- * In case of SL tape you need to IPL 5 times to get past VOL1 etc
- * 4) generate the vm reader ipl header, move the generated image to the
- * VM reader (use option NOH!) and do a ipl from reader (VM only)
- * 5) direct call of start by the SALIPL loader
- * We use the cpuid to distinguish between VM and native ipl
- * params for kernel are pushed to 0x10400 (see setup.h)
-
- Changes:
- Okt 25 2000 <rvdheij@iae.nl>
- added code to skip HDR and EOF to allow SL tape IPL (5 retries)
- changed first CCW from rewind to backspace block
-
*/
-#include <linux/config.h>
-#include <asm/setup.h>
-#include <asm/lowcore.h>
-#include <asm/asm-offsets.h>
-#include <asm/thread_info.h>
-#include <asm/page.h>
-
-#ifndef CONFIG_IPL
- .org 0
- .long 0x00080000,0x80000000+startup # Just a restart PSW
-#else
-#ifdef CONFIG_IPL_TAPE
-#define IPL_BS 1024
- .org 0
- .long 0x00080000,0x80000000+iplstart # The first 24 bytes are loaded
- .long 0x27000000,0x60000001 # by ipl to addresses 0-23.
- .long 0x02000000,0x20000000+IPL_BS # (a PSW and two CCWs).
- .long 0x00000000,0x00000000 # external old psw
- .long 0x00000000,0x00000000 # svc old psw
- .long 0x00000000,0x00000000 # program check old psw
- .long 0x00000000,0x00000000 # machine check old psw
- .long 0x00000000,0x00000000 # io old psw
- .long 0x00000000,0x00000000
- .long 0x00000000,0x00000000
- .long 0x00000000,0x00000000
- .long 0x000a0000,0x00000058 # external new psw
- .long 0x000a0000,0x00000060 # svc new psw
- .long 0x000a0000,0x00000068 # program check new psw
- .long 0x000a0000,0x00000070 # machine check new psw
- .long 0x00080000,0x80000000+.Lioint # io new psw
-
- .org 0x100
-#
-# subroutine for loading from tape
-# Paramters:
-# R1 = device number
-# R2 = load address
-.Lloader:
- st %r14,.Lldret
- la %r3,.Lorbread # r3 = address of orb
- la %r5,.Lirb # r5 = address of irb
- st %r2,.Lccwread+4 # initialize CCW data addresses
- lctl %c6,%c6,.Lcr6
- slr %r2,%r2
-.Lldlp:
- la %r6,3 # 3 retries
-.Lssch:
- ssch 0(%r3) # load chunk of IPL_BS bytes
- bnz .Llderr
-.Lw4end:
- bas %r14,.Lwait4io
- tm 8(%r5),0x82 # do we have a problem ?
- bnz .Lrecov
- slr %r7,%r7
- icm %r7,3,10(%r5) # get residual count
- lcr %r7,%r7
- la %r7,IPL_BS(%r7) # IPL_BS-residual=#bytes read
- ar %r2,%r7 # add to total size
- tm 8(%r5),0x01 # found a tape mark ?
- bnz .Ldone
- l %r0,.Lccwread+4 # update CCW data addresses
- ar %r0,%r7
- st %r0,.Lccwread+4
- b .Lldlp
-.Ldone:
- l %r14,.Lldret
- br %r14 # r2 contains the total size
-.Lrecov:
- bas %r14,.Lsense # do the sensing
- bct %r6,.Lssch # dec. retry count & branch
- b .Llderr
-#
-# Sense subroutine
-#
-.Lsense:
- st %r14,.Lsnsret
- la %r7,.Lorbsense
- ssch 0(%r7) # start sense command
- bnz .Llderr
- bas %r14,.Lwait4io
- l %r14,.Lsnsret
- tm 8(%r5),0x82 # do we have a problem ?
- bnz .Llderr
- br %r14
-#
-# Wait for interrupt subroutine
-#
-.Lwait4io:
- lpsw .Lwaitpsw
-.Lioint:
- c %r1,0xb8 # compare subchannel number
- bne .Lwait4io
- tsch 0(%r5)
- slr %r0,%r0
- tm 8(%r5),0x82 # do we have a problem ?
- bnz .Lwtexit
- tm 8(%r5),0x04 # got device end ?
- bz .Lwait4io
-.Lwtexit:
- br %r14
-.Llderr:
- lpsw .Lcrash
-
- .align 8
-.Lorbread:
- .long 0x00000000,0x0080ff00,.Lccwread
- .align 8
-.Lorbsense:
- .long 0x00000000,0x0080ff00,.Lccwsense
- .align 8
-.Lccwread:
- .long 0x02200000+IPL_BS,0x00000000
-.Lccwsense:
- .long 0x04200001,0x00000000
-.Lwaitpsw:
- .long 0x020a0000,0x80000000+.Lioint
-
-.Lirb: .long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
-.Lcr6: .long 0xff000000
- .align 8
-.Lcrash:.long 0x000a0000,0x00000000
-.Lldret:.long 0
-.Lsnsret: .long 0
-#endif /* CONFIG_IPL_TAPE */
-
-#ifdef CONFIG_IPL_VM
-#define IPL_BS 0x730
- .org 0
- .long 0x00080000,0x80000000+iplstart # The first 24 bytes are loaded
- .long 0x02000018,0x60000050 # by ipl to addresses 0-23.
- .long 0x02000068,0x60000050 # (a PSW and two CCWs).
- .fill 80-24,1,0x40 # bytes 24-79 are discarded !!
- .long 0x020000f0,0x60000050 # The next 160 byte are loaded
- .long 0x02000140,0x60000050 # to addresses 0x18-0xb7
- .long 0x02000190,0x60000050 # They form the continuation
- .long 0x020001e0,0x60000050 # of the CCW program started
- .long 0x02000230,0x60000050 # by ipl and load the range
- .long 0x02000280,0x60000050 # 0x0f0-0x730 from the image
- .long 0x020002d0,0x60000050 # to the range 0x0f0-0x730
- .long 0x02000320,0x60000050 # in memory. At the end of
- .long 0x02000370,0x60000050 # the channel program the PSW
- .long 0x020003c0,0x60000050 # at location 0 is loaded.
- .long 0x02000410,0x60000050 # Initial processing starts
- .long 0x02000460,0x60000050 # at 0xf0 = iplstart.
- .long 0x020004b0,0x60000050
- .long 0x02000500,0x60000050
- .long 0x02000550,0x60000050
- .long 0x020005a0,0x60000050
- .long 0x020005f0,0x60000050
- .long 0x02000640,0x60000050
- .long 0x02000690,0x60000050
- .long 0x020006e0,0x20000050
-
- .org 0xf0
-#
-# subroutine for loading cards from the reader
-#
-.Lloader:
- la %r3,.Lorb # r2 = address of orb into r2
- la %r5,.Lirb # r4 = address of irb
- la %r6,.Lccws
- la %r7,20
-.Linit:
- st %r2,4(%r6) # initialize CCW data addresses
- la %r2,0x50(%r2)
- la %r6,8(%r6)
- bct 7,.Linit
-
- lctl %c6,%c6,.Lcr6 # set IO subclass mask
- slr %r2,%r2
-.Lldlp:
- ssch 0(%r3) # load chunk of 1600 bytes
- bnz .Llderr
-.Lwait4irq:
- mvc 0x78(8),.Lnewpsw # set up IO interrupt psw
- lpsw .Lwaitpsw
-.Lioint:
- c %r1,0xb8 # compare subchannel number
- bne .Lwait4irq
- tsch 0(%r5)
-
- slr %r0,%r0
- ic %r0,8(%r5) # get device status
- chi %r0,8 # channel end ?
- be .Lcont
- chi %r0,12 # channel end + device end ?
- be .Lcont
-
- l %r0,4(%r5)
- s %r0,8(%r3) # r0/8 = number of ccws executed
- mhi %r0,10 # *10 = number of bytes in ccws
- lh %r3,10(%r5) # get residual count
- sr %r0,%r3 # #ccws*80-residual=#bytes read
- ar %r2,%r0
-
- br %r14 # r2 contains the total size
-
-.Lcont:
- ahi %r2,0x640 # add 0x640 to total size
- la %r6,.Lccws
- la %r7,20
-.Lincr:
- l %r0,4(%r6) # update CCW data addresses
- ahi %r0,0x640
- st %r0,4(%r6)
- ahi %r6,8
- bct 7,.Lincr
-
- b .Lldlp
-.Llderr:
- lpsw .Lcrash
-
- .align 8
-.Lorb: .long 0x00000000,0x0080ff00,.Lccws
-.Lirb: .long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
-.Lcr6: .long 0xff000000
-.Lloadp:.long 0,0
- .align 8
-.Lcrash:.long 0x000a0000,0x00000000
-.Lnewpsw:
- .long 0x00080000,0x80000000+.Lioint
-.Lwaitpsw:
- .long 0x020a0000,0x80000000+.Lioint
-
- .align 8
-.Lccws: .rept 19
- .long 0x02600050,0x00000000
- .endr
- .long 0x02200050,0x00000000
-#endif /* CONFIG_IPL_VM */
-
-iplstart:
- lh %r1,0xb8 # test if subchannel number
- bct %r1,.Lnoload # is valid
- l %r1,0xb8 # load ipl subchannel number
- la %r2,IPL_BS # load start address
- bas %r14,.Lloader # load rest of ipl image
- larl %r12,_pstart # pointer to parameter area
- st %r1,IPL_DEVICE+4-PARMAREA(%r12) # store ipl device number
-
-#
-# load parameter file from ipl device
#
-.Lagain1:
- l %r2,INITRD_START+4-PARMAREA(%r12)# use ramdisk location as temp
- bas %r14,.Lloader # load parameter file
- ltr %r2,%r2 # got anything ?
- bz .Lnopf
- chi %r2,895
- bnh .Lnotrunc
- la %r2,895
-.Lnotrunc:
- l %r4,INITRD_START+4-PARMAREA(%r12)
- clc 0(3,%r4),.L_hdr # if it is HDRx
- bz .Lagain1 # skip dataset header
- clc 0(3,%r4),.L_eof # if it is EOFx
- bz .Lagain1 # skip dateset trailer
- la %r5,0(%r4,%r2)
- lr %r3,%r2
-.Lidebc:
- tm 0(%r5),0x80 # high order bit set ?
- bo .Ldocv # yes -> convert from EBCDIC
- ahi %r5,-1
- bct %r3,.Lidebc
- b .Lnocv
-.Ldocv:
- l %r3,.Lcvtab
- tr 0(256,%r4),0(%r3) # convert parameters to ascii
- tr 256(256,%r4),0(%r3)
- tr 512(256,%r4),0(%r3)
- tr 768(122,%r4),0(%r3)
-.Lnocv: la %r3,COMMAND_LINE-PARMAREA(%r12) # load adr. of command line
- mvc 0(256,%r3),0(%r4)
- mvc 256(256,%r3),256(%r4)
- mvc 512(256,%r3),512(%r4)
- mvc 768(122,%r3),768(%r4)
- slr %r0,%r0
- b .Lcntlp
-.Ldelspc:
- ic %r0,0(%r2,%r3)
- chi %r0,0x20 # is it a space ?
- be .Lcntlp
- ahi %r2,1
- b .Leolp
-.Lcntlp:
- brct %r2,.Ldelspc
-.Leolp:
- slr %r0,%r0
- stc %r0,0(%r2,%r3) # terminate buffer
-.Lnopf:
-
-#
-# load ramdisk from ipl device
-#
-.Lagain2:
- l %r2,INITRD_START+4-PARMAREA(%r12)# load adr. of ramdisk
- bas %r14,.Lloader # load ramdisk
- st %r2,INITRD_SIZE+4-PARMAREA(%r12) # store size of ramdisk
- ltr %r2,%r2
- bnz .Lrdcont
- st %r2,INITRD_START+4-PARMAREA(%r12)# no ramdisk found, null it
-.Lrdcont:
- l %r2,INITRD_START+4-PARMAREA(%r12)
- clc 0(3,%r2),.L_hdr # skip HDRx and EOFx
- bz .Lagain2
- clc 0(3,%r2),.L_eof
- bz .Lagain2
-
-#ifdef CONFIG_IPL_VM
-#
-# reset files in VM reader
-#
- stidp __LC_CPUID # store cpuid
- tm __LC_CPUID,0xff # running VM ?
- bno .Lnoreset
- la %r2,.Lreset
- lhi %r3,26
- diag %r2,%r3,8
- la %r5,.Lirb
- stsch 0(%r5) # check if irq is pending
- tm 30(%r5),0x0f # by verifying if any of the
- bnz .Lwaitforirq # activity or status control
- tm 31(%r5),0xff # bits is set in the schib
- bz .Lnoreset
-.Lwaitforirq:
- mvc 0x78(8),.Lrdrnewpsw # set up IO interrupt psw
-.Lwaitrdrirq:
- lpsw .Lrdrwaitpsw
-.Lrdrint:
- c %r1,0xb8 # compare subchannel number
- bne .Lwaitrdrirq
- la %r5,.Lirb
- tsch 0(%r5)
-.Lnoreset:
- b .Lnoload
-
- .align 8
-.Lrdrnewpsw:
- .long 0x00080000,0x80000000+.Lrdrint
-.Lrdrwaitpsw:
- .long 0x020a0000,0x80000000+.Lrdrint
-#endif
-
-#
-# everything loaded, go for it
-#
-.Lnoload:
- l %r1,.Lstartup
- br %r1
-
-.Lstartup: .long startup
-.Lcvtab:.long _ebcasc # ebcdic to ascii table
-.Lreset:.byte 0xc3,0xc8,0xc1,0xd5,0xc7,0xc5,0x40,0xd9,0xc4,0xd9,0x40
- .byte 0xc1,0xd3,0xd3,0x40,0xd2,0xc5,0xc5,0xd7,0x40,0xd5,0xd6
- .byte 0xc8,0xd6,0xd3,0xc4 # "change rdr all keep nohold"
-.L_eof: .long 0xc5d6c600 /* C'EOF' */
-.L_hdr: .long 0xc8c4d900 /* C'HDR' */
-#endif /* CONFIG_IPL */
-
-#
-# SALIPL loader support. Based on a patch by Rob van der Heij.
-# This entry point is called directly from the SALIPL loader and
-# doesn't need a builtin ipl record.
-#
- .org 0x800
- .globl start
-start:
- stm %r0,%r15,0x07b0 # store registers
- basr %r12,%r0
-.base:
- l %r11,.parm
- l %r8,.cmd # pointer to command buffer
-
- ltr %r9,%r9 # do we have SALIPL parameters?
- bp .sk8x8
-
- mvc 0(64,%r8),0x00b0 # copy saved registers
- xc 64(240-64,%r8),0(%r8) # remainder of buffer
- tr 0(64,%r8),.lowcase
- b .gotr
-.sk8x8:
- mvc 0(240,%r8),0(%r9) # copy iplparms into buffer
-.gotr:
- l %r10,.tbl # EBCDIC to ASCII table
- tr 0(240,%r8),0(%r10)
- stidp __LC_CPUID # Are we running on VM maybe
- cli __LC_CPUID,0xff
- bnz .test
- .long 0x83300060 # diag 3,0,x'0060' - storage size
- b .done
-.test:
- mvc 0x68(8),.pgmnw # set up pgm check handler
- l %r2,.fourmeg
- lr %r3,%r2
- bctr %r3,%r0 # 4M-1
-.loop: iske %r0,%r3
- ar %r3,%r2
-.pgmx:
- sr %r3,%r2
- la %r3,1(%r3)
-.done:
- l %r1,.memsize
- st %r3,4(%r1)
- slr %r0,%r0
- st %r0,INITRD_SIZE+4-PARMAREA(%r11)
- st %r0,INITRD_START+4-PARMAREA(%r11)
- j startup # continue with startup
-.tbl: .long _ebcasc # translate table
-.cmd: .long COMMAND_LINE # address of command line buffer
-.parm: .long PARMAREA
-.fourmeg: .long 0x00400000 # 4M
-.pgmnw: .long 0x00080000,.pgmx
-.memsize: .long memory_size
-.lowcase:
- .byte 0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07
- .byte 0x08,0x09,0x0a,0x0b,0x0c,0x0d,0x0e,0x0f
- .byte 0x10,0x11,0x12,0x13,0x14,0x15,0x16,0x17
- .byte 0x18,0x19,0x1a,0x1b,0x1c,0x1d,0x1e,0x1f
- .byte 0x20,0x21,0x22,0x23,0x24,0x25,0x26,0x27
- .byte 0x28,0x29,0x2a,0x2b,0x2c,0x2d,0x2e,0x2f
- .byte 0x30,0x31,0x32,0x33,0x34,0x35,0x36,0x37
- .byte 0x38,0x39,0x3a,0x3b,0x3c,0x3d,0x3e,0x3f
- .byte 0x40,0x41,0x42,0x43,0x44,0x45,0x46,0x47
- .byte 0x48,0x49,0x4a,0x4b,0x4c,0x4d,0x4e,0x4f
- .byte 0x50,0x51,0x52,0x53,0x54,0x55,0x56,0x57
- .byte 0x58,0x59,0x5a,0x5b,0x5c,0x5d,0x5e,0x5f
- .byte 0x60,0x61,0x62,0x63,0x64,0x65,0x66,0x67
- .byte 0x68,0x69,0x6a,0x6b,0x6c,0x6d,0x6e,0x6f
- .byte 0x70,0x71,0x72,0x73,0x74,0x75,0x76,0x77
- .byte 0x78,0x79,0x7a,0x7b,0x7c,0x7d,0x7e,0x7f
-
- .byte 0x80,0x81,0x82,0x83,0x84,0x85,0x86,0x87
- .byte 0x88,0x89,0x8a,0x8b,0x8c,0x8d,0x8e,0x8f
- .byte 0x90,0x91,0x92,0x93,0x94,0x95,0x96,0x97
- .byte 0x98,0x99,0x9a,0x9b,0x9c,0x9d,0x9e,0x9f
- .byte 0xa0,0xa1,0xa2,0xa3,0xa4,0xa5,0xa6,0xa7
- .byte 0xa8,0xa9,0xaa,0xab,0xac,0xad,0xae,0xaf
- .byte 0xb0,0xb1,0xb2,0xb3,0xb4,0xb5,0xb6,0xb7
- .byte 0xb8,0xb9,0xba,0xbb,0xbc,0xbd,0xbe,0xbf
- .byte 0xc0,0x81,0x82,0x83,0x84,0x85,0x86,0x87 # .abcdefg
- .byte 0x88,0x89,0xca,0xcb,0xcc,0xcd,0xce,0xcf # hi
- .byte 0xd0,0x91,0x92,0x93,0x94,0x95,0x96,0x97 # .jklmnop
- .byte 0x98,0x99,0xda,0xdb,0xdc,0xdd,0xde,0xdf # qr
- .byte 0xe0,0xe1,0xa2,0xa3,0xa4,0xa5,0xa6,0xa7 # ..stuvwx
- .byte 0xa8,0xa9,0xea,0xeb,0xec,0xed,0xee,0xef # yz
- .byte 0xf0,0xf1,0xf2,0xf3,0xf4,0xf5,0xf6,0xf7
- .byte 0xf8,0xf9,0xfa,0xfb,0xfc,0xfd,0xfe,0xff
-
-#
-# startup-code at 0x10000, running in real mode
+# startup-code at 0x10000, running in absolute addressing mode
# this is called either by the ipl loader or directly by PSW restart
# or linload or SALIPL
#
@@ -530,7 +65,7 @@ startup:basr %r13,0 # get base
be .Lfchunk-.LPG1(%r13) # leave
chi %r1,2
be .Lservicecall-.LPG1(%r13)
- lpsw .Lwaitsclp-.LPG1(%r13)
+ lpswe .Lwaitsclp-.LPG1(%r13)
.Lsclph:
lh %r1,.Lsccbr-PARMAREA(%r4)
chi %r1,0x10 # 0x0010 is the sucess code
@@ -567,8 +102,7 @@ startup:basr %r13,0 # get base
.Lcr:
.quad 0x00 # place holder for cr0
.Lwaitsclp:
- .long 0x020A0000
- .quad .Lsclph
+ .quad 0x0102000180000000,.Lsclph
.Lrcp:
.int 0x00120001 # Read SCP forced code
.Lrcp2:
@@ -751,62 +285,7 @@ _pstart:
.global _pend
_pend:
-.Lget_ipl_device:
- basr %r12,0
-.LPG2: l %r1,0xb8 # get sid
- sll %r1,15 # test if subchannel is enabled
- srl %r1,31
- ltr %r1,%r1
- bz 0(%r14) # subchannel disabled
- l %r1,0xb8
- la %r5,.Lipl_schib-.LPG2(%r12)
- stsch 0(%r5) # get schib of subchannel
- bnz 0(%r14) # schib not available
- tm 5(%r5),0x01 # devno valid?
- bno 0(%r14)
- la %r6,ipl_parameter_flags-.LPG2(%r12)
- oi 3(%r6),0x01 # set flag
- la %r2,ipl_devno-.LPG2(%r12)
- mvc 0(2,%r2),6(%r5) # store devno
- tm 4(%r5),0x80 # qdio capable device?
- bno 0(%r14)
- oi 3(%r6),0x02 # set flag
-
- # copy ipl parameters
-
- lhi %r0,4096
- l %r2,20(%r0) # get address of parameter list
- lhi %r3,IPL_PARMBLOCK_ORIGIN
- st %r3,20(%r0)
- lhi %r4,1
- cr %r2,%r3 # start parameters < destination ?
- jl 0f
- lhi %r1,1 # copy direction is upwards
- j 1f
-0: lhi %r1,-1 # copy direction is downwards
- ar %r2,%r0
- ar %r3,%r0
- ar %r2,%r1
- ar %r3,%r1
-1: mvc 0(1,%r3),0(%r2) # finally copy ipl parameters
- ar %r3,%r1
- ar %r2,%r1
- sr %r0,%r4
- jne 1b
- b 0(%r14)
-
- .align 4
-.Lipl_schib:
- .rept 13
- .long 0
- .endr
-
- .globl ipl_parameter_flags
-ipl_parameter_flags:
- .long 0
- .globl ipl_devno
-ipl_devno:
- .word 0
+ GET_IPL_DEVICE
#ifdef CONFIG_SHARED_KERNEL
.org 0x100000
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 9a1d95894f3..c36353e8c14 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -237,6 +237,8 @@ int sysctl_hz_timer = 1;
*/
static inline void stop_hz_timer(void)
{
+ unsigned long flags;
+ unsigned long seq, next;
__u64 timer, todval;
if (sysctl_hz_timer != 0)
@@ -257,7 +259,11 @@ static inline void stop_hz_timer(void)
* This cpu is going really idle. Set up the clock comparator
* for the next event.
*/
- timer = (__u64) (next_timer_interrupt() - jiffies) + jiffies_64;
+ next = next_timer_interrupt();
+ do {
+ seq = read_seqbegin_irqsave(&xtime_lock, flags);
+ timer = (__u64)(next - jiffies) + jiffies_64;
+ } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
todval = -1ULL;
/* Be careful about overflows. */
if (timer < (-1ULL / CLK_TICKS_PER_JIFFY)) {
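The stop_hz_timer() fix hoists the next_timer_interrupt() call out of the retry loop and wraps the jiffies/jiffies_64 access in read_seqbegin_irqsave()/read_seqretry_irqrestore(), so a concurrently running timer tick cannot hand the idle path an inconsistent 64-bit jiffies value. The stand-alone sketch below shows the read-retry idiom with plain variables; it is an illustration of the pattern, not the kernel's xtime_lock code:

#include <stdio.h>
#include <stdint.h>

static volatile unsigned seq;           /* bumped around every write */
static volatile uint64_t jiffies64;     /* the value being protected */

static unsigned read_seqbegin_example(void) { return seq; }

static int read_seqretry_example(unsigned start)
{
	/* retry if a write was in progress (odd) or completed meanwhile */
	return (start & 1) || seq != start;
}

static uint64_t read_jiffies64(void)
{
	unsigned start;
	uint64_t val;

	do {
		start = read_seqbegin_example();
		val = jiffies64;        /* may be torn on 32-bit without this */
	} while (read_seqretry_example(start));
	return val;
}

int main(void)
{
	jiffies64 = 123456789ULL;
	printf("%llu\n", (unsigned long long)read_jiffies64());
	return 0;
}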
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 6b8703ec2ae..c5bd36fae56 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -57,7 +57,6 @@ int sysctl_userprocess_debug = 0;
extern pgm_check_handler_t do_protection_exception;
extern pgm_check_handler_t do_dat_exception;
-extern pgm_check_handler_t do_pseudo_page_fault;
#ifdef CONFIG_PFAULT
extern int pfault_init(void);
extern void pfault_fini(void);
@@ -676,20 +675,6 @@ asmlinkage void kernel_stack_overflow(struct pt_regs * regs)
panic("Corrupt kernel stack, can't continue.");
}
-#ifndef CONFIG_ARCH_S390X
-static int
-pagex_reboot_event(struct notifier_block *this, unsigned long event, void *ptr)
-{
- if (MACHINE_IS_VM)
- cpcmd("SET PAGEX OFF", NULL, 0, NULL);
- return NOTIFY_DONE;
-}
-
-static struct notifier_block pagex_reboot_notifier = {
- .notifier_call = &pagex_reboot_event,
-};
-#endif
-
/* init is done in lowcore.S and head.S */
void __init trap_init(void)
@@ -717,9 +702,7 @@ void __init trap_init(void)
pgm_check_table[0x11] = &do_dat_exception;
pgm_check_table[0x12] = &translation_exception;
pgm_check_table[0x13] = &special_op_exception;
-#ifndef CONFIG_ARCH_S390X
- pgm_check_table[0x14] = &do_pseudo_page_fault;
-#else /* CONFIG_ARCH_S390X */
+#ifdef CONFIG_ARCH_S390X
pgm_check_table[0x38] = &do_dat_exception;
pgm_check_table[0x39] = &do_dat_exception;
pgm_check_table[0x3A] = &do_dat_exception;
@@ -731,12 +714,10 @@ void __init trap_init(void)
pgm_check_table[0x40] = &do_monitor_call;
if (MACHINE_IS_VM) {
+#ifdef CONFIG_PFAULT
/*
- * First try to get pfault pseudo page faults going.
- * If this isn't available turn on pagex page faults.
+ * Try to get pfault pseudo page faults going.
*/
-#ifdef CONFIG_PFAULT
- /* request the 0x2603 external interrupt */
if (register_early_external_interrupt(0x2603, pfault_interrupt,
&ext_int_pfault) != 0)
panic("Couldn't request external interrupt 0x2603");
@@ -748,9 +729,5 @@ void __init trap_init(void)
unregister_early_external_interrupt(0x2603, pfault_interrupt,
&ext_int_pfault);
#endif
-#ifndef CONFIG_ARCH_S390X
- register_reboot_notifier(&pagex_reboot_notifier);
- cpcmd("SET PAGEX ON", NULL, 0, NULL);
-#endif
}
}
diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c
index c5348108ca3..506a33b51e4 100644
--- a/arch/s390/mm/extmem.c
+++ b/arch/s390/mm/extmem.c
@@ -234,8 +234,8 @@ query_segment_type (struct dcss_segment *seg)
rc = 0;
out_free:
- if (qin) kfree(qin);
- if (qout) kfree(qout);
+ kfree(qin);
+ kfree(qout);
return rc;
}
@@ -394,7 +394,7 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long
segtype_string[seg->vm_segtype]);
goto out;
out_free:
- kfree (seg);
+ kfree(seg);
out:
return rc;
}
@@ -505,7 +505,7 @@ segment_modify_shared (char *name, int do_nonshared)
list_del(&seg->list);
dcss_diag(DCSS_PURGESEG, seg->dcss_name,
&dummy, &dummy);
- kfree (seg);
+ kfree(seg);
out_unlock:
spin_unlock(&dcss_lock);
return rc;
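The extmem.c cleanups rely on kfree(NULL) being a no-op, which makes the removed "if (ptr)" guards redundant; the same guarantee holds for free(NULL) in standard C, as in this small example:

#include <stdlib.h>

int main(void)
{
	char *qin = NULL;
	char *qout = malloc(16);

	/* no NULL check needed before either call */
	free(qin);
	free(qout);
	return 0;
}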
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 856a971759b..64e32da7775 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -352,115 +352,6 @@ void do_dat_exception(struct pt_regs *regs, unsigned long error_code)
do_exception(regs, error_code & 0xff, 0);
}
-#ifndef CONFIG_ARCH_S390X
-
-typedef struct _pseudo_wait_t {
- struct _pseudo_wait_t *next;
- wait_queue_head_t queue;
- unsigned long address;
- int resolved;
-} pseudo_wait_t;
-
-static pseudo_wait_t *pseudo_lock_queue = NULL;
-static spinlock_t pseudo_wait_spinlock; /* spinlock to protect lock queue */
-
-/*
- * This routine handles 'pagex' pseudo page faults.
- */
-asmlinkage void
-do_pseudo_page_fault(struct pt_regs *regs, unsigned long error_code)
-{
- pseudo_wait_t wait_struct;
- pseudo_wait_t *ptr, *last, *next;
- unsigned long address;
-
- /*
- * get the failing address
- * more specific the segment and page table portion of
- * the address
- */
- address = S390_lowcore.trans_exc_code & 0xfffff000;
-
- if (address & 0x80000000) {
- /* high bit set -> a page has been swapped in by VM */
- address &= 0x7fffffff;
- spin_lock(&pseudo_wait_spinlock);
- last = NULL;
- ptr = pseudo_lock_queue;
- while (ptr != NULL) {
- next = ptr->next;
- if (address == ptr->address) {
- /*
- * This is one of the processes waiting
- * for the page. Unchain from the queue.
- * There can be more than one process
- * waiting for the same page. VM presents
- * an initial and a completion interrupt for
- * every process that tries to access a
- * page swapped out by VM.
- */
- if (last == NULL)
- pseudo_lock_queue = next;
- else
- last->next = next;
- /* now wake up the process */
- ptr->resolved = 1;
- wake_up(&ptr->queue);
- } else
- last = ptr;
- ptr = next;
- }
- spin_unlock(&pseudo_wait_spinlock);
- } else {
- /* Pseudo page faults in kernel mode is a bad idea */
- if (!(regs->psw.mask & PSW_MASK_PSTATE)) {
- /*
- * VM presents pseudo page faults if the interrupted
- * state was not disabled for interrupts. So we can
- * get pseudo page fault interrupts while running
- * in kernel mode. We simply access the page here
- * while we are running disabled. VM will then swap
- * in the page synchronously.
- */
- if (check_user_space(regs, error_code) == 0)
- /* dereference a virtual kernel address */
- __asm__ __volatile__ (
- " ic 0,0(%0)"
- : : "a" (address) : "0");
- else
- /* dereference a virtual user address */
- __asm__ __volatile__ (
- " la 2,0(%0)\n"
- " sacf 512\n"
- " ic 2,0(2)\n"
- "0:sacf 0\n"
- ".section __ex_table,\"a\"\n"
- " .align 4\n"
- " .long 0b,0b\n"
- ".previous"
- : : "a" (address) : "2" );
-
- return;
- }
- /* initialize and add element to pseudo_lock_queue */
- init_waitqueue_head (&wait_struct.queue);
- wait_struct.address = address;
- wait_struct.resolved = 0;
- spin_lock(&pseudo_wait_spinlock);
- wait_struct.next = pseudo_lock_queue;
- pseudo_lock_queue = &wait_struct;
- spin_unlock(&pseudo_wait_spinlock);
- /*
- * The instruction that caused the program check will
- * be repeated. Don't signal single step via SIGTRAP.
- */
- clear_tsk_thread_flag(current, TIF_SINGLE_STEP);
- /* go to sleep */
- wait_event(wait_struct.queue, wait_struct.resolved);
- }
-}
-#endif /* CONFIG_ARCH_S390X */
-
#ifdef CONFIG_PFAULT
/*
* 'pfault' pseudo page faults routines.
@@ -508,7 +399,7 @@ int pfault_init(void)
" .quad 0b,1b\n"
#endif /* CONFIG_ARCH_S390X */
".previous"
- : "=d" (rc) : "a" (&refbk) : "cc" );
+ : "=d" (rc) : "a" (&refbk), "m" (refbk) : "cc" );
__ctl_set_bit(0, 9);
return rc;
}
@@ -532,7 +423,7 @@ void pfault_fini(void)
" .quad 0b,0b\n"
#endif /* CONFIG_ARCH_S390X */
".previous"
- : : "a" (&refbk) : "cc" );
+ : : "a" (&refbk), "m" (refbk) : "cc" );
}
asmlinkage void
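The pfault_init()/pfault_fini() change adds an "m" (refbk) input operand. With only "a" (&refbk), the compiler never learns that the asm statement reads the object behind that pointer, so it could in principle reorder or drop the stores that fill in refbk; naming the object as a memory operand makes the dependency explicit. A stand-alone sketch of the idiom, with an empty asm template standing in for the real s390 diagnose instruction:

#include <stdio.h>

struct refblock { int len; int flags; };

/*
 * Editor's sketch, not kernel code: the empty template below is a
 * placeholder for the real instruction.  The extra "m" (*rb) operand is
 * what tells the compiler the asm consumes the object's memory contents.
 */
static int probe(struct refblock *rb)
{
	int rc = 0;

	asm volatile(""                         /* real insn omitted */
		     : "+r" (rc)
		     : "r" (rb), "m" (*rb)      /* the extra memory operand */
		     : "cc");
	return rc;
}

int main(void)
{
	struct refblock rb = { .len = sizeof(rb), .flags = 0 };

	printf("rc=%d\n", probe(&rb));
	return 0;
}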
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 3e804c736e6..64f5ae0ff96 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -490,16 +490,6 @@ config CPU_SUBTYPE_ST40
depends on CPU_SUBTYPE_ST40STB1 || CPU_SUBTYPE_ST40GX1
default y
-config ARCH_DISCONTIGMEM_ENABLE
- bool
- depends on SH_HP690
- default y
- help
- Say Y to upport efficient handling of discontiguous physical memory,
- for architectures which are either NUMA (Non-Uniform Memory Access)
- or have huge holes in the physical address space for other reasons.
- See <file:Documentation/vm/numa> for more.
-
source "mm/Kconfig"
config ZERO_PAGE_OFFSET
@@ -770,24 +760,6 @@ source "fs/Kconfig.binfmt"
endmenu
-menu "SH initrd options"
- depends on BLK_DEV_INITRD
-
-config EMBEDDED_RAMDISK
- bool "Embed root filesystem ramdisk into the kernel"
-
-config EMBEDDED_RAMDISK_IMAGE
- string "Filename of gziped ramdisk image"
- depends on EMBEDDED_RAMDISK
- default "ramdisk.gz"
- help
- This is the filename of the ramdisk image to be built into the
- kernel. Relative pathnames are relative to arch/sh/ramdisk/.
- The ramdisk image is not part of the kernel distribution; you must
- provide one yourself.
-
-endmenu
-
source "net/Kconfig"
source "drivers/Kconfig"
diff --git a/arch/sh/Makefile b/arch/sh/Makefile
index 4a3049080b4..67192d6b00d 100644
--- a/arch/sh/Makefile
+++ b/arch/sh/Makefile
@@ -60,14 +60,6 @@ LIBGCC := $(shell $(CC) $(CFLAGS) -print-libgcc-file-name)
core-y += arch/sh/kernel/ arch/sh/mm/
-#
-# ramdisk/initrd support
-# You need a compressed ramdisk image, named
-# CONFIG_EMBEDDED_RAMDISK_IMAGE. Relative pathnames
-# are relative to arch/sh/ramdisk/.
-#
-core-$(CONFIG_EMBEDDED_RAMDISK) += arch/sh/ramdisk/
-
# Boards
machdir-$(CONFIG_SH_SOLUTION_ENGINE) := se/770x
machdir-$(CONFIG_SH_7751_SOLUTION_ENGINE) := se/7751
diff --git a/arch/sh/drivers/Makefile b/arch/sh/drivers/Makefile
index bd6726cde39..338c3729d27 100644
--- a/arch/sh/drivers/Makefile
+++ b/arch/sh/drivers/Makefile
@@ -2,6 +2,7 @@
# Makefile for the Linux SuperH-specific device drivers.
#
-obj-$(CONFIG_PCI) += pci/
-obj-$(CONFIG_SH_DMA) += dma/
+obj-$(CONFIG_PCI) += pci/
+obj-$(CONFIG_SH_DMA) += dma/
+obj-$(CONFIG_SUPERHYWAY) += superhyway/
diff --git a/arch/sh/drivers/superhyway/Makefile b/arch/sh/drivers/superhyway/Makefile
new file mode 100644
index 00000000000..5b8e0c7ca3a
--- /dev/null
+++ b/arch/sh/drivers/superhyway/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for the SuperHyway specific kernel interface routines under Linux.
+#
+
+obj-$(CONFIG_CPU_SUBTYPE_SH4_202) += ops-sh4-202.o
+
diff --git a/arch/sh/drivers/superhyway/ops-sh4-202.c b/arch/sh/drivers/superhyway/ops-sh4-202.c
new file mode 100644
index 00000000000..a55c98a9052
--- /dev/null
+++ b/arch/sh/drivers/superhyway/ops-sh4-202.c
@@ -0,0 +1,171 @@
+/*
+ * arch/sh/drivers/superhyway/ops-sh4-202.c
+ *
+ * SuperHyway bus support for SH4-202
+ *
+ * Copyright (C) 2005 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU
+ * General Public License. See the file "COPYING" in the main
+ * directory of this archive for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/superhyway.h>
+#include <linux/string.h>
+#include <asm/addrspace.h>
+#include <asm/io.h>
+
+#define PHYS_EMI_CBLOCK P4SEGADDR(0x1ec00000)
+#define PHYS_EMI_DBLOCK P4SEGADDR(0x08000000)
+#define PHYS_FEMI_CBLOCK P4SEGADDR(0x1f800000)
+#define PHYS_FEMI_DBLOCK P4SEGADDR(0x00000000)
+
+#define PHYS_EPBR_BLOCK P4SEGADDR(0x1de00000)
+#define PHYS_DMAC_BLOCK P4SEGADDR(0x1fa00000)
+#define PHYS_PBR_BLOCK P4SEGADDR(0x1fc00000)
+
+static struct resource emi_resources[] = {
+ [0] = {
+ .start = PHYS_EMI_CBLOCK,
+ .end = PHYS_EMI_CBLOCK + 0x00300000 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = PHYS_EMI_DBLOCK,
+ .end = PHYS_EMI_DBLOCK + 0x08000000 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct superhyway_device emi_device = {
+ .name = "emi",
+ .num_resources = ARRAY_SIZE(emi_resources),
+ .resource = emi_resources,
+};
+
+static struct resource femi_resources[] = {
+ [0] = {
+ .start = PHYS_FEMI_CBLOCK,
+ .end = PHYS_FEMI_CBLOCK + 0x00100000 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = PHYS_FEMI_DBLOCK,
+ .end = PHYS_FEMI_DBLOCK + 0x08000000 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct superhyway_device femi_device = {
+ .name = "femi",
+ .num_resources = ARRAY_SIZE(femi_resources),
+ .resource = femi_resources,
+};
+
+static struct resource epbr_resources[] = {
+ [0] = {
+ .start = P4SEGADDR(0x1e7ffff8),
+ .end = P4SEGADDR(0x1e7ffff8 + (sizeof(u32) * 2) - 1),
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = PHYS_EPBR_BLOCK,
+ .end = PHYS_EPBR_BLOCK + 0x00a00000 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct superhyway_device epbr_device = {
+ .name = "epbr",
+ .num_resources = ARRAY_SIZE(epbr_resources),
+ .resource = epbr_resources,
+};
+
+static struct resource dmac_resource = {
+ .start = PHYS_DMAC_BLOCK,
+ .end = PHYS_DMAC_BLOCK + 0x00100000 - 1,
+ .flags = IORESOURCE_MEM,
+};
+
+static struct superhyway_device dmac_device = {
+ .name = "dmac",
+ .num_resources = 1,
+ .resource = &dmac_resource,
+};
+
+static struct resource pbr_resources[] = {
+ [0] = {
+ .start = P4SEGADDR(0x1ffffff8),
+ .end = P4SEGADDR(0x1ffffff8 + (sizeof(u32) * 2) - 1),
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = PHYS_PBR_BLOCK,
+ .end = PHYS_PBR_BLOCK + 0x00400000 - (sizeof(u32) * 2) - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct superhyway_device pbr_device = {
+ .name = "pbr",
+ .num_resources = ARRAY_SIZE(pbr_resources),
+ .resource = pbr_resources,
+};
+
+static struct superhyway_device *sh4202_devices[] __initdata = {
+ &emi_device, &femi_device, &epbr_device, &dmac_device, &pbr_device,
+};
+
+static int sh4202_read_vcr(unsigned long base, struct superhyway_vcr_info *vcr)
+{
+ u32 vcrh, vcrl;
+ u64 tmp;
+
+ /*
+ * XXX: Even though the SH4-202 Evaluation Device documentation
+ * indicates that VCRL is mapped first with VCRH at a + 0x04
+ * offset, the opposite seems to be true.
+ *
+ * Some modules (PBR and ePBR for instance) also appear to have
+ * VCRL/VCRH flipped in the documentation, but on the SH4-202
+ * itself it appears that these are all consistently mapped with
+ * VCRH preceding VCRL.
+ *
+ * Do not trust the documentation, for it is evil.
+ */
+ vcrh = ctrl_inl(base);
+ vcrl = ctrl_inl(base + sizeof(u32));
+
+ tmp = ((u64)vcrh << 32) | vcrl;
+ memcpy(vcr, &tmp, sizeof(u64));
+
+ return 0;
+}
+
+static int sh4202_write_vcr(unsigned long base, struct superhyway_vcr_info vcr)
+{
+ u64 tmp = *(u64 *)&vcr;
+
+ ctrl_outl((tmp >> 32) & 0xffffffff, base);
+ ctrl_outl(tmp & 0xffffffff, base + sizeof(u32));
+
+ return 0;
+}
+
+static struct superhyway_ops sh4202_superhyway_ops = {
+ .read_vcr = sh4202_read_vcr,
+ .write_vcr = sh4202_write_vcr,
+};
+
+struct superhyway_bus superhyway_channels[] = {
+ { &sh4202_superhyway_ops, },
+ { 0, },
+};
+
+int __init superhyway_scan_bus(struct superhyway_bus *bus)
+{
+ return superhyway_add_devices(bus, sh4202_devices,
+ ARRAY_SIZE(sh4202_devices));
+}
+
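sh4202_read_vcr() reads VCRH first and VCRL at the next word, then packs both halves into one 64-bit value and copies it over the VCR info structure. The host-side sketch below mimics that packing; the bitfield layout is invented purely for illustration, while the real struct superhyway_vcr_info comes from <linux/superhyway.h>:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Illustrative stand-in for the VCR info structure; field names/widths
 * are assumptions, not the actual definition. */
struct vcr_info_example {
	uint64_t perr_flags:8, merr_flags:8, mod_vers:16,
		 mod_id:16, bot_mb:8, top_mb:8;
};

int main(void)
{
	uint32_t vcrh = 0x12345678, vcrl = 0x9abcdef0;  /* pretend readings */
	uint64_t tmp = ((uint64_t)vcrh << 32) | vcrl;   /* VCRH is the high half */
	struct vcr_info_example vcr;

	memcpy(&vcr, &tmp, sizeof(uint64_t));
	printf("packed VCR: 0x%016llx\n", (unsigned long long)tmp);
	return 0;
}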
diff --git a/arch/sh/kernel/ptrace.c b/arch/sh/kernel/ptrace.c
index 1fbe5a428e3..1a8be06519e 100644
--- a/arch/sh/kernel/ptrace.c
+++ b/arch/sh/kernel/ptrace.c
@@ -80,48 +80,11 @@ void ptrace_disable(struct task_struct *child)
/* nothing to do.. */
}
-asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
- struct task_struct *child;
struct user * dummy = NULL;
int ret;
- lock_kernel();
- ret = -EPERM;
- if (request == PTRACE_TRACEME) {
- /* are we already being traced? */
- if (current->ptrace & PT_PTRACED)
- goto out;
- ret = security_ptrace(current->parent, current);
- if (ret)
- goto out;
- /* set the ptrace bit in the process flags. */
- current->ptrace |= PT_PTRACED;
- ret = 0;
- goto out;
- }
- ret = -ESRCH;
- read_lock(&tasklist_lock);
- child = find_task_by_pid(pid);
- if (child)
- get_task_struct(child);
- read_unlock(&tasklist_lock);
- if (!child)
- goto out;
-
- ret = -EPERM;
- if (pid == 1) /* you may not mess with init */
- goto out_tsk;
-
- if (request == PTRACE_ATTACH) {
- ret = ptrace_attach(child);
- goto out_tsk;
- }
-
- ret = ptrace_check_attach(child, request == PTRACE_KILL);
- if (ret < 0)
- goto out_tsk;
-
switch (request) {
/* when I and D space are separate, these will need to be fixed. */
case PTRACE_PEEKTEXT: /* read word at location addr. */
@@ -289,10 +252,7 @@ asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
ret = ptrace_request(child, request, addr, data);
break;
}
-out_tsk:
- put_task_struct(child);
-out:
- unlock_kernel();
+
return ret;
}
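The sh (and, further down, sh64) ptrace conversion drops the open-coded TRACEME handling, child lookup and attach checks because they now happen once in a common sys_ptrace() before arch_ptrace() is called with an already validated child. The following is a simplified reconstruction of that split, pieced together from the code removed in this hunk rather than from the actual generic implementation:

#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/sched.h>
#include <linux/security.h>

long arch_ptrace(struct task_struct *child, long request, long addr, long data);

/* Simplified sketch of a common entry point; error paths condensed. */
long example_sys_ptrace(long request, long pid, long addr, long data)
{
	struct task_struct *child;
	long ret;

	if (request == PTRACE_TRACEME) {
		if (current->ptrace & PT_PTRACED)
			return -EPERM;
		ret = security_ptrace(current->parent, current);
		if (ret)
			return ret;
		current->ptrace |= PT_PTRACED;
		return 0;
	}

	read_lock(&tasklist_lock);
	child = find_task_by_pid(pid);
	if (child)
		get_task_struct(child);
	read_unlock(&tasklist_lock);
	if (!child)
		return -ESRCH;

	if (pid == 1) {                         /* you may not mess with init */
		ret = -EPERM;
	} else if (request == PTRACE_ATTACH) {
		ret = ptrace_attach(child);
	} else {
		ret = ptrace_check_attach(child, request == PTRACE_KILL);
		if (ret == 0)
			ret = arch_ptrace(child, request, addr, data);
	}

	put_task_struct(child);
	return ret;
}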
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index 25b9d9ebe85..036050b377c 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -83,9 +83,9 @@ static struct sh_machine_vector* __init get_mv_byname(const char* name);
/* ... */
#define COMMAND_LINE ((char *) (PARAM+0x100))
-#define RAMDISK_IMAGE_START_MASK 0x07FF
+#define RAMDISK_IMAGE_START_MASK 0x07FF
#define RAMDISK_PROMPT_FLAG 0x8000
-#define RAMDISK_LOAD_FLAG 0x4000
+#define RAMDISK_LOAD_FLAG 0x4000
static char command_line[COMMAND_LINE_SIZE] = { 0, };
@@ -284,18 +284,6 @@ void __init setup_arch(char **cmdline_p)
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
#define PFN_PHYS(x) ((x) << PAGE_SHIFT)
-#ifdef CONFIG_DISCONTIGMEM
- NODE_DATA(0)->bdata = &discontig_node_bdata[0];
- NODE_DATA(1)->bdata = &discontig_node_bdata[1];
-
- bootmap_size = init_bootmem_node(NODE_DATA(1),
- PFN_UP(__MEMORY_START_2ND),
- PFN_UP(__MEMORY_START_2ND),
- PFN_DOWN(__MEMORY_START_2ND+__MEMORY_SIZE_2ND));
- free_bootmem_node(NODE_DATA(1), __MEMORY_START_2ND, __MEMORY_SIZE_2ND);
- reserve_bootmem_node(NODE_DATA(1), __MEMORY_START_2ND, bootmap_size);
-#endif
-
/*
* Find the highest page frame number we have available
*/
@@ -306,10 +294,10 @@ void __init setup_arch(char **cmdline_p)
*/
max_low_pfn = max_pfn;
- /*
+ /*
* Partially used pages are not usable - thus
* we are rounding upwards:
- */
+ */
start_pfn = PFN_UP(__pa(_end));
/*
@@ -360,12 +348,12 @@ void __init setup_arch(char **cmdline_p)
reserve_bootmem_node(NODE_DATA(0), __MEMORY_START, PAGE_SIZE);
#ifdef CONFIG_BLK_DEV_INITRD
- ROOT_DEV = MKDEV(RAMDISK_MAJOR, 0);
- if (&__rd_start != &__rd_end) {
+ ROOT_DEV = MKDEV(RAMDISK_MAJOR, 0);
+ if (&__rd_start != &__rd_end) {
LOADER_TYPE = 1;
INITRD_START = PHYSADDR((unsigned long)&__rd_start) - __MEMORY_START;
INITRD_SIZE = (unsigned long)&__rd_end - (unsigned long)&__rd_start;
- }
+ }
if (LOADER_TYPE && INITRD_START) {
if (INITRD_START + INITRD_SIZE <= (max_low_pfn << PAGE_SHIFT)) {
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 4e9c854845a..e342565f75f 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -51,11 +51,6 @@ unsigned long mmu_context_cache = NO_CONTEXT;
#define MAX_LOW_PFN (NODE_DATA(0)->bdata->node_low_pfn)
#endif
-#ifdef CONFIG_DISCONTIGMEM
-pg_data_t discontig_page_data[MAX_NUMNODES];
-bootmem_data_t discontig_node_bdata[MAX_NUMNODES];
-#endif
-
void (*copy_page)(void *from, void *to);
void (*clear_page)(void *to);
@@ -216,15 +211,6 @@ void __init paging_init(void)
#endif
NODE_DATA(0)->node_mem_map = NULL;
free_area_init_node(0, NODE_DATA(0), zones_size, __MEMORY_START >> PAGE_SHIFT, 0);
-
-#ifdef CONFIG_DISCONTIGMEM
- /*
- * And for discontig, do some more fixups on the zone sizes..
- */
- zones_size[ZONE_DMA] = __MEMORY_SIZE_2ND >> PAGE_SHIFT;
- zones_size[ZONE_NORMAL] = 0;
- free_area_init_node(1, NODE_DATA(1), zones_size, __MEMORY_START_2ND >> PAGE_SHIFT, 0);
-#endif
}
void __init mem_init(void)
@@ -248,7 +234,7 @@ void __init mem_init(void)
memset(empty_zero_page, 0, PAGE_SIZE);
__flush_wback_region(empty_zero_page, PAGE_SIZE);
- /*
+ /*
* Setup wrappers for copy/clear_page(), these will get overridden
* later in the boot process if a better method is available.
*/
@@ -257,9 +243,6 @@ void __init mem_init(void)
/* this will put all low memory onto the freelists */
totalram_pages += free_all_bootmem_node(NODE_DATA(0));
-#ifdef CONFIG_DISCONTIGMEM
- totalram_pages += free_all_bootmem_node(NODE_DATA(1));
-#endif
reservedpages = 0;
for (tmp = 0; tmp < num_physpages; tmp++)
/*
@@ -286,7 +269,7 @@ void __init mem_init(void)
void free_initmem(void)
{
unsigned long addr;
-
+
addr = (unsigned long)(&__init_begin);
for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
ClearPageReserved(virt_to_page(addr));
diff --git a/arch/sh/mm/tlb-sh3.c b/arch/sh/mm/tlb-sh3.c
index 7a0d5c10bf2..46b09e26e08 100644
--- a/arch/sh/mm/tlb-sh3.c
+++ b/arch/sh/mm/tlb-sh3.c
@@ -40,12 +40,17 @@ void update_mmu_cache(struct vm_area_struct * vma,
return;
#if defined(CONFIG_SH7705_CACHE_32KB)
- struct page *page;
- page = pte_page(pte);
- if (VALID_PAGE(page) && !test_bit(PG_mapped, &page->flags)) {
- unsigned long phys = pte_val(pte) & PTE_PHYS_MASK;
- __flush_wback_region((void *)P1SEGADDR(phys), PAGE_SIZE);
- __set_bit(PG_mapped, &page->flags);
+ {
+ struct page *page = pte_page(pte);
+ unsigned long pfn = pte_pfn(pte);
+
+ if (pfn_valid(pfn) && !test_bit(PG_mapped, &page->flags)) {
+ unsigned long phys = pte_val(pte) & PTE_PHYS_MASK;
+
+ __flush_wback_region((void *)P1SEGADDR(phys),
+ PAGE_SIZE);
+ __set_bit(PG_mapped, &page->flags);
+ }
}
#endif
@@ -80,7 +85,7 @@ void __flush_tlb_page(unsigned long asid, unsigned long page)
*/
addr = MMU_TLB_ADDRESS_ARRAY | (page & 0x1F000);
data = (page & 0xfffe0000) | asid; /* VALID bit is off */
-
+
if ((cpu_data->flags & CPU_HAS_MMU_PAGE_ASSOC)) {
addr |= MMU_PAGE_ASSOC_BIT;
ways = 1; /* we already know the way .. */
diff --git a/arch/sh/ramdisk/Makefile b/arch/sh/ramdisk/Makefile
deleted file mode 100644
index 99e1c68673c..00000000000
--- a/arch/sh/ramdisk/Makefile
+++ /dev/null
@@ -1,20 +0,0 @@
-#
-# Makefile for a ramdisk image
-#
-
-obj-y += ramdisk.o
-
-
-O_FORMAT = $(shell $(OBJDUMP) -i | head -n 2 | grep elf32)
-img := $(subst ",,$(CONFIG_EMBEDDED_RAMDISK_IMAGE))
-# add $(src) when $(img) is relative
-img := $(subst $(src)//,/,$(src)/$(img))
-
-quiet_cmd_ramdisk = LD $@
-define cmd_ramdisk
- $(LD) -T $(srctree)/$(src)/ld.script -b binary --oformat $(O_FORMAT) \
- -o $@ $(img)
-endef
-
-$(obj)/ramdisk.o: $(img) $(srctree)/$(src)/ld.script
- $(call cmd,ramdisk)
diff --git a/arch/sh/ramdisk/ld.script b/arch/sh/ramdisk/ld.script
deleted file mode 100644
index 94beee248c0..00000000000
--- a/arch/sh/ramdisk/ld.script
+++ /dev/null
@@ -1,9 +0,0 @@
-OUTPUT_ARCH(sh)
-SECTIONS
-{
- .initrd :
- {
- *(.data)
- }
-}
-
diff --git a/arch/sh64/kernel/ptrace.c b/arch/sh64/kernel/ptrace.c
index 71f2eec00b9..cd22e947131 100644
--- a/arch/sh64/kernel/ptrace.c
+++ b/arch/sh64/kernel/ptrace.c
@@ -28,6 +28,7 @@
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/signal.h>
+#include <linux/syscalls.h>
#include <asm/io.h>
#include <asm/uaccess.h>
@@ -121,61 +122,11 @@ put_fpu_long(struct task_struct *task, unsigned long addr, unsigned long data)
return 0;
}
-asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
+
+long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
- struct task_struct *child;
- extern void poke_real_address_q(unsigned long long addr, unsigned long long data);
-#define WPC_DBRMODE 0x0d104008
- static int first_call = 1;
int ret;
- lock_kernel();
-
- if (first_call) {
- /* Set WPC.DBRMODE to 0. This makes all debug events get
- * delivered through RESVEC, i.e. into the handlers in entry.S.
- * (If the kernel was downloaded using a remote gdb, WPC.DBRMODE
- * would normally be left set to 1, which makes debug events get
- * delivered through DBRVEC, i.e. into the remote gdb's
- * handlers. This prevents ptrace getting them, and confuses
- * the remote gdb.) */
- printk("DBRMODE set to 0 to permit native debugging\n");
- poke_real_address_q(WPC_DBRMODE, 0);
- first_call = 0;
- }
-
- ret = -EPERM;
- if (request == PTRACE_TRACEME) {
- /* are we already being traced? */
- if (current->ptrace & PT_PTRACED)
- goto out;
- /* set the ptrace bit in the process flags. */
- current->ptrace |= PT_PTRACED;
- ret = 0;
- goto out;
- }
- ret = -ESRCH;
- read_lock(&tasklist_lock);
- child = find_task_by_pid(pid);
- if (child)
- get_task_struct(child);
- read_unlock(&tasklist_lock);
- if (!child)
- goto out;
-
- ret = -EPERM;
- if (pid == 1) /* you may not mess with init */
- goto out_tsk;
-
- if (request == PTRACE_ATTACH) {
- ret = ptrace_attach(child);
- goto out_tsk;
- }
-
- ret = ptrace_check_attach(child, request == PTRACE_KILL);
- if (ret < 0)
- goto out_tsk;
-
switch (request) {
/* when I and D space are separate, these will need to be fixed. */
case PTRACE_PEEKTEXT: /* read word at location addr. */
@@ -313,13 +264,33 @@ asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
ret = ptrace_request(child, request, addr, data);
break;
}
-out_tsk:
- put_task_struct(child);
-out:
- unlock_kernel();
return ret;
}
+asmlinkage int sh64_ptrace(long request, long pid, long addr, long data)
+{
+ extern void poke_real_address_q(unsigned long long addr, unsigned long long data);
+#define WPC_DBRMODE 0x0d104008
+ static int first_call = 1;
+
+ lock_kernel();
+ if (first_call) {
+ /* Set WPC.DBRMODE to 0. This makes all debug events get
+ * delivered through RESVEC, i.e. into the handlers in entry.S.
+ * (If the kernel was downloaded using a remote gdb, WPC.DBRMODE
+ * would normally be left set to 1, which makes debug events get
+ * delivered through DBRVEC, i.e. into the remote gdb's
+ * handlers. This prevents ptrace getting them, and confuses
+ * the remote gdb.) */
+ printk("DBRMODE set to 0 to permit native debugging\n");
+ poke_real_address_q(WPC_DBRMODE, 0);
+ first_call = 0;
+ }
+ unlock_kernel();
+
+ return sys_ptrace(request, pid, addr, data);
+}
+
asmlinkage void syscall_trace(void)
{
struct task_struct *tsk = current;
diff --git a/arch/sh64/kernel/syscalls.S b/arch/sh64/kernel/syscalls.S
index a3d037805f1..c0079d54c85 100644
--- a/arch/sh64/kernel/syscalls.S
+++ b/arch/sh64/kernel/syscalls.S
@@ -46,7 +46,7 @@ sys_call_table:
.long sys_setuid16
.long sys_getuid16
.long sys_stime /* 25 */
- .long sys_ptrace
+ .long sh64_ptrace
.long sys_alarm
.long sys_fstat
.long sys_pause
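The sh and sh64 conversions above drop the ptrace boilerplate that each architecture used to duplicate: the architecture now only provides arch_ptrace(), while the common entry point handles PTRACE_TRACEME, task lookup, attach, and the attach check. A sketch of that shared flow, reconstructed from the lines removed above (the function name here is a placeholder for the generic entry point, not the actual implementation):

/* Rough shape of the boilerplate removed above, which now lives in the
 * shared kernel entry point; each arch only supplies arch_ptrace(). */
long sys_ptrace_skeleton(long request, long pid, long addr, long data)
{
        struct task_struct *child;
        long ret = -EPERM;

        if (request == PTRACE_TRACEME) {
                if (current->ptrace & PT_PTRACED)       /* already traced */
                        return -EPERM;
                current->ptrace |= PT_PTRACED;
                return 0;
        }

        read_lock(&tasklist_lock);
        child = find_task_by_pid(pid);
        if (child)
                get_task_struct(child);
        read_unlock(&tasklist_lock);
        if (!child)
                return -ESRCH;

        if (pid == 1)                           /* you may not mess with init */
                goto out;

        if (request == PTRACE_ATTACH) {
                ret = ptrace_attach(child);
                goto out;
        }

        ret = ptrace_check_attach(child, request == PTRACE_KILL);
        if (ret == 0)
                ret = arch_ptrace(child, request, addr, data);
out:
        put_task_struct(child);
        return ret;
}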
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 6537445dac0..3cfb8be3ff6 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -201,6 +201,14 @@ config SUN_OPENPROMFS
Only choose N if you know in advance that you will not need to modify
OpenPROM settings on the running system.
+config SPARC_LED
+ tristate "Sun4m LED driver"
+ help
+ This driver toggles the front-panel LED on sun4m systems
+ in a user-specifiable manner. Its state can be probed
+ by reading /proc/led and its blinking mode can be changed
+ via writes to /proc/led.
+
source "fs/Kconfig.binfmt"
config SUNOS_EMUL
diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
index 3d22ba2af01..1b83e21841b 100644
--- a/arch/sparc/kernel/Makefile
+++ b/arch/sparc/kernel/Makefile
@@ -21,6 +21,7 @@ obj-$(CONFIG_SUN_AUXIO) += auxio.o
obj-$(CONFIG_PCI) += ebus.o
obj-$(CONFIG_SUN_PM) += apc.o pmc.o
obj-$(CONFIG_MODULES) += module.o sparc_ksyms.o
+obj-$(CONFIG_SPARC_LED) += led.o
ifdef CONFIG_SUNOS_EMUL
obj-y += sys_sunos.o sunos_ioctl.o
diff --git a/arch/sparc/kernel/led.c b/arch/sparc/kernel/led.c
new file mode 100644
index 00000000000..2a3afca453c
--- /dev/null
+++ b/arch/sparc/kernel/led.c
@@ -0,0 +1,139 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/proc_fs.h>
+#include <linux/string.h>
+
+#include <asm/auxio.h>
+
+#define LED_MAX_LENGTH 8 /* maximum chars written to proc file */
+
+static inline void led_toggle(void)
+{
+ unsigned char val = get_auxio();
+ unsigned char on, off;
+
+ if (val & AUXIO_LED) {
+ on = 0;
+ off = AUXIO_LED;
+ } else {
+ on = AUXIO_LED;
+ off = 0;
+ }
+
+ set_auxio(on, off);
+}
+
+static struct timer_list led_blink_timer;
+
+static void led_blink(unsigned long timeout)
+{
+ led_toggle();
+
+ /* reschedule */
+ if (!timeout) { /* blink according to load */
+ led_blink_timer.expires = jiffies +
+ ((1 + (avenrun[0] >> FSHIFT)) * HZ);
+ led_blink_timer.data = 0;
+ } else { /* blink at user specified interval */
+ led_blink_timer.expires = jiffies + (timeout * HZ);
+ led_blink_timer.data = timeout;
+ }
+ add_timer(&led_blink_timer);
+}
+
+static int led_read_proc(char *buf, char **start, off_t offset, int count,
+ int *eof, void *data)
+{
+ int len = 0;
+
+ if (get_auxio() & AUXIO_LED)
+ len = sprintf(buf, "on\n");
+ else
+ len = sprintf(buf, "off\n");
+
+ return len;
+}
+
+static int led_write_proc(struct file *file, const char *buffer,
+ unsigned long count, void *data)
+{
+ char *buf = NULL;
+
+ if (count > LED_MAX_LENGTH)
+ count = LED_MAX_LENGTH;
+
+ buf = kmalloc(sizeof(char) * (count + 1), GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ if (copy_from_user(buf, buffer, count)) {
+ kfree(buf);
+ return -EFAULT;
+ }
+
+ buf[count] = '\0';
+
+ /* work around \n when echo'ing into proc */
+ if (buf[count - 1] == '\n')
+ buf[count - 1] = '\0';
+
+ /* before we change anything we want to stop any running timers,
+ * otherwise calls such as on will have no persistent effect
+ */
+ del_timer_sync(&led_blink_timer);
+
+ if (!strcmp(buf, "on")) {
+ auxio_set_led(AUXIO_LED_ON);
+ } else if (!strcmp(buf, "toggle")) {
+ led_toggle();
+ } else if ((*buf > '0') && (*buf <= '9')) {
+ led_blink(simple_strtoul(buf, NULL, 10));
+ } else if (!strcmp(buf, "load")) {
+ led_blink(0);
+ } else {
+ auxio_set_led(AUXIO_LED_OFF);
+ }
+
+ kfree(buf);
+
+ return count;
+}
+
+static struct proc_dir_entry *led;
+
+#define LED_VERSION "0.1"
+
+static int __init led_init(void)
+{
+ init_timer(&led_blink_timer);
+ led_blink_timer.function = led_blink;
+
+ led = create_proc_entry("led", 0, NULL);
+ if (!led)
+ return -ENOMEM;
+
+ led->read_proc = led_read_proc; /* reader function */
+ led->write_proc = led_write_proc; /* writer function */
+ led->owner = THIS_MODULE;
+
+ printk(KERN_INFO
+ "led: version %s, Lars Kotthoff <metalhead@metalhead.ws>\n",
+ LED_VERSION);
+
+ return 0;
+}
+
+static void __exit led_exit(void)
+{
+ remove_proc_entry("led", NULL);
+ del_timer_sync(&led_blink_timer);
+}
+
+module_init(led_init);
+module_exit(led_exit);
+
+MODULE_AUTHOR("Lars Kotthoff <metalhead@metalhead.ws>");
+MODULE_DESCRIPTION("Provides control of the front LED on SPARC systems.");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(LED_VERSION);
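The led.c write handler above accepts "on", "toggle", "load", a decimal blink interval in seconds, or anything else for "off"; reads report the current state. A minimal userspace sketch of that /proc/led interface (illustrative only; assumes the driver above is loaded so /proc/led exists):

/* Illustrative userspace use of the /proc/led interface implemented above. */
#include <stdio.h>

static int led_write(const char *cmd)
{
        FILE *f = fopen("/proc/led", "w");

        if (!f)
                return -1;
        fputs(cmd, f);          /* "on", "toggle", "load", "5", anything else = off */
        return fclose(f);
}

int main(void)
{
        char state[8] = "";
        FILE *f;

        led_write("on");                /* force the LED on */
        led_write("2");                 /* then blink it every 2 seconds */

        f = fopen("/proc/led", "r");    /* read back "on" or "off" */
        if (f) {
                if (fgets(state, sizeof(state), f))
                        printf("led is %s", state);
                fclose(f);
        }
        return 0;
}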
diff --git a/arch/sparc/kernel/sunos_ioctl.c b/arch/sparc/kernel/sunos_ioctl.c
index df1c0b31a93..a6ba3d26222 100644
--- a/arch/sparc/kernel/sunos_ioctl.c
+++ b/arch/sparc/kernel/sunos_ioctl.c
@@ -23,7 +23,6 @@
#include <linux/smp_lock.h>
#include <linux/syscalls.h>
#include <linux/file.h>
-#include <asm/kbio.h>
#if 0
extern char sunkbd_type;
diff --git a/arch/sparc64/Kconfig b/arch/sparc64/Kconfig
index 1e9d8638a28..3fded69b192 100644
--- a/arch/sparc64/Kconfig
+++ b/arch/sparc64/Kconfig
@@ -377,8 +377,21 @@ source "drivers/fc4/Kconfig"
source "fs/Kconfig"
+menu "Instrumentation Support"
+ depends on EXPERIMENTAL
+
source "arch/sparc64/oprofile/Kconfig"
+config KPROBES
+ bool "Kprobes (EXPERIMENTAL)"
+ help
+ Kprobes allows you to trap at almost any kernel address and
+ execute a callback function. register_kprobe() establishes
+ a probepoint and specifies the callback. Kprobes is useful
+ for kernel debugging, non-intrusive instrumentation and testing.
+ If in doubt, say "N".
+endmenu
+
source "arch/sparc64/Kconfig.debug"
source "security/Kconfig"
diff --git a/arch/sparc64/Kconfig.debug b/arch/sparc64/Kconfig.debug
index fa06ea04837..3e31be494e5 100644
--- a/arch/sparc64/Kconfig.debug
+++ b/arch/sparc64/Kconfig.debug
@@ -11,16 +11,6 @@ config DEBUG_STACK_USAGE
This option will slow down process creation somewhat.
-config KPROBES
- bool "Kprobes"
- depends on DEBUG_KERNEL
- help
- Kprobes allows you to trap at almost any kernel address and
- execute a callback function. register_kprobe() establishes
- a probepoint and specifies the callback. Kprobes is useful
- for kernel debugging, non-intrusive instrumentation and testing.
- If in doubt, say "N".
-
config DEBUG_DCFLUSH
bool "D-cache flush debugging"
depends on DEBUG_KERNEL
diff --git a/arch/sparc64/kernel/ioctl32.c b/arch/sparc64/kernel/ioctl32.c
index e6a00325075..92e26304de9 100644
--- a/arch/sparc64/kernel/ioctl32.c
+++ b/arch/sparc64/kernel/ioctl32.c
@@ -11,33 +11,14 @@
#define INCLUDES
#include "compat_ioctl.c"
-#include <linux/ncp_fs.h>
#include <linux/syscalls.h>
#include <asm/fbio.h>
-#include <asm/kbio.h>
-#include <asm/vuid_event.h>
-#include <asm/envctrl.h>
-#include <asm/display7seg.h>
-#include <asm/openpromio.h>
-#include <asm/audioio.h>
-#include <asm/watchdog.h>
/* Use this to get at 32-bit user passed pointers.
* See sys_sparc32.c for description about it.
*/
#define A(__x) compat_ptr(__x)
-static __inline__ void *alloc_user_space(long len)
-{
- struct pt_regs *regs = current_thread_info()->kregs;
- unsigned long usp = regs->u_regs[UREG_I6];
-
- if (!(test_thread_flag(TIF_32BIT)))
- usp += STACK_BIAS;
-
- return (void *) (usp - len);
-}
-
#define CODE
#include "compat_ioctl.c"
@@ -111,357 +92,6 @@ static int fbiogscursor(unsigned int fd, unsigned int cmd, unsigned long arg)
return sys_ioctl (fd, FBIOSCURSOR, (unsigned long)p);
}
-#if defined(CONFIG_DRM) || defined(CONFIG_DRM_MODULE)
-/* This really belongs in include/linux/drm.h -DaveM */
-#include "../../../drivers/char/drm/drm.h"
-
-typedef struct drm32_version {
- int version_major; /* Major version */
- int version_minor; /* Minor version */
- int version_patchlevel;/* Patch level */
- int name_len; /* Length of name buffer */
- u32 name; /* Name of driver */
- int date_len; /* Length of date buffer */
- u32 date; /* User-space buffer to hold date */
- int desc_len; /* Length of desc buffer */
- u32 desc; /* User-space buffer to hold desc */
-} drm32_version_t;
-#define DRM32_IOCTL_VERSION DRM_IOWR(0x00, drm32_version_t)
-
-static int drm32_version(unsigned int fd, unsigned int cmd, unsigned long arg)
-{
- drm32_version_t __user *uversion = (drm32_version_t __user *)arg;
- drm_version_t __user *p = compat_alloc_user_space(sizeof(*p));
- compat_uptr_t addr;
- int n;
- int ret;
-
- if (clear_user(p, 3 * sizeof(int)) ||
- get_user(n, &uversion->name_len) ||
- put_user(n, &p->name_len) ||
- get_user(addr, &uversion->name) ||
- put_user(compat_ptr(addr), &p->name) ||
- get_user(n, &uversion->date_len) ||
- put_user(n, &p->date_len) ||
- get_user(addr, &uversion->date) ||
- put_user(compat_ptr(addr), &p->date) ||
- get_user(n, &uversion->desc_len) ||
- put_user(n, &p->desc_len) ||
- get_user(addr, &uversion->desc) ||
- put_user(compat_ptr(addr), &p->desc))
- return -EFAULT;
-
- ret = sys_ioctl(fd, DRM_IOCTL_VERSION, (unsigned long)p);
- if (ret)
- return ret;
-
- if (copy_in_user(uversion, p, 3 * sizeof(int)) ||
- get_user(n, &p->name_len) ||
- put_user(n, &uversion->name_len) ||
- get_user(n, &p->date_len) ||
- put_user(n, &uversion->date_len) ||
- get_user(n, &p->desc_len) ||
- put_user(n, &uversion->desc_len))
- return -EFAULT;
-
- return 0;
-}
-
-typedef struct drm32_unique {
- int unique_len; /* Length of unique */
- u32 unique; /* Unique name for driver instantiation */
-} drm32_unique_t;
-#define DRM32_IOCTL_GET_UNIQUE DRM_IOWR(0x01, drm32_unique_t)
-#define DRM32_IOCTL_SET_UNIQUE DRM_IOW( 0x10, drm32_unique_t)
-
-static int drm32_getsetunique(unsigned int fd, unsigned int cmd, unsigned long arg)
-{
- drm32_unique_t __user *uarg = (drm32_unique_t __user *)arg;
- drm_unique_t __user *p = compat_alloc_user_space(sizeof(*p));
- compat_uptr_t addr;
- int n;
- int ret;
-
- if (get_user(n, &uarg->unique_len) ||
- put_user(n, &p->unique_len) ||
- get_user(addr, &uarg->unique) ||
- put_user(compat_ptr(addr), &p->unique))
- return -EFAULT;
-
- if (cmd == DRM32_IOCTL_GET_UNIQUE)
- ret = sys_ioctl (fd, DRM_IOCTL_GET_UNIQUE, (unsigned long)p);
- else
- ret = sys_ioctl (fd, DRM_IOCTL_SET_UNIQUE, (unsigned long)p);
-
- if (ret)
- return ret;
-
- if (get_user(n, &p->unique_len) || put_user(n, &uarg->unique_len))
- return -EFAULT;
-
- return 0;
-}
-
-typedef struct drm32_map {
- u32 offset; /* Requested physical address (0 for SAREA)*/
- u32 size; /* Requested physical size (bytes) */
- drm_map_type_t type; /* Type of memory to map */
- drm_map_flags_t flags; /* Flags */
- u32 handle; /* User-space: "Handle" to pass to mmap */
- /* Kernel-space: kernel-virtual address */
- int mtrr; /* MTRR slot used */
- /* Private data */
-} drm32_map_t;
-#define DRM32_IOCTL_ADD_MAP DRM_IOWR(0x15, drm32_map_t)
-
-static int drm32_addmap(unsigned int fd, unsigned int cmd, unsigned long arg)
-{
- drm32_map_t __user *uarg = (drm32_map_t __user *) arg;
- drm_map_t karg;
- mm_segment_t old_fs;
- u32 tmp;
- int ret;
-
- ret = get_user(karg.offset, &uarg->offset);
- ret |= get_user(karg.size, &uarg->size);
- ret |= get_user(karg.type, &uarg->type);
- ret |= get_user(karg.flags, &uarg->flags);
- ret |= get_user(tmp, &uarg->handle);
- ret |= get_user(karg.mtrr, &uarg->mtrr);
- if (ret)
- return -EFAULT;
-
- karg.handle = (void *) (unsigned long) tmp;
-
- old_fs = get_fs();
- set_fs(KERNEL_DS);
- ret = sys_ioctl(fd, DRM_IOCTL_ADD_MAP, (unsigned long) &karg);
- set_fs(old_fs);
-
- if (!ret) {
- ret = put_user(karg.offset, &uarg->offset);
- ret |= put_user(karg.size, &uarg->size);
- ret |= put_user(karg.type, &uarg->type);
- ret |= put_user(karg.flags, &uarg->flags);
- tmp = (u32) (long)karg.handle;
- ret |= put_user(tmp, &uarg->handle);
- ret |= put_user(karg.mtrr, &uarg->mtrr);
- if (ret)
- ret = -EFAULT;
- }
-
- return ret;
-}
-
-typedef struct drm32_buf_info {
- int count; /* Entries in list */
- u32 list; /* (drm_buf_desc_t *) */
-} drm32_buf_info_t;
-#define DRM32_IOCTL_INFO_BUFS DRM_IOWR(0x18, drm32_buf_info_t)
-
-static int drm32_info_bufs(unsigned int fd, unsigned int cmd, unsigned long arg)
-{
- drm32_buf_info_t __user *uarg = (drm32_buf_info_t __user *)arg;
- drm_buf_info_t __user *p = compat_alloc_user_space(sizeof(*p));
- compat_uptr_t addr;
- int n;
- int ret;
-
- if (get_user(n, &uarg->count) || put_user(n, &p->count) ||
- get_user(addr, &uarg->list) || put_user(compat_ptr(addr), &p->list))
- return -EFAULT;
-
- ret = sys_ioctl(fd, DRM_IOCTL_INFO_BUFS, (unsigned long)p);
- if (ret)
- return ret;
-
- if (get_user(n, &p->count) || put_user(n, &uarg->count))
- return -EFAULT;
-
- return 0;
-}
-
-typedef struct drm32_buf_free {
- int count;
- u32 list; /* (int *) */
-} drm32_buf_free_t;
-#define DRM32_IOCTL_FREE_BUFS DRM_IOW( 0x1a, drm32_buf_free_t)
-
-static int drm32_free_bufs(unsigned int fd, unsigned int cmd, unsigned long arg)
-{
- drm32_buf_free_t __user *uarg = (drm32_buf_free_t __user *)arg;
- drm_buf_free_t __user *p = compat_alloc_user_space(sizeof(*p));
- compat_uptr_t addr;
- int n;
-
- if (get_user(n, &uarg->count) || put_user(n, &p->count) ||
- get_user(addr, &uarg->list) || put_user(compat_ptr(addr), &p->list))
- return -EFAULT;
-
- return sys_ioctl(fd, DRM_IOCTL_FREE_BUFS, (unsigned long)p);
-}
-
-typedef struct drm32_buf_pub {
- int idx; /* Index into master buflist */
- int total; /* Buffer size */
- int used; /* Amount of buffer in use (for DMA) */
- u32 address; /* Address of buffer (void *) */
-} drm32_buf_pub_t;
-
-typedef struct drm32_buf_map {
- int count; /* Length of buflist */
- u32 virtual; /* Mmaped area in user-virtual (void *) */
- u32 list; /* Buffer information (drm_buf_pub_t *) */
-} drm32_buf_map_t;
-#define DRM32_IOCTL_MAP_BUFS DRM_IOWR(0x19, drm32_buf_map_t)
-
-static int drm32_map_bufs(unsigned int fd, unsigned int cmd, unsigned long arg)
-{
- drm32_buf_map_t __user *uarg = (drm32_buf_map_t __user *)arg;
- drm32_buf_pub_t __user *ulist;
- drm_buf_map_t __user *arg64;
- drm_buf_pub_t __user *list;
- int orig_count, ret, i;
- int n;
- compat_uptr_t addr;
-
- if (get_user(orig_count, &uarg->count))
- return -EFAULT;
-
- arg64 = compat_alloc_user_space(sizeof(drm_buf_map_t) +
- (size_t)orig_count * sizeof(drm_buf_pub_t));
- list = (void __user *)(arg64 + 1);
-
- if (put_user(orig_count, &arg64->count) ||
- put_user(list, &arg64->list) ||
- get_user(addr, &uarg->virtual) ||
- put_user(compat_ptr(addr), &arg64->virtual) ||
- get_user(addr, &uarg->list))
- return -EFAULT;
-
- ulist = compat_ptr(addr);
-
- for (i = 0; i < orig_count; i++) {
- if (get_user(n, &ulist[i].idx) ||
- put_user(n, &list[i].idx) ||
- get_user(n, &ulist[i].total) ||
- put_user(n, &list[i].total) ||
- get_user(n, &ulist[i].used) ||
- put_user(n, &list[i].used) ||
- get_user(addr, &ulist[i].address) ||
- put_user(compat_ptr(addr), &list[i].address))
- return -EFAULT;
- }
-
- ret = sys_ioctl(fd, DRM_IOCTL_MAP_BUFS, (unsigned long) arg64);
- if (ret)
- return ret;
-
- for (i = 0; i < orig_count; i++) {
- void __user *p;
- if (get_user(n, &list[i].idx) ||
- put_user(n, &ulist[i].idx) ||
- get_user(n, &list[i].total) ||
- put_user(n, &ulist[i].total) ||
- get_user(n, &list[i].used) ||
- put_user(n, &ulist[i].used) ||
- get_user(p, &list[i].address) ||
- put_user((unsigned long)p, &ulist[i].address))
- return -EFAULT;
- }
-
- if (get_user(n, &arg64->count) || put_user(n, &uarg->count))
- return -EFAULT;
-
- return 0;
-}
-
-typedef struct drm32_dma {
- /* Indices here refer to the offset into
- buflist in drm_buf_get_t. */
- int context; /* Context handle */
- int send_count; /* Number of buffers to send */
- u32 send_indices; /* List of handles to buffers (int *) */
- u32 send_sizes; /* Lengths of data to send (int *) */
- drm_dma_flags_t flags; /* Flags */
- int request_count; /* Number of buffers requested */
- int request_size; /* Desired size for buffers */
- u32 request_indices; /* Buffer information (int *) */
- u32 request_sizes; /* (int *) */
- int granted_count; /* Number of buffers granted */
-} drm32_dma_t;
-#define DRM32_IOCTL_DMA DRM_IOWR(0x29, drm32_dma_t)
-
-/* RED PEN The DRM layer blindly dereferences the send/request
- * index/size arrays even though they are userland
- * pointers. -DaveM
- */
-static int drm32_dma(unsigned int fd, unsigned int cmd, unsigned long arg)
-{
- drm32_dma_t __user *uarg = (drm32_dma_t __user *) arg;
- drm_dma_t __user *p = compat_alloc_user_space(sizeof(*p));
- compat_uptr_t addr;
- int ret;
-
- if (copy_in_user(p, uarg, 2 * sizeof(int)) ||
- get_user(addr, &uarg->send_indices) ||
- put_user(compat_ptr(addr), &p->send_indices) ||
- get_user(addr, &uarg->send_sizes) ||
- put_user(compat_ptr(addr), &p->send_sizes) ||
- copy_in_user(&p->flags, &uarg->flags, sizeof(drm_dma_flags_t)) ||
- copy_in_user(&p->request_count, &uarg->request_count, sizeof(int))||
- copy_in_user(&p->request_size, &uarg->request_size, sizeof(int)) ||
- get_user(addr, &uarg->request_indices) ||
- put_user(compat_ptr(addr), &p->request_indices) ||
- get_user(addr, &uarg->request_sizes) ||
- put_user(compat_ptr(addr), &p->request_sizes) ||
- copy_in_user(&p->granted_count, &uarg->granted_count, sizeof(int)))
- return -EFAULT;
-
- ret = sys_ioctl(fd, DRM_IOCTL_DMA, (unsigned long)p);
- if (ret)
- return ret;
-
- if (copy_in_user(uarg, p, 2 * sizeof(int)) ||
- copy_in_user(&uarg->flags, &p->flags, sizeof(drm_dma_flags_t)) ||
- copy_in_user(&uarg->request_count, &p->request_count, sizeof(int))||
- copy_in_user(&uarg->request_size, &p->request_size, sizeof(int)) ||
- copy_in_user(&uarg->granted_count, &p->granted_count, sizeof(int)))
- return -EFAULT;
-
- return 0;
-}
-
-typedef struct drm32_ctx_res {
- int count;
- u32 contexts; /* (drm_ctx_t *) */
-} drm32_ctx_res_t;
-#define DRM32_IOCTL_RES_CTX DRM_IOWR(0x26, drm32_ctx_res_t)
-
-static int drm32_res_ctx(unsigned int fd, unsigned int cmd, unsigned long arg)
-{
- drm32_ctx_res_t __user *uarg = (drm32_ctx_res_t __user *) arg;
- drm_ctx_res_t __user *p = compat_alloc_user_space(sizeof(*p));
- compat_uptr_t addr;
- int ret;
-
- if (copy_in_user(p, uarg, sizeof(int)) ||
- get_user(addr, &uarg->contexts) ||
- put_user(compat_ptr(addr), &p->contexts))
- return -EFAULT;
-
- ret = sys_ioctl(fd, DRM_IOCTL_RES_CTX, (unsigned long)p);
- if (ret)
- return ret;
-
- if (copy_in_user(uarg, p, sizeof(int)))
- return -EFAULT;
-
- return 0;
-}
-
-#endif
-
typedef int (* ioctl32_handler_t)(unsigned int, unsigned int, unsigned long, struct file *);
#define COMPATIBLE_IOCTL(cmd) HANDLE_IOCTL((cmd),sys_ioctl)
@@ -485,103 +115,14 @@ COMPATIBLE_IOCTL(FBIOSCURPOS)
COMPATIBLE_IOCTL(FBIOGCURPOS)
COMPATIBLE_IOCTL(FBIOGCURMAX)
/* Little k */
-COMPATIBLE_IOCTL(KIOCTYPE)
-COMPATIBLE_IOCTL(KIOCLAYOUT)
-COMPATIBLE_IOCTL(KIOCGTRANS)
-COMPATIBLE_IOCTL(KIOCTRANS)
-COMPATIBLE_IOCTL(KIOCCMD)
-COMPATIBLE_IOCTL(KIOCSDIRECT)
-COMPATIBLE_IOCTL(KIOCSLED)
-COMPATIBLE_IOCTL(KIOCGLED)
-COMPATIBLE_IOCTL(KIOCSRATE)
-COMPATIBLE_IOCTL(KIOCGRATE)
-COMPATIBLE_IOCTL(VUIDSFORMAT)
-COMPATIBLE_IOCTL(VUIDGFORMAT)
/* Little v, the video4linux ioctls */
COMPATIBLE_IOCTL(_IOR('p', 20, int[7])) /* RTCGET */
COMPATIBLE_IOCTL(_IOW('p', 21, int[7])) /* RTCSET */
-COMPATIBLE_IOCTL(ENVCTRL_RD_WARNING_TEMPERATURE)
-COMPATIBLE_IOCTL(ENVCTRL_RD_SHUTDOWN_TEMPERATURE)
-COMPATIBLE_IOCTL(ENVCTRL_RD_CPU_TEMPERATURE)
-COMPATIBLE_IOCTL(ENVCTRL_RD_FAN_STATUS)
-COMPATIBLE_IOCTL(ENVCTRL_RD_VOLTAGE_STATUS)
-COMPATIBLE_IOCTL(ENVCTRL_RD_SCSI_TEMPERATURE)
-COMPATIBLE_IOCTL(ENVCTRL_RD_ETHERNET_TEMPERATURE)
-COMPATIBLE_IOCTL(ENVCTRL_RD_MTHRBD_TEMPERATURE)
-COMPATIBLE_IOCTL(ENVCTRL_RD_CPU_VOLTAGE)
-COMPATIBLE_IOCTL(ENVCTRL_RD_GLOBALADDRESS)
-/* COMPATIBLE_IOCTL(D7SIOCRD) same value as ENVCTRL_RD_VOLTAGE_STATUS */
-COMPATIBLE_IOCTL(D7SIOCWR)
-COMPATIBLE_IOCTL(D7SIOCTM)
-/* OPENPROMIO, SunOS/Solaris only, the NetBSD one's have
- * embedded pointers in the arg which we'd need to clean up...
- */
-COMPATIBLE_IOCTL(OPROMGETOPT)
-COMPATIBLE_IOCTL(OPROMSETOPT)
-COMPATIBLE_IOCTL(OPROMNXTOPT)
-COMPATIBLE_IOCTL(OPROMSETOPT2)
-COMPATIBLE_IOCTL(OPROMNEXT)
-COMPATIBLE_IOCTL(OPROMCHILD)
-COMPATIBLE_IOCTL(OPROMGETPROP)
-COMPATIBLE_IOCTL(OPROMNXTPROP)
-COMPATIBLE_IOCTL(OPROMU2P)
-COMPATIBLE_IOCTL(OPROMGETCONS)
-COMPATIBLE_IOCTL(OPROMGETFBNAME)
-COMPATIBLE_IOCTL(OPROMGETBOOTARGS)
-COMPATIBLE_IOCTL(OPROMSETCUR)
-COMPATIBLE_IOCTL(OPROMPCI2NODE)
-COMPATIBLE_IOCTL(OPROMPATH2NODE)
-/* Big L */
-COMPATIBLE_IOCTL(LOOP_SET_STATUS64)
-COMPATIBLE_IOCTL(LOOP_GET_STATUS64)
-/* Big A */
-COMPATIBLE_IOCTL(AUDIO_GETINFO)
-COMPATIBLE_IOCTL(AUDIO_SETINFO)
-COMPATIBLE_IOCTL(AUDIO_DRAIN)
-COMPATIBLE_IOCTL(AUDIO_GETDEV)
-COMPATIBLE_IOCTL(AUDIO_GETDEV_SUNOS)
-COMPATIBLE_IOCTL(AUDIO_FLUSH)
-COMPATIBLE_IOCTL(AUTOFS_IOC_EXPIRE_MULTI)
-#if defined(CONFIG_DRM) || defined(CONFIG_DRM_MODULE)
-COMPATIBLE_IOCTL(DRM_IOCTL_GET_MAGIC)
-COMPATIBLE_IOCTL(DRM_IOCTL_IRQ_BUSID)
-COMPATIBLE_IOCTL(DRM_IOCTL_AUTH_MAGIC)
-COMPATIBLE_IOCTL(DRM_IOCTL_BLOCK)
-COMPATIBLE_IOCTL(DRM_IOCTL_UNBLOCK)
-COMPATIBLE_IOCTL(DRM_IOCTL_CONTROL)
-COMPATIBLE_IOCTL(DRM_IOCTL_ADD_BUFS)
-COMPATIBLE_IOCTL(DRM_IOCTL_MARK_BUFS)
-COMPATIBLE_IOCTL(DRM_IOCTL_ADD_CTX)
-COMPATIBLE_IOCTL(DRM_IOCTL_RM_CTX)
-COMPATIBLE_IOCTL(DRM_IOCTL_MOD_CTX)
-COMPATIBLE_IOCTL(DRM_IOCTL_GET_CTX)
-COMPATIBLE_IOCTL(DRM_IOCTL_SWITCH_CTX)
-COMPATIBLE_IOCTL(DRM_IOCTL_NEW_CTX)
-COMPATIBLE_IOCTL(DRM_IOCTL_ADD_DRAW)
-COMPATIBLE_IOCTL(DRM_IOCTL_RM_DRAW)
-COMPATIBLE_IOCTL(DRM_IOCTL_LOCK)
-COMPATIBLE_IOCTL(DRM_IOCTL_UNLOCK)
-COMPATIBLE_IOCTL(DRM_IOCTL_FINISH)
-#endif /* DRM */
-COMPATIBLE_IOCTL(WIOCSTART)
-COMPATIBLE_IOCTL(WIOCSTOP)
-COMPATIBLE_IOCTL(WIOCGSTAT)
/* And these ioctls need translation */
/* Note SIOCRTMSG is no longer, so this is safe and
 * the user would have seen just an -EINVAL anyways. */
HANDLE_IOCTL(FBIOPUTCMAP32, fbiogetputcmap)
HANDLE_IOCTL(FBIOGETCMAP32, fbiogetputcmap)
HANDLE_IOCTL(FBIOSCURSOR32, fbiogscursor)
-#if defined(CONFIG_DRM) || defined(CONFIG_DRM_MODULE)
-HANDLE_IOCTL(DRM32_IOCTL_VERSION, drm32_version)
-HANDLE_IOCTL(DRM32_IOCTL_GET_UNIQUE, drm32_getsetunique)
-HANDLE_IOCTL(DRM32_IOCTL_SET_UNIQUE, drm32_getsetunique)
-HANDLE_IOCTL(DRM32_IOCTL_ADD_MAP, drm32_addmap)
-HANDLE_IOCTL(DRM32_IOCTL_INFO_BUFS, drm32_info_bufs)
-HANDLE_IOCTL(DRM32_IOCTL_FREE_BUFS, drm32_free_bufs)
-HANDLE_IOCTL(DRM32_IOCTL_MAP_BUFS, drm32_map_bufs)
-HANDLE_IOCTL(DRM32_IOCTL_DMA, drm32_dma)
-HANDLE_IOCTL(DRM32_IOCTL_RES_CTX, drm32_res_ctx)
-#endif /* DRM */
#if 0
HANDLE_IOCTL(RTC32_IRQP_READ, do_rtc_ioctl)
HANDLE_IOCTL(RTC32_IRQP_SET, do_rtc_ioctl)
diff --git a/arch/sparc64/kernel/kprobes.c b/arch/sparc64/kernel/kprobes.c
index 0d66d07c8c6..96bd09b098f 100644
--- a/arch/sparc64/kernel/kprobes.c
+++ b/arch/sparc64/kernel/kprobes.c
@@ -38,6 +38,9 @@
* - Mark that we are no longer actively in a kprobe.
*/
+DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
+DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
+
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
return 0;
@@ -66,46 +69,39 @@ void __kprobes arch_remove_kprobe(struct kprobe *p)
{
}
-static struct kprobe *current_kprobe;
-static unsigned long current_kprobe_orig_tnpc;
-static unsigned long current_kprobe_orig_tstate_pil;
-static unsigned int kprobe_status;
-static struct kprobe *kprobe_prev;
-static unsigned long kprobe_orig_tnpc_prev;
-static unsigned long kprobe_orig_tstate_pil_prev;
-static unsigned int kprobe_status_prev;
-
-static inline void save_previous_kprobe(void)
+static inline void save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
- kprobe_status_prev = kprobe_status;
- kprobe_orig_tnpc_prev = current_kprobe_orig_tnpc;
- kprobe_orig_tstate_pil_prev = current_kprobe_orig_tstate_pil;
- kprobe_prev = current_kprobe;
+ kcb->prev_kprobe.kp = kprobe_running();
+ kcb->prev_kprobe.status = kcb->kprobe_status;
+ kcb->prev_kprobe.orig_tnpc = kcb->kprobe_orig_tnpc;
+ kcb->prev_kprobe.orig_tstate_pil = kcb->kprobe_orig_tstate_pil;
}
-static inline void restore_previous_kprobe(void)
+static inline void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
- kprobe_status = kprobe_status_prev;
- current_kprobe_orig_tnpc = kprobe_orig_tnpc_prev;
- current_kprobe_orig_tstate_pil = kprobe_orig_tstate_pil_prev;
- current_kprobe = kprobe_prev;
+ __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
+ kcb->kprobe_status = kcb->prev_kprobe.status;
+ kcb->kprobe_orig_tnpc = kcb->prev_kprobe.orig_tnpc;
+ kcb->kprobe_orig_tstate_pil = kcb->prev_kprobe.orig_tstate_pil;
}
-static inline void set_current_kprobe(struct kprobe *p, struct pt_regs *regs)
+static inline void set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
+ struct kprobe_ctlblk *kcb)
{
- current_kprobe_orig_tnpc = regs->tnpc;
- current_kprobe_orig_tstate_pil = (regs->tstate & TSTATE_PIL);
- current_kprobe = p;
+ __get_cpu_var(current_kprobe) = p;
+ kcb->kprobe_orig_tnpc = regs->tnpc;
+ kcb->kprobe_orig_tstate_pil = (regs->tstate & TSTATE_PIL);
}
-static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
+static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs,
+ struct kprobe_ctlblk *kcb)
{
regs->tstate |= TSTATE_PIL;
/* single step inline, if it is a breakpoint instruction */
if (p->opcode == BREAKPOINT_INSTRUCTION) {
regs->tpc = (unsigned long) p->addr;
- regs->tnpc = current_kprobe_orig_tnpc;
+ regs->tnpc = kcb->kprobe_orig_tnpc;
} else {
regs->tpc = (unsigned long) &p->ainsn.insn[0];
regs->tnpc = (unsigned long) &p->ainsn.insn[1];
@@ -117,19 +113,21 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
struct kprobe *p;
void *addr = (void *) regs->tpc;
int ret = 0;
+ struct kprobe_ctlblk *kcb;
+ /*
+ * We don't want to be preempted for the entire
+ * duration of kprobe processing
+ */
preempt_disable();
+ kcb = get_kprobe_ctlblk();
if (kprobe_running()) {
- /* We *are* holding lock here, so this is safe.
- * Disarm the probe we just hit, and ignore it.
- */
p = get_kprobe(addr);
if (p) {
- if (kprobe_status == KPROBE_HIT_SS) {
+ if (kcb->kprobe_status == KPROBE_HIT_SS) {
regs->tstate = ((regs->tstate & ~TSTATE_PIL) |
- current_kprobe_orig_tstate_pil);
- unlock_kprobes();
+ kcb->kprobe_orig_tstate_pil);
goto no_kprobe;
}
/* We have reentered the kprobe_handler(), since
@@ -138,25 +136,22 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
* just single step on the instruction of the new probe
* without calling any user handlers.
*/
- save_previous_kprobe();
- set_current_kprobe(p, regs);
+ save_previous_kprobe(kcb);
+ set_current_kprobe(p, regs, kcb);
p->nmissed++;
- kprobe_status = KPROBE_REENTER;
- prepare_singlestep(p, regs);
+ kcb->kprobe_status = KPROBE_REENTER;
+ prepare_singlestep(p, regs, kcb);
return 1;
} else {
- p = current_kprobe;
+ p = __get_cpu_var(current_kprobe);
if (p->break_handler && p->break_handler(p, regs))
goto ss_probe;
}
- /* If it's not ours, can't be delete race, (we hold lock). */
goto no_kprobe;
}
- lock_kprobes();
p = get_kprobe(addr);
if (!p) {
- unlock_kprobes();
if (*(u32 *)addr != BREAKPOINT_INSTRUCTION) {
/*
* The breakpoint instruction was removed right
@@ -171,14 +166,14 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
goto no_kprobe;
}
- set_current_kprobe(p, regs);
- kprobe_status = KPROBE_HIT_ACTIVE;
+ set_current_kprobe(p, regs, kcb);
+ kcb->kprobe_status = KPROBE_HIT_ACTIVE;
if (p->pre_handler && p->pre_handler(p, regs))
return 1;
ss_probe:
- prepare_singlestep(p, regs);
- kprobe_status = KPROBE_HIT_SS;
+ prepare_singlestep(p, regs, kcb);
+ kcb->kprobe_status = KPROBE_HIT_SS;
return 1;
no_kprobe:
@@ -260,11 +255,12 @@ static void __kprobes retpc_fixup(struct pt_regs *regs, u32 insn,
* This function prepares to return from the post-single-step
* breakpoint trap.
*/
-static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
+static void __kprobes resume_execution(struct kprobe *p,
+ struct pt_regs *regs, struct kprobe_ctlblk *kcb)
{
u32 insn = p->ainsn.insn[0];
- regs->tpc = current_kprobe_orig_tnpc;
+ regs->tpc = kcb->kprobe_orig_tnpc;
regs->tnpc = relbranch_fixup(insn,
(unsigned long) p->addr,
(unsigned long) &p->ainsn.insn[0],
@@ -272,44 +268,48 @@ static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
retpc_fixup(regs, insn, (unsigned long) p->addr);
regs->tstate = ((regs->tstate & ~TSTATE_PIL) |
- current_kprobe_orig_tstate_pil);
+ kcb->kprobe_orig_tstate_pil);
}
static inline int post_kprobe_handler(struct pt_regs *regs)
{
- if (!kprobe_running())
+ struct kprobe *cur = kprobe_running();
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+ if (!cur)
return 0;
- if ((kprobe_status != KPROBE_REENTER) && current_kprobe->post_handler) {
- kprobe_status = KPROBE_HIT_SSDONE;
- current_kprobe->post_handler(current_kprobe, regs, 0);
+ if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
+ kcb->kprobe_status = KPROBE_HIT_SSDONE;
+ cur->post_handler(cur, regs, 0);
}
- resume_execution(current_kprobe, regs);
+ resume_execution(cur, regs, kcb);
/*Restore back the original saved kprobes variables and continue. */
- if (kprobe_status == KPROBE_REENTER) {
- restore_previous_kprobe();
+ if (kcb->kprobe_status == KPROBE_REENTER) {
+ restore_previous_kprobe(kcb);
goto out;
}
- unlock_kprobes();
+ reset_current_kprobe();
out:
preempt_enable_no_resched();
return 1;
}
-/* Interrupts disabled, kprobe_lock held. */
static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
- if (current_kprobe->fault_handler
- && current_kprobe->fault_handler(current_kprobe, regs, trapnr))
+ struct kprobe *cur = kprobe_running();
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+ if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
return 1;
- if (kprobe_status & KPROBE_HIT_SS) {
- resume_execution(current_kprobe, regs);
+ if (kcb->kprobe_status & KPROBE_HIT_SS) {
+ resume_execution(cur, regs, kcb);
- unlock_kprobes();
+ reset_current_kprobe();
preempt_enable_no_resched();
}
return 0;
@@ -322,29 +322,30 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
unsigned long val, void *data)
{
struct die_args *args = (struct die_args *)data;
+ int ret = NOTIFY_DONE;
+
switch (val) {
case DIE_DEBUG:
if (kprobe_handler(args->regs))
- return NOTIFY_STOP;
+ ret = NOTIFY_STOP;
break;
case DIE_DEBUG_2:
if (post_kprobe_handler(args->regs))
- return NOTIFY_STOP;
+ ret = NOTIFY_STOP;
break;
case DIE_GPF:
- if (kprobe_running() &&
- kprobe_fault_handler(args->regs, args->trapnr))
- return NOTIFY_STOP;
- break;
case DIE_PAGE_FAULT:
+ /* kprobe_running() needs smp_processor_id() */
+ preempt_disable();
if (kprobe_running() &&
kprobe_fault_handler(args->regs, args->trapnr))
- return NOTIFY_STOP;
+ ret = NOTIFY_STOP;
+ preempt_enable();
break;
default:
break;
}
- return NOTIFY_DONE;
+ return ret;
}
asmlinkage void __kprobes kprobe_trap(unsigned long trap_level,
@@ -368,24 +369,21 @@ asmlinkage void __kprobes kprobe_trap(unsigned long trap_level,
}
/* Jprobes support. */
-static struct pt_regs jprobe_saved_regs;
-static struct pt_regs *jprobe_saved_regs_location;
-static struct sparc_stackf jprobe_saved_stack;
-
int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
struct jprobe *jp = container_of(p, struct jprobe, kp);
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
- jprobe_saved_regs_location = regs;
- memcpy(&jprobe_saved_regs, regs, sizeof(*regs));
+ kcb->jprobe_saved_regs_location = regs;
+ memcpy(&(kcb->jprobe_saved_regs), regs, sizeof(*regs));
/* Save a whole stack frame, this gets arguments
* pushed onto the stack after using up all the
* arg registers.
*/
- memcpy(&jprobe_saved_stack,
+ memcpy(&(kcb->jprobe_saved_stack),
(char *) (regs->u_regs[UREG_FP] + STACK_BIAS),
- sizeof(jprobe_saved_stack));
+ sizeof(kcb->jprobe_saved_stack));
regs->tpc = (unsigned long) jp->entry;
regs->tnpc = ((unsigned long) jp->entry) + 0x4UL;
@@ -396,7 +394,6 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
void __kprobes jprobe_return(void)
{
- preempt_enable_no_resched();
__asm__ __volatile__(
".globl jprobe_return_trap_instruction\n"
"jprobe_return_trap_instruction:\n\t"
@@ -410,14 +407,15 @@ extern void __show_regs(struct pt_regs * regs);
int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
u32 *addr = (u32 *) regs->tpc;
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
if (addr == (u32 *) jprobe_return_trap_instruction) {
- if (jprobe_saved_regs_location != regs) {
+ if (kcb->jprobe_saved_regs_location != regs) {
printk("JPROBE: Current regs (%p) does not match "
"saved regs (%p).\n",
- regs, jprobe_saved_regs_location);
+ regs, kcb->jprobe_saved_regs_location);
printk("JPROBE: Saved registers\n");
- __show_regs(jprobe_saved_regs_location);
+ __show_regs(kcb->jprobe_saved_regs_location);
printk("JPROBE: Current registers\n");
__show_regs(regs);
BUG();
@@ -426,12 +424,13 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
* first so that UREG_FP is the original one for
* the stack frame restore.
*/
- memcpy(regs, &jprobe_saved_regs, sizeof(*regs));
+ memcpy(regs, &(kcb->jprobe_saved_regs), sizeof(*regs));
memcpy((char *) (regs->u_regs[UREG_FP] + STACK_BIAS),
- &jprobe_saved_stack,
- sizeof(jprobe_saved_stack));
+ &(kcb->jprobe_saved_stack),
+ sizeof(kcb->jprobe_saved_stack));
+ preempt_enable_no_resched();
return 1;
}
return 0;
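The kprobes conversion above replaces the file-static current_kprobe/kprobe_status globals and their *_prev copies with per-CPU state in struct kprobe_ctlblk, saved on reentry and restored afterwards while preemption stays disabled. A stripped-down, self-contained model of that save/restore pattern (illustrative; plain arrays stand in for the kernel's per-CPU variables, and like the kernel code it keeps only one level of saved state):

/* Illustrative model of per-CPU probe state with one level of reentry,
 * mirroring save_previous_kprobe()/restore_previous_kprobe() above. */
#include <stdio.h>

#define NR_CPUS 4

struct ctlblk {
        int status;             /* e.g. HIT_ACTIVE, HIT_SS, REENTER */
        const char *cur;        /* probe currently being handled */
        struct { int status; const char *cur; } prev;   /* saved outer probe */
};

static struct ctlblk per_cpu_ctl[NR_CPUS];      /* stands in for DEFINE_PER_CPU */

static void enter_probe(int cpu, const char *name, int status)
{
        struct ctlblk *kcb = &per_cpu_ctl[cpu];

        if (kcb->cur) {                         /* reentered: save the outer probe */
                kcb->prev.cur = kcb->cur;
                kcb->prev.status = kcb->status;
        }
        kcb->cur = name;
        kcb->status = status;
}

static void leave_probe(int cpu)
{
        struct ctlblk *kcb = &per_cpu_ctl[cpu];

        if (kcb->prev.cur) {                    /* restore the outer probe */
                kcb->cur = kcb->prev.cur;
                kcb->status = kcb->prev.status;
                kcb->prev.cur = NULL;
        } else {
                kcb->cur = NULL;                /* nothing active any more */
        }
}

int main(void)
{
        enter_probe(0, "outer", 1);
        enter_probe(0, "inner", 2);             /* probe hit from within a handler */
        leave_probe(0);
        printf("cpu0 back to probe '%s'\n", per_cpu_ctl[0].cur);   /* outer */
        leave_probe(0);
        return 0;
}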
diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c
index c1f34237cdf..bf1849dd9c4 100644
--- a/arch/sparc64/kernel/setup.c
+++ b/arch/sparc64/kernel/setup.c
@@ -154,6 +154,7 @@ int prom_callback(long *args)
pud_t *pudp;
pmd_t *pmdp;
pte_t *ptep;
+ pte_t pte;
for_each_process(p) {
mm = p->mm;
@@ -178,8 +179,9 @@ int prom_callback(long *args)
* being called from inside OBP.
*/
ptep = pte_offset_map(pmdp, va);
- if (pte_present(*ptep)) {
- tte = pte_val(*ptep);
+ pte = *ptep;
+ if (pte_present(pte)) {
+ tte = pte_val(pte);
res = PROM_TRUE;
}
pte_unmap(ptep);
@@ -218,6 +220,7 @@ int prom_callback(long *args)
pud_t *pudp;
pmd_t *pmdp;
pte_t *ptep;
+ pte_t pte;
int error;
if ((va >= LOW_OBP_ADDRESS) && (va < HI_OBP_ADDRESS)) {
@@ -240,8 +243,9 @@ int prom_callback(long *args)
* being called from inside OBP.
*/
ptep = pte_offset_kernel(pmdp, va);
- if (pte_present(*ptep)) {
- tte = pte_val(*ptep);
+ pte = *ptep;
+ if (pte_present(pte)) {
+ tte = pte_val(pte);
res = PROM_TRUE;
}
goto done;
diff --git a/arch/sparc64/kernel/signal32.c b/arch/sparc64/kernel/signal32.c
index aecccd0df1d..009a86e5ded 100644
--- a/arch/sparc64/kernel/signal32.c
+++ b/arch/sparc64/kernel/signal32.c
@@ -863,6 +863,7 @@ static void new_setup_frame32(struct k_sigaction *ka, struct pt_regs *regs,
pud_t *pudp = pud_offset(pgdp, address);
pmd_t *pmdp = pmd_offset(pudp, address);
pte_t *ptep;
+ pte_t pte;
regs->u_regs[UREG_I7] = (unsigned long) (&(sf->insns[0]) - 2);
@@ -873,9 +874,10 @@ static void new_setup_frame32(struct k_sigaction *ka, struct pt_regs *regs,
preempt_disable();
ptep = pte_offset_map(pmdp, address);
- if (pte_present(*ptep)) {
+ pte = *ptep;
+ if (pte_present(pte)) {
unsigned long page = (unsigned long)
- page_address(pte_page(*ptep));
+ page_address(pte_page(pte));
wmb();
__asm__ __volatile__("flush %0 + %1"
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index b137fd63f5e..5d90ee9aebf 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -839,43 +839,29 @@ void smp_flush_tlb_all(void)
* questionable (in theory the big win for threads is the massive sharing of
* address space state across processors).
*/
+
+/* This currently is only used by the hugetlb arch pre-fault
+ * hook on UltraSPARC-III+ and later when changing the pagesize
+ * bits of the context register for an address space.
+ */
void smp_flush_tlb_mm(struct mm_struct *mm)
{
- /*
- * This code is called from two places, dup_mmap and exit_mmap. In the
- * former case, we really need a flush. In the later case, the callers
- * are single threaded exec_mmap (really need a flush), multithreaded
- * exec_mmap case (do not need to flush, since the caller gets a new
- * context via activate_mm), and all other callers of mmput() whence
- * the flush can be optimized since the associated threads are dead and
- * the mm is being torn down (__exit_mm and other mmput callers) or the
- * owning thread is dissociating itself from the mm. The
- * (atomic_read(&mm->mm_users) == 0) check ensures real work is done
- * for single thread exec and dup_mmap cases. An alternate check might
- * have been (current->mm != mm).
- * Kanoj Sarcar
- */
- if (atomic_read(&mm->mm_users) == 0)
- return;
-
- {
- u32 ctx = CTX_HWBITS(mm->context);
- int cpu = get_cpu();
+ u32 ctx = CTX_HWBITS(mm->context);
+ int cpu = get_cpu();
- if (atomic_read(&mm->mm_users) == 1) {
- mm->cpu_vm_mask = cpumask_of_cpu(cpu);
- goto local_flush_and_out;
- }
+ if (atomic_read(&mm->mm_users) == 1) {
+ mm->cpu_vm_mask = cpumask_of_cpu(cpu);
+ goto local_flush_and_out;
+ }
- smp_cross_call_masked(&xcall_flush_tlb_mm,
- ctx, 0, 0,
- mm->cpu_vm_mask);
+ smp_cross_call_masked(&xcall_flush_tlb_mm,
+ ctx, 0, 0,
+ mm->cpu_vm_mask);
- local_flush_and_out:
- __flush_tlb_mm(ctx, SECONDARY_CONTEXT);
+local_flush_and_out:
+ __flush_tlb_mm(ctx, SECONDARY_CONTEXT);
- put_cpu();
- }
+ put_cpu();
}
void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs)
@@ -883,34 +869,13 @@ void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long
u32 ctx = CTX_HWBITS(mm->context);
int cpu = get_cpu();
- if (mm == current->active_mm && atomic_read(&mm->mm_users) == 1) {
+ if (mm == current->active_mm && atomic_read(&mm->mm_users) == 1)
mm->cpu_vm_mask = cpumask_of_cpu(cpu);
- goto local_flush_and_out;
- } else {
- /* This optimization is not valid. Normally
- * we will be holding the page_table_lock, but
- * there is an exception which is copy_page_range()
- * when forking. The lock is held during the individual
- * page table updates in the parent, but not at the
- * top level, which is where we are invoked.
- */
- if (0) {
- cpumask_t this_cpu_mask = cpumask_of_cpu(cpu);
-
- /* By virtue of running under the mm->page_table_lock,
- * and mmu_context.h:switch_mm doing the same, the
- * following operation is safe.
- */
- if (cpus_equal(mm->cpu_vm_mask, this_cpu_mask))
- goto local_flush_and_out;
- }
- }
-
- smp_cross_call_masked(&xcall_flush_tlb_pending,
- ctx, nr, (unsigned long) vaddrs,
- mm->cpu_vm_mask);
+ else
+ smp_cross_call_masked(&xcall_flush_tlb_pending,
+ ctx, nr, (unsigned long) vaddrs,
+ mm->cpu_vm_mask);
-local_flush_and_out:
__flush_tlb_pending(ctx, nr, vaddrs);
put_cpu();
diff --git a/arch/sparc64/kernel/sunos_ioctl32.c b/arch/sparc64/kernel/sunos_ioctl32.c
index 7654b8a7f03..3f619ead22c 100644
--- a/arch/sparc64/kernel/sunos_ioctl32.c
+++ b/arch/sparc64/kernel/sunos_ioctl32.c
@@ -24,7 +24,6 @@
#include <linux/smp_lock.h>
#include <linux/syscalls.h>
#include <linux/compat.h>
-#include <asm/kbio.h>
#define SUNOS_NR_OPEN 256
diff --git a/arch/sparc64/kernel/time.c b/arch/sparc64/kernel/time.c
index 38c5525087a..459c8fbe02b 100644
--- a/arch/sparc64/kernel/time.c
+++ b/arch/sparc64/kernel/time.c
@@ -60,17 +60,6 @@ static void __iomem *mstk48t59_regs;
static int set_rtc_mmss(unsigned long);
-static __init unsigned long dummy_get_tick(void)
-{
- return 0;
-}
-
-static __initdata struct sparc64_tick_ops dummy_tick_ops = {
- .get_tick = dummy_get_tick,
-};
-
-struct sparc64_tick_ops *tick_ops __read_mostly = &dummy_tick_ops;
-
#define TICK_PRIV_BIT (1UL << 63)
#ifdef CONFIG_SMP
@@ -200,6 +189,8 @@ static struct sparc64_tick_ops tick_operations __read_mostly = {
.softint_mask = 1UL << 0,
};
+struct sparc64_tick_ops *tick_ops __read_mostly = &tick_operations;
+
static void stick_init_tick(unsigned long offset)
{
tick_disable_protection();
diff --git a/arch/sparc64/kernel/us2e_cpufreq.c b/arch/sparc64/kernel/us2e_cpufreq.c
index 686e526bec0..b35dc8dc995 100644
--- a/arch/sparc64/kernel/us2e_cpufreq.c
+++ b/arch/sparc64/kernel/us2e_cpufreq.c
@@ -388,10 +388,8 @@ err_out:
kfree(driver);
cpufreq_us2e_driver = NULL;
}
- if (us2e_freq_table) {
- kfree(us2e_freq_table);
- us2e_freq_table = NULL;
- }
+ kfree(us2e_freq_table);
+ us2e_freq_table = NULL;
return ret;
}
@@ -402,7 +400,6 @@ static void __exit us2e_freq_exit(void)
{
if (cpufreq_us2e_driver) {
cpufreq_unregister_driver(cpufreq_us2e_driver);
-
kfree(cpufreq_us2e_driver);
cpufreq_us2e_driver = NULL;
kfree(us2e_freq_table);
diff --git a/arch/sparc64/kernel/us3_cpufreq.c b/arch/sparc64/kernel/us3_cpufreq.c
index 0340041f614..6d1f9a3c464 100644
--- a/arch/sparc64/kernel/us3_cpufreq.c
+++ b/arch/sparc64/kernel/us3_cpufreq.c
@@ -249,10 +249,8 @@ err_out:
kfree(driver);
cpufreq_us3_driver = NULL;
}
- if (us3_freq_table) {
- kfree(us3_freq_table);
- us3_freq_table = NULL;
- }
+ kfree(us3_freq_table);
+ us3_freq_table = NULL;
return ret;
}
@@ -263,7 +261,6 @@ static void __exit us3_freq_exit(void)
{
if (cpufreq_us3_driver) {
cpufreq_unregister_driver(cpufreq_us3_driver);
-
kfree(cpufreq_us3_driver);
cpufreq_us3_driver = NULL;
kfree(us3_freq_table);
diff --git a/arch/sparc64/oprofile/Kconfig b/arch/sparc64/oprofile/Kconfig
index 5ade19801b9..d8a84088471 100644
--- a/arch/sparc64/oprofile/Kconfig
+++ b/arch/sparc64/oprofile/Kconfig
@@ -1,7 +1,3 @@
-
-menu "Profiling support"
- depends on EXPERIMENTAL
-
config PROFILING
bool "Profiling support (EXPERIMENTAL)"
help
@@ -19,5 +15,3 @@ config OPROFILE
If unsure, say N.
-endmenu
-
diff --git a/arch/um/Kconfig b/arch/um/Kconfig
index cd06ed7d842..3b5f47c4690 100644
--- a/arch/um/Kconfig
+++ b/arch/um/Kconfig
@@ -65,6 +65,30 @@ config STATIC_LINK
chroot, and you disable CONFIG_MODE_TT, you probably want to say Y
here.
+config HOST_2G_2G
+ bool "2G/2G host address space split"
+ default n
+ depends on MODE_TT
+ help
+ This is needed when the host on which you run has a 2G/2G memory
+ split, instead of the customary 3G/1G.
+
+ Note that to enable such a host
+ configuration, which makes sense only in some cases, you need special
+ host patches.
+
+ So, if you do not know what to do here, say 'N'.
+
+config KERNEL_HALF_GIGS
+ int "Kernel address space size (in .5G units)"
+ default "1"
+ depends on MODE_TT
+ help
+ This determines the amount of address space that UML will allocate for
+ its own, measured in half Gigabyte units. The default is 1.
+ Change this only if you need to boot UML with an unusually large amount
+ of physical memory.
+
config MODE_SKAS
bool "Separate Kernel Address Space support"
default y
@@ -182,19 +206,6 @@ config MAGIC_SYSRQ
The keys are documented in <file:Documentation/sysrq.txt>. Don't say Y
unless you really know what this hack does.
-config HOST_2G_2G
- bool "2G/2G host address space split"
- default n
- help
- This is needed when the host on which you run has a 2G/2G memory
- split, instead of the customary 3G/1G.
-
- Note that to enable such a host
- configuration, which makes sense only in some cases, you need special
- host patches.
-
- So, if you do not know what to do here, say 'N'.
-
config SMP
bool "Symmetric multi-processing support (EXPERIMENTAL)"
default n
@@ -241,15 +252,6 @@ config NEST_LEVEL
set to the host's CONFIG_NEST_LEVEL + CONFIG_KERNEL_HALF_GIGS.
Only change this if you are running nested UMLs.
-config KERNEL_HALF_GIGS
- int "Kernel address space size (in .5G units)"
- default "1"
- help
- This determines the amount of address space that UML will allocate for
- its own, measured in half Gigabyte units. The default is 1.
- Change this only if you need to boot UML with an unusually large amount
- of physical memory.
-
config HIGHMEM
bool "Highmem support"
depends on !64BIT
diff --git a/arch/um/Makefile b/arch/um/Makefile
index e1ffad22460..e55d32e903b 100644
--- a/arch/um/Makefile
+++ b/arch/um/Makefile
@@ -60,7 +60,7 @@ AFLAGS += $(ARCH_INCLUDE)
USER_CFLAGS := $(patsubst -I%,,$(CFLAGS))
USER_CFLAGS := $(patsubst -D__KERNEL__,,$(USER_CFLAGS)) $(ARCH_INCLUDE) \
- $(MODE_INCLUDE)
+ $(MODE_INCLUDE) -D_FILE_OFFSET_BITS=64
# -Derrno=kernel_errno - This turns all kernel references to errno into
# kernel_errno to separate them from the libc errno. This allows -fno-common
diff --git a/arch/um/Makefile-i386 b/arch/um/Makefile-i386
index aef7c50f8e1..1f7dcb064ae 100644
--- a/arch/um/Makefile-i386
+++ b/arch/um/Makefile-i386
@@ -17,8 +17,6 @@ ifeq ("$(origin SUBARCH)", "command line")
ifneq ("$(shell uname -m | sed -e s/i.86/i386/)", "$(SUBARCH)")
CFLAGS += $(call cc-option,-m32)
USER_CFLAGS += $(call cc-option,-m32)
-HOSTCFLAGS += $(call cc-option,-m32)
-HOSTLDFLAGS += $(call cc-option,-m32)
AFLAGS += $(call cc-option,-m32)
LINK-y += $(call cc-option,-m32)
UML_OBJCOPYFLAGS += -F $(ELF_FORMAT)
diff --git a/arch/um/drivers/chan_user.c b/arch/um/drivers/chan_user.c
index de3bce71aeb..1c55d580248 100644
--- a/arch/um/drivers/chan_user.c
+++ b/arch/um/drivers/chan_user.c
@@ -16,7 +16,6 @@
#include "user_util.h"
#include "chan_user.h"
#include "user.h"
-#include "helper.h"
#include "os.h"
#include "choose-mode.h"
#include "mode.h"
diff --git a/arch/um/drivers/harddog_kern.c b/arch/um/drivers/harddog_kern.c
index 147ec19f6bb..49acb2badf3 100644
--- a/arch/um/drivers/harddog_kern.c
+++ b/arch/um/drivers/harddog_kern.c
@@ -46,7 +46,6 @@
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <asm/uaccess.h>
-#include "helper.h"
#include "mconsole.h"
MODULE_LICENSE("GPL");
diff --git a/arch/um/drivers/harddog_user.c b/arch/um/drivers/harddog_user.c
index d934181b8d4..def013b5a3c 100644
--- a/arch/um/drivers/harddog_user.c
+++ b/arch/um/drivers/harddog_user.c
@@ -8,7 +8,6 @@
#include <errno.h>
#include "user_util.h"
#include "user.h"
-#include "helper.h"
#include "mconsole.h"
#include "os.h"
#include "choose-mode.h"
diff --git a/arch/um/drivers/net_kern.c b/arch/um/drivers/net_kern.c
index 721e2601a75..fe865d9a372 100644
--- a/arch/um/drivers/net_kern.c
+++ b/arch/um/drivers/net_kern.c
@@ -96,7 +96,6 @@ irqreturn_t uml_net_interrupt(int irq, void *dev_id, struct pt_regs *regs)
static int uml_net_open(struct net_device *dev)
{
struct uml_net_private *lp = dev->priv;
- char addr[sizeof("255.255.255.255\0")];
int err;
spin_lock(&lp->lock);
@@ -107,7 +106,7 @@ static int uml_net_open(struct net_device *dev)
}
if(!lp->have_mac){
- dev_ip_addr(dev, addr, &lp->mac[2]);
+ dev_ip_addr(dev, &lp->mac[2]);
set_ether_mac(dev, lp->mac);
}
@@ -664,8 +663,6 @@ static int uml_inetaddr_event(struct notifier_block *this, unsigned long event,
void *ptr)
{
struct in_ifaddr *ifa = ptr;
- u32 addr = ifa->ifa_address;
- u32 netmask = ifa->ifa_mask;
struct net_device *dev = ifa->ifa_dev->dev;
struct uml_net_private *lp;
void (*proc)(unsigned char *, unsigned char *, void *);
@@ -685,14 +682,8 @@ static int uml_inetaddr_event(struct notifier_block *this, unsigned long event,
break;
}
if(proc != NULL){
- addr_buf[0] = addr & 0xff;
- addr_buf[1] = (addr >> 8) & 0xff;
- addr_buf[2] = (addr >> 16) & 0xff;
- addr_buf[3] = addr >> 24;
- netmask_buf[0] = netmask & 0xff;
- netmask_buf[1] = (netmask >> 8) & 0xff;
- netmask_buf[2] = (netmask >> 16) & 0xff;
- netmask_buf[3] = netmask >> 24;
+ memcpy(addr_buf, &ifa->ifa_address, sizeof(addr_buf));
+ memcpy(netmask_buf, &ifa->ifa_mask, sizeof(netmask_buf));
(*proc)(addr_buf, netmask_buf, &lp->user);
}
return(NOTIFY_DONE);
@@ -774,27 +765,18 @@ int setup_etheraddr(char *str, unsigned char *addr)
return(1);
}
-void dev_ip_addr(void *d, char *buf, char *bin_buf)
+void dev_ip_addr(void *d, unsigned char *bin_buf)
{
struct net_device *dev = d;
struct in_device *ip = dev->ip_ptr;
struct in_ifaddr *in;
- u32 addr;
if((ip == NULL) || ((in = ip->ifa_list) == NULL)){
printk(KERN_WARNING "dev_ip_addr - device not assigned an "
"IP address\n");
return;
}
- addr = in->ifa_address;
- sprintf(buf, "%d.%d.%d.%d", addr & 0xff, (addr >> 8) & 0xff,
- (addr >> 16) & 0xff, addr >> 24);
- if(bin_buf){
- bin_buf[0] = addr & 0xff;
- bin_buf[1] = (addr >> 8) & 0xff;
- bin_buf[2] = (addr >> 16) & 0xff;
- bin_buf[3] = addr >> 24;
- }
+ memcpy(bin_buf, &in->ifa_address, sizeof(in->ifa_address));
}
void set_ether_mac(void *d, unsigned char *addr)
@@ -829,14 +811,8 @@ void iter_addresses(void *d, void (*cb)(unsigned char *, unsigned char *,
if(ip == NULL) return;
in = ip->ifa_list;
while(in != NULL){
- address[0] = in->ifa_address & 0xff;
- address[1] = (in->ifa_address >> 8) & 0xff;
- address[2] = (in->ifa_address >> 16) & 0xff;
- address[3] = in->ifa_address >> 24;
- netmask[0] = in->ifa_mask & 0xff;
- netmask[1] = (in->ifa_mask >> 8) & 0xff;
- netmask[2] = (in->ifa_mask >> 16) & 0xff;
- netmask[3] = in->ifa_mask >> 24;
+ memcpy(address, &in->ifa_address, sizeof(address));
+ memcpy(netmask, &in->ifa_mask, sizeof(netmask));
(*cb)(address, netmask, arg);
in = in->ifa_next;
}
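The net_kern.c changes above replace the per-byte shifts of ifa_address and ifa_mask with memcpy() of the 32-bit values; since those fields hold network-byte-order data, copying their in-memory bytes directly yields the a.b.c.d ordering the callers expect. A small self-contained check of that (illustrative; htonl() stands in for the __be32 fields):

/* Show that memcpy of a network-byte-order IPv4 address yields a.b.c.d order. */
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <arpa/inet.h>

int main(void)
{
        uint32_t addr = htonl(0xC0A80001);      /* 192.168.0.1, as a __be32 field would hold it */
        unsigned char buf[4];

        memcpy(buf, &addr, sizeof(buf));
        printf("%u.%u.%u.%u\n", buf[0], buf[1], buf[2], buf[3]);  /* prints 192.168.0.1 */
        return 0;
}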
diff --git a/arch/um/drivers/net_user.c b/arch/um/drivers/net_user.c
index 3730d4f1271..098fa65981a 100644
--- a/arch/um/drivers/net_user.c
+++ b/arch/um/drivers/net_user.c
@@ -16,7 +16,6 @@
#include "user_util.h"
#include "kern_util.h"
#include "net_user.h"
-#include "helper.h"
#include "os.h"
int tap_open_common(void *dev, char *gate_addr)
diff --git a/arch/um/drivers/port_user.c b/arch/um/drivers/port_user.c
index 14dd2002d2d..ed4a1a6c5d8 100644
--- a/arch/um/drivers/port_user.c
+++ b/arch/um/drivers/port_user.c
@@ -18,7 +18,6 @@
#include "user.h"
#include "chan_user.h"
#include "port.h"
-#include "helper.h"
#include "os.h"
struct port_chan {
diff --git a/arch/um/drivers/random.c b/arch/um/drivers/random.c
index f9e22198e01..ba471f5864a 100644
--- a/arch/um/drivers/random.c
+++ b/arch/um/drivers/random.c
@@ -58,10 +58,8 @@ static ssize_t rng_dev_read (struct file *filp, char __user *buf, size_t size,
if (filp->f_flags & O_NONBLOCK)
return ret ? : -EAGAIN;
- if(need_resched()){
- current->state = TASK_INTERRUPTIBLE;
- schedule_timeout(1);
- }
+ if(need_resched())
+ schedule_timeout_interruptible(1);
}
else return n;
if (signal_pending (current))
diff --git a/arch/um/drivers/slip_user.c b/arch/um/drivers/slip_user.c
index 71af444e591..89fbec185cc 100644
--- a/arch/um/drivers/slip_user.c
+++ b/arch/um/drivers/slip_user.c
@@ -14,7 +14,6 @@
#include "net_user.h"
#include "slip.h"
#include "slip_common.h"
-#include "helper.h"
#include "os.h"
void slip_user_init(void *data, void *dev)
diff --git a/arch/um/drivers/slirp_user.c b/arch/um/drivers/slirp_user.c
index 8d91f663d82..b94c66114bc 100644
--- a/arch/um/drivers/slirp_user.c
+++ b/arch/um/drivers/slirp_user.c
@@ -13,7 +13,6 @@
#include "net_user.h"
#include "slirp.h"
#include "slip_common.h"
-#include "helper.h"
#include "os.h"
void slirp_user_init(void *data, void *dev)
diff --git a/arch/um/drivers/xterm.c b/arch/um/drivers/xterm.c
index 90e0e5ff451..b530f1a6540 100644
--- a/arch/um/drivers/xterm.c
+++ b/arch/um/drivers/xterm.c
@@ -14,7 +14,6 @@
#include <sys/socket.h>
#include "kern_util.h"
#include "chan_user.h"
-#include "helper.h"
#include "user_util.h"
#include "user.h"
#include "os.h"
diff --git a/arch/um/include/helper.h b/arch/um/include/helper.h
deleted file mode 100644
index 162ac31192f..00000000000
--- a/arch/um/include/helper.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
- * Licensed under the GPL
- */
-
-#ifndef __HELPER_H__
-#define __HELPER_H__
-
-extern int run_helper(void (*pre_exec)(void *), void *pre_data, char **argv,
- unsigned long *stack_out);
-extern int run_helper_thread(int (*proc)(void *), void *arg,
- unsigned int flags, unsigned long *stack_out,
- int stack_order);
-extern int helper_wait(int pid);
-
-#endif
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
diff --git a/arch/um/include/mem_user.h b/arch/um/include/mem_user.h
index 9fef4123a65..a1064c5823b 100644
--- a/arch/um/include/mem_user.h
+++ b/arch/um/include/mem_user.h
@@ -57,7 +57,7 @@ extern int init_maps(unsigned long physmem, unsigned long iomem,
unsigned long highmem);
extern unsigned long get_vm(unsigned long len);
extern void setup_physmem(unsigned long start, unsigned long usable,
- unsigned long len, unsigned long highmem);
+ unsigned long len, unsigned long long highmem);
extern void add_iomem(char *name, int fd, unsigned long size);
extern unsigned long phys_offset(unsigned long phys);
extern void unmap_physmem(void);
diff --git a/arch/um/include/net_user.h b/arch/um/include/net_user.h
index 89885a77a77..800c403920b 100644
--- a/arch/um/include/net_user.h
+++ b/arch/um/include/net_user.h
@@ -25,7 +25,7 @@ struct net_user_info {
};
extern void ether_user_init(void *data, void *dev);
-extern void dev_ip_addr(void *d, char *buf, char *bin_buf);
+extern void dev_ip_addr(void *d, unsigned char *bin_buf);
extern void set_ether_mac(void *d, unsigned char *addr);
extern void iter_addresses(void *d, void (*cb)(unsigned char *,
unsigned char *, void *),
diff --git a/arch/um/include/os.h b/arch/um/include/os.h
index 2e58e304b8b..2cccfa5b8ab 100644
--- a/arch/um/include/os.h
+++ b/arch/um/include/os.h
@@ -167,7 +167,7 @@ extern int can_do_skas(void);
#endif
/* mem.c */
-extern int create_mem_file(unsigned long len);
+extern int create_mem_file(unsigned long long len);
/* process.c */
extern unsigned long os_process_pc(int pid);
@@ -199,6 +199,20 @@ extern void forward_pending_sigio(int target);
extern int start_fork_tramp(void *arg, unsigned long temp_stack,
int clone_flags, int (*tramp)(void *));
+/* uaccess.c */
+extern unsigned long __do_user_copy(void *to, const void *from, int n,
+ void **fault_addr, void **fault_catcher,
+ void (*op)(void *to, const void *from,
+ int n), int *faulted_out);
+
+/* helper.c */
+extern int run_helper(void (*pre_exec)(void *), void *pre_data, char **argv,
+ unsigned long *stack_out);
+extern int run_helper_thread(int (*proc)(void *), void *arg,
+ unsigned int flags, unsigned long *stack_out,
+ int stack_order);
+extern int helper_wait(int pid);
+
#endif
/*
diff --git a/arch/um/include/sysdep-i386/stub.h b/arch/um/include/sysdep-i386/stub.h
index d3699fe1c61..a49ceb199ee 100644
--- a/arch/um/include/sysdep-i386/stub.h
+++ b/arch/um/include/sysdep-i386/stub.h
@@ -16,45 +16,69 @@ extern void stub_clone_handler(void);
#define STUB_MMAP_NR __NR_mmap2
#define MMAP_OFFSET(o) ((o) >> PAGE_SHIFT)
+static inline long stub_syscall1(long syscall, long arg1)
+{
+ long ret;
+
+ __asm__ volatile ("int $0x80" : "=a" (ret) : "0" (syscall), "b" (arg1));
+
+ return ret;
+}
+
static inline long stub_syscall2(long syscall, long arg1, long arg2)
{
long ret;
- __asm__("movl %0, %%ecx; " : : "g" (arg2) : "%ecx");
- __asm__("movl %0, %%ebx; " : : "g" (arg1) : "%ebx");
- __asm__("movl %0, %%eax; " : : "g" (syscall) : "%eax");
- __asm__("int $0x80;" : : : "%eax");
- __asm__ __volatile__("movl %%eax, %0; " : "=g" (ret) :);
- return(ret);
+ __asm__ volatile ("int $0x80" : "=a" (ret) : "0" (syscall), "b" (arg1),
+ "c" (arg2));
+
+ return ret;
}
static inline long stub_syscall3(long syscall, long arg1, long arg2, long arg3)
{
- __asm__("movl %0, %%edx; " : : "g" (arg3) : "%edx");
- return(stub_syscall2(syscall, arg1, arg2));
+ long ret;
+
+ __asm__ volatile ("int $0x80" : "=a" (ret) : "0" (syscall), "b" (arg1),
+ "c" (arg2), "d" (arg3));
+
+ return ret;
}
static inline long stub_syscall4(long syscall, long arg1, long arg2, long arg3,
long arg4)
{
- __asm__("movl %0, %%esi; " : : "g" (arg4) : "%esi");
- return(stub_syscall3(syscall, arg1, arg2, arg3));
+ long ret;
+
+ __asm__ volatile ("int $0x80" : "=a" (ret) : "0" (syscall), "b" (arg1),
+ "c" (arg2), "d" (arg3), "S" (arg4));
+
+ return ret;
+}
+
+static inline long stub_syscall5(long syscall, long arg1, long arg2, long arg3,
+ long arg4, long arg5)
+{
+ long ret;
+
+ __asm__ volatile ("int $0x80" : "=a" (ret) : "0" (syscall), "b" (arg1),
+ "c" (arg2), "d" (arg3), "S" (arg4), "D" (arg5));
+
+ return ret;
}
static inline long stub_syscall6(long syscall, long arg1, long arg2, long arg3,
long arg4, long arg5, long arg6)
{
long ret;
- __asm__("movl %0, %%eax; " : : "g" (syscall) : "%eax");
- __asm__("movl %0, %%ebx; " : : "g" (arg1) : "%ebx");
- __asm__("movl %0, %%ecx; " : : "g" (arg2) : "%ecx");
- __asm__("movl %0, %%edx; " : : "g" (arg3) : "%edx");
- __asm__("movl %0, %%esi; " : : "g" (arg4) : "%esi");
- __asm__("movl %0, %%edi; " : : "g" (arg5) : "%edi");
- __asm__ __volatile__("pushl %%ebp ; movl %1, %%ebp; "
- "int $0x80; popl %%ebp ; "
- "movl %%eax, %0; " : "=g" (ret) : "g" (arg6) : "%eax");
- return(ret);
+
+ __asm__ volatile ("push %%ebp ; movl %%eax,%%ebp ; movl %1,%%eax ; "
+ "int $0x80 ; pop %%ebp"
+ : "=a" (ret)
+ : "g" (syscall), "b" (arg1), "c" (arg2), "d" (arg3),
+ "S" (arg4), "D" (arg5), "0" (arg6));
+
+ return ret;
}
static inline void trap_myself(void)
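
The rewritten i386 stubs above replace the old one-mov-per-register sequences with a single asm statement whose constraints ("=a" for the result, "0" to reuse eax for the syscall number, "b"/"c"/"d"/"S"/"D" for the arguments) keep the compiler from clobbering a register between the argument loads and the trap; the x86_64 variant that follows does the same with the syscall instruction, moving r10/r8/r9 by hand since no constraint letters exist for them. A rough user-space illustration of the same constraint pattern, not UML code, buildable only as a 32-bit x86 binary (e.g. "gcc -m32 -fno-pie"):

/* Illustration only: issue write(2) via int $0x80 with the same
 * constraint pattern as stub_syscall3(). i386 only. */
#define NR_write 4      /* i386 syscall number for write(2) */

static inline long int80_syscall3(long nr, long a1, long a2, long a3)
{
        long ret;

        /* eax carries the syscall number in and the result out;
         * ebx/ecx/edx carry the first three arguments. */
        __asm__ volatile ("int $0x80"
                          : "=a" (ret)
                          : "0" (nr), "b" (a1), "c" (a2), "d" (a3)
                          : "memory");
        return ret;
}

int main(void)
{
        const char msg[] = "hello from int $0x80\n";

        return int80_syscall3(NR_write, 1, (long) msg, sizeof(msg) - 1) < 0;
}
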
diff --git a/arch/um/include/sysdep-x86_64/stub.h b/arch/um/include/sysdep-x86_64/stub.h
index f599058d826..2bd6e7a9728 100644
--- a/arch/um/include/sysdep-x86_64/stub.h
+++ b/arch/um/include/sysdep-x86_64/stub.h
@@ -17,37 +17,72 @@ extern void stub_clone_handler(void);
#define STUB_MMAP_NR __NR_mmap
#define MMAP_OFFSET(o) (o)
+#define __syscall_clobber "r11","rcx","memory"
+#define __syscall "syscall"
+
static inline long stub_syscall2(long syscall, long arg1, long arg2)
{
long ret;
- __asm__("movq %0, %%rsi; " : : "g" (arg2) : "%rsi");
- __asm__("movq %0, %%rdi; " : : "g" (arg1) : "%rdi");
- __asm__("movq %0, %%rax; " : : "g" (syscall) : "%rax");
- __asm__("syscall;" : : : "%rax", "%r11", "%rcx");
- __asm__ __volatile__("movq %%rax, %0; " : "=g" (ret) :);
- return(ret);
+ __asm__ volatile (__syscall
+ : "=a" (ret)
+ : "0" (syscall), "D" (arg1), "S" (arg2) : __syscall_clobber );
+
+ return ret;
}
static inline long stub_syscall3(long syscall, long arg1, long arg2, long arg3)
{
- __asm__("movq %0, %%rdx; " : : "g" (arg3) : "%rdx");
- return(stub_syscall2(syscall, arg1, arg2));
+ long ret;
+
+ __asm__ volatile (__syscall
+ : "=a" (ret)
+ : "0" (syscall), "D" (arg1), "S" (arg2), "d" (arg3)
+ : __syscall_clobber );
+
+ return ret;
}
static inline long stub_syscall4(long syscall, long arg1, long arg2, long arg3,
long arg4)
{
- __asm__("movq %0, %%r10; " : : "g" (arg4) : "%r10");
- return(stub_syscall3(syscall, arg1, arg2, arg3));
+ long ret;
+
+ __asm__ volatile ("movq %5,%%r10 ; " __syscall
+ : "=a" (ret)
+ : "0" (syscall), "D" (arg1), "S" (arg2), "d" (arg3),
+ "g" (arg4)
+ : __syscall_clobber, "r10" );
+
+ return ret;
+}
+
+static inline long stub_syscall5(long syscall, long arg1, long arg2, long arg3,
+ long arg4, long arg5)
+{
+ long ret;
+
+ __asm__ volatile ("movq %5,%%r10 ; movq %6,%%r8 ; " __syscall
+ : "=a" (ret)
+ : "0" (syscall), "D" (arg1), "S" (arg2), "d" (arg3),
+ "g" (arg4), "g" (arg5)
+ : __syscall_clobber, "r10", "r8" );
+
+ return ret;
}
static inline long stub_syscall6(long syscall, long arg1, long arg2, long arg3,
long arg4, long arg5, long arg6)
{
- __asm__("movq %0, %%r9; " : : "g" (arg6) : "%r9");
- __asm__("movq %0, %%r8; " : : "g" (arg5) : "%r8");
- return(stub_syscall4(syscall, arg1, arg2, arg3, arg4));
+ long ret;
+
+ __asm__ volatile ("movq %5,%%r10 ; movq %6,%%r8 ; "
+ "movq %7, %%r9; " __syscall : "=a" (ret)
+ : "0" (syscall), "D" (arg1), "S" (arg2), "d" (arg3),
+ "g" (arg4), "g" (arg5), "g" (arg6)
+ : __syscall_clobber, "r10", "r8", "r9" );
+
+ return ret;
}
static inline void trap_myself(void)
diff --git a/arch/um/include/uml_uaccess.h b/arch/um/include/uml_uaccess.h
index f77eb642845..c0df11d06f5 100644
--- a/arch/um/include/uml_uaccess.h
+++ b/arch/um/include/uml_uaccess.h
@@ -8,10 +8,6 @@
extern int __do_copy_to_user(void *to, const void *from, int n,
void **fault_addr, void **fault_catcher);
-extern unsigned long __do_user_copy(void *to, const void *from, int n,
- void **fault_addr, void **fault_catcher,
- void (*op)(void *to, const void *from,
- int n), int *faulted_out);
void __do_copy(void *to, const void *from, int n);
#endif
diff --git a/arch/um/kernel/Makefile b/arch/um/kernel/Makefile
index 1a0001b3850..3de9d21e36b 100644
--- a/arch/um/kernel/Makefile
+++ b/arch/um/kernel/Makefile
@@ -7,10 +7,10 @@ extra-y := vmlinux.lds
clean-files :=
obj-y = config.o exec_kern.o exitcode.o \
- helper.o init_task.o irq.o irq_user.o ksyms.o main.o mem.o physmem.o \
+ init_task.o irq.o irq_user.o ksyms.o mem.o physmem.o \
process_kern.o ptrace.o reboot.o resource.o sigio_user.o sigio_kern.o \
signal_kern.o signal_user.o smp.o syscall_kern.o sysrq.o time.o \
- time_kern.o tlb.o trap_kern.o trap_user.o uaccess_user.o um_arch.o \
+ time_kern.o tlb.o trap_kern.o trap_user.o uaccess.o um_arch.o \
umid.o user_util.o
obj-$(CONFIG_BLK_DEV_INITRD) += initrd.o
@@ -24,8 +24,7 @@ obj-$(CONFIG_MODE_SKAS) += skas/
user-objs-$(CONFIG_TTY_LOG) += tty_log.o
-USER_OBJS := $(user-objs-y) config.o helper.o main.o time.o tty_log.o umid.o \
- user_util.o
+USER_OBJS := $(user-objs-y) config.o time.o tty_log.o umid.o user_util.o
include arch/um/scripts/Makefile.rules
diff --git a/arch/um/kernel/ksyms.c b/arch/um/kernel/ksyms.c
index a97a72e516a..7713e7a6f47 100644
--- a/arch/um/kernel/ksyms.c
+++ b/arch/um/kernel/ksyms.c
@@ -20,7 +20,6 @@
#include "user_util.h"
#include "mem_user.h"
#include "os.h"
-#include "helper.h"
EXPORT_SYMBOL(stop);
EXPORT_SYMBOL(uml_physmem);
diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c
index 462cc9d6538..fa4f915be5c 100644
--- a/arch/um/kernel/mem.c
+++ b/arch/um/kernel/mem.c
@@ -234,8 +234,8 @@ void paging_init(void)
empty_bad_page = (unsigned long *) alloc_bootmem_low_pages(PAGE_SIZE);
for(i=0;i<sizeof(zones_size)/sizeof(zones_size[0]);i++)
zones_size[i] = 0;
- zones_size[0] = (end_iomem >> PAGE_SHIFT) - (uml_physmem >> PAGE_SHIFT);
- zones_size[2] = highmem >> PAGE_SHIFT;
+ zones_size[ZONE_DMA] = (end_iomem >> PAGE_SHIFT) - (uml_physmem >> PAGE_SHIFT);
+ zones_size[ZONE_HIGHMEM] = highmem >> PAGE_SHIFT;
free_area_init(zones_size);
/*
diff --git a/arch/um/kernel/physmem.c b/arch/um/kernel/physmem.c
index ea670fcc8af..f3b583a878a 100644
--- a/arch/um/kernel/physmem.c
+++ b/arch/um/kernel/physmem.c
@@ -246,7 +246,7 @@ int is_remapped(void *virt)
/* Changed during early boot */
unsigned long high_physmem;
-extern unsigned long physmem_size;
+extern unsigned long long physmem_size;
int init_maps(unsigned long physmem, unsigned long iomem, unsigned long highmem)
{
@@ -321,7 +321,7 @@ void map_memory(unsigned long virt, unsigned long phys, unsigned long len,
extern int __syscall_stub_start, __binary_start;
void setup_physmem(unsigned long start, unsigned long reserve_end,
- unsigned long len, unsigned long highmem)
+ unsigned long len, unsigned long long highmem)
{
unsigned long reserve = reserve_end - start;
int pfn = PFN_UP(__pa(reserve_end));
diff --git a/arch/um/kernel/ptrace.c b/arch/um/kernel/ptrace.c
index 71af4d50389..98e09395c09 100644
--- a/arch/um/kernel/ptrace.c
+++ b/arch/um/kernel/ptrace.c
@@ -43,53 +43,10 @@ void ptrace_disable(struct task_struct *child)
extern int peek_user(struct task_struct * child, long addr, long data);
extern int poke_user(struct task_struct * child, long addr, long data);
-long sys_ptrace(long request, long pid, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
- struct task_struct *child;
int i, ret;
- lock_kernel();
- ret = -EPERM;
- if (request == PTRACE_TRACEME) {
- /* are we already being traced? */
- if (current->ptrace & PT_PTRACED)
- goto out;
-
- ret = security_ptrace(current->parent, current);
- if (ret)
- goto out;
-
- /* set the ptrace bit in the process flags. */
- current->ptrace |= PT_PTRACED;
- ret = 0;
- goto out;
- }
- ret = -ESRCH;
- read_lock(&tasklist_lock);
- child = find_task_by_pid(pid);
- if (child)
- get_task_struct(child);
- read_unlock(&tasklist_lock);
- if (!child)
- goto out;
-
- ret = -EPERM;
- if (pid == 1) /* you may not mess with init */
- goto out_tsk;
-
- if (request == PTRACE_ATTACH) {
- ret = ptrace_attach(child);
- goto out_tsk;
- }
-
-#ifdef SUBACH_PTRACE_SPECIAL
- SUBARCH_PTRACE_SPECIAL(child,request,addr,data);
-#endif
-
- ret = ptrace_check_attach(child, request == PTRACE_KILL);
- if (ret < 0)
- goto out_tsk;
-
switch (request) {
/* when I and D space are separate, these will need to be fixed. */
case PTRACE_PEEKTEXT: /* read word at location addr. */
@@ -282,10 +239,7 @@ long sys_ptrace(long request, long pid, long addr, long data)
ret = ptrace_request(child, request, addr, data);
break;
}
- out_tsk:
- put_task_struct(child);
- out:
- unlock_kernel();
+
return ret;
}
diff --git a/arch/um/kernel/sigio_user.c b/arch/um/kernel/sigio_user.c
index a52751108aa..48b1f644b9a 100644
--- a/arch/um/kernel/sigio_user.c
+++ b/arch/um/kernel/sigio_user.c
@@ -18,7 +18,6 @@
#include "kern_util.h"
#include "user_util.h"
#include "sigio.h"
-#include "helper.h"
#include "os.h"
/* Changed during early boot */
@@ -225,7 +224,7 @@ static int need_poll(int n)
next_poll.used = n;
return(0);
}
- if(next_poll.poll != NULL) kfree(next_poll.poll);
+ kfree(next_poll.poll);
next_poll.poll = um_kmalloc_atomic(n * sizeof(struct pollfd));
if(next_poll.poll == NULL){
printk("need_poll : failed to allocate new pollfds\n");
diff --git a/arch/um/kernel/skas/include/mmu-skas.h b/arch/um/kernel/skas/include/mmu-skas.h
index 09536f81ee4..44110c521e4 100644
--- a/arch/um/kernel/skas/include/mmu-skas.h
+++ b/arch/um/kernel/skas/include/mmu-skas.h
@@ -8,6 +8,7 @@
#include "linux/config.h"
#include "mm_id.h"
+#include "asm/ldt.h"
struct mmu_context_skas {
struct mm_id id;
@@ -15,6 +16,7 @@ struct mmu_context_skas {
#ifdef CONFIG_3_LEVEL_PGTABLES
unsigned long last_pmd;
#endif
+ uml_ldt_t ldt;
};
extern void switch_mm_skas(struct mm_id * mm_idp);
diff --git a/arch/um/kernel/skas/include/skas.h b/arch/um/kernel/skas/include/skas.h
index 060934740f9..daa2f85b684 100644
--- a/arch/um/kernel/skas/include/skas.h
+++ b/arch/um/kernel/skas/include/skas.h
@@ -10,7 +10,8 @@
#include "sysdep/ptrace.h"
extern int userspace_pid[];
-extern int proc_mm, ptrace_faultinfo;
+extern int proc_mm, ptrace_faultinfo, ptrace_ldt;
+extern int skas_needs_stub;
extern void switch_threads(void *me, void *next);
extern void thread_wait(void *sw, void *fb);
diff --git a/arch/um/kernel/skas/mem.c b/arch/um/kernel/skas/mem.c
index 147466d7ff4..88ab96c609c 100644
--- a/arch/um/kernel/skas/mem.c
+++ b/arch/um/kernel/skas/mem.c
@@ -20,7 +20,7 @@ unsigned long set_task_sizes_skas(int arg, unsigned long *host_size_out,
*task_size_out = CONFIG_HOST_TASK_SIZE;
#else
*host_size_out = top;
- if (proc_mm && ptrace_faultinfo)
+ if (!skas_needs_stub)
*task_size_out = top;
else *task_size_out = CONFIG_STUB_START & PGDIR_MASK;
#endif
diff --git a/arch/um/kernel/skas/mmu.c b/arch/um/kernel/skas/mmu.c
index 9e5e39cea82..677871f1b37 100644
--- a/arch/um/kernel/skas/mmu.c
+++ b/arch/um/kernel/skas/mmu.c
@@ -15,6 +15,7 @@
#include "asm/mmu.h"
#include "asm/pgalloc.h"
#include "asm/pgtable.h"
+#include "asm/ldt.h"
#include "os.h"
#include "skas.h"
@@ -74,13 +75,12 @@ static int init_stub_pte(struct mm_struct *mm, unsigned long proc,
int init_new_context_skas(struct task_struct *task, struct mm_struct *mm)
{
- struct mm_struct *cur_mm = current->mm;
- struct mm_id *cur_mm_id = &cur_mm->context.skas.id;
- struct mm_id *mm_id = &mm->context.skas.id;
+ struct mmu_context_skas *from_mm = NULL;
+ struct mmu_context_skas *to_mm = &mm->context.skas;
unsigned long stack = 0;
- int from, ret = -ENOMEM;
+ int from_fd, ret = -ENOMEM;
- if(!proc_mm || !ptrace_faultinfo){
+ if(skas_needs_stub){
stack = get_zeroed_page(GFP_KERNEL);
if(stack == 0)
goto out;
@@ -102,33 +102,43 @@ int init_new_context_skas(struct task_struct *task, struct mm_struct *mm)
mm->nr_ptes--;
}
- mm_id->stack = stack;
+
+ to_mm->id.stack = stack;
+ if(current->mm != NULL && current->mm != &init_mm)
+ from_mm = &current->mm->context.skas;
if(proc_mm){
- if((cur_mm != NULL) && (cur_mm != &init_mm))
- from = cur_mm_id->u.mm_fd;
- else from = -1;
+ if(from_mm)
+ from_fd = from_mm->id.u.mm_fd;
+ else from_fd = -1;
- ret = new_mm(from, stack);
+ ret = new_mm(from_fd, stack);
if(ret < 0){
printk("init_new_context_skas - new_mm failed, "
"errno = %d\n", ret);
goto out_free;
}
- mm_id->u.mm_fd = ret;
+ to_mm->id.u.mm_fd = ret;
}
else {
- if((cur_mm != NULL) && (cur_mm != &init_mm))
- mm_id->u.pid = copy_context_skas0(stack,
- cur_mm_id->u.pid);
- else mm_id->u.pid = start_userspace(stack);
+ if(from_mm)
+ to_mm->id.u.pid = copy_context_skas0(stack,
+ from_mm->id.u.pid);
+ else to_mm->id.u.pid = start_userspace(stack);
+ }
+
+ ret = init_new_ldt(to_mm, from_mm);
+ if(ret < 0){
+ printk("init_new_context_skas - init_ldt"
+ " failed, errno = %d\n", ret);
+ goto out_free;
}
return 0;
out_free:
- if(mm_id->stack != 0)
- free_page(mm_id->stack);
+ if(to_mm->id.stack != 0)
+ free_page(to_mm->id.stack);
out:
return ret;
}
diff --git a/arch/um/kernel/skas/process.c b/arch/um/kernel/skas/process.c
index 5cd0e992978..599d679bd4f 100644
--- a/arch/um/kernel/skas/process.c
+++ b/arch/um/kernel/skas/process.c
@@ -69,6 +69,17 @@ void wait_stub_done(int pid, int sig, char * fname)
if((n < 0) || !WIFSTOPPED(status) ||
(WSTOPSIG(status) != SIGUSR1 && WSTOPSIG(status) != SIGTRAP)){
+ unsigned long regs[FRAME_SIZE];
+ if(ptrace(PTRACE_GETREGS, pid, 0, regs) < 0)
+ printk("Failed to get registers from stub, "
+ "errno = %d\n", errno);
+ else {
+ int i;
+
+ printk("Stub registers -\n");
+ for(i = 0; i < FRAME_SIZE; i++)
+ printk("\t%d - %lx\n", i, regs[i]);
+ }
panic("%s : failed to wait for SIGUSR1/SIGTRAP, "
"pid = %d, n = %d, errno = %d, status = 0x%x\n",
fname, pid, n, errno, status);
@@ -370,9 +381,9 @@ int copy_context_skas0(unsigned long new_stack, int pid)
}
/*
- * This is used only, if proc_mm is available, while PTRACE_FAULTINFO
- * isn't. Opening /proc/mm creates a new mm_context, which lacks the stub-pages
- * Thus, we map them using /proc/mm-fd
+ * This is used only if stub pages are needed while proc_mm is
+ * available. Opening /proc/mm creates a new mm_context, which lacks
+ * the stub pages. Thus, we map them using the /proc/mm fd
*/
void map_stub_pages(int fd, unsigned long code,
unsigned long data, unsigned long stack)
diff --git a/arch/um/kernel/skas/process_kern.c b/arch/um/kernel/skas/process_kern.c
index efe92e8aa2a..9c990253966 100644
--- a/arch/um/kernel/skas/process_kern.c
+++ b/arch/um/kernel/skas/process_kern.c
@@ -145,7 +145,7 @@ int new_mm(int from, unsigned long stack)
"err = %d\n", -n);
}
- if(!ptrace_faultinfo)
+ if(skas_needs_stub)
map_stub_pages(fd, CONFIG_STUB_CODE, CONFIG_STUB_DATA, stack);
return(fd);
diff --git a/arch/um/kernel/tt/uaccess_user.c b/arch/um/kernel/tt/uaccess_user.c
index 8c220f054b6..6c92bbccb49 100644
--- a/arch/um/kernel/tt/uaccess_user.c
+++ b/arch/um/kernel/tt/uaccess_user.c
@@ -10,6 +10,7 @@
#include "uml_uaccess.h"
#include "task.h"
#include "kern_util.h"
+#include "os.h"
int __do_copy_from_user(void *to, const void *from, int n,
void **fault_addr, void **fault_catcher)
diff --git a/arch/um/kernel/uaccess.c b/arch/um/kernel/uaccess.c
new file mode 100644
index 00000000000..054e3de0784
--- /dev/null
+++ b/arch/um/kernel/uaccess.c
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2001 Chris Emerson (cemerson@chiark.greenend.org.uk)
+ * Copyright (C) 2001, 2002 Jeff Dike (jdike@karaya.com)
+ * Licensed under the GPL
+ */
+
+/* These are here rather than tt/uaccess.c because skas mode needs them in
+ * order to do SIGBUS recovery when a tmpfs mount runs out of room.
+ */
+
+#include <linux/string.h>
+#include "os.h"
+
+void __do_copy(void *to, const void *from, int n)
+{
+ memcpy(to, from, n);
+}
+
+
+int __do_copy_to_user(void *to, const void *from, int n,
+ void **fault_addr, void **fault_catcher)
+{
+ unsigned long fault;
+ int faulted;
+
+ fault = __do_user_copy(to, from, n, fault_addr, fault_catcher,
+ __do_copy, &faulted);
+ if(!faulted) return(0);
+ else return(n - (fault - (unsigned long) to));
+}
diff --git a/arch/um/kernel/uaccess_user.c b/arch/um/kernel/uaccess_user.c
deleted file mode 100644
index d035257ed0a..00000000000
--- a/arch/um/kernel/uaccess_user.c
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Copyright (C) 2001 Chris Emerson (cemerson@chiark.greenend.org.uk)
- * Copyright (C) 2001, 2002 Jeff Dike (jdike@karaya.com)
- * Licensed under the GPL
- */
-
-#include <setjmp.h>
-#include <string.h>
-
-/* These are here rather than tt/uaccess.c because skas mode needs them in
- * order to do SIGBUS recovery when a tmpfs mount runs out of room.
- */
-
-unsigned long __do_user_copy(void *to, const void *from, int n,
- void **fault_addr, void **fault_catcher,
- void (*op)(void *to, const void *from,
- int n), int *faulted_out)
-{
- unsigned long *faddrp = (unsigned long *) fault_addr, ret;
-
- sigjmp_buf jbuf;
- *fault_catcher = &jbuf;
- if(sigsetjmp(jbuf, 1) == 0){
- (*op)(to, from, n);
- ret = 0;
- *faulted_out = 0;
- }
- else {
- ret = *faddrp;
- *faulted_out = 1;
- }
- *fault_addr = NULL;
- *fault_catcher = NULL;
- return ret;
-}
-
-void __do_copy(void *to, const void *from, int n)
-{
- memcpy(to, from, n);
-}
-
-
-int __do_copy_to_user(void *to, const void *from, int n,
- void **fault_addr, void **fault_catcher)
-{
- unsigned long fault;
- int faulted;
-
- fault = __do_user_copy(to, from, n, fault_addr, fault_catcher,
- __do_copy, &faulted);
- if(!faulted) return(0);
- else return(n - (fault - (unsigned long) to));
-}
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
diff --git a/arch/um/kernel/um_arch.c b/arch/um/kernel/um_arch.c
index 93dc782dc1c..142a9493912 100644
--- a/arch/um/kernel/um_arch.c
+++ b/arch/um/kernel/um_arch.c
@@ -137,7 +137,7 @@ static char *argv1_end = NULL;
/* Set in early boot */
static int have_root __initdata = 0;
-long physmem_size = 32 * 1024 * 1024;
+long long physmem_size = 32 * 1024 * 1024;
void set_cmdline(char *cmd)
{
@@ -402,7 +402,7 @@ int linux_main(int argc, char **argv)
#ifndef CONFIG_HIGHMEM
highmem = 0;
printf("CONFIG_HIGHMEM not enabled - physical memory shrunk "
- "to %ld bytes\n", physmem_size);
+ "to %lu bytes\n", physmem_size);
#endif
}
@@ -414,8 +414,8 @@ int linux_main(int argc, char **argv)
setup_physmem(uml_physmem, uml_reserved, physmem_size, highmem);
if(init_maps(physmem_size, iomem_size, highmem)){
- printf("Failed to allocate mem_map for %ld bytes of physical "
- "memory and %ld bytes of highmem\n", physmem_size,
+ printf("Failed to allocate mem_map for %lu bytes of physical "
+ "memory and %lu bytes of highmem\n", physmem_size,
highmem);
exit(1);
}
@@ -426,7 +426,7 @@ int linux_main(int argc, char **argv)
end_vm = start_vm + virtmem_size;
if(virtmem_size < physmem_size)
- printf("Kernel virtual memory size shrunk to %ld bytes\n",
+ printf("Kernel virtual memory size shrunk to %lu bytes\n",
virtmem_size);
uml_postsetup();
diff --git a/arch/um/kernel/user_util.c b/arch/um/kernel/user_util.c
index 41d17c71511..4c231161f25 100644
--- a/arch/um/kernel/user_util.c
+++ b/arch/um/kernel/user_util.c
@@ -27,7 +27,6 @@
#include "user.h"
#include "mem_user.h"
#include "init.h"
-#include "helper.h"
#include "ptrace_user.h"
#include "uml-config.h"
diff --git a/arch/um/os-Linux/Makefile b/arch/um/os-Linux/Makefile
index d15ec2af6a2..b83ac8e21c3 100644
--- a/arch/um/os-Linux/Makefile
+++ b/arch/um/os-Linux/Makefile
@@ -3,11 +3,12 @@
# Licensed under the GPL
#
-obj-y = aio.o elf_aux.o file.o mem.o process.o signal.o start_up.o time.o \
- tt.o tty.o user_syms.o drivers/ sys-$(SUBARCH)/
+obj-y = aio.o elf_aux.o file.o helper.o main.o mem.o process.o signal.o \
+ start_up.o time.o tt.o tty.o uaccess.o user_syms.o drivers/ \
+ sys-$(SUBARCH)/
-USER_OBJS := aio.o elf_aux.o file.o mem.o process.o signal.o start_up.o \
- time.o tt.o tty.o
+USER_OBJS := aio.o elf_aux.o file.o helper.o main.o mem.o process.o signal.o \
+ start_up.o time.o tt.o tty.o uaccess.o
elf_aux.o: $(ARCH_DIR)/kernel-offsets.h
CFLAGS_elf_aux.o += -I$(objtree)/arch/um
diff --git a/arch/um/os-Linux/aio.c b/arch/um/os-Linux/aio.c
index 41cfb094420..ffa759addd3 100644
--- a/arch/um/os-Linux/aio.c
+++ b/arch/um/os-Linux/aio.c
@@ -10,7 +10,6 @@
#include <sched.h>
#include <sys/syscall.h>
#include "os.h"
-#include "helper.h"
#include "aio.h"
#include "init.h"
#include "user.h"
diff --git a/arch/um/os-Linux/drivers/ethertap_user.c b/arch/um/os-Linux/drivers/ethertap_user.c
index cd4d6544da7..901b85e8a1c 100644
--- a/arch/um/os-Linux/drivers/ethertap_user.c
+++ b/arch/um/os-Linux/drivers/ethertap_user.c
@@ -19,7 +19,6 @@
#include "user_util.h"
#include "net_user.h"
#include "etap.h"
-#include "helper.h"
#include "os.h"
#define MAX_PACKET ETH_MAX_PACKET
diff --git a/arch/um/os-Linux/drivers/tuntap_user.c b/arch/um/os-Linux/drivers/tuntap_user.c
index 4ba9b17adf1..52945338b64 100644
--- a/arch/um/os-Linux/drivers/tuntap_user.c
+++ b/arch/um/os-Linux/drivers/tuntap_user.c
@@ -20,7 +20,6 @@
#include "kern_util.h"
#include "user_util.h"
#include "user.h"
-#include "helper.h"
#include "os.h"
#define MAX_PACKET ETH_MAX_PACKET
diff --git a/arch/um/kernel/helper.c b/arch/um/os-Linux/helper.c
index 33fb0bd3b11..36cc8475bcd 100644
--- a/arch/um/kernel/helper.c
+++ b/arch/um/os-Linux/helper.c
@@ -1,4 +1,4 @@
-/*
+/*
* Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
* Licensed under the GPL
*/
@@ -13,7 +13,6 @@
#include "user.h"
#include "kern_util.h"
#include "user_util.h"
-#include "helper.h"
#include "os.h"
struct helper_data {
@@ -46,7 +45,7 @@ static int helper_child(void *arg)
errval = errno;
printk("execvp of '%s' failed - errno = %d\n", argv[0], errno);
os_write_file(data->fd, &errval, sizeof(errval));
- os_kill_process(os_getpid(), 0);
+ kill(os_getpid(), SIGKILL);
return(0);
}
@@ -90,7 +89,7 @@ int run_helper(void (*pre_exec)(void *), void *pre_data, char **argv,
goto out_close;
}
- os_close_file(fds[1]);
+ close(fds[1]);
fds[1] = -1;
/*Read the errno value from the child.*/
@@ -98,7 +97,8 @@ int run_helper(void (*pre_exec)(void *), void *pre_data, char **argv,
if(n < 0){
printk("run_helper : read on pipe failed, ret = %d\n", -n);
ret = n;
- os_kill_process(pid, 1);
+ kill(pid, SIGKILL);
+ CATCH_EINTR(waitpid(pid, NULL, 0));
}
else if(n != 0){
CATCH_EINTR(n = waitpid(pid, NULL, 0));
@@ -109,8 +109,8 @@ int run_helper(void (*pre_exec)(void *), void *pre_data, char **argv,
out_close:
if (fds[1] != -1)
- os_close_file(fds[1]);
- os_close_file(fds[0]);
+ close(fds[1]);
+ close(fds[0]);
out_free:
if(stack_out == NULL)
free_stack(stack, 0);
@@ -118,7 +118,7 @@ out_free:
return(ret);
}
-int run_helper_thread(int (*proc)(void *), void *arg, unsigned int flags,
+int run_helper_thread(int (*proc)(void *), void *arg, unsigned int flags,
unsigned long *stack_out, int stack_order)
{
unsigned long stack, sp;
@@ -131,7 +131,7 @@ int run_helper_thread(int (*proc)(void *), void *arg, unsigned int flags,
pid = clone(proc, (void *) sp, flags | SIGCHLD, arg);
if(pid < 0){
err = -errno;
- printk("run_helper_thread : clone failed, errno = %d\n",
+ printk("run_helper_thread : clone failed, errno = %d\n",
errno);
return err;
}
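
run_helper() above launches the helper over a pipe so the parent can tell a failed execvp() (the child writes errno into the pipe, then kills itself) from a successful one. A generic sketch of that exec-failure-over-a-pipe idiom in plain POSIX, making no assumptions about UML's os_* and clone() wrappers; the FD_CLOEXEC flag on the write end is what turns EOF into "exec succeeded":

/* Generic sketch of the exec-failure-over-a-pipe idiom; plain POSIX,
 * no UML wrappers assumed. */
#include <errno.h>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

static int spawn(char **argv)
{
        int fds[2], err, n;
        pid_t pid;

        if (pipe(fds) < 0)
                return -errno;
        fcntl(fds[1], F_SETFD, FD_CLOEXEC);    /* vanishes on successful exec */

        pid = fork();
        if (pid < 0)
                return -errno;
        if (pid == 0) {
                close(fds[0]);
                execvp(argv[0], argv);
                err = errno;                   /* exec failed - report why */
                write(fds[1], &err, sizeof(err));
                _exit(127);
        }

        close(fds[1]);
        n = read(fds[0], &err, sizeof(err));   /* 0 bytes read == exec worked */
        close(fds[0]);
        if (n == sizeof(err)) {
                waitpid(pid, NULL, 0);
                return -err;
        }
        return pid;
}

int main(void)
{
        char *argv[] = { "true", NULL };

        return spawn(argv) > 0 ? 0 : 1;
}
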
diff --git a/arch/um/kernel/main.c b/arch/um/os-Linux/main.c
index d31027f0fe3..23da27d2256 100644
--- a/arch/um/kernel/main.c
+++ b/arch/um/os-Linux/main.c
@@ -157,25 +157,25 @@ int main(int argc, char **argv, char **envp)
*/
change_sig(SIGPROF, 0);
- /* This signal stuff used to be in the reboot case. However,
- * sometimes a SIGVTALRM can come in when we're halting (reproducably
- * when writing out gcov information, presumably because that takes
- * some time) and cause a segfault.
- */
-
- /* stop timers and set SIG*ALRM to be ignored */
- disable_timer();
-
- /* disable SIGIO for the fds and set SIGIO to be ignored */
- err = deactivate_all_fds();
- if(err)
- printf("deactivate_all_fds failed, errno = %d\n", -err);
-
- /* Let any pending signals fire now. This ensures
- * that they won't be delivered after the exec, when
- * they are definitely not expected.
- */
- unblock_signals();
+ /* This signal stuff used to be in the reboot case. However,
+ * sometimes a SIGVTALRM can come in when we're halting (reproducably
+ * when writing out gcov information, presumably because that takes
+ * some time) and cause a segfault.
+ */
+
+ /* stop timers and set SIG*ALRM to be ignored */
+ disable_timer();
+
+ /* disable SIGIO for the fds and set SIGIO to be ignored */
+ err = deactivate_all_fds();
+ if(err)
+ printf("deactivate_all_fds failed, errno = %d\n", -err);
+
+ /* Let any pending signals fire now. This ensures
+ * that they won't be delivered after the exec, when
+ * they are definitely not expected.
+ */
+ unblock_signals();
/* Reboot */
if(ret){
@@ -257,14 +257,3 @@ void __wrap_free(void *ptr)
}
else __real_free(ptr);
}
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
diff --git a/arch/um/os-Linux/mem.c b/arch/um/os-Linux/mem.c
index 8e71edaaf80..9d7d69a523b 100644
--- a/arch/um/os-Linux/mem.c
+++ b/arch/um/os-Linux/mem.c
@@ -88,7 +88,7 @@ int make_tempfile(const char *template, char **out_tempname, int do_unlink)
* This proc is used in start_up.c
* So it isn't 'static'.
*/
-int create_tmp_file(unsigned long len)
+int create_tmp_file(unsigned long long len)
{
int fd, err;
char zero;
@@ -121,7 +121,7 @@ int create_tmp_file(unsigned long len)
return(fd);
}
-static int create_anon_file(unsigned long len)
+static int create_anon_file(unsigned long long len)
{
void *addr;
int fd;
@@ -144,7 +144,7 @@ static int create_anon_file(unsigned long len)
extern int have_devanon;
-int create_mem_file(unsigned long len)
+int create_mem_file(unsigned long long len)
{
int err, fd;
diff --git a/arch/um/os-Linux/start_up.c b/arch/um/os-Linux/start_up.c
index b99ab414542..37517d49c4a 100644
--- a/arch/um/os-Linux/start_up.c
+++ b/arch/um/os-Linux/start_up.c
@@ -135,7 +135,9 @@ static int stop_ptraced_child(int pid, void *stack, int exitcode,
}
int ptrace_faultinfo = 1;
+int ptrace_ldt = 1;
int proc_mm = 1;
+int skas_needs_stub = 0;
static int __init skas0_cmd_param(char *str, int* add)
{
@@ -294,7 +296,7 @@ static void __init check_ptrace(void)
check_sysemu();
}
-extern int create_tmp_file(unsigned long len);
+extern int create_tmp_file(unsigned long long len);
static void check_tmpexec(void)
{
@@ -352,14 +354,26 @@ __uml_setup("noptracefaultinfo", noptracefaultinfo_cmd_param,
" it. To support PTRACE_FAULTINFO, the host needs to be patched\n"
" using the current skas3 patch.\n\n");
+static int __init noptraceldt_cmd_param(char *str, int* add)
+{
+ ptrace_ldt = 0;
+ return 0;
+}
+
+__uml_setup("noptraceldt", noptraceldt_cmd_param,
+"noptraceldt\n"
+" Turns off usage of PTRACE_LDT, even if host supports it.\n"
+" To support PTRACE_LDT, the host needs to be patched using\n"
+" the current skas3 patch.\n\n");
+
#ifdef UML_CONFIG_MODE_SKAS
-static inline void check_skas3_ptrace_support(void)
+static inline void check_skas3_ptrace_faultinfo(void)
{
struct ptrace_faultinfo fi;
void *stack;
int pid, n;
- printf("Checking for the skas3 patch in the host...");
+ printf(" - PTRACE_FAULTINFO...");
pid = start_ptraced_child(&stack);
n = ptrace(PTRACE_FAULTINFO, pid, 0, &fi);
@@ -381,9 +395,49 @@ static inline void check_skas3_ptrace_support(void)
stop_ptraced_child(pid, stack, 1, 1);
}
-int can_do_skas(void)
+static inline void check_skas3_ptrace_ldt(void)
+{
+#ifdef PTRACE_LDT
+ void *stack;
+ int pid, n;
+ unsigned char ldtbuf[40];
+ struct ptrace_ldt ldt_op = (struct ptrace_ldt) {
+ .func = 2, /* read default ldt */
+ .ptr = ldtbuf,
+ .bytecount = sizeof(ldtbuf)};
+
+ printf(" - PTRACE_LDT...");
+ pid = start_ptraced_child(&stack);
+
+ n = ptrace(PTRACE_LDT, pid, 0, (unsigned long) &ldt_op);
+ if (n < 0) {
+ if(errno == EIO)
+ printf("not found\n");
+ else {
+ perror("not found");
+ }
+ ptrace_ldt = 0;
+ }
+ else {
+ if(ptrace_ldt)
+ printf("found\n");
+ else
+ printf("found, but use is disabled\n");
+ }
+
+ stop_ptraced_child(pid, stack, 1, 1);
+#else
+ /* PTRACE_LDT might be disabled via cmdline option.
+ * We want to override this, else we might use the stub
+ * without real need
+ */
+ ptrace_ldt = 1;
+#endif
+}
+
+static inline void check_skas3_proc_mm(void)
{
- printf("Checking for /proc/mm...");
+ printf(" - /proc/mm...");
if (os_access("/proc/mm", OS_ACC_W_OK) < 0) {
proc_mm = 0;
printf("not found\n");
@@ -394,8 +448,19 @@ int can_do_skas(void)
else
printf("found\n");
}
+}
+
+int can_do_skas(void)
+{
+ printf("Checking for the skas3 patch in the host:\n");
+
+ check_skas3_proc_mm();
+ check_skas3_ptrace_faultinfo();
+ check_skas3_ptrace_ldt();
+
+ if(!proc_mm || !ptrace_faultinfo || !ptrace_ldt)
+ skas_needs_stub = 1;
- check_skas3_ptrace_support();
return 1;
}
#else
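
can_do_skas() now probes each skas3 host feature separately: it forks a ptraced child, issues the request, and treats EIO as "the host does not implement it" and anything else as present. A stand-alone sketch of that probing pattern, with a deliberately bogus request number standing in for PTRACE_FAULTINFO/PTRACE_LDT (which only exist on patched hosts):

/* Stand-alone sketch of the feature probe; 0x4711 is a deliberately
 * invalid request playing the role of PTRACE_FAULTINFO or PTRACE_LDT. */
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

static int host_supports(int request)
{
        int status, supported;
        pid_t pid = fork();

        if (pid == 0) {
                ptrace(PTRACE_TRACEME, 0, 0, 0);
                raise(SIGSTOP);                 /* wait to be poked */
                _exit(0);
        }
        waitpid(pid, &status, WUNTRACED);

        errno = 0;
        /* as in check_skas3_ptrace_ldt(): only EIO means "not there" */
        supported = (ptrace(request, pid, 0, 0) >= 0) || (errno != EIO);

        ptrace(PTRACE_KILL, pid, 0, 0);
        waitpid(pid, &status, 0);
        return supported;
}

int main(void)
{
        printf("bogus request supported: %d\n", host_supports(0x4711));
        return 0;
}
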
diff --git a/arch/um/os-Linux/uaccess.c b/arch/um/os-Linux/uaccess.c
new file mode 100644
index 00000000000..38d710158c3
--- /dev/null
+++ b/arch/um/os-Linux/uaccess.c
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2001 Chris Emerson (cemerson@chiark.greenend.org.uk)
+ * Copyright (C) 2001, 2002 Jeff Dike (jdike@karaya.com)
+ * Licensed under the GPL
+ */
+
+#include <setjmp.h>
+#include <string.h>
+
+unsigned long __do_user_copy(void *to, const void *from, int n,
+ void **fault_addr, void **fault_catcher,
+ void (*op)(void *to, const void *from,
+ int n), int *faulted_out)
+{
+ unsigned long *faddrp = (unsigned long *) fault_addr, ret;
+
+ sigjmp_buf jbuf;
+ *fault_catcher = &jbuf;
+ if(sigsetjmp(jbuf, 1) == 0){
+ (*op)(to, from, n);
+ ret = 0;
+ *faulted_out = 0;
+ }
+ else {
+ ret = *faddrp;
+ *faulted_out = 1;
+ }
+ *fault_addr = NULL;
+ *fault_catcher = NULL;
+ return ret;
+}
+
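__do_user_copy() works because UML's SIGSEGV/SIGBUS handler, elsewhere in the tree, records the faulting address through *fault_addr and siglongjmps through the registered *fault_catcher instead of letting the process die. A self-contained sketch of that sigsetjmp-based fault catching, with a plain signal() handler and a global catcher pointer standing in for UML's real trap code:

/* Sketch of the sigsetjmp fault-catching pattern; the handler and the
 * global catcher pointer stand in for UML's trap code, they are not it. */
#include <setjmp.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static sigjmp_buf *fault_catcher;

static void segv_handler(int sig)
{
        if (fault_catcher)
                siglongjmp(*fault_catcher, 1);
        _exit(sig);                    /* no catcher installed - give up */
}

static int checked_copy(void *to, const void *from, int n)
{
        sigjmp_buf jbuf;
        int faulted;

        fault_catcher = &jbuf;
        if (sigsetjmp(jbuf, 1) == 0) { /* 1: restore the signal mask on jump */
                memcpy(to, from, n);
                faulted = 0;
        } else
                faulted = 1;
        fault_catcher = NULL;
        return faulted;
}

int main(void)
{
        char buf[16];

        signal(SIGSEGV, segv_handler);
        printf("good copy faulted: %d\n", checked_copy(buf, "ok", 3));
        printf("bad copy faulted: %d\n", checked_copy(buf, (void *) 1, 3));
        return 0;
}

POSIX leaves jumping out of a SIGSEGV handler formally undefined, but UML controls the whole fault path, which is why the real code can rely on it.
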
diff --git a/arch/um/scripts/Makefile.rules b/arch/um/scripts/Makefile.rules
index 651d9d88b65..b3fbf125709 100644
--- a/arch/um/scripts/Makefile.rules
+++ b/arch/um/scripts/Makefile.rules
@@ -26,8 +26,13 @@ define unprofile
$(patsubst -pg,,$(patsubst -fprofile-arcs -ftest-coverage,,$(1)))
endef
+# cmd_make_link checks to see if the $(foo-dir) variable starts with a /. If
+# so, it's considered to be a path relative to $(srcdir) rather than
+# $(srcdir)/arch/$(SUBARCH). This is because x86_64 wants to get ldt.c from
+# arch/um/sys-i386 rather than arch/i386 like the other borrowed files. So,
+# it sets $(ldt.c-dir) to /arch/um/sys-i386.
quiet_cmd_make_link = SYMLINK $@
-cmd_make_link = ln -sf $(srctree)/arch/$(SUBARCH)/$($(notdir $@)-dir)/$(notdir $@) $@
+cmd_make_link = rm -f $@; ln -sf $(srctree)$(if $(filter-out /%,$($(notdir $@)-dir)),/arch/$(SUBARCH))/$($(notdir $@)-dir)/$(notdir $@) $@
# this needs to be before the foreach, because targets does not accept
# complete paths like $(obj)/$(f). To make sure this works, use a := assignment
diff --git a/arch/um/sys-i386/ldt.c b/arch/um/sys-i386/ldt.c
index 36b5c2c1328..6360f1c958d 100644
--- a/arch/um/sys-i386/ldt.c
+++ b/arch/um/sys-i386/ldt.c
@@ -3,53 +3,26 @@
* Licensed under the GPL
*/
+#include "linux/stddef.h"
#include "linux/config.h"
#include "linux/sched.h"
#include "linux/slab.h"
#include "linux/types.h"
+#include "linux/errno.h"
#include "asm/uaccess.h"
-#include "asm/ptrace.h"
#include "asm/smp.h"
#include "asm/ldt.h"
+#include "asm/unistd.h"
#include "choose-mode.h"
#include "kern.h"
#include "mode_kern.h"
-#ifdef CONFIG_MODE_TT
-
extern int modify_ldt(int func, void *ptr, unsigned long bytecount);
-static int do_modify_ldt_tt(int func, void *ptr, unsigned long bytecount)
-{
- return modify_ldt(func, ptr, bytecount);
-}
-
-#endif
-
-#ifdef CONFIG_MODE_SKAS
-
-#include "skas.h"
-#include "skas_ptrace.h"
-
-static int do_modify_ldt_skas(int func, void *ptr, unsigned long bytecount)
-{
- struct ptrace_ldt ldt;
- u32 cpu;
- int res;
-
- ldt = ((struct ptrace_ldt) { .func = func,
- .ptr = ptr,
- .bytecount = bytecount });
-
- cpu = get_cpu();
- res = ptrace(PTRACE_LDT, userspace_pid[cpu], 0, (unsigned long) &ldt);
- put_cpu();
-
- return res;
-}
-#endif
+#ifdef CONFIG_MODE_TT
-int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount)
+static long do_modify_ldt_tt(int func, void __user *ptr,
+ unsigned long bytecount)
{
struct user_desc info;
int res = 0;
@@ -89,8 +62,7 @@ int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount)
goto out;
}
- res = CHOOSE_MODE_PROC(do_modify_ldt_tt, do_modify_ldt_skas, func,
- p, bytecount);
+ res = modify_ldt(func, p, bytecount);
if(res < 0)
goto out;
@@ -108,3 +80,467 @@ out:
kfree(buf);
return res;
}
+
+#endif
+
+#ifdef CONFIG_MODE_SKAS
+
+#include "skas.h"
+#include "skas_ptrace.h"
+#include "asm/mmu_context.h"
+
+long write_ldt_entry(struct mm_id * mm_idp, int func, struct user_desc * desc,
+ void **addr, int done)
+{
+ long res;
+
+ if(proc_mm){
+ /* This is a special handling for the case, that the mm to
+ * modify isn't current->active_mm.
+ * If this is called directly by modify_ldt,
+ * (current->active_mm->context.skas.u == mm_idp)
+ * will be true. So no call to switch_mm_skas(mm_idp) is done.
+ * If this is called in case of init_new_ldt or PTRACE_LDT,
+ * mm_idp won't belong to current->active_mm, but child->mm.
+ * So we need to switch child's mm into our userspace, then
+ * later switch back.
+ *
+ * Note: I'm unsure: should interrupts be disabled here?
+ */
+ if(!current->active_mm || current->active_mm == &init_mm ||
+ mm_idp != &current->active_mm->context.skas.id)
+ switch_mm_skas(mm_idp);
+ }
+
+ if(ptrace_ldt) {
+ struct ptrace_ldt ldt_op = (struct ptrace_ldt) {
+ .func = func,
+ .ptr = desc,
+ .bytecount = sizeof(*desc)};
+ u32 cpu;
+ int pid;
+
+ if(!proc_mm)
+ pid = mm_idp->u.pid;
+ else {
+ cpu = get_cpu();
+ pid = userspace_pid[cpu];
+ }
+
+ res = ptrace(PTRACE_LDT, pid, 0, (unsigned long) &ldt_op);
+ if(res)
+ res = errno;
+
+ if(proc_mm)
+ put_cpu();
+ }
+ else {
+ void *stub_addr;
+ res = syscall_stub_data(mm_idp, (unsigned long *)desc,
+ (sizeof(*desc) + sizeof(long) - 1) &
+ ~(sizeof(long) - 1),
+ addr, &stub_addr);
+ if(!res){
+ unsigned long args[] = { func,
+ (unsigned long)stub_addr,
+ sizeof(*desc),
+ 0, 0, 0 };
+ res = run_syscall_stub(mm_idp, __NR_modify_ldt, args,
+ 0, addr, done);
+ }
+ }
+
+ if(proc_mm){
+ /* This is the second part of special handling, that makes
+ * PTRACE_LDT possible to implement.
+ */
+ if(current->active_mm && current->active_mm != &init_mm &&
+ mm_idp != &current->active_mm->context.skas.id)
+ switch_mm_skas(&current->active_mm->context.skas.id);
+ }
+
+ return res;
+}
+
+static long read_ldt_from_host(void __user * ptr, unsigned long bytecount)
+{
+ int res, n;
+ struct ptrace_ldt ptrace_ldt = (struct ptrace_ldt) {
+ .func = 0,
+ .bytecount = bytecount,
+ .ptr = (void *)kmalloc(bytecount, GFP_KERNEL)};
+ u32 cpu;
+
+ if(ptrace_ldt.ptr == NULL)
+ return -ENOMEM;
+
+ /* This is called from sys_modify_ldt only, so userspace_pid gives
+ * us the right number
+ */
+
+ cpu = get_cpu();
+ res = ptrace(PTRACE_LDT, userspace_pid[cpu], 0,
+ (unsigned long) &ptrace_ldt);
+ put_cpu();
+ if(res < 0)
+ goto out;
+
+ n = copy_to_user(ptr, ptrace_ldt.ptr, res);
+ if(n != 0)
+ res = -EFAULT;
+
+ out:
+ kfree(ptrace_ldt.ptr);
+
+ return res;
+}
+
+/*
+ * In skas mode, we hold our own ldt data in UML.
+ * Thus, the code implementing sys_modify_ldt_skas
+ * is very similar to (and mostly stolen from) sys_modify_ldt
+ * for arch/i386/kernel/ldt.c
+ * The routines copied and modified in part are:
+ * - read_ldt
+ * - read_default_ldt
+ * - write_ldt
+ * - sys_modify_ldt_skas
+ */
+
+static int read_ldt(void __user * ptr, unsigned long bytecount)
+{
+ int i, err = 0;
+ unsigned long size;
+ uml_ldt_t * ldt = &current->mm->context.skas.ldt;
+
+ if(!ldt->entry_count)
+ goto out;
+ if(bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES)
+ bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES;
+ err = bytecount;
+
+ if(ptrace_ldt){
+ return read_ldt_from_host(ptr, bytecount);
+ }
+
+ down(&ldt->semaphore);
+ if(ldt->entry_count <= LDT_DIRECT_ENTRIES){
+ size = LDT_ENTRY_SIZE*LDT_DIRECT_ENTRIES;
+ if(size > bytecount)
+ size = bytecount;
+ if(copy_to_user(ptr, ldt->entries, size))
+ err = -EFAULT;
+ bytecount -= size;
+ ptr += size;
+ }
+ else {
+ for(i=0; i<ldt->entry_count/LDT_ENTRIES_PER_PAGE && bytecount;
+ i++){
+ size = PAGE_SIZE;
+ if(size > bytecount)
+ size = bytecount;
+ if(copy_to_user(ptr, ldt->pages[i], size)){
+ err = -EFAULT;
+ break;
+ }
+ bytecount -= size;
+ ptr += size;
+ }
+ }
+ up(&ldt->semaphore);
+
+ if(bytecount == 0 || err == -EFAULT)
+ goto out;
+
+ if(clear_user(ptr, bytecount))
+ err = -EFAULT;
+
+out:
+ return err;
+}
+
+static int read_default_ldt(void __user * ptr, unsigned long bytecount)
+{
+ int err;
+
+ if(bytecount > 5*LDT_ENTRY_SIZE)
+ bytecount = 5*LDT_ENTRY_SIZE;
+
+ err = bytecount;
+ /* UML doesn't support lcall7 and lcall27.
+ * So, we don't really have a default ldt, but emulate
+ * an empty ldt of common host default ldt size.
+ */
+ if(clear_user(ptr, bytecount))
+ err = -EFAULT;
+
+ return err;
+}
+
+static int write_ldt(void __user * ptr, unsigned long bytecount, int func)
+{
+ uml_ldt_t * ldt = &current->mm->context.skas.ldt;
+ struct mm_id * mm_idp = &current->mm->context.skas.id;
+ int i, err;
+ struct user_desc ldt_info;
+ struct ldt_entry entry0, *ldt_p;
+ void *addr = NULL;
+
+ err = -EINVAL;
+ if(bytecount != sizeof(ldt_info))
+ goto out;
+ err = -EFAULT;
+ if(copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
+ goto out;
+
+ err = -EINVAL;
+ if(ldt_info.entry_number >= LDT_ENTRIES)
+ goto out;
+ if(ldt_info.contents == 3){
+ if (func == 1)
+ goto out;
+ if (ldt_info.seg_not_present == 0)
+ goto out;
+ }
+
+ if(!ptrace_ldt)
+ down(&ldt->semaphore);
+
+ err = write_ldt_entry(mm_idp, func, &ldt_info, &addr, 1);
+ if(err)
+ goto out_unlock;
+ else if(ptrace_ldt) {
+ /* With PTRACE_LDT available, this is used as a flag only */
+ ldt->entry_count = 1;
+ goto out;
+ }
+
+ if(ldt_info.entry_number >= ldt->entry_count &&
+ ldt_info.entry_number >= LDT_DIRECT_ENTRIES){
+ for(i=ldt->entry_count/LDT_ENTRIES_PER_PAGE;
+ i*LDT_ENTRIES_PER_PAGE <= ldt_info.entry_number;
+ i++){
+ if(i == 0)
+ memcpy(&entry0, ldt->entries, sizeof(entry0));
+ ldt->pages[i] = (struct ldt_entry *)
+ __get_free_page(GFP_KERNEL|__GFP_ZERO);
+ if(!ldt->pages[i]){
+ err = -ENOMEM;
+ /* Undo the change in host */
+ memset(&ldt_info, 0, sizeof(ldt_info));
+ write_ldt_entry(mm_idp, 1, &ldt_info, &addr, 1);
+ goto out_unlock;
+ }
+ if(i == 0) {
+ memcpy(ldt->pages[0], &entry0, sizeof(entry0));
+ memcpy(ldt->pages[0]+1, ldt->entries+1,
+ sizeof(entry0)*(LDT_DIRECT_ENTRIES-1));
+ }
+ ldt->entry_count = (i + 1) * LDT_ENTRIES_PER_PAGE;
+ }
+ }
+ if(ldt->entry_count <= ldt_info.entry_number)
+ ldt->entry_count = ldt_info.entry_number + 1;
+
+ if(ldt->entry_count <= LDT_DIRECT_ENTRIES)
+ ldt_p = ldt->entries + ldt_info.entry_number;
+ else
+ ldt_p = ldt->pages[ldt_info.entry_number/LDT_ENTRIES_PER_PAGE] +
+ ldt_info.entry_number%LDT_ENTRIES_PER_PAGE;
+
+ if(ldt_info.base_addr == 0 && ldt_info.limit == 0 &&
+ (func == 1 || LDT_empty(&ldt_info))){
+ ldt_p->a = 0;
+ ldt_p->b = 0;
+ }
+ else{
+ if (func == 1)
+ ldt_info.useable = 0;
+ ldt_p->a = LDT_entry_a(&ldt_info);
+ ldt_p->b = LDT_entry_b(&ldt_info);
+ }
+ err = 0;
+
+out_unlock:
+ up(&ldt->semaphore);
+out:
+ return err;
+}
+
+static long do_modify_ldt_skas(int func, void __user *ptr,
+ unsigned long bytecount)
+{
+ int ret = -ENOSYS;
+
+ switch (func) {
+ case 0:
+ ret = read_ldt(ptr, bytecount);
+ break;
+ case 1:
+ case 0x11:
+ ret = write_ldt(ptr, bytecount, func);
+ break;
+ case 2:
+ ret = read_default_ldt(ptr, bytecount);
+ break;
+ }
+ return ret;
+}
+
+short dummy_list[9] = {0, -1};
+short * host_ldt_entries = NULL;
+
+void ldt_get_host_info(void)
+{
+ long ret;
+ struct ldt_entry * ldt;
+ int i, size, k, order;
+
+ host_ldt_entries = dummy_list+1;
+
+ for(i = LDT_PAGES_MAX-1, order=0; i; i>>=1, order++);
+
+ ldt = (struct ldt_entry *)
+ __get_free_pages(GFP_KERNEL|__GFP_ZERO, order);
+ if(ldt == NULL) {
+ printk("ldt_get_host_info: couldn't allocate buffer for host ldt\n");
+ return;
+ }
+
+ ret = modify_ldt(0, ldt, (1<<order)*PAGE_SIZE);
+ if(ret < 0) {
+ printk("ldt_get_host_info: couldn't read host ldt\n");
+ goto out_free;
+ }
+ if(ret == 0) {
+ /* default_ldt is active, simply write an empty entry 0 */
+ host_ldt_entries = dummy_list;
+ goto out_free;
+ }
+
+ for(i=0, size=0; i<ret/LDT_ENTRY_SIZE; i++){
+ if(ldt[i].a != 0 || ldt[i].b != 0)
+ size++;
+ }
+
+ if(size < sizeof(dummy_list)/sizeof(dummy_list[0])) {
+ host_ldt_entries = dummy_list;
+ }
+ else {
+ size = (size + 1) * sizeof(dummy_list[0]);
+ host_ldt_entries = (short *)kmalloc(size, GFP_KERNEL);
+ if(host_ldt_entries == NULL) {
+ printk("ldt_get_host_info: couldn't allocate host ldt list\n");
+ goto out_free;
+ }
+ }
+
+ for(i=0, k=0; i<ret/LDT_ENTRY_SIZE; i++){
+ if(ldt[i].a != 0 || ldt[i].b != 0) {
+ host_ldt_entries[k++] = i;
+ }
+ }
+ host_ldt_entries[k] = -1;
+
+out_free:
+ free_pages((unsigned long)ldt, order);
+}
+
+long init_new_ldt(struct mmu_context_skas * new_mm,
+ struct mmu_context_skas * from_mm)
+{
+ struct user_desc desc;
+ short * num_p;
+ int i;
+ long page, err=0;
+ void *addr = NULL;
+
+ memset(&desc, 0, sizeof(desc));
+
+ if(!ptrace_ldt)
+ init_MUTEX(&new_mm->ldt.semaphore);
+
+ if(!from_mm){
+ /*
+ * We have to initialize a clean ldt.
+ */
+ if(proc_mm) {
+ /*
+ * If the new mm was created using proc_mm, host's
+ * default-ldt currently is assigned, which normally
+ * contains the call-gates for lcall7 and lcall27.
+ * To remove these gates, we simply write an empty
+ * entry as number 0 to the host.
+ */
+ err = write_ldt_entry(&new_mm->id, 1, &desc,
+ &addr, 1);
+ }
+ else{
+ /*
+ * Now we try to retrieve info about the ldt we
+ * inherited from the host. All ldt entries found
+ * will be reset in the following loop.
+ */
+ if(host_ldt_entries == NULL)
+ ldt_get_host_info();
+ for(num_p=host_ldt_entries; *num_p != -1; num_p++){
+ desc.entry_number = *num_p;
+ err = write_ldt_entry(&new_mm->id, 1, &desc,
+ &addr, *(num_p + 1) == -1);
+ if(err)
+ break;
+ }
+ }
+ new_mm->ldt.entry_count = 0;
+ }
+ else if (!ptrace_ldt) {
+ /* Our local LDT is used to supply the data for
+ * modify_ldt(READLDT), if PTRACE_LDT isn't available,
+ * i.e., we have to use the stub for modify_ldt, which
+ * can't handle the big read buffer of up to 64kB.
+ */
+ down(&from_mm->ldt.semaphore);
+ if(from_mm->ldt.entry_count <= LDT_DIRECT_ENTRIES){
+ memcpy(new_mm->ldt.entries, from_mm->ldt.entries,
+ sizeof(new_mm->ldt.entries));
+ }
+ else{
+ i = from_mm->ldt.entry_count / LDT_ENTRIES_PER_PAGE;
+ while(i-->0){
+ page = __get_free_page(GFP_KERNEL|__GFP_ZERO);
+ if (!page){
+ err = -ENOMEM;
+ break;
+ }
+ new_mm->ldt.pages[i] = (struct ldt_entry*)page;
+ memcpy(new_mm->ldt.pages[i],
+ from_mm->ldt.pages[i], PAGE_SIZE);
+ }
+ }
+ new_mm->ldt.entry_count = from_mm->ldt.entry_count;
+ up(&from_mm->ldt.semaphore);
+ }
+
+ return err;
+}
+
+
+void free_ldt(struct mmu_context_skas * mm)
+{
+ int i;
+
+ if(!ptrace_ldt && mm->ldt.entry_count > LDT_DIRECT_ENTRIES){
+ i = mm->ldt.entry_count / LDT_ENTRIES_PER_PAGE;
+ while(i-- > 0){
+ free_page((long )mm->ldt.pages[i]);
+ }
+ }
+ mm->ldt.entry_count = 0;
+}
+#endif
+
+int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount)
+{
+ return(CHOOSE_MODE_PROC(do_modify_ldt_tt, do_modify_ldt_skas, func,
+ ptr, bytecount));
+}
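
Whether through do_modify_ldt_tt(), PTRACE_LDT, or the syscall stub, everything above ultimately drives the host's modify_ldt interface. For reference, a user-space sketch of that host interface itself (glibc has no wrapper, so it goes through syscall(2)); this illustrates the host call, it is not UML code:

/* Host-side illustration: read the current LDT with modify_ldt(2). */
#include <asm/ldt.h>            /* LDT_ENTRY_SIZE, struct user_desc */
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
        unsigned char buf[LDT_ENTRY_SIZE * 8];
        long n;

        /* func 0 == read; the return value is the byte count copied out
         * (usually 0, since most processes never install an LDT) */
        n = syscall(SYS_modify_ldt, 0, buf, sizeof(buf));
        if (n < 0) {
                perror("modify_ldt");
                return 1;
        }
        printf("%ld bytes of LDT (%ld entries)\n", n, n / LDT_ENTRY_SIZE);
        return 0;
}
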
diff --git a/arch/um/sys-x86_64/Makefile b/arch/um/sys-x86_64/Makefile
index 06c3633457a..ea977df395a 100644
--- a/arch/um/sys-x86_64/Makefile
+++ b/arch/um/sys-x86_64/Makefile
@@ -5,7 +5,7 @@
#
#XXX: why into lib-y?
-lib-y = bitops.o bugs.o csum-partial.o delay.o fault.o mem.o memcpy.o \
+lib-y = bitops.o bugs.o csum-partial.o delay.o fault.o ldt.o mem.o memcpy.o \
ptrace.o ptrace_user.o sigcontext.o signal.o stub.o \
stub_segv.o syscalls.o syscall_table.o sysrq.o thunk.o
@@ -14,7 +14,7 @@ obj-$(CONFIG_MODULES) += module.o um_module.o
USER_OBJS := ptrace_user.o sigcontext.o
-SYMLINKS = bitops.c csum-copy.S csum-partial.c csum-wrappers.c memcpy.S \
+SYMLINKS = bitops.c csum-copy.S csum-partial.c csum-wrappers.c ldt.c memcpy.S \
thunk.S module.c
include arch/um/scripts/Makefile.rules
@@ -23,6 +23,7 @@ bitops.c-dir = lib
csum-copy.S-dir = lib
csum-partial.c-dir = lib
csum-wrappers.c-dir = lib
+ldt.c-dir = /arch/um/sys-i386
memcpy.S-dir = lib
thunk.S-dir = lib
module.c-dir = kernel
diff --git a/arch/um/sys-x86_64/syscalls.c b/arch/um/sys-x86_64/syscalls.c
index 3259a4db453..6acee5c4ada 100644
--- a/arch/um/sys-x86_64/syscalls.c
+++ b/arch/um/sys-x86_64/syscalls.c
@@ -29,81 +29,6 @@ asmlinkage long sys_uname64(struct new_utsname __user * name)
}
#ifdef CONFIG_MODE_TT
-extern int modify_ldt(int func, void *ptr, unsigned long bytecount);
-
-long sys_modify_ldt_tt(int func, void *ptr, unsigned long bytecount)
-{
- /* XXX This should check VERIFY_WRITE depending on func, check this
- * in i386 as well.
- */
- if (!access_ok(VERIFY_READ, ptr, bytecount))
- return -EFAULT;
- return(modify_ldt(func, ptr, bytecount));
-}
-#endif
-
-#ifdef CONFIG_MODE_SKAS
-extern int userspace_pid[];
-
-#include "skas_ptrace.h"
-
-long sys_modify_ldt_skas(int func, void *ptr, unsigned long bytecount)
-{
- struct ptrace_ldt ldt;
- void *buf;
- int res, n;
-
- buf = kmalloc(bytecount, GFP_KERNEL);
- if(buf == NULL)
- return(-ENOMEM);
-
- res = 0;
-
- switch(func){
- case 1:
- case 0x11:
- res = copy_from_user(buf, ptr, bytecount);
- break;
- }
-
- if(res != 0){
- res = -EFAULT;
- goto out;
- }
-
- ldt = ((struct ptrace_ldt) { .func = func,
- .ptr = buf,
- .bytecount = bytecount });
-#warning Need to look up userspace_pid by cpu
- res = ptrace(PTRACE_LDT, userspace_pid[0], 0, (unsigned long) &ldt);
- if(res < 0)
- goto out;
-
- switch(func){
- case 0:
- case 2:
- n = res;
- res = copy_to_user(ptr, buf, n);
- if(res != 0)
- res = -EFAULT;
- else
- res = n;
- break;
- }
-
- out:
- kfree(buf);
- return(res);
-}
-#endif
-
-long sys_modify_ldt(int func, void *ptr, unsigned long bytecount)
-{
- return(CHOOSE_MODE_PROC(sys_modify_ldt_tt, sys_modify_ldt_skas, func,
- ptr, bytecount));
-}
-
-#ifdef CONFIG_MODE_TT
extern long arch_prctl(int code, unsigned long addr);
static long arch_prctl_tt(int code, unsigned long addr)
diff --git a/arch/v850/kernel/ptrace.c b/arch/v850/kernel/ptrace.c
index d6077ff47d2..18492d02aaf 100644
--- a/arch/v850/kernel/ptrace.c
+++ b/arch/v850/kernel/ptrace.c
@@ -113,45 +113,10 @@ static int set_single_step (struct task_struct *t, int val)
return 1;
}
-long sys_ptrace(long request, long pid, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
- struct task_struct *child;
int rval;
- lock_kernel();
-
- if (request == PTRACE_TRACEME) {
- /* are we already being traced? */
- if (current->ptrace & PT_PTRACED) {
- rval = -EPERM;
- goto out;
- }
- /* set the ptrace bit in the process flags. */
- current->ptrace |= PT_PTRACED;
- rval = 0;
- goto out;
- }
- rval = -ESRCH;
- read_lock(&tasklist_lock);
- child = find_task_by_pid(pid);
- if (child)
- get_task_struct(child);
- read_unlock(&tasklist_lock);
- if (!child)
- goto out;
-
- rval = -EPERM;
- if (pid == 1) /* you may not mess with init */
- goto out_tsk;
-
- if (request == PTRACE_ATTACH) {
- rval = ptrace_attach(child);
- goto out_tsk;
- }
- rval = ptrace_check_attach(child, request == PTRACE_KILL);
- if (rval < 0)
- goto out_tsk;
-
switch (request) {
unsigned long val, copied;
@@ -248,11 +213,7 @@ long sys_ptrace(long request, long pid, long addr, long data)
rval = -EIO;
goto out;
}
-
-out_tsk:
- put_task_struct(child);
-out:
- unlock_kernel();
+ out:
return rval;
}
diff --git a/arch/x86_64/Kconfig b/arch/x86_64/Kconfig
index 21afa69a086..4cce2f6f170 100644
--- a/arch/x86_64/Kconfig
+++ b/arch/x86_64/Kconfig
@@ -532,8 +532,21 @@ source "drivers/firmware/Kconfig"
source fs/Kconfig
+menu "Instrumentation Support"
+ depends on EXPERIMENTAL
+
source "arch/x86_64/oprofile/Kconfig"
+config KPROBES
+ bool "Kprobes (EXPERIMENTAL)"
+ help
+ Kprobes allows you to trap at almost any kernel address and
+ execute a callback function. register_kprobe() establishes
+ a probepoint and specifies the callback. Kprobes is useful
+ for kernel debugging, non-intrusive instrumentation and testing.
+ If in doubt, say "N".
+endmenu
+
source "arch/x86_64/Kconfig.debug"
source "security/Kconfig"
diff --git a/arch/x86_64/Kconfig.debug b/arch/x86_64/Kconfig.debug
index 9cf1410d2f5..d584ecc27ea 100644
--- a/arch/x86_64/Kconfig.debug
+++ b/arch/x86_64/Kconfig.debug
@@ -33,16 +33,6 @@ config IOMMU_DEBUG
options. See Documentation/x86_64/boot-options.txt for more
details.
-config KPROBES
- bool "Kprobes"
- depends on DEBUG_KERNEL
- help
- Kprobes allows you to trap at almost any kernel address and
- execute a callback function. register_kprobe() establishes
- a probepoint and specifies the callback. Kprobes is useful
- for kernel debugging, non-intrusive instrumentation and testing.
- If in doubt, say "N".
-
config IOMMU_LEAK
bool "IOMMU leak tracing"
depends on DEBUG_KERNEL
diff --git a/arch/x86_64/kernel/kprobes.c b/arch/x86_64/kernel/kprobes.c
index 76a28b007be..dddeb678b44 100644
--- a/arch/x86_64/kernel/kprobes.c
+++ b/arch/x86_64/kernel/kprobes.c
@@ -34,7 +34,6 @@
#include <linux/config.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h>
-#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/preempt.h>
@@ -44,17 +43,10 @@
#include <asm/kdebug.h>
static DECLARE_MUTEX(kprobe_mutex);
-
-static struct kprobe *current_kprobe;
-static unsigned long kprobe_status, kprobe_old_rflags, kprobe_saved_rflags;
-static struct kprobe *kprobe_prev;
-static unsigned long kprobe_status_prev, kprobe_old_rflags_prev, kprobe_saved_rflags_prev;
-static struct pt_regs jprobe_saved_regs;
-static long *jprobe_saved_rsp;
void jprobe_return_end(void);
-/* copy of the kernel stack at the probe fire time */
-static kprobe_opcode_t jprobes_stack[MAX_STACK_SIZE];
+DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
+DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
/*
* returns non-zero if opcode modifies the interrupt flag.
@@ -236,29 +228,30 @@ void __kprobes arch_remove_kprobe(struct kprobe *p)
up(&kprobe_mutex);
}
-static inline void save_previous_kprobe(void)
+static inline void save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
- kprobe_prev = current_kprobe;
- kprobe_status_prev = kprobe_status;
- kprobe_old_rflags_prev = kprobe_old_rflags;
- kprobe_saved_rflags_prev = kprobe_saved_rflags;
+ kcb->prev_kprobe.kp = kprobe_running();
+ kcb->prev_kprobe.status = kcb->kprobe_status;
+ kcb->prev_kprobe.old_rflags = kcb->kprobe_old_rflags;
+ kcb->prev_kprobe.saved_rflags = kcb->kprobe_saved_rflags;
}
-static inline void restore_previous_kprobe(void)
+static inline void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
- current_kprobe = kprobe_prev;
- kprobe_status = kprobe_status_prev;
- kprobe_old_rflags = kprobe_old_rflags_prev;
- kprobe_saved_rflags = kprobe_saved_rflags_prev;
+ __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
+ kcb->kprobe_status = kcb->prev_kprobe.status;
+ kcb->kprobe_old_rflags = kcb->prev_kprobe.old_rflags;
+ kcb->kprobe_saved_rflags = kcb->prev_kprobe.saved_rflags;
}
-static inline void set_current_kprobe(struct kprobe *p, struct pt_regs *regs)
+static inline void set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
+ struct kprobe_ctlblk *kcb)
{
- current_kprobe = p;
- kprobe_saved_rflags = kprobe_old_rflags
+ __get_cpu_var(current_kprobe) = p;
+ kcb->kprobe_saved_rflags = kcb->kprobe_old_rflags
= (regs->eflags & (TF_MASK | IF_MASK));
if (is_IF_modifier(p->ainsn.insn))
- kprobe_saved_rflags &= ~IF_MASK;
+ kcb->kprobe_saved_rflags &= ~IF_MASK;
}
static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
@@ -272,6 +265,7 @@ static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
regs->rip = (unsigned long)p->ainsn.insn;
}
+/* Called with kretprobe_lock held */
void __kprobes arch_prepare_kretprobe(struct kretprobe *rp,
struct pt_regs *regs)
{
@@ -292,32 +286,30 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe *rp,
}
}
-/*
- * Interrupts are disabled on entry as trap3 is an interrupt gate and they
- * remain disabled thorough out this function.
- */
int __kprobes kprobe_handler(struct pt_regs *regs)
{
struct kprobe *p;
int ret = 0;
kprobe_opcode_t *addr = (kprobe_opcode_t *)(regs->rip - sizeof(kprobe_opcode_t));
+ struct kprobe_ctlblk *kcb;
- /* We're in an interrupt, but this is clear and BUG()-safe. */
+ /*
+ * We don't want to be preempted for the entire
+ * duration of kprobe processing
+ */
preempt_disable();
+ kcb = get_kprobe_ctlblk();
/* Check we're not actually recursing */
if (kprobe_running()) {
- /* We *are* holding lock here, so this is safe.
- Disarm the probe we just hit, and ignore it. */
p = get_kprobe(addr);
if (p) {
- if (kprobe_status == KPROBE_HIT_SS &&
+ if (kcb->kprobe_status == KPROBE_HIT_SS &&
*p->ainsn.insn == BREAKPOINT_INSTRUCTION) {
regs->eflags &= ~TF_MASK;
- regs->eflags |= kprobe_saved_rflags;
- unlock_kprobes();
+ regs->eflags |= kcb->kprobe_saved_rflags;
goto no_kprobe;
- } else if (kprobe_status == KPROBE_HIT_SSDONE) {
+ } else if (kcb->kprobe_status == KPROBE_HIT_SSDONE) {
/* TODO: Provide re-entrancy from
* post_kprobes_handler() and avoid exception
* stack corruption while single-stepping on
@@ -325,6 +317,7 @@ int __kprobes kprobe_handler(struct pt_regs *regs)
*/
arch_disarm_kprobe(p);
regs->rip = (unsigned long)p->addr;
+ reset_current_kprobe();
ret = 1;
} else {
/* We have reentered the kprobe_handler(), since
@@ -334,27 +327,24 @@ int __kprobes kprobe_handler(struct pt_regs *regs)
* of the new probe without calling any user
* handlers.
*/
- save_previous_kprobe();
- set_current_kprobe(p, regs);
+ save_previous_kprobe(kcb);
+ set_current_kprobe(p, regs, kcb);
p->nmissed++;
prepare_singlestep(p, regs);
- kprobe_status = KPROBE_REENTER;
+ kcb->kprobe_status = KPROBE_REENTER;
return 1;
}
} else {
- p = current_kprobe;
+ p = __get_cpu_var(current_kprobe);
if (p->break_handler && p->break_handler(p, regs)) {
goto ss_probe;
}
}
- /* If it's not ours, can't be delete race, (we hold lock). */
goto no_kprobe;
}
- lock_kprobes();
p = get_kprobe(addr);
if (!p) {
- unlock_kprobes();
if (*addr != BREAKPOINT_INSTRUCTION) {
/*
* The breakpoint instruction was removed right
@@ -372,8 +362,8 @@ int __kprobes kprobe_handler(struct pt_regs *regs)
goto no_kprobe;
}
- kprobe_status = KPROBE_HIT_ACTIVE;
- set_current_kprobe(p, regs);
+ set_current_kprobe(p, regs, kcb);
+ kcb->kprobe_status = KPROBE_HIT_ACTIVE;
if (p->pre_handler && p->pre_handler(p, regs))
/* handler has already set things up, so skip ss setup */
@@ -381,7 +371,7 @@ int __kprobes kprobe_handler(struct pt_regs *regs)
ss_probe:
prepare_singlestep(p, regs);
- kprobe_status = KPROBE_HIT_SS;
+ kcb->kprobe_status = KPROBE_HIT_SS;
return 1;
no_kprobe:
@@ -409,9 +399,10 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
struct kretprobe_instance *ri = NULL;
struct hlist_head *head;
struct hlist_node *node, *tmp;
- unsigned long orig_ret_address = 0;
+ unsigned long flags, orig_ret_address = 0;
unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline;
+ spin_lock_irqsave(&kretprobe_lock, flags);
head = kretprobe_inst_table_head(current);
/*
@@ -450,13 +441,14 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
BUG_ON(!orig_ret_address || (orig_ret_address == trampoline_address));
regs->rip = orig_ret_address;
- unlock_kprobes();
+ reset_current_kprobe();
+ spin_unlock_irqrestore(&kretprobe_lock, flags);
preempt_enable_no_resched();
/*
* By returning a non-zero value, we are telling
- * kprobe_handler() that we have handled unlocking
- * and re-enabling preemption.
+ * kprobe_handler() that we don't want the post_handler
+ * to run (and have re-enabled preemption)
*/
return 1;
}
@@ -483,7 +475,8 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
* that is atop the stack is the address following the copied instruction.
* We need to make it the address following the original instruction.
*/
-static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
+static void __kprobes resume_execution(struct kprobe *p,
+ struct pt_regs *regs, struct kprobe_ctlblk *kcb)
{
unsigned long *tos = (unsigned long *)regs->rsp;
unsigned long next_rip = 0;
@@ -498,7 +491,7 @@ static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
switch (*insn) {
case 0x9c: /* pushfl */
*tos &= ~(TF_MASK | IF_MASK);
- *tos |= kprobe_old_rflags;
+ *tos |= kcb->kprobe_old_rflags;
break;
case 0xc3: /* ret/lret */
case 0xcb:
@@ -537,30 +530,28 @@ static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
}
}
-/*
- * Interrupts are disabled on entry as trap1 is an interrupt gate and they
- * remain disabled thoroughout this function. And we hold kprobe lock.
- */
int __kprobes post_kprobe_handler(struct pt_regs *regs)
{
- if (!kprobe_running())
+ struct kprobe *cur = kprobe_running();
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+ if (!cur)
return 0;
- if ((kprobe_status != KPROBE_REENTER) && current_kprobe->post_handler) {
- kprobe_status = KPROBE_HIT_SSDONE;
- current_kprobe->post_handler(current_kprobe, regs, 0);
+ if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
+ kcb->kprobe_status = KPROBE_HIT_SSDONE;
+ cur->post_handler(cur, regs, 0);
}
- resume_execution(current_kprobe, regs);
- regs->eflags |= kprobe_saved_rflags;
+ resume_execution(cur, regs, kcb);
+ regs->eflags |= kcb->kprobe_saved_rflags;
/* Restore the original saved kprobes variables and continue. */
- if (kprobe_status == KPROBE_REENTER) {
- restore_previous_kprobe();
+ if (kcb->kprobe_status == KPROBE_REENTER) {
+ restore_previous_kprobe(kcb);
goto out;
- } else {
- unlock_kprobes();
}
+ reset_current_kprobe();
out:
preempt_enable_no_resched();
@@ -575,18 +566,19 @@ out:
return 1;
}
-/* Interrupts disabled, kprobe_lock held. */
int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
- if (current_kprobe->fault_handler
- && current_kprobe->fault_handler(current_kprobe, regs, trapnr))
+ struct kprobe *cur = kprobe_running();
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+ if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
return 1;
- if (kprobe_status & KPROBE_HIT_SS) {
- resume_execution(current_kprobe, regs);
- regs->eflags |= kprobe_old_rflags;
+ if (kcb->kprobe_status & KPROBE_HIT_SS) {
+ resume_execution(cur, regs, kcb);
+ regs->eflags |= kcb->kprobe_old_rflags;
- unlock_kprobes();
+ reset_current_kprobe();
preempt_enable_no_resched();
}
return 0;
@@ -599,39 +591,41 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
unsigned long val, void *data)
{
struct die_args *args = (struct die_args *)data;
+ int ret = NOTIFY_DONE;
+
switch (val) {
case DIE_INT3:
if (kprobe_handler(args->regs))
- return NOTIFY_STOP;
+ ret = NOTIFY_STOP;
break;
case DIE_DEBUG:
if (post_kprobe_handler(args->regs))
- return NOTIFY_STOP;
+ ret = NOTIFY_STOP;
break;
case DIE_GPF:
- if (kprobe_running() &&
- kprobe_fault_handler(args->regs, args->trapnr))
- return NOTIFY_STOP;
- break;
case DIE_PAGE_FAULT:
+ /* kprobe_running() needs smp_processor_id() */
+ preempt_disable();
if (kprobe_running() &&
kprobe_fault_handler(args->regs, args->trapnr))
- return NOTIFY_STOP;
+ ret = NOTIFY_STOP;
+ preempt_enable();
break;
default:
break;
}
- return NOTIFY_DONE;
+ return ret;
}
int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
struct jprobe *jp = container_of(p, struct jprobe, kp);
unsigned long addr;
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
- jprobe_saved_regs = *regs;
- jprobe_saved_rsp = (long *) regs->rsp;
- addr = (unsigned long)jprobe_saved_rsp;
+ kcb->jprobe_saved_regs = *regs;
+ kcb->jprobe_saved_rsp = (long *) regs->rsp;
+ addr = (unsigned long)(kcb->jprobe_saved_rsp);
/*
* As Linus pointed out, gcc assumes that the callee
* owns the argument space and could overwrite it, e.g.
@@ -639,7 +633,8 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
* we also save and restore enough stack bytes to cover
* the argument area.
*/
- memcpy(jprobes_stack, (kprobe_opcode_t *) addr, MIN_STACK_SIZE(addr));
+ memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr,
+ MIN_STACK_SIZE(addr));
regs->eflags &= ~IF_MASK;
regs->rip = (unsigned long)(jp->entry);
return 1;
@@ -647,36 +642,40 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
void __kprobes jprobe_return(void)
{
- preempt_enable_no_resched();
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
asm volatile (" xchg %%rbx,%%rsp \n"
" int3 \n"
" .globl jprobe_return_end \n"
" jprobe_return_end: \n"
" nop \n"::"b"
- (jprobe_saved_rsp):"memory");
+ (kcb->jprobe_saved_rsp):"memory");
}
int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
u8 *addr = (u8 *) (regs->rip - 1);
- unsigned long stack_addr = (unsigned long)jprobe_saved_rsp;
+ unsigned long stack_addr = (unsigned long)(kcb->jprobe_saved_rsp);
struct jprobe *jp = container_of(p, struct jprobe, kp);
if ((addr > (u8 *) jprobe_return) && (addr < (u8 *) jprobe_return_end)) {
- if ((long *)regs->rsp != jprobe_saved_rsp) {
+ if ((long *)regs->rsp != kcb->jprobe_saved_rsp) {
struct pt_regs *saved_regs =
- container_of(jprobe_saved_rsp, struct pt_regs, rsp);
+ container_of(kcb->jprobe_saved_rsp,
+ struct pt_regs, rsp);
printk("current rsp %p does not match saved rsp %p\n",
- (long *)regs->rsp, jprobe_saved_rsp);
+ (long *)regs->rsp, kcb->jprobe_saved_rsp);
printk("Saved registers for jprobe %p\n", jp);
show_registers(saved_regs);
printk("Current registers\n");
show_registers(regs);
BUG();
}
- *regs = jprobe_saved_regs;
- memcpy((kprobe_opcode_t *) stack_addr, jprobes_stack,
+ *regs = kcb->jprobe_saved_regs;
+ memcpy((kprobe_opcode_t *) stack_addr, kcb->jprobes_stack,
MIN_STACK_SIZE(stack_addr));
+ preempt_enable_no_resched();
return 1;
}
return 0;
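The kprobes.c hunks above replace the file-scope probe state (current_kprobe, kprobe_status, the *_prev copies and the jprobe save area) and the old lock_kprobes()/unlock_kprobes() calls with a per-CPU struct kprobe_ctlblk plus a per-CPU current_kprobe pointer; reentrant hits are handled by saving and restoring the previous state inside that block while preemption stays disabled, and kretprobes take the new kretprobe_lock instead. The stand-alone C model below is only an illustration of that bookkeeping pattern: the field and helper names mirror the patch, but the explicit cpu argument (standing in for __get_cpu_var()), the integer probe ids and everything in main() are invented for the example — this is not kernel code.

/*
 * User-space model of the per-CPU kprobe control block introduced in
 * the patch above.  One control block per CPU replaces the old global
 * variables; nesting is handled by save/restore within that block.
 */
#include <stdio.h>

#define NR_CPUS 4

enum kprobe_state { KPROBE_HIT_ACTIVE, KPROBE_HIT_SS, KPROBE_REENTER };

struct prev_kprobe {
	int kp;				/* stands in for struct kprobe * */
	enum kprobe_state status;
	unsigned long old_rflags;
	unsigned long saved_rflags;
};

struct kprobe_ctlblk {
	enum kprobe_state kprobe_status;
	unsigned long kprobe_old_rflags;
	unsigned long kprobe_saved_rflags;
	struct prev_kprobe prev_kprobe;
};

/* one control block and one "current probe" slot per CPU */
static struct kprobe_ctlblk per_cpu_ctlblk[NR_CPUS];
static int per_cpu_current_kprobe[NR_CPUS];

static void save_previous_kprobe(struct kprobe_ctlblk *kcb, int cpu)
{
	kcb->prev_kprobe.kp = per_cpu_current_kprobe[cpu];
	kcb->prev_kprobe.status = kcb->kprobe_status;
	kcb->prev_kprobe.old_rflags = kcb->kprobe_old_rflags;
	kcb->prev_kprobe.saved_rflags = kcb->kprobe_saved_rflags;
}

static void restore_previous_kprobe(struct kprobe_ctlblk *kcb, int cpu)
{
	per_cpu_current_kprobe[cpu] = kcb->prev_kprobe.kp;
	kcb->kprobe_status = kcb->prev_kprobe.status;
	kcb->kprobe_old_rflags = kcb->prev_kprobe.old_rflags;
	kcb->kprobe_saved_rflags = kcb->prev_kprobe.saved_rflags;
}

int main(void)
{
	int cpu = 0;				/* pretend we run on CPU 0 */
	struct kprobe_ctlblk *kcb = &per_cpu_ctlblk[cpu];

	/* first probe hit on this CPU, being single-stepped */
	per_cpu_current_kprobe[cpu] = 1;
	kcb->kprobe_status = KPROBE_HIT_SS;

	/* a second probe fires before the first one finishes */
	save_previous_kprobe(kcb, cpu);
	per_cpu_current_kprobe[cpu] = 2;
	kcb->kprobe_status = KPROBE_REENTER;

	/* inner probe done: restore the outer probe's state */
	restore_previous_kprobe(kcb, cpu);
	printf("back to probe %d, status %d\n",
	       per_cpu_current_kprobe[cpu], kcb->kprobe_status);
	return 0;
}

Compiled with plain gcc, the model nests one probe inside another on a single CPU and restores the outer state afterwards — exactly the bookkeeping the per-CPU control block has to get right once the global variables and the kprobe lock are gone.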
diff --git a/arch/x86_64/kernel/ptrace.c b/arch/x86_64/kernel/ptrace.c
index bbf64b59a21..a87b6cebe80 100644
--- a/arch/x86_64/kernel/ptrace.c
+++ b/arch/x86_64/kernel/ptrace.c
@@ -313,48 +313,11 @@ static unsigned long getreg(struct task_struct *child, unsigned long regno)
}
-asmlinkage long sys_ptrace(long request, long pid, unsigned long addr, long data)
+long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
- struct task_struct *child;
long i, ret;
unsigned ui;
- /* This lock_kernel fixes a subtle race with suid exec */
- lock_kernel();
- ret = -EPERM;
- if (request == PTRACE_TRACEME) {
- /* are we already being traced? */
- if (current->ptrace & PT_PTRACED)
- goto out;
- ret = security_ptrace(current->parent, current);
- if (ret)
- goto out;
- /* set the ptrace bit in the process flags. */
- current->ptrace |= PT_PTRACED;
- ret = 0;
- goto out;
- }
- ret = -ESRCH;
- read_lock(&tasklist_lock);
- child = find_task_by_pid(pid);
- if (child)
- get_task_struct(child);
- read_unlock(&tasklist_lock);
- if (!child)
- goto out;
-
- ret = -EPERM;
- if (pid == 1) /* you may not mess with init */
- goto out_tsk;
-
- if (request == PTRACE_ATTACH) {
- ret = ptrace_attach(child);
- goto out_tsk;
- }
- ret = ptrace_check_attach(child, request == PTRACE_KILL);
- if (ret < 0)
- goto out_tsk;
-
switch (request) {
/* when I and D space are separate, these will need to be fixed. */
case PTRACE_PEEKTEXT: /* read word at location addr. */
@@ -608,10 +571,6 @@ asmlinkage long sys_ptrace(long request, long pid, unsigned long addr, long data
ret = ptrace_request(child, request, addr, data);
break;
}
-out_tsk:
- put_task_struct(child);
-out:
- unlock_kernel();
return ret;
}
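This ptrace.c hunk, like the xtensa one further down, strips the request-independent boilerplate (PTRACE_TRACEME handling, task lookup, the pid==1 check, PTRACE_ATTACH, ptrace_check_attach) out of the architecture's sys_ptrace() and leaves only arch_ptrace(); the common path is expected to live in generic code outside this diff. A rough user-space model of that split is sketched below — only the request constants are taken from the real ABI, while the function bodies are placeholders invented for the example.

/*
 * Model of the sys_ptrace()/arch_ptrace() split.  The generic part
 * keeps the lookup/attach boilerplate; the per-arch part keeps only
 * the machine-specific requests.  Not the kernel's actual code.
 */
#include <stdio.h>
#include <errno.h>

#define PTRACE_TRACEME	0
#define PTRACE_PEEKTEXT	1
#define PTRACE_ATTACH	16

struct task_struct { int pid; };

/* per-architecture part: only register-level requests remain here */
static long arch_ptrace(struct task_struct *child, long request,
			long addr, long data)
{
	(void)data;
	switch (request) {
	case PTRACE_PEEKTEXT:
		printf("peek pid %d addr %ld\n", child->pid, addr);
		return 0;
	default:
		return -EIO;	/* everything else handled generically */
	}
}

/* generic part: the boilerplate this diff removes from each arch */
static long sys_ptrace_model(long request, long pid, long addr, long data)
{
	static struct task_struct child_storage;
	struct task_struct *child = &child_storage;

	if (request == PTRACE_TRACEME)
		return 0;		/* mark current as traced */
	if (pid == 1)
		return -EPERM;		/* you may not mess with init */
	child->pid = (int)pid;		/* stands in for find_task_by_pid() */
	if (request == PTRACE_ATTACH)
		return 0;		/* stands in for ptrace_attach() */
	return arch_ptrace(child, request, addr, data);
}

int main(void)
{
	return (int)sys_ptrace_model(PTRACE_PEEKTEXT, 1234, 0x1000, 0);
}

The point of the refactor shows up in the model: a new architecture only has to supply the switch over its machine-specific requests, instead of re-copying the trace-me/lookup/attach logic that every sys_ptrace() used to duplicate.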
diff --git a/arch/x86_64/kernel/smpboot.c b/arch/x86_64/kernel/smpboot.c
index 658a81b33f3..4b5b088ec10 100644
--- a/arch/x86_64/kernel/smpboot.c
+++ b/arch/x86_64/kernel/smpboot.c
@@ -65,8 +65,6 @@ int smp_num_siblings = 1;
/* Package ID of each logical CPU */
u8 phys_proc_id[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = BAD_APICID };
u8 cpu_core_id[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = BAD_APICID };
-EXPORT_SYMBOL(phys_proc_id);
-EXPORT_SYMBOL(cpu_core_id);
/* Bitmask of currently online CPUs */
cpumask_t cpu_online_map __read_mostly;
diff --git a/arch/x86_64/oprofile/Kconfig b/arch/x86_64/oprofile/Kconfig
index 5ade19801b9..d8a84088471 100644
--- a/arch/x86_64/oprofile/Kconfig
+++ b/arch/x86_64/oprofile/Kconfig
@@ -1,7 +1,3 @@
-
-menu "Profiling support"
- depends on EXPERIMENTAL
-
config PROFILING
bool "Profiling support (EXPERIMENTAL)"
help
@@ -19,5 +15,3 @@ config OPROFILE
If unsure, say N.
-endmenu
-
diff --git a/arch/xtensa/kernel/ptrace.c b/arch/xtensa/kernel/ptrace.c
index 14460743de0..ab5c4c65b5c 100644
--- a/arch/xtensa/kernel/ptrace.c
+++ b/arch/xtensa/kernel/ptrace.c
@@ -45,58 +45,10 @@ void ptrace_disable(struct task_struct *child)
/* Nothing to do.. */
}
-long sys_ptrace(long request, long pid, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
- struct task_struct *child;
int ret = -EPERM;
- lock_kernel();
-
-#if 0
- if ((int)request != 1)
- printk("ptrace(r=%d,pid=%d,addr=%08lx,data=%08lx)\n",
- (int) request, (int) pid, (unsigned long) addr,
- (unsigned long) data);
-#endif
-
- if (request == PTRACE_TRACEME) {
-
- /* Are we already being traced? */
-
- if (current->ptrace & PT_PTRACED)
- goto out;
-
- if ((ret = security_ptrace(current->parent, current)))
- goto out;
-
- /* Set the ptrace bit in the process flags. */
-
- current->ptrace |= PT_PTRACED;
- ret = 0;
- goto out;
- }
-
- ret = -ESRCH;
- read_lock(&tasklist_lock);
- child = find_task_by_pid(pid);
- if (child)
- get_task_struct(child);
- read_unlock(&tasklist_lock);
- if (!child)
- goto out;
-
- ret = -EPERM;
- if (pid == 1) /* you may not mess with init */
- goto out;
-
- if (request == PTRACE_ATTACH) {
- ret = ptrace_attach(child);
- goto out_tsk;
- }
-
- if ((ret = ptrace_check_attach(child, request == PTRACE_KILL)) < 0)
- goto out_tsk;
-
switch (request) {
case PTRACE_PEEKTEXT: /* read word at location addr. */
case PTRACE_PEEKDATA:
@@ -375,10 +327,7 @@ long sys_ptrace(long request, long pid, long addr, long data)
ret = ptrace_request(child, request, addr, data);
goto out;
}
-out_tsk:
- put_task_struct(child);
-out:
- unlock_kernel();
+ out:
return ret;
}