Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/Kconfig  12
-rw-r--r--  arch/powerpc/Makefile  1
-rw-r--r--  arch/powerpc/boot/dts/virtex440-ml510.dts  465
-rw-r--r--  arch/powerpc/include/asm/dma-mapping.h  11
-rw-r--r--  arch/powerpc/include/asm/lppaca.h  6
-rw-r--r--  arch/powerpc/include/asm/mmu.h  4
-rw-r--r--  arch/powerpc/include/asm/paca.h  12
-rw-r--r--  arch/powerpc/include/asm/pgtable-ppc64.h  5
-rw-r--r--  arch/powerpc/include/asm/ptrace.h  4
-rw-r--r--  arch/powerpc/include/asm/swiotlb.h  27
-rw-r--r--  arch/powerpc/include/asm/system.h  2
-rw-r--r--  arch/powerpc/include/asm/xilinx_pci.h  21
-rw-r--r--  arch/powerpc/kernel/Makefile  4
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c  32
-rw-r--r--  arch/powerpc/kernel/dma-swiotlb.c  163
-rw-r--r--  arch/powerpc/kernel/dma.c  2
-rw-r--r--  arch/powerpc/kernel/exceptions-64s.S  978
-rw-r--r--  arch/powerpc/kernel/head_32.S  95
-rw-r--r--  arch/powerpc/kernel/head_64.S  1095
-rw-r--r--  arch/powerpc/kernel/head_booke.h  10
-rw-r--r--  arch/powerpc/kernel/irq.c  2
-rw-r--r--  arch/powerpc/kernel/misc_64.S  92
-rw-r--r--  arch/powerpc/kernel/paca.c  14
-rw-r--r--  arch/powerpc/kernel/pci_64.c  7
-rw-r--r--  arch/powerpc/kernel/process.c  2
-rw-r--r--  arch/powerpc/kernel/prom.c  2
-rw-r--r--  arch/powerpc/kernel/ptrace.c  23
-rw-r--r--  arch/powerpc/kernel/setup_32.c  6
-rw-r--r--  arch/powerpc/kernel/setup_64.c  11
-rw-r--r--  arch/powerpc/kernel/traps.c  34
-rw-r--r--  arch/powerpc/kernel/vector.S  210
-rw-r--r--  arch/powerpc/mm/Makefile  7
-rw-r--r--  arch/powerpc/mm/init_64.c  2
-rw-r--r--  arch/powerpc/mm/mmu_context_nohash.c  19
-rw-r--r--  arch/powerpc/mm/numa.c  2
-rw-r--r--  arch/powerpc/platforms/40x/virtex.c  2
-rw-r--r--  arch/powerpc/platforms/44x/Kconfig  13
-rw-r--r--  arch/powerpc/platforms/44x/Makefile  1
-rw-r--r--  arch/powerpc/platforms/44x/virtex.c  2
-rw-r--r--  arch/powerpc/platforms/44x/virtex_ml510.c  29
-rw-r--r--  arch/powerpc/platforms/Kconfig  4
-rw-r--r--  arch/powerpc/platforms/Kconfig.cputype  22
-rw-r--r--  arch/powerpc/platforms/cell/spufs/inode.c  2
-rw-r--r--  arch/powerpc/platforms/pseries/iommu.c  4
-rw-r--r--  arch/powerpc/sysdev/Makefile  1
-rw-r--r--  arch/powerpc/sysdev/xilinx_intc.c  81
-rw-r--r--  arch/powerpc/sysdev/xilinx_pci.c  132
-rw-r--r--  arch/powerpc/xmon/xmon.c  2
48 files changed, 2327 insertions, 1350 deletions
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index ff755398ce2..93a61898b25 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -300,9 +300,19 @@ config IOMMU_VMERGE
config IOMMU_HELPER
def_bool PPC64
+config SWIOTLB
+ bool "SWIOTLB support"
+ default n
+ select IOMMU_HELPER
+ ---help---
+ Support for IO bounce buffering for systems without an IOMMU.
+ This allows us to DMA to the full physical address space on
+ platforms where the size of a physical address is larger
+ than the bus address. Not all platforms support this.
+
config PPC_NEED_DMA_SYNC_OPS
def_bool y
- depends on NOT_COHERENT_CACHE
+ depends on (NOT_COHERENT_CACHE || SWIOTLB)
config HOTPLUG_CPU
bool "Support for enabling/disabling CPUs"
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 551fc58c05c..bc35f4e2b81 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -142,6 +142,7 @@ head-$(CONFIG_FSL_BOOKE) := arch/powerpc/kernel/head_fsl_booke.o
head-$(CONFIG_PPC64) += arch/powerpc/kernel/entry_64.o
head-$(CONFIG_PPC_FPU) += arch/powerpc/kernel/fpu.o
+head-$(CONFIG_ALTIVEC) += arch/powerpc/kernel/vector.o
core-y += arch/powerpc/kernel/ \
arch/powerpc/mm/ \
diff --git a/arch/powerpc/boot/dts/virtex440-ml510.dts b/arch/powerpc/boot/dts/virtex440-ml510.dts
new file mode 100644
index 00000000000..81a8dc2c636
--- /dev/null
+++ b/arch/powerpc/boot/dts/virtex440-ml510.dts
@@ -0,0 +1,465 @@
+/*
+ * Xilinx ML510 Reference Design support
+ *
+ * This DTS file was created for the ml510_bsb1_pcores_ppc440 reference design.
+ * The reference design contains a bug which prevents PCI DMA from working
+ * properly. A description of the bug is given in the plbv46_pci section. It
+ * needs to be fixed by the user until Xilinx updates their reference design.
+ *
+ * Copyright 2009, Roderick Colenbrander
+ */
+
+/dts-v1/;
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "xlnx,ml510-ref-design", "xlnx,virtex440";
+ dcr-parent = <&ppc440_0>;
+ DDR2_SDRAM_DIMM0: memory@0 {
+ device_type = "memory";
+ reg = < 0x0 0x20000000 >;
+ } ;
+ alias {
+ ethernet0 = &Hard_Ethernet_MAC;
+ serial0 = &RS232_Uart_1;
+ } ;
+ chosen {
+ bootargs = "console=ttyS0 root=/dev/ram";
+ linux,stdout-path = "/plb@0/serial@83e00000";
+ } ;
+ cpus {
+ #address-cells = <1>;
+ #cpus = <0x1>;
+ #size-cells = <0>;
+ ppc440_0: cpu@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ clock-frequency = <300000000>;
+ compatible = "PowerPC,440", "ibm,ppc440";
+ d-cache-line-size = <0x20>;
+ d-cache-size = <0x8000>;
+ dcr-access-method = "native";
+ dcr-controller ;
+ device_type = "cpu";
+ i-cache-line-size = <0x20>;
+ i-cache-size = <0x8000>;
+ model = "PowerPC,440";
+ reg = <0>;
+ timebase-frequency = <300000000>;
+ xlnx,apu-control = <0x2000>;
+ xlnx,apu-udi-0 = <0x0>;
+ xlnx,apu-udi-1 = <0x0>;
+ xlnx,apu-udi-10 = <0x0>;
+ xlnx,apu-udi-11 = <0x0>;
+ xlnx,apu-udi-12 = <0x0>;
+ xlnx,apu-udi-13 = <0x0>;
+ xlnx,apu-udi-14 = <0x0>;
+ xlnx,apu-udi-15 = <0x0>;
+ xlnx,apu-udi-2 = <0x0>;
+ xlnx,apu-udi-3 = <0x0>;
+ xlnx,apu-udi-4 = <0x0>;
+ xlnx,apu-udi-5 = <0x0>;
+ xlnx,apu-udi-6 = <0x0>;
+ xlnx,apu-udi-7 = <0x0>;
+ xlnx,apu-udi-8 = <0x0>;
+ xlnx,apu-udi-9 = <0x0>;
+ xlnx,dcr-autolock-enable = <0x1>;
+ xlnx,dcu-rd-ld-cache-plb-prio = <0x0>;
+ xlnx,dcu-rd-noncache-plb-prio = <0x0>;
+ xlnx,dcu-rd-touch-plb-prio = <0x0>;
+ xlnx,dcu-rd-urgent-plb-prio = <0x0>;
+ xlnx,dcu-wr-flush-plb-prio = <0x0>;
+ xlnx,dcu-wr-store-plb-prio = <0x0>;
+ xlnx,dcu-wr-urgent-plb-prio = <0x0>;
+ xlnx,dma0-control = <0x0>;
+ xlnx,dma0-plb-prio = <0x0>;
+ xlnx,dma0-rxchannelctrl = <0x1010000>;
+ xlnx,dma0-rxirqtimer = <0x3ff>;
+ xlnx,dma0-txchannelctrl = <0x1010000>;
+ xlnx,dma0-txirqtimer = <0x3ff>;
+ xlnx,dma1-control = <0x0>;
+ xlnx,dma1-plb-prio = <0x0>;
+ xlnx,dma1-rxchannelctrl = <0x1010000>;
+ xlnx,dma1-rxirqtimer = <0x3ff>;
+ xlnx,dma1-txchannelctrl = <0x1010000>;
+ xlnx,dma1-txirqtimer = <0x3ff>;
+ xlnx,dma2-control = <0x0>;
+ xlnx,dma2-plb-prio = <0x0>;
+ xlnx,dma2-rxchannelctrl = <0x1010000>;
+ xlnx,dma2-rxirqtimer = <0x3ff>;
+ xlnx,dma2-txchannelctrl = <0x1010000>;
+ xlnx,dma2-txirqtimer = <0x3ff>;
+ xlnx,dma3-control = <0x0>;
+ xlnx,dma3-plb-prio = <0x0>;
+ xlnx,dma3-rxchannelctrl = <0x1010000>;
+ xlnx,dma3-rxirqtimer = <0x3ff>;
+ xlnx,dma3-txchannelctrl = <0x1010000>;
+ xlnx,dma3-txirqtimer = <0x3ff>;
+ xlnx,endian-reset = <0x0>;
+ xlnx,generate-plb-timespecs = <0x1>;
+ xlnx,icu-rd-fetch-plb-prio = <0x0>;
+ xlnx,icu-rd-spec-plb-prio = <0x0>;
+ xlnx,icu-rd-touch-plb-prio = <0x0>;
+ xlnx,interconnect-imask = <0xffffffff>;
+ xlnx,mplb-allow-lock-xfer = <0x1>;
+ xlnx,mplb-arb-mode = <0x0>;
+ xlnx,mplb-awidth = <0x20>;
+ xlnx,mplb-counter = <0x500>;
+ xlnx,mplb-dwidth = <0x80>;
+ xlnx,mplb-max-burst = <0x8>;
+ xlnx,mplb-native-dwidth = <0x80>;
+ xlnx,mplb-p2p = <0x0>;
+ xlnx,mplb-prio-dcur = <0x2>;
+ xlnx,mplb-prio-dcuw = <0x3>;
+ xlnx,mplb-prio-icu = <0x4>;
+ xlnx,mplb-prio-splb0 = <0x1>;
+ xlnx,mplb-prio-splb1 = <0x0>;
+ xlnx,mplb-read-pipe-enable = <0x1>;
+ xlnx,mplb-sync-tattribute = <0x0>;
+ xlnx,mplb-wdog-enable = <0x1>;
+ xlnx,mplb-write-pipe-enable = <0x1>;
+ xlnx,mplb-write-post-enable = <0x1>;
+ xlnx,num-dma = <0x0>;
+ xlnx,pir = <0xf>;
+ xlnx,ppc440mc-addr-base = <0x0>;
+ xlnx,ppc440mc-addr-high = <0x1fffffff>;
+ xlnx,ppc440mc-arb-mode = <0x0>;
+ xlnx,ppc440mc-bank-conflict-mask = <0x1800000>;
+ xlnx,ppc440mc-control = <0xf810008f>;
+ xlnx,ppc440mc-max-burst = <0x8>;
+ xlnx,ppc440mc-prio-dcur = <0x2>;
+ xlnx,ppc440mc-prio-dcuw = <0x3>;
+ xlnx,ppc440mc-prio-icu = <0x4>;
+ xlnx,ppc440mc-prio-splb0 = <0x1>;
+ xlnx,ppc440mc-prio-splb1 = <0x0>;
+ xlnx,ppc440mc-row-conflict-mask = <0x7ffe00>;
+ xlnx,ppcdm-asyncmode = <0x0>;
+ xlnx,ppcds-asyncmode = <0x0>;
+ xlnx,user-reset = <0x0>;
+ } ;
+ } ;
+ plb_v46_0: plb@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "xlnx,plb-v46-1.03.a", "simple-bus";
+ ranges ;
+ FLASH: flash@fc000000 {
+ bank-width = <2>;
+ compatible = "xlnx,xps-mch-emc-2.00.a", "cfi-flash";
+ reg = < 0xfc000000 0x2000000 >;
+ xlnx,family = "virtex5";
+ xlnx,include-datawidth-matching-0 = <0x1>;
+ xlnx,include-datawidth-matching-1 = <0x0>;
+ xlnx,include-datawidth-matching-2 = <0x0>;
+ xlnx,include-datawidth-matching-3 = <0x0>;
+ xlnx,include-negedge-ioregs = <0x0>;
+ xlnx,include-plb-ipif = <0x1>;
+ xlnx,include-wrbuf = <0x1>;
+ xlnx,max-mem-width = <0x10>;
+ xlnx,mch-native-dwidth = <0x20>;
+ xlnx,mch-plb-clk-period-ps = <0x2710>;
+ xlnx,mch-splb-awidth = <0x20>;
+ xlnx,mch0-accessbuf-depth = <0x10>;
+ xlnx,mch0-protocol = <0x0>;
+ xlnx,mch0-rddatabuf-depth = <0x10>;
+ xlnx,mch1-accessbuf-depth = <0x10>;
+ xlnx,mch1-protocol = <0x0>;
+ xlnx,mch1-rddatabuf-depth = <0x10>;
+ xlnx,mch2-accessbuf-depth = <0x10>;
+ xlnx,mch2-protocol = <0x0>;
+ xlnx,mch2-rddatabuf-depth = <0x10>;
+ xlnx,mch3-accessbuf-depth = <0x10>;
+ xlnx,mch3-protocol = <0x0>;
+ xlnx,mch3-rddatabuf-depth = <0x10>;
+ xlnx,mem0-width = <0x10>;
+ xlnx,mem1-width = <0x20>;
+ xlnx,mem2-width = <0x20>;
+ xlnx,mem3-width = <0x20>;
+ xlnx,num-banks-mem = <0x1>;
+ xlnx,num-channels = <0x2>;
+ xlnx,priority-mode = <0x0>;
+ xlnx,synch-mem-0 = <0x0>;
+ xlnx,synch-mem-1 = <0x0>;
+ xlnx,synch-mem-2 = <0x0>;
+ xlnx,synch-mem-3 = <0x0>;
+ xlnx,synch-pipedelay-0 = <0x2>;
+ xlnx,synch-pipedelay-1 = <0x2>;
+ xlnx,synch-pipedelay-2 = <0x2>;
+ xlnx,synch-pipedelay-3 = <0x2>;
+ xlnx,tavdv-ps-mem-0 = <0x1adb0>;
+ xlnx,tavdv-ps-mem-1 = <0x3a98>;
+ xlnx,tavdv-ps-mem-2 = <0x3a98>;
+ xlnx,tavdv-ps-mem-3 = <0x3a98>;
+ xlnx,tcedv-ps-mem-0 = <0x1adb0>;
+ xlnx,tcedv-ps-mem-1 = <0x3a98>;
+ xlnx,tcedv-ps-mem-2 = <0x3a98>;
+ xlnx,tcedv-ps-mem-3 = <0x3a98>;
+ xlnx,thzce-ps-mem-0 = <0x88b8>;
+ xlnx,thzce-ps-mem-1 = <0x1b58>;
+ xlnx,thzce-ps-mem-2 = <0x1b58>;
+ xlnx,thzce-ps-mem-3 = <0x1b58>;
+ xlnx,thzoe-ps-mem-0 = <0x1b58>;
+ xlnx,thzoe-ps-mem-1 = <0x1b58>;
+ xlnx,thzoe-ps-mem-2 = <0x1b58>;
+ xlnx,thzoe-ps-mem-3 = <0x1b58>;
+ xlnx,tlzwe-ps-mem-0 = <0x88b8>;
+ xlnx,tlzwe-ps-mem-1 = <0x0>;
+ xlnx,tlzwe-ps-mem-2 = <0x0>;
+ xlnx,tlzwe-ps-mem-3 = <0x0>;
+ xlnx,twc-ps-mem-0 = <0x1adb0>;
+ xlnx,twc-ps-mem-1 = <0x3a98>;
+ xlnx,twc-ps-mem-2 = <0x3a98>;
+ xlnx,twc-ps-mem-3 = <0x3a98>;
+ xlnx,twp-ps-mem-0 = <0x11170>;
+ xlnx,twp-ps-mem-1 = <0x2ee0>;
+ xlnx,twp-ps-mem-2 = <0x2ee0>;
+ xlnx,twp-ps-mem-3 = <0x2ee0>;
+ xlnx,xcl0-linesize = <0x4>;
+ xlnx,xcl0-writexfer = <0x1>;
+ xlnx,xcl1-linesize = <0x4>;
+ xlnx,xcl1-writexfer = <0x1>;
+ xlnx,xcl2-linesize = <0x4>;
+ xlnx,xcl2-writexfer = <0x1>;
+ xlnx,xcl3-linesize = <0x4>;
+ xlnx,xcl3-writexfer = <0x1>;
+ } ;
+ Hard_Ethernet_MAC: xps-ll-temac@81c00000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "xlnx,compound";
+ ethernet@81c00000 {
+ compatible = "xlnx,xps-ll-temac-1.01.b";
+ device_type = "network";
+ interrupt-parent = <&xps_intc_0>;
+ interrupts = < 8 2 >;
+ llink-connected = <&Hard_Ethernet_MAC_fifo>;
+ local-mac-address = [ 02 00 00 00 00 00 ];
+ reg = < 0x81c00000 0x40 >;
+ xlnx,bus2core-clk-ratio = <0x1>;
+ xlnx,phy-type = <0x3>;
+ xlnx,phyaddr = <0x1>;
+ xlnx,rxcsum = <0x0>;
+ xlnx,rxfifo = <0x8000>;
+ xlnx,temac-type = <0x0>;
+ xlnx,txcsum = <0x0>;
+ xlnx,txfifo = <0x8000>;
+ } ;
+ } ;
+ Hard_Ethernet_MAC_fifo: xps-ll-fifo@81a00000 {
+ compatible = "xlnx,xps-ll-fifo-1.01.a";
+ interrupt-parent = <&xps_intc_0>;
+ interrupts = < 6 2 >;
+ reg = < 0x81a00000 0x10000 >;
+ xlnx,family = "virtex5";
+ } ;
+ IIC_EEPROM: i2c@81600000 {
+ compatible = "xlnx,xps-iic-2.00.a";
+ interrupt-parent = <&xps_intc_0>;
+ interrupts = < 9 2 >;
+ reg = < 0x81600000 0x10000 >;
+ xlnx,clk-freq = <0x5f5e100>;
+ xlnx,family = "virtex5";
+ xlnx,gpo-width = <0x1>;
+ xlnx,iic-freq = <0x186a0>;
+ xlnx,scl-inertial-delay = <0x5>;
+ xlnx,sda-inertial-delay = <0x5>;
+ xlnx,ten-bit-adr = <0x0>;
+ } ;
+ LCD_OPTIONAL: gpio@81420000 {
+ compatible = "xlnx,xps-gpio-1.00.a";
+ reg = < 0x81420000 0x10000 >;
+ xlnx,all-inputs = <0x0>;
+ xlnx,all-inputs-2 = <0x0>;
+ xlnx,dout-default = <0x0>;
+ xlnx,dout-default-2 = <0x0>;
+ xlnx,family = "virtex5";
+ xlnx,gpio-width = <0xb>;
+ xlnx,interrupt-present = <0x0>;
+ xlnx,is-bidir = <0x1>;
+ xlnx,is-bidir-2 = <0x1>;
+ xlnx,is-dual = <0x0>;
+ xlnx,tri-default = <0xffffffff>;
+ xlnx,tri-default-2 = <0xffffffff>;
+ } ;
+ LEDs_4Bit: gpio@81400000 {
+ compatible = "xlnx,xps-gpio-1.00.a";
+ reg = < 0x81400000 0x10000 >;
+ xlnx,all-inputs = <0x0>;
+ xlnx,all-inputs-2 = <0x0>;
+ xlnx,dout-default = <0x0>;
+ xlnx,dout-default-2 = <0x0>;
+ xlnx,family = "virtex5";
+ xlnx,gpio-width = <0x4>;
+ xlnx,interrupt-present = <0x0>;
+ xlnx,is-bidir = <0x1>;
+ xlnx,is-bidir-2 = <0x1>;
+ xlnx,is-dual = <0x0>;
+ xlnx,tri-default = <0xffffffff>;
+ xlnx,tri-default-2 = <0xffffffff>;
+ } ;
+ RS232_Uart_1: serial@83e00000 {
+ clock-frequency = <100000000>;
+ compatible = "xlnx,xps-uart16550-2.00.b", "ns16550";
+ current-speed = <9600>;
+ device_type = "serial";
+ interrupt-parent = <&xps_intc_0>;
+ interrupts = < 11 2 >;
+ reg = < 0x83e00000 0x10000 >;
+ reg-offset = <0x1003>;
+ reg-shift = <2>;
+ xlnx,family = "virtex5";
+ xlnx,has-external-rclk = <0x0>;
+ xlnx,has-external-xin = <0x0>;
+ xlnx,is-a-16550 = <0x1>;
+ } ;
+ SPI_EEPROM: xps-spi@feff8000 {
+ compatible = "xlnx,xps-spi-2.00.b";
+ interrupt-parent = <&xps_intc_0>;
+ interrupts = < 10 2 >;
+ reg = < 0xfeff8000 0x80 >;
+ xlnx,family = "virtex5";
+ xlnx,fifo-exist = <0x1>;
+ xlnx,num-ss-bits = <0x1>;
+ xlnx,num-transfer-bits = <0x8>;
+ xlnx,sck-ratio = <0x80>;
+ } ;
+ SysACE_CompactFlash: sysace@83600000 {
+ compatible = "xlnx,xps-sysace-1.00.a";
+ interrupt-parent = <&xps_intc_0>;
+ interrupts = < 7 2 >;
+ reg = < 0x83600000 0x10000 >;
+ xlnx,family = "virtex5";
+ xlnx,mem-width = <0x10>;
+ } ;
+ plbv46_pci_0: plbv46-pci@85e00000 {
+ #size-cells = <2>;
+ #address-cells = <3>;
+ compatible = "xlnx,plbv46-pci-1.03.a";
+ device_type = "pci";
+ reg = < 0x85e00000 0x10000 >;
+
+ /*
+ * The default ML510 BSB has C_IPIFBAR2PCIBAR_0 set to
+ * 0 which means that a read/write to the memory mapped
+ * i/o region (which starts at 0xa0000000) for pci
+ * bar 0 on the plb side translates to 0.
+ * It is important to set this value to 0xa0000000, so
+ * that inbound and outbound pci transactions work
+ * properly including DMA.
+ */
+ ranges = <0x02000000 0 0xa0000000 0xa0000000 0 0x20000000
+ 0x01000000 0 0x00000000 0xf0000000 0 0x00010000>;
+
+ #interrupt-cells = <1>;
+ interrupt-parent = <&xps_intc_0>;
+ interrupt-map-mask = <0xff00 0x0 0x0 0x7>;
+ interrupt-map = <
+ /* IRQ mapping for pci slots and ALI M1533
+ * peripherals. In total there are 5 interrupt
+ * lines connected to an xps_intc controller.
+ * Four of them are PCI IRQ A, B, C and D,
+ * which correspond to xps_intc 5, 4, 3 and 2
+ * respectively. The fifth interrupt line is
+ * connected to the south bridge and this one
+ * uses irq 1 and is active high instead of
+ * active low.
+ *
+ * The M1533 contains various peripherals
+ * including AC97 audio, a modem, USB, IDE and
+ * some power management stuff. The modem
+ * isn't connected on the ML510 and the power
+ * management core also isn't used.
+ */
+
+ /* IDSEL 0x16 / dev=6, bus=0 / PCI slot 3 */
+ 0x3000 0 0 1 &xps_intc_0 3 2
+ 0x3000 0 0 2 &xps_intc_0 2 2
+ 0x3000 0 0 3 &xps_intc_0 5 2
+ 0x3000 0 0 4 &xps_intc_0 4 2
+
+ /* IDSEL 0x13 / dev=3, bus=1 / PCI slot 4 */
+ /*
+ 0x11800 0 0 1 &xps_intc_0 5 0 2
+ 0x11800 0 0 2 &xps_intc_0 4 0 2
+ 0x11800 0 0 3 &xps_intc_0 3 0 2
+ 0x11800 0 0 4 &xps_intc_0 2 0 2
+ */
+
+ /* According to the datasheet + schematic
+ * ABCD [FPGA] of slot 5 is mapped to DABC.
+ * Testing showed that at least A maps to B,
+ * the mapping of the other pins is a guess
+ * and for that reason the lines have been
+ * commented out.
+ */
+ /* IDSEL 0x15 / dev=5, bus=0 / PCI slot 5 */
+ 0x2800 0 0 1 &xps_intc_0 4 2
+ /*
+ 0x2800 0 0 2 &xps_intc_0 3 2
+ 0x2800 0 0 3 &xps_intc_0 2 2
+ 0x2800 0 0 4 &xps_intc_0 5 2
+ */
+
+ /* IDSEL 0x12 / dev=2, bus=1 / PCI slot 6 */
+ /*
+ 0x11000 0 0 1 &xps_intc_0 4 0 2
+ 0x11000 0 0 2 &xps_intc_0 3 0 2
+ 0x11000 0 0 3 &xps_intc_0 2 0 2
+ 0x11000 0 0 4 &xps_intc_0 5 0 2
+ */
+
+ /* IDSEL 0x11 / dev=1, bus=0 / AC97 audio */
+ 0x0800 0 0 1 &i8259 7 2
+
+ /* IDSEL 0x1b / dev=11, bus=0 / IDE */
+ 0x5800 0 0 1 &i8259 14 2
+
+ /* IDSEL 0x1f / dev 15, bus=0 / 2x USB 1.1 */
+ 0x7800 0 0 1 &i8259 7 2
+ >;
+ ali_m1533 {
+ #size-cells = <1>;
+ #address-cells = <2>;
+ i8259: interrupt-controller@20 {
+ reg = <1 0x20 2
+ 1 0xa0 2
+ 1 0x4d0 2>;
+ interrupt-controller;
+ device_type = "interrupt-controller";
+ #address-cells = <0>;
+ #interrupt-cells = <2>;
+ compatible = "chrp,iic";
+
+ /* south bridge irq is active high */
+ interrupts = <1 3>;
+ interrupt-parent = <&xps_intc_0>;
+ };
+ };
+ } ;
+ xps_bram_if_cntlr_1: xps-bram-if-cntlr@ffff0000 {
+ compatible = "xlnx,xps-bram-if-cntlr-1.00.a";
+ reg = < 0xffff0000 0x10000 >;
+ xlnx,family = "virtex5";
+ } ;
+ xps_intc_0: interrupt-controller@81800000 {
+ #interrupt-cells = <0x2>;
+ compatible = "xlnx,xps-intc-1.00.a";
+ interrupt-controller ;
+ reg = < 0x81800000 0x10000 >;
+ xlnx,num-intr-inputs = <0xc>;
+ } ;
+ xps_tft_0: tft@86e00000 {
+ compatible = "xlnx,xps-tft-1.00.a";
+ reg = < 0x86e00000 0x10000 >;
+ xlnx,dcr-splb-slave-if = <0x1>;
+ xlnx,default-tft-base-addr = <0x0>;
+ xlnx,family = "virtex5";
+ xlnx,i2c-slave-addr = <0x76>;
+ xlnx,mplb-awidth = <0x20>;
+ xlnx,mplb-dwidth = <0x80>;
+ xlnx,mplb-native-dwidth = <0x40>;
+ xlnx,mplb-smallest-slave = <0x20>;
+ xlnx,tft-interface = <0x1>;
+ } ;
+ } ;
+} ;
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index cb448d68452..3d9e887c3c0 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -15,9 +15,18 @@
#include <linux/scatterlist.h>
#include <linux/dma-attrs.h>
#include <asm/io.h>
+#include <asm/swiotlb.h>
#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
+/* Some dma direct funcs must be visible for use in other dma_ops */
+extern void *dma_direct_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag);
+extern void dma_direct_free_coherent(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_handle);
+
+extern unsigned long get_dma_direct_offset(struct device *dev);
+
#ifdef CONFIG_NOT_COHERENT_CACHE
/*
* DMA-consistent mapping functions for PowerPCs that don't support
@@ -78,6 +87,8 @@ struct dma_mapping_ops {
dma_addr_t dma_address, size_t size,
enum dma_data_direction direction,
struct dma_attrs *attrs);
+ int (*addr_needs_map)(struct device *dev, dma_addr_t addr,
+ size_t size);
#ifdef CONFIG_PPC_NEED_DMA_SYNC_OPS
void (*sync_single_range_for_cpu)(struct device *hwdev,
dma_addr_t dma_handle, unsigned long offset,
diff --git a/arch/powerpc/include/asm/lppaca.h b/arch/powerpc/include/asm/lppaca.h
index d2a65e8ca6a..f78f65c38f0 100644
--- a/arch/powerpc/include/asm/lppaca.h
+++ b/arch/powerpc/include/asm/lppaca.h
@@ -20,6 +20,11 @@
#define _ASM_POWERPC_LPPACA_H
#ifdef __KERNEL__
+/* These definitions relate to hypervisors that only exist when using
+ * a server type processor
+ */
+#ifdef CONFIG_PPC_BOOK3S
+
//=============================================================================
//
// This control block contains the data that is shared between the
@@ -158,5 +163,6 @@ struct slb_shadow {
extern struct slb_shadow slb_shadow[];
+#endif /* CONFIG_PPC_BOOK3S */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_LPPACA_H */
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index 325b7208a14..fb57ded592f 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -74,10 +74,10 @@ extern void early_init_mmu_secondary(void);
#endif /* !__ASSEMBLY__ */
-#ifdef CONFIG_PPC64
+#if defined(CONFIG_PPC_STD_MMU_64)
/* 64-bit classic hash table MMU */
# include <asm/mmu-hash64.h>
-#elif defined(CONFIG_PPC_STD_MMU)
+#elif defined(CONFIG_PPC_STD_MMU_32)
/* 32-bit classic hash table MMU */
# include <asm/mmu-hash32.h>
#elif defined(CONFIG_40x)
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index 082b3aedf14..0ea1985bdb8 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -43,6 +43,7 @@ struct task_struct;
* processor.
*/
struct paca_struct {
+#ifdef CONFIG_PPC_BOOK3S
/*
* Because hw_cpu_id, unlike other paca fields, is accessed
* routinely from other CPUs (from the IRQ code), we stick to
@@ -51,7 +52,7 @@ struct paca_struct {
*/
struct lppaca *lppaca_ptr; /* Pointer to LpPaca for PLIC */
-
+#endif /* CONFIG_PPC_BOOK3S */
/*
* MAGIC: the spinlock functions in arch/powerpc/lib/locks.c
* load lock_token and paca_index with a single lwz
@@ -64,13 +65,16 @@ struct paca_struct {
u64 kernel_toc; /* Kernel TOC address */
u64 kernelbase; /* Base address of kernel */
u64 kernel_msr; /* MSR while running in kernel */
+#ifdef CONFIG_PPC_STD_MMU_64
u64 stab_real; /* Absolute address of segment table */
u64 stab_addr; /* Virtual address of segment table */
+#endif /* CONFIG_PPC_STD_MMU_64 */
void *emergency_sp; /* pointer to emergency stack */
u64 data_offset; /* per cpu data offset */
s16 hw_cpu_id; /* Physical processor number */
u8 cpu_start; /* At startup, processor spins until */
/* this becomes non-zero. */
+#ifdef CONFIG_PPC_STD_MMU_64
struct slb_shadow *slb_shadow_ptr;
/*
@@ -81,11 +85,13 @@ struct paca_struct {
u64 exmc[10]; /* used for machine checks */
u64 exslb[10]; /* used for SLB/segment table misses
* on the linear mapping */
-
- mm_context_t context;
+ /* SLB related definitions */
u16 vmalloc_sllp;
u16 slb_cache_ptr;
u16 slb_cache[SLB_CACHE_ENTRIES];
+#endif /* CONFIG_PPC_STD_MMU_64 */
+
+ mm_context_t context;
/*
* then miscellaneous read-write fields
diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
index c40db05f21e..8cd083c6150 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64.h
@@ -31,9 +31,11 @@
#error TASK_SIZE_USER64 exceeds pagetable range
#endif
+#ifdef CONFIG_PPC_STD_MMU_64
#if TASK_SIZE_USER64 > (1UL << (USER_ESID_BITS + SID_SHIFT))
#error TASK_SIZE_USER64 exceeds user VSID range
#endif
+#endif
/*
* Define the address range of the vmalloc VM area.
@@ -199,8 +201,11 @@ static inline unsigned long pte_update(struct mm_struct *mm,
if (!huge)
assert_pte_locked(mm, addr);
+#ifdef CONFIG_PPC_STD_MMU_64
if (old & _PAGE_HASHPTE)
hpte_need_flush(mm, addr, ptep, old, huge);
+#endif
+
return old;
}
diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
index c9c678fb253..8c341490cfc 100644
--- a/arch/powerpc/include/asm/ptrace.h
+++ b/arch/powerpc/include/asm/ptrace.h
@@ -135,7 +135,9 @@ do { \
* These are defined as per linux/ptrace.h, which see.
*/
#define arch_has_single_step() (1)
+#define arch_has_block_step() (!cpu_has_feature(CPU_FTR_601))
extern void user_enable_single_step(struct task_struct *);
+extern void user_enable_block_step(struct task_struct *);
extern void user_disable_single_step(struct task_struct *);
#endif /* __ASSEMBLY__ */
@@ -288,4 +290,6 @@ extern void user_disable_single_step(struct task_struct *);
#define PPC_PTRACE_PEEKUSR_3264 0x91
#define PPC_PTRACE_POKEUSR_3264 0x90
+#define PTRACE_SINGLEBLOCK 0x100 /* resume execution until next branch */
+
#endif /* _ASM_POWERPC_PTRACE_H */
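For illustration, a hypothetical userspace snippet showing how a debugger might drive the new request (not part of this commit; it assumes the tracee is already attached and stopped, and omits error handling):

    #include <sys/ptrace.h>
    #include <sys/types.h>
    #include <sys/wait.h>

    #ifndef PTRACE_SINGLEBLOCK
    #define PTRACE_SINGLEBLOCK 0x100    /* matches the definition added above */
    #endif

    static void step_one_block(pid_t pid)
    {
            /* Resume the tracee until it takes its next branch... */
            ptrace(PTRACE_SINGLEBLOCK, pid, NULL, NULL);
            /* ...then wait for the resulting trap stop. */
            waitpid(pid, NULL, 0);
    }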
diff --git a/arch/powerpc/include/asm/swiotlb.h b/arch/powerpc/include/asm/swiotlb.h
new file mode 100644
index 00000000000..30891d6e2bc
--- /dev/null
+++ b/arch/powerpc/include/asm/swiotlb.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2009 Becky Bruce, Freescale Semiconductor
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#ifndef __ASM_SWIOTLB_H
+#define __ASM_SWIOTLB_H
+
+#include <linux/swiotlb.h>
+
+extern struct dma_mapping_ops swiotlb_dma_ops;
+extern struct dma_mapping_ops swiotlb_pci_dma_ops;
+
+int swiotlb_arch_address_needs_mapping(struct device *, dma_addr_t,
+ size_t size);
+
+static inline void dma_mark_clean(void *addr, size_t size) {}
+
+extern unsigned int ppc_swiotlb_enable;
+int __init swiotlb_setup_bus_notifier(void);
+
+#endif /* __ASM_SWIOTLB_H */
diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h
index 2b2420a4988..bb8e006a47c 100644
--- a/arch/powerpc/include/asm/system.h
+++ b/arch/powerpc/include/asm/system.h
@@ -211,7 +211,7 @@ extern struct task_struct *_switch(struct thread_struct *prev,
extern unsigned int rtas_data;
extern int mem_init_done; /* set on boot once kmalloc can be called */
-extern int init_bootmem_done; /* set on !NUMA once bootmem is available */
+extern int init_bootmem_done; /* set once bootmem is available */
extern phys_addr_t memory_limit;
extern unsigned long klimit;
diff --git a/arch/powerpc/include/asm/xilinx_pci.h b/arch/powerpc/include/asm/xilinx_pci.h
new file mode 100644
index 00000000000..7a8275caf6a
--- /dev/null
+++ b/arch/powerpc/include/asm/xilinx_pci.h
@@ -0,0 +1,21 @@
+/*
+ * Xilinx pci external definitions
+ *
+ * Copyright 2009 Roderick Colenbrander
+ * Copyright 2009 Secret Lab Technologies Ltd.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#ifndef INCLUDE_XILINX_PCI
+#define INCLUDE_XILINX_PCI
+
+#ifdef CONFIG_XILINX_PCI
+extern void __init xilinx_pci_init(void);
+#else
+static inline void __init xilinx_pci_init(void) { return; }
+#endif
+
+#endif /* INCLUDE_XILINX_PCI */
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 71901fbda4a..65cf36502aa 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -36,7 +36,7 @@ obj-$(CONFIG_PPC64) += setup_64.o sys_ppc32.o \
firmware.o nvram_64.o
obj64-$(CONFIG_RELOCATABLE) += reloc_64.o
obj-$(CONFIG_PPC64) += vdso64/
-obj-$(CONFIG_ALTIVEC) += vecemu.o vector.o
+obj-$(CONFIG_ALTIVEC) += vecemu.o
obj-$(CONFIG_PPC_970_NAP) += idle_power4.o
obj-$(CONFIG_PPC_OF) += of_device.o of_platform.o prom_parse.o
obj-$(CONFIG_PPC_CLOCK) += clock.o
@@ -82,6 +82,7 @@ obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_KPROBES) += kprobes.o
obj-$(CONFIG_PPC_UDBG_16550) += legacy_serial.o udbg_16550.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
+obj-$(CONFIG_SWIOTLB) += dma-swiotlb.o
pci64-$(CONFIG_PPC64) += pci_dn.o isa-bridge.o
obj-$(CONFIG_PCI) += pci_$(CONFIG_WORD_SIZE).o $(pci64-y) \
@@ -108,6 +109,7 @@ obj-y += ppc_save_regs.o
endif
extra-$(CONFIG_PPC_FPU) += fpu.o
+extra-$(CONFIG_ALTIVEC) += vector.o
extra-$(CONFIG_PPC64) += entry_64.o
extra-y += systbl_chk.i
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 1e40bc05394..ce90c570cd8 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -122,8 +122,6 @@ int main(void)
DEFINE(PACAKSAVE, offsetof(struct paca_struct, kstack));
DEFINE(PACACURRENT, offsetof(struct paca_struct, __current));
DEFINE(PACASAVEDMSR, offsetof(struct paca_struct, saved_msr));
- DEFINE(PACASTABREAL, offsetof(struct paca_struct, stab_real));
- DEFINE(PACASTABVIRT, offsetof(struct paca_struct, stab_addr));
DEFINE(PACASTABRR, offsetof(struct paca_struct, stab_rr));
DEFINE(PACAR1, offsetof(struct paca_struct, saved_r1));
DEFINE(PACATOC, offsetof(struct paca_struct, kernel_toc));
@@ -131,35 +129,30 @@ int main(void)
DEFINE(PACAKMSR, offsetof(struct paca_struct, kernel_msr));
DEFINE(PACASOFTIRQEN, offsetof(struct paca_struct, soft_enabled));
DEFINE(PACAHARDIRQEN, offsetof(struct paca_struct, hard_enabled));
- DEFINE(PACASLBCACHE, offsetof(struct paca_struct, slb_cache));
- DEFINE(PACASLBCACHEPTR, offsetof(struct paca_struct, slb_cache_ptr));
DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id));
- DEFINE(PACAVMALLOCSLLP, offsetof(struct paca_struct, vmalloc_sllp));
#ifdef CONFIG_PPC_MM_SLICES
DEFINE(PACALOWSLICESPSIZE, offsetof(struct paca_struct,
context.low_slices_psize));
DEFINE(PACAHIGHSLICEPSIZE, offsetof(struct paca_struct,
context.high_slices_psize));
DEFINE(MMUPSIZEDEFSIZE, sizeof(struct mmu_psize_def));
+#endif /* CONFIG_PPC_MM_SLICES */
+#ifdef CONFIG_PPC_STD_MMU_64
+ DEFINE(PACASTABREAL, offsetof(struct paca_struct, stab_real));
+ DEFINE(PACASTABVIRT, offsetof(struct paca_struct, stab_addr));
+ DEFINE(PACASLBCACHE, offsetof(struct paca_struct, slb_cache));
+ DEFINE(PACASLBCACHEPTR, offsetof(struct paca_struct, slb_cache_ptr));
+ DEFINE(PACAVMALLOCSLLP, offsetof(struct paca_struct, vmalloc_sllp));
+#ifdef CONFIG_PPC_MM_SLICES
DEFINE(MMUPSIZESLLP, offsetof(struct mmu_psize_def, sllp));
#else
DEFINE(PACACONTEXTSLLP, offsetof(struct paca_struct, context.sllp));
-
#endif /* CONFIG_PPC_MM_SLICES */
DEFINE(PACA_EXGEN, offsetof(struct paca_struct, exgen));
DEFINE(PACA_EXMC, offsetof(struct paca_struct, exmc));
DEFINE(PACA_EXSLB, offsetof(struct paca_struct, exslb));
- DEFINE(PACAEMERGSP, offsetof(struct paca_struct, emergency_sp));
DEFINE(PACALPPACAPTR, offsetof(struct paca_struct, lppaca_ptr));
- DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id));
- DEFINE(PACA_STARTPURR, offsetof(struct paca_struct, startpurr));
- DEFINE(PACA_STARTSPURR, offsetof(struct paca_struct, startspurr));
- DEFINE(PACA_USER_TIME, offsetof(struct paca_struct, user_time));
- DEFINE(PACA_SYSTEM_TIME, offsetof(struct paca_struct, system_time));
DEFINE(PACA_SLBSHADOWPTR, offsetof(struct paca_struct, slb_shadow_ptr));
- DEFINE(PACA_DATA_OFFSET, offsetof(struct paca_struct, data_offset));
- DEFINE(PACA_TRAP_SAVE, offsetof(struct paca_struct, trap_save));
-
DEFINE(SLBSHADOW_STACKVSID,
offsetof(struct slb_shadow, save_area[SLB_NUM_BOLTED - 1].vsid));
DEFINE(SLBSHADOW_STACKESID,
@@ -169,6 +162,15 @@ int main(void)
DEFINE(LPPACAANYINT, offsetof(struct lppaca, int_dword.any_int));
DEFINE(LPPACADECRINT, offsetof(struct lppaca, int_dword.fields.decr_int));
DEFINE(SLBSHADOW_SAVEAREA, offsetof(struct slb_shadow, save_area));
+#endif /* CONFIG_PPC_STD_MMU_64 */
+ DEFINE(PACAEMERGSP, offsetof(struct paca_struct, emergency_sp));
+ DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id));
+ DEFINE(PACA_STARTPURR, offsetof(struct paca_struct, startpurr));
+ DEFINE(PACA_STARTSPURR, offsetof(struct paca_struct, startspurr));
+ DEFINE(PACA_USER_TIME, offsetof(struct paca_struct, user_time));
+ DEFINE(PACA_SYSTEM_TIME, offsetof(struct paca_struct, system_time));
+ DEFINE(PACA_DATA_OFFSET, offsetof(struct paca_struct, data_offset));
+ DEFINE(PACA_TRAP_SAVE, offsetof(struct paca_struct, trap_save));
#endif /* CONFIG_PPC64 */
/* RTAS */
diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c
new file mode 100644
index 00000000000..68ccf11e4f1
--- /dev/null
+++ b/arch/powerpc/kernel/dma-swiotlb.c
@@ -0,0 +1,163 @@
+/*
+ * Contains routines needed to support swiotlb for ppc.
+ *
+ * Copyright (C) 2009 Becky Bruce, Freescale Semiconductor
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/pfn.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pci.h>
+
+#include <asm/machdep.h>
+#include <asm/swiotlb.h>
+#include <asm/dma.h>
+#include <asm/abs_addr.h>
+
+int swiotlb __read_mostly;
+unsigned int ppc_swiotlb_enable;
+
+void *swiotlb_bus_to_virt(struct device *hwdev, dma_addr_t addr)
+{
+ unsigned long pfn = PFN_DOWN(swiotlb_bus_to_phys(hwdev, addr));
+ void *pageaddr = page_address(pfn_to_page(pfn));
+
+ if (pageaddr != NULL)
+ return pageaddr + (addr % PAGE_SIZE);
+ return NULL;
+}
+
+dma_addr_t swiotlb_phys_to_bus(struct device *hwdev, phys_addr_t paddr)
+{
+ return paddr + get_dma_direct_offset(hwdev);
+}
+
+phys_addr_t swiotlb_bus_to_phys(struct device *hwdev, dma_addr_t baddr)
+
+{
+ return baddr - get_dma_direct_offset(hwdev);
+}
+
+/*
+ * Determine if an address needs bounce buffering via swiotlb.
+ * Going forward I expect the swiotlb code to generalize on using
+ * a dma_ops->addr_needs_map, and this function will move from here to the
+ * generic swiotlb code.
+ */
+int
+swiotlb_arch_address_needs_mapping(struct device *hwdev, dma_addr_t addr,
+ size_t size)
+{
+ struct dma_mapping_ops *dma_ops = get_dma_ops(hwdev);
+
+ BUG_ON(!dma_ops);
+ return dma_ops->addr_needs_map(hwdev, addr, size);
+}
+
+/*
+ * Determine if an address is reachable by a pci device, or if we must bounce.
+ */
+static int
+swiotlb_pci_addr_needs_map(struct device *hwdev, dma_addr_t addr, size_t size)
+{
+ u64 mask = dma_get_mask(hwdev);
+ dma_addr_t max;
+ struct pci_controller *hose;
+ struct pci_dev *pdev = to_pci_dev(hwdev);
+
+ hose = pci_bus_to_host(pdev->bus);
+ max = hose->dma_window_base_cur + hose->dma_window_size;
+
+ /* check that we're within mapped pci window space */
+ if ((addr + size > max) | (addr < hose->dma_window_base_cur))
+ return 1;
+
+ return !is_buffer_dma_capable(mask, addr, size);
+}
+
+static int
+swiotlb_addr_needs_map(struct device *hwdev, dma_addr_t addr, size_t size)
+{
+ return !is_buffer_dma_capable(dma_get_mask(hwdev), addr, size);
+}
+
+
+/*
+ * At the moment, all platforms that use this code only require
+ * swiotlb to be used if we're operating on HIGHMEM. Since
+ * we don't ever call anything other than map_sg, unmap_sg,
+ * map_page, and unmap_page on highmem, use normal dma_ops
+ * for everything else.
+ */
+struct dma_mapping_ops swiotlb_dma_ops = {
+ .alloc_coherent = dma_direct_alloc_coherent,
+ .free_coherent = dma_direct_free_coherent,
+ .map_sg = swiotlb_map_sg_attrs,
+ .unmap_sg = swiotlb_unmap_sg_attrs,
+ .dma_supported = swiotlb_dma_supported,
+ .map_page = swiotlb_map_page,
+ .unmap_page = swiotlb_unmap_page,
+ .addr_needs_map = swiotlb_addr_needs_map,
+ .sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu,
+ .sync_single_range_for_device = swiotlb_sync_single_range_for_device,
+ .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
+ .sync_sg_for_device = swiotlb_sync_sg_for_device
+};
+
+struct dma_mapping_ops swiotlb_pci_dma_ops = {
+ .alloc_coherent = dma_direct_alloc_coherent,
+ .free_coherent = dma_direct_free_coherent,
+ .map_sg = swiotlb_map_sg_attrs,
+ .unmap_sg = swiotlb_unmap_sg_attrs,
+ .dma_supported = swiotlb_dma_supported,
+ .map_page = swiotlb_map_page,
+ .unmap_page = swiotlb_unmap_page,
+ .addr_needs_map = swiotlb_pci_addr_needs_map,
+ .sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu,
+ .sync_single_range_for_device = swiotlb_sync_single_range_for_device,
+ .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
+ .sync_sg_for_device = swiotlb_sync_sg_for_device
+};
+
+static int ppc_swiotlb_bus_notify(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct device *dev = data;
+
+ /* We are only interested in device addition */
+ if (action != BUS_NOTIFY_ADD_DEVICE)
+ return 0;
+
+ /* May need to bounce if the device can't address all of DRAM */
+ if (dma_get_mask(dev) < lmb_end_of_DRAM())
+ set_dma_ops(dev, &swiotlb_dma_ops);
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block ppc_swiotlb_plat_bus_notifier = {
+ .notifier_call = ppc_swiotlb_bus_notify,
+ .priority = 0,
+};
+
+static struct notifier_block ppc_swiotlb_of_bus_notifier = {
+ .notifier_call = ppc_swiotlb_bus_notify,
+ .priority = 0,
+};
+
+int __init swiotlb_setup_bus_notifier(void)
+{
+ bus_register_notifier(&platform_bus_type,
+ &ppc_swiotlb_plat_bus_notifier);
+ bus_register_notifier(&of_platform_bus_type,
+ &ppc_swiotlb_of_bus_notifier);
+
+ return 0;
+}
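The diffstat above also shows small hooks in setup_32.c and setup_64.c (not reproduced here); a hedged sketch of how platform setup code is expected to opt in, assuming the generic swiotlb_init() allocates the bounce pool once the platform has set ppc_swiotlb_enable:

    #ifdef CONFIG_SWIOTLB
            if (ppc_swiotlb_enable)
                    swiotlb_init();         /* set up the bounce buffer pool */
    #endif

Individual devices then pick up swiotlb_dma_ops through the bus notifier registered above whenever their DMA mask cannot cover all of DRAM.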
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index 6b02793dc75..20a60d661ba 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -19,7 +19,7 @@
* default the offset is PCI_DRAM_OFFSET.
*/
-static unsigned long get_dma_direct_offset(struct device *dev)
+unsigned long get_dma_direct_offset(struct device *dev)
{
if (dev)
return (unsigned long)dev->archdata.dma_data;
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
new file mode 100644
index 00000000000..eb898112e57
--- /dev/null
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -0,0 +1,978 @@
+/*
+ * This file contains the 64-bit "server" PowerPC variant
+ * of the low level exception handling including exception
+ * vectors, exception return, part of the slb and stab
+ * handling and other fixed offset specific things.
+ *
+ * This file is meant to be #included from head_64.S due to
+ * position-dependent assembly.
+ *
+ * Most of this originates from head_64.S and thus has the same
+ * copyright history.
+ *
+ */
+
+/*
+ * We lay out physical memory as follows:
+ * 0x0000 - 0x00ff : Secondary processor spin code
+ * 0x0100 - 0x2fff : pSeries Interrupt prologs
+ * 0x3000 - 0x5fff : interrupt support, iSeries and common interrupt prologs
+ * 0x6000 - 0x6fff : Initial (CPU0) segment table
+ * 0x7000 - 0x7fff : FWNMI data area
+ * 0x8000 - : Early init and support code
+ */
+
+
+/*
+ * SPRG Usage
+ *
+ * Register Definition
+ *
+ * SPRG0 reserved for hypervisor
+ * SPRG1 temp - used to save gpr
+ * SPRG2 temp - used to save gpr
+ * SPRG3 virt addr of paca
+ */
+
+/*
+ * This is the start of the interrupt handlers for pSeries
+ * This code runs with relocation off.
+ * Code from here to __end_interrupts gets copied down to real
+ * address 0x100 when we are running a relocatable kernel.
+ * Therefore any relative branches in this section must only
+ * branch to labels in this section.
+ */
+ . = 0x100
+ .globl __start_interrupts
+__start_interrupts:
+
+ STD_EXCEPTION_PSERIES(0x100, system_reset)
+
+ . = 0x200
+_machine_check_pSeries:
+ HMT_MEDIUM
+ mtspr SPRN_SPRG1,r13 /* save r13 */
+ EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
+
+ . = 0x300
+ .globl data_access_pSeries
+data_access_pSeries:
+ HMT_MEDIUM
+ mtspr SPRN_SPRG1,r13
+BEGIN_FTR_SECTION
+ mtspr SPRN_SPRG2,r12
+ mfspr r13,SPRN_DAR
+ mfspr r12,SPRN_DSISR
+ srdi r13,r13,60
+ rlwimi r13,r12,16,0x20
+ mfcr r12
+ cmpwi r13,0x2c
+ beq do_stab_bolted_pSeries
+ mtcrf 0x80,r12
+ mfspr r12,SPRN_SPRG2
+END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
+ EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common)
+
+ . = 0x380
+ .globl data_access_slb_pSeries
+data_access_slb_pSeries:
+ HMT_MEDIUM
+ mtspr SPRN_SPRG1,r13
+ mfspr r13,SPRN_SPRG3 /* get paca address into r13 */
+ std r3,PACA_EXSLB+EX_R3(r13)
+ mfspr r3,SPRN_DAR
+ std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */
+ mfcr r9
+#ifdef __DISABLED__
+ /* Keep that around for when we re-implement dynamic VSIDs */
+ cmpdi r3,0
+ bge slb_miss_user_pseries
+#endif /* __DISABLED__ */
+ std r10,PACA_EXSLB+EX_R10(r13)
+ std r11,PACA_EXSLB+EX_R11(r13)
+ std r12,PACA_EXSLB+EX_R12(r13)
+ mfspr r10,SPRN_SPRG1
+ std r10,PACA_EXSLB+EX_R13(r13)
+ mfspr r12,SPRN_SRR1 /* and SRR1 */
+#ifndef CONFIG_RELOCATABLE
+ b .slb_miss_realmode
+#else
+ /*
+ * We can't just use a direct branch to .slb_miss_realmode
+ * because the distance from here to there depends on where
+ * the kernel ends up being put.
+ */
+ mfctr r11
+ ld r10,PACAKBASE(r13)
+ LOAD_HANDLER(r10, .slb_miss_realmode)
+ mtctr r10
+ bctr
+#endif
+
+ STD_EXCEPTION_PSERIES(0x400, instruction_access)
+
+ . = 0x480
+ .globl instruction_access_slb_pSeries
+instruction_access_slb_pSeries:
+ HMT_MEDIUM
+ mtspr SPRN_SPRG1,r13
+ mfspr r13,SPRN_SPRG3 /* get paca address into r13 */
+ std r3,PACA_EXSLB+EX_R3(r13)
+ mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */
+ std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */
+ mfcr r9
+#ifdef __DISABLED__
+ /* Keep that around for when we re-implement dynamic VSIDs */
+ cmpdi r3,0
+ bge slb_miss_user_pseries
+#endif /* __DISABLED__ */
+ std r10,PACA_EXSLB+EX_R10(r13)
+ std r11,PACA_EXSLB+EX_R11(r13)
+ std r12,PACA_EXSLB+EX_R12(r13)
+ mfspr r10,SPRN_SPRG1
+ std r10,PACA_EXSLB+EX_R13(r13)
+ mfspr r12,SPRN_SRR1 /* and SRR1 */
+#ifndef CONFIG_RELOCATABLE
+ b .slb_miss_realmode
+#else
+ mfctr r11
+ ld r10,PACAKBASE(r13)
+ LOAD_HANDLER(r10, .slb_miss_realmode)
+ mtctr r10
+ bctr
+#endif
+
+ MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt)
+ STD_EXCEPTION_PSERIES(0x600, alignment)
+ STD_EXCEPTION_PSERIES(0x700, program_check)
+ STD_EXCEPTION_PSERIES(0x800, fp_unavailable)
+ MASKABLE_EXCEPTION_PSERIES(0x900, decrementer)
+ STD_EXCEPTION_PSERIES(0xa00, trap_0a)
+ STD_EXCEPTION_PSERIES(0xb00, trap_0b)
+
+ . = 0xc00
+ .globl system_call_pSeries
+system_call_pSeries:
+ HMT_MEDIUM
+BEGIN_FTR_SECTION
+ cmpdi r0,0x1ebe
+ beq- 1f
+END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
+ mr r9,r13
+ mfspr r13,SPRN_SPRG3
+ mfspr r11,SPRN_SRR0
+ ld r12,PACAKBASE(r13)
+ ld r10,PACAKMSR(r13)
+ LOAD_HANDLER(r12, system_call_entry)
+ mtspr SPRN_SRR0,r12
+ mfspr r12,SPRN_SRR1
+ mtspr SPRN_SRR1,r10
+ rfid
+ b . /* prevent speculative execution */
+
+/* Fast LE/BE switch system call */
+1: mfspr r12,SPRN_SRR1
+ xori r12,r12,MSR_LE
+ mtspr SPRN_SRR1,r12
+ rfid /* return to userspace */
+ b .
+
+ STD_EXCEPTION_PSERIES(0xd00, single_step)
+ STD_EXCEPTION_PSERIES(0xe00, trap_0e)
+
+ /* We need to deal with the Altivec unavailable exception
+ * here which is at 0xf20, thus in the middle of the
+ * prolog code of the PerformanceMonitor one. A little
+ * trickery is thus necessary
+ */
+ . = 0xf00
+ b performance_monitor_pSeries
+
+ . = 0xf20
+ b altivec_unavailable_pSeries
+
+ . = 0xf40
+ b vsx_unavailable_pSeries
+
+#ifdef CONFIG_CBE_RAS
+ HSTD_EXCEPTION_PSERIES(0x1200, cbe_system_error)
+#endif /* CONFIG_CBE_RAS */
+ STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint)
+#ifdef CONFIG_CBE_RAS
+ HSTD_EXCEPTION_PSERIES(0x1600, cbe_maintenance)
+#endif /* CONFIG_CBE_RAS */
+ STD_EXCEPTION_PSERIES(0x1700, altivec_assist)
+#ifdef CONFIG_CBE_RAS
+ HSTD_EXCEPTION_PSERIES(0x1800, cbe_thermal)
+#endif /* CONFIG_CBE_RAS */
+
+ . = 0x3000
+
+/*** pSeries interrupt support ***/
+
+ /* moved from 0xf00 */
+ STD_EXCEPTION_PSERIES(., performance_monitor)
+ STD_EXCEPTION_PSERIES(., altivec_unavailable)
+ STD_EXCEPTION_PSERIES(., vsx_unavailable)
+
+/*
+ * An interrupt came in while soft-disabled; clear EE in SRR1,
+ * clear paca->hard_enabled and return.
+ */
+masked_interrupt:
+ stb r10,PACAHARDIRQEN(r13)
+ mtcrf 0x80,r9
+ ld r9,PACA_EXGEN+EX_R9(r13)
+ mfspr r10,SPRN_SRR1
+ rldicl r10,r10,48,1 /* clear MSR_EE */
+ rotldi r10,r10,16
+ mtspr SPRN_SRR1,r10
+ ld r10,PACA_EXGEN+EX_R10(r13)
+ mfspr r13,SPRN_SPRG1
+ rfid
+ b .
+
+ .align 7
+do_stab_bolted_pSeries:
+ mtcrf 0x80,r12
+ mfspr r12,SPRN_SPRG2
+ EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted)
+
+#ifdef CONFIG_PPC_PSERIES
+/*
+ * Vectors for the FWNMI option. Share common code.
+ */
+ .globl system_reset_fwnmi
+ .align 7
+system_reset_fwnmi:
+ HMT_MEDIUM
+ mtspr SPRN_SPRG1,r13 /* save r13 */
+ EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common)
+
+ .globl machine_check_fwnmi
+ .align 7
+machine_check_fwnmi:
+ HMT_MEDIUM
+ mtspr SPRN_SPRG1,r13 /* save r13 */
+ EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
+
+#endif /* CONFIG_PPC_PSERIES */
+
+#ifdef __DISABLED__
+/*
+ * This is used when the SLB miss handler has to go virtual,
+ * which doesn't currently happen but will once we re-implement
+ * dynamic VSIDs for shared page tables
+ */
+slb_miss_user_pseries:
+ std r10,PACA_EXGEN+EX_R10(r13)
+ std r11,PACA_EXGEN+EX_R11(r13)
+ std r12,PACA_EXGEN+EX_R12(r13)
+ mfspr r10,SPRG1
+ ld r11,PACA_EXSLB+EX_R9(r13)
+ ld r12,PACA_EXSLB+EX_R3(r13)
+ std r10,PACA_EXGEN+EX_R13(r13)
+ std r11,PACA_EXGEN+EX_R9(r13)
+ std r12,PACA_EXGEN+EX_R3(r13)
+ clrrdi r12,r13,32
+ mfmsr r10
+ mfspr r11,SRR0 /* save SRR0 */
+ ori r12,r12,slb_miss_user_common@l /* virt addr of handler */
+ ori r10,r10,MSR_IR|MSR_DR|MSR_RI
+ mtspr SRR0,r12
+ mfspr r12,SRR1 /* and SRR1 */
+ mtspr SRR1,r10
+ rfid
+ b . /* prevent spec. execution */
+#endif /* __DISABLED__ */
+
+ .align 7
+ .globl __end_interrupts
+__end_interrupts:
+
+/*
+ * Code from here down to __end_handlers is invoked from the
+ * exception prologs above. Because the prologs assemble the
+ * addresses of these handlers using the LOAD_HANDLER macro,
+ * which uses an addi instruction, these handlers must be in
+ * the first 32k of the kernel image.
+ */
+
+/*** Common interrupt handlers ***/
+
+ STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception)
+
+ /*
+ * Machine check is different because we use a different
+ * save area: PACA_EXMC instead of PACA_EXGEN.
+ */
+ .align 7
+ .globl machine_check_common
+machine_check_common:
+ EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
+ FINISH_NAP
+ DISABLE_INTS
+ bl .save_nvgprs
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl .machine_check_exception
+ b .ret_from_except
+
+ STD_EXCEPTION_COMMON_LITE(0x900, decrementer, .timer_interrupt)
+ STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception)
+ STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
+ STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
+ STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
+ STD_EXCEPTION_COMMON_IDLE(0xf00, performance_monitor, .performance_monitor_exception)
+ STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
+#ifdef CONFIG_ALTIVEC
+ STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
+#else
+ STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception)
+#endif
+#ifdef CONFIG_CBE_RAS
+ STD_EXCEPTION_COMMON(0x1200, cbe_system_error, .cbe_system_error_exception)
+ STD_EXCEPTION_COMMON(0x1600, cbe_maintenance, .cbe_maintenance_exception)
+ STD_EXCEPTION_COMMON(0x1800, cbe_thermal, .cbe_thermal_exception)
+#endif /* CONFIG_CBE_RAS */
+
+ .align 7
+system_call_entry:
+ b system_call_common
+
+/*
+ * Here we have detected that the kernel stack pointer is bad.
+ * R9 contains the saved CR, r13 points to the paca,
+ * r10 contains the (bad) kernel stack pointer,
+ * r11 and r12 contain the saved SRR0 and SRR1.
+ * We switch to using an emergency stack, save the registers there,
+ * and call kernel_bad_stack(), which panics.
+ */
+bad_stack:
+ ld r1,PACAEMERGSP(r13)
+ subi r1,r1,64+INT_FRAME_SIZE
+ std r9,_CCR(r1)
+ std r10,GPR1(r1)
+ std r11,_NIP(r1)
+ std r12,_MSR(r1)
+ mfspr r11,SPRN_DAR
+ mfspr r12,SPRN_DSISR
+ std r11,_DAR(r1)
+ std r12,_DSISR(r1)
+ mflr r10
+ mfctr r11
+ mfxer r12
+ std r10,_LINK(r1)
+ std r11,_CTR(r1)
+ std r12,_XER(r1)
+ SAVE_GPR(0,r1)
+ SAVE_GPR(2,r1)
+ SAVE_4GPRS(3,r1)
+ SAVE_2GPRS(7,r1)
+ SAVE_10GPRS(12,r1)
+ SAVE_10GPRS(22,r1)
+ lhz r12,PACA_TRAP_SAVE(r13)
+ std r12,_TRAP(r1)
+ addi r11,r1,INT_FRAME_SIZE
+ std r11,0(r1)
+ li r12,0
+ std r12,0(r11)
+ ld r2,PACATOC(r13)
+1: addi r3,r1,STACK_FRAME_OVERHEAD
+ bl .kernel_bad_stack
+ b 1b
+
+/*
+ * Here r13 points to the paca, r9 contains the saved CR,
+ * SRR0 and SRR1 are saved in r11 and r12,
+ * r9 - r13 are saved in paca->exgen.
+ */
+ .align 7
+ .globl data_access_common
+data_access_common:
+ mfspr r10,SPRN_DAR
+ std r10,PACA_EXGEN+EX_DAR(r13)
+ mfspr r10,SPRN_DSISR
+ stw r10,PACA_EXGEN+EX_DSISR(r13)
+ EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
+ ld r3,PACA_EXGEN+EX_DAR(r13)
+ lwz r4,PACA_EXGEN+EX_DSISR(r13)
+ li r5,0x300
+ b .do_hash_page /* Try to handle as hpte fault */
+
+ .align 7
+ .globl instruction_access_common
+instruction_access_common:
+ EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
+ ld r3,_NIP(r1)
+ andis. r4,r12,0x5820
+ li r5,0x400
+ b .do_hash_page /* Try to handle as hpte fault */
+
+/*
+ * Here is the common SLB miss user that is used when going to virtual
+ * mode for SLB misses, that is currently not used
+ */
+#ifdef __DISABLED__
+ .align 7
+ .globl slb_miss_user_common
+slb_miss_user_common:
+ mflr r10
+ std r3,PACA_EXGEN+EX_DAR(r13)
+ stw r9,PACA_EXGEN+EX_CCR(r13)
+ std r10,PACA_EXGEN+EX_LR(r13)
+ std r11,PACA_EXGEN+EX_SRR0(r13)
+ bl .slb_allocate_user
+
+ ld r10,PACA_EXGEN+EX_LR(r13)
+ ld r3,PACA_EXGEN+EX_R3(r13)
+ lwz r9,PACA_EXGEN+EX_CCR(r13)
+ ld r11,PACA_EXGEN+EX_SRR0(r13)
+ mtlr r10
+ beq- slb_miss_fault
+
+ andi. r10,r12,MSR_RI /* check for unrecoverable exception */
+ beq- unrecov_user_slb
+ mfmsr r10
+
+.machine push
+.machine "power4"
+ mtcrf 0x80,r9
+.machine pop
+
+ clrrdi r10,r10,2 /* clear RI before setting SRR0/1 */
+ mtmsrd r10,1
+
+ mtspr SRR0,r11
+ mtspr SRR1,r12
+
+ ld r9,PACA_EXGEN+EX_R9(r13)
+ ld r10,PACA_EXGEN+EX_R10(r13)
+ ld r11,PACA_EXGEN+EX_R11(r13)
+ ld r12,PACA_EXGEN+EX_R12(r13)
+ ld r13,PACA_EXGEN+EX_R13(r13)
+ rfid
+ b .
+
+slb_miss_fault:
+ EXCEPTION_PROLOG_COMMON(0x380, PACA_EXGEN)
+ ld r4,PACA_EXGEN+EX_DAR(r13)
+ li r5,0
+ std r4,_DAR(r1)
+ std r5,_DSISR(r1)
+ b handle_page_fault
+
+unrecov_user_slb:
+ EXCEPTION_PROLOG_COMMON(0x4200, PACA_EXGEN)
+ DISABLE_INTS
+ bl .save_nvgprs
+1: addi r3,r1,STACK_FRAME_OVERHEAD
+ bl .unrecoverable_exception
+ b 1b
+
+#endif /* __DISABLED__ */
+
+
+/*
+ * r13 points to the PACA, r9 contains the saved CR,
+ * r12 contain the saved SRR1, SRR0 is still ready for return
+ * r3 has the faulting address
+ * r9 - r13 are saved in paca->exslb.
+ * r3 is saved in paca->slb_r3
+ * We assume we aren't going to take any exceptions during this procedure.
+ */
+_GLOBAL(slb_miss_realmode)
+ mflr r10
+#ifdef CONFIG_RELOCATABLE
+ mtctr r11
+#endif
+
+ stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
+ std r10,PACA_EXSLB+EX_LR(r13) /* save LR */
+
+ bl .slb_allocate_realmode
+
+ /* All done -- return from exception. */
+
+ ld r10,PACA_EXSLB+EX_LR(r13)
+ ld r3,PACA_EXSLB+EX_R3(r13)
+ lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
+#ifdef CONFIG_PPC_ISERIES
+BEGIN_FW_FTR_SECTION
+ ld r11,PACALPPACAPTR(r13)
+ ld r11,LPPACASRR0(r11) /* get SRR0 value */
+END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
+#endif /* CONFIG_PPC_ISERIES */
+
+ mtlr r10
+
+ andi. r10,r12,MSR_RI /* check for unrecoverable exception */
+ beq- 2f
+
+.machine push
+.machine "power4"
+ mtcrf 0x80,r9
+ mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */
+.machine pop
+
+#ifdef CONFIG_PPC_ISERIES
+BEGIN_FW_FTR_SECTION
+ mtspr SPRN_SRR0,r11
+ mtspr SPRN_SRR1,r12
+END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
+#endif /* CONFIG_PPC_ISERIES */
+ ld r9,PACA_EXSLB+EX_R9(r13)
+ ld r10,PACA_EXSLB+EX_R10(r13)
+ ld r11,PACA_EXSLB+EX_R11(r13)
+ ld r12,PACA_EXSLB+EX_R12(r13)
+ ld r13,PACA_EXSLB+EX_R13(r13)
+ rfid
+ b . /* prevent speculative execution */
+
+2:
+#ifdef CONFIG_PPC_ISERIES
+BEGIN_FW_FTR_SECTION
+ b unrecov_slb
+END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
+#endif /* CONFIG_PPC_ISERIES */
+ mfspr r11,SPRN_SRR0
+ ld r10,PACAKBASE(r13)
+ LOAD_HANDLER(r10,unrecov_slb)
+ mtspr SPRN_SRR0,r10
+ ld r10,PACAKMSR(r13)
+ mtspr SPRN_SRR1,r10
+ rfid
+ b .
+
+unrecov_slb:
+ EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
+ DISABLE_INTS
+ bl .save_nvgprs
+1: addi r3,r1,STACK_FRAME_OVERHEAD
+ bl .unrecoverable_exception
+ b 1b
+
+ .align 7
+ .globl hardware_interrupt_common
+ .globl hardware_interrupt_entry
+hardware_interrupt_common:
+ EXCEPTION_PROLOG_COMMON(0x500, PACA_EXGEN)
+ FINISH_NAP
+hardware_interrupt_entry:
+ DISABLE_INTS
+BEGIN_FTR_SECTION
+ bl .ppc64_runlatch_on
+END_FTR_SECTION_IFSET(CPU_FTR_CTRL)
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl .do_IRQ
+ b .ret_from_except_lite
+
+#ifdef CONFIG_PPC_970_NAP
+power4_fixup_nap:
+ andc r9,r9,r10
+ std r9,TI_LOCAL_FLAGS(r11)
+ ld r10,_LINK(r1) /* make idle task do the */
+ std r10,_NIP(r1) /* equivalent of a blr */
+ blr
+#endif
+
+ .align 7
+ .globl alignment_common
+alignment_common:
+ mfspr r10,SPRN_DAR
+ std r10,PACA_EXGEN+EX_DAR(r13)
+ mfspr r10,SPRN_DSISR
+ stw r10,PACA_EXGEN+EX_DSISR(r13)
+ EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
+ ld r3,PACA_EXGEN+EX_DAR(r13)
+ lwz r4,PACA_EXGEN+EX_DSISR(r13)
+ std r3,_DAR(r1)
+ std r4,_DSISR(r1)
+ bl .save_nvgprs
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ ENABLE_INTS
+ bl .alignment_exception
+ b .ret_from_except
+
+ .align 7
+ .globl program_check_common
+program_check_common:
+ EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
+ bl .save_nvgprs
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ ENABLE_INTS
+ bl .program_check_exception
+ b .ret_from_except
+
+ .align 7
+ .globl fp_unavailable_common
+fp_unavailable_common:
+ EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
+ bne 1f /* if from user, just load it up */
+ bl .save_nvgprs
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ ENABLE_INTS
+ bl .kernel_fp_unavailable_exception
+ BUG_OPCODE
+1: bl .load_up_fpu
+ b fast_exception_return
+
+ .align 7
+ .globl altivec_unavailable_common
+altivec_unavailable_common:
+ EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
+#ifdef CONFIG_ALTIVEC
+BEGIN_FTR_SECTION
+ beq 1f
+ bl .load_up_altivec
+ b fast_exception_return
+1:
+END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
+#endif
+ bl .save_nvgprs
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ ENABLE_INTS
+ bl .altivec_unavailable_exception
+ b .ret_from_except
+
+ .align 7
+ .globl vsx_unavailable_common
+vsx_unavailable_common:
+ EXCEPTION_PROLOG_COMMON(0xf40, PACA_EXGEN)
+#ifdef CONFIG_VSX
+BEGIN_FTR_SECTION
+ bne .load_up_vsx
+1:
+END_FTR_SECTION_IFSET(CPU_FTR_VSX)
+#endif
+ bl .save_nvgprs
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ ENABLE_INTS
+ bl .vsx_unavailable_exception
+ b .ret_from_except
+
+ .align 7
+ .globl __end_handlers
+__end_handlers:
+
+/*
+ * Return from an exception with minimal checks.
+ * The caller is assumed to have done EXCEPTION_PROLOG_COMMON.
+ * If interrupts have been enabled, or anything has been
+ * done that might have changed the scheduling status of
+ * any task or sent any task a signal, you should use
+ * ret_from_except or ret_from_except_lite instead of this.
+ */
+fast_exc_return_irq: /* restores irq state too */
+ ld r3,SOFTE(r1)
+ TRACE_AND_RESTORE_IRQ(r3);
+ ld r12,_MSR(r1)
+ rldicl r4,r12,49,63 /* get MSR_EE to LSB */
+ stb r4,PACAHARDIRQEN(r13) /* restore paca->hard_enabled */
+ b 1f
+
+ .globl fast_exception_return
+fast_exception_return:
+ ld r12,_MSR(r1)
+1: ld r11,_NIP(r1)
+ andi. r3,r12,MSR_RI /* check if RI is set */
+ beq- unrecov_fer
+
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+ andi. r3,r12,MSR_PR
+ beq 2f
+ ACCOUNT_CPU_USER_EXIT(r3, r4)
+2:
+#endif
+
+ ld r3,_CCR(r1)
+ ld r4,_LINK(r1)
+ ld r5,_CTR(r1)
+ ld r6,_XER(r1)
+ mtcr r3
+ mtlr r4
+ mtctr r5
+ mtxer r6
+ REST_GPR(0, r1)
+ REST_8GPRS(2, r1)
+
+ mfmsr r10
+ rldicl r10,r10,48,1 /* clear EE */
+ rldicr r10,r10,16,61 /* clear RI (LE is 0 already) */
+ mtmsrd r10,1
+
+ mtspr SPRN_SRR1,r12
+ mtspr SPRN_SRR0,r11
+ REST_4GPRS(10, r1)
+ ld r1,GPR1(r1)
+ rfid
+ b . /* prevent speculative execution */
+
+unrecov_fer:
+ bl .save_nvgprs
+1: addi r3,r1,STACK_FRAME_OVERHEAD
+ bl .unrecoverable_exception
+ b 1b
+
+
+/*
+ * Hash table stuff
+ */
+ .align 7
+_STATIC(do_hash_page)
+ std r3,_DAR(r1)
+ std r4,_DSISR(r1)
+
+ andis. r0,r4,0xa450 /* weird error? */
+ bne- handle_page_fault /* if not, try to insert a HPTE */
+BEGIN_FTR_SECTION
+ andis. r0,r4,0x0020 /* Is it a segment table fault? */
+ bne- do_ste_alloc /* If so handle it */
+END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
+
+ /*
+ * On iSeries, we soft-disable interrupts here, then
+ * hard-enable interrupts so that the hash_page code can spin on
+ * the hash_table_lock without problems on a shared processor.
+ */
+ DISABLE_INTS
+
+ /*
+ * Currently, trace_hardirqs_off() will be called by DISABLE_INTS
+ * and will clobber volatile registers when irq tracing is enabled
+ * so we need to reload them. It may be possible to be smarter here
+ * and move the irq tracing elsewhere but let's keep it simple for
+ * now
+ */
+#ifdef CONFIG_TRACE_IRQFLAGS
+ ld r3,_DAR(r1)
+ ld r4,_DSISR(r1)
+ ld r5,_TRAP(r1)
+ ld r12,_MSR(r1)
+ clrrdi r5,r5,4
+#endif /* CONFIG_TRACE_IRQFLAGS */
+ /*
+ * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
+ * accessing a userspace segment (even from the kernel). We assume
+ * kernel addresses always have the high bit set.
+ */
+ rlwinm r4,r4,32-25+9,31-9,31-9 /* DSISR_STORE -> _PAGE_RW */
+ rotldi r0,r3,15 /* Move high bit into MSR_PR posn */
+ orc r0,r12,r0 /* MSR_PR | ~high_bit */
+ rlwimi r4,r0,32-13,30,30 /* becomes _PAGE_USER access bit */
+ ori r4,r4,1 /* add _PAGE_PRESENT */
+ rlwimi r4,r5,22+2,31-2,31-2 /* Set _PAGE_EXEC if trap is 0x400 */
+
+ /*
+ * r3 contains the faulting address
+ * r4 contains the required access permissions
+ * r5 contains the trap number
+ *
+ * at return r3 = 0 for success
+ */
+ bl .hash_page /* build HPTE if possible */
+ cmpdi r3,0 /* see if hash_page succeeded */
+
+BEGIN_FW_FTR_SECTION
+ /*
+ * If we had interrupts soft-enabled at the point where the
+ * DSI/ISI occurred, and an interrupt came in during hash_page,
+ * handle it now.
+ * We jump to ret_from_except_lite rather than fast_exception_return
+ * because ret_from_except_lite will check for and handle pending
+ * interrupts if necessary.
+ */
+ beq 13f
+END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
+
+BEGIN_FW_FTR_SECTION
+ /*
+ * Here we have interrupts hard-disabled, so it is sufficient
+ * to restore paca->{soft,hard}_enable and get out.
+ */
+ beq fast_exc_return_irq /* Return from exception on success */
+END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)
+
+ /* For a hash failure, we don't bother re-enabling interrupts */
+ ble- 12f
+
+ /*
+ * hash_page couldn't handle it, set soft interrupt enable back
+ * to what it was before the trap. Note that .raw_local_irq_restore
+ * handles any interrupts pending at this point.
+ */
+ ld r3,SOFTE(r1)
+ TRACE_AND_RESTORE_IRQ_PARTIAL(r3, 11f)
+ bl .raw_local_irq_restore
+ b 11f
+
+/* Here we have a page fault that hash_page can't handle. */
+handle_page_fault:
+ ENABLE_INTS
+11: ld r4,_DAR(r1)
+ ld r5,_DSISR(r1)
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl .do_page_fault
+ cmpdi r3,0
+ beq+ 13f
+ bl .save_nvgprs
+ mr r5,r3
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ lwz r4,_DAR(r1)
+ bl .bad_page_fault
+ b .ret_from_except
+
+13: b .ret_from_except_lite
+
+/* We have a page fault that hash_page could handle but HV refused
+ * the PTE insertion
+ */
+12: bl .save_nvgprs
+ mr r5,r3
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ ld r4,_DAR(r1)
+ bl .low_hash_fault
+ b .ret_from_except
+
+ /* here we have a segment miss */
+do_ste_alloc:
+ bl .ste_allocate /* try to insert stab entry */
+ cmpdi r3,0
+ bne- handle_page_fault
+ b fast_exception_return
+
+/*
+ * r13 points to the PACA, r9 contains the saved CR,
+ * r11 and r12 contain the saved SRR0 and SRR1.
+ * r9 - r13 are saved in paca->exslb.
+ * We assume we aren't going to take any exceptions during this procedure.
+ * We assume (DAR >> 60) == 0xc.
+ */
+ .align 7
+_GLOBAL(do_stab_bolted)
+ stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
+ std r11,PACA_EXSLB+EX_SRR0(r13) /* save SRR0 in exc. frame */
+
+ /* Hash to the primary group */
+ ld r10,PACASTABVIRT(r13)
+ mfspr r11,SPRN_DAR
+ srdi r11,r11,28
+ rldimi r10,r11,7,52 /* r10 = first ste of the group */
+
+ /* Calculate VSID */
+ /* This is a kernel address, so protovsid = ESID */
+ ASM_VSID_SCRAMBLE(r11, r9, 256M)
+ rldic r9,r11,12,16 /* r9 = vsid << 12 */
+
+ /* Search the primary group for a free entry */
+1: ld r11,0(r10) /* Test valid bit of the current ste */
+ andi. r11,r11,0x80
+ beq 2f
+ addi r10,r10,16
+ andi. r11,r10,0x70
+ bne 1b
+
+ /* Stick to only searching the primary group for now. */
+ /* At least for now, we use a very simple random castout scheme */
+ /* Use the TB as a random number ; OR in 1 to avoid entry 0 */
+ mftb r11
+ rldic r11,r11,4,57 /* r11 = (r11 << 4) & 0x70 */
+ ori r11,r11,0x10
+
+ /* r10 currently points to an ste one past the group of interest */
+ /* make it point to the randomly selected entry */
+ subi r10,r10,128
+ or r10,r10,r11 /* r10 is the entry to invalidate */
+
+ isync /* mark the entry invalid */
+ ld r11,0(r10)
+ rldicl r11,r11,56,1 /* clear the valid bit */
+ rotldi r11,r11,8
+ std r11,0(r10)
+ sync
+
+ clrrdi r11,r11,28 /* Get the esid part of the ste */
+ slbie r11
+
+2: std r9,8(r10) /* Store the vsid part of the ste */
+ eieio
+
+ mfspr r11,SPRN_DAR /* Get the new esid */
+ clrrdi r11,r11,28 /* Permits a full 32b of ESID */
+ ori r11,r11,0x90 /* Turn on valid and kp */
+ std r11,0(r10) /* Put new entry back into the stab */
+
+ sync
+
+ /* All done -- return from exception. */
+ lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
+ ld r11,PACA_EXSLB+EX_SRR0(r13) /* get saved SRR0 */
+
+ andi. r10,r12,MSR_RI
+ beq- unrecov_slb
+
+ mtcrf 0x80,r9 /* restore CR */
+
+ mfmsr r10
+ clrrdi r10,r10,2
+ mtmsrd r10,1
+
+ mtspr SPRN_SRR0,r11
+ mtspr SPRN_SRR1,r12
+ ld r9,PACA_EXSLB+EX_R9(r13)
+ ld r10,PACA_EXSLB+EX_R10(r13)
+ ld r11,PACA_EXSLB+EX_R11(r13)
+ ld r12,PACA_EXSLB+EX_R12(r13)
+ ld r13,PACA_EXSLB+EX_R13(r13)
+ rfid
+ b . /* prevent speculative execution */
+
+/*
+ * Space for CPU0's segment table.
+ *
+ * On iSeries, the hypervisor must fill in at least one entry before
+ * we get control (with relocate on). The address is given to the hv
+ * as a page number (see xLparMap below), so this must be at a
+ * fixed address (the linker can't compute (u64)&initial_stab >>
+ * PAGE_SHIFT).
+ */
+ . = STAB0_OFFSET /* 0x6000 */
+ .globl initial_stab
+initial_stab:
+ .space 4096
+
+#ifdef CONFIG_PPC_PSERIES
+/*
+ * Data area reserved for FWNMI option.
+ * This address (0x7000) is fixed by the RPA.
+ */
+ . = 0x7000
+ .globl fwnmi_data_area
+fwnmi_data_area:
+#endif /* CONFIG_PPC_PSERIES */
+
+ /* iSeries does not use the FWNMI stuff, so it is safe to put
+ * this here, even if we later allow kernels that will boot on
+ * both pSeries and iSeries */
+#ifdef CONFIG_PPC_ISERIES
+ . = LPARMAP_PHYS
+ .globl xLparMap
+xLparMap:
+ .quad HvEsidsToMap /* xNumberEsids */
+ .quad HvRangesToMap /* xNumberRanges */
+ .quad STAB0_PAGE /* xSegmentTableOffs */
+ .zero 40 /* xRsvd */
+ /* xEsids (HvEsidsToMap entries of 2 quads) */
+ .quad PAGE_OFFSET_ESID /* xKernelEsid */
+ .quad PAGE_OFFSET_VSID /* xKernelVsid */
+ .quad VMALLOC_START_ESID /* xKernelEsid */
+ .quad VMALLOC_START_VSID /* xKernelVsid */
+ /* xRanges (HvRangesToMap entries of 3 quads) */
+ .quad HvPagesToMap /* xPages */
+ .quad 0 /* xOffset */
+ .quad PAGE_OFFSET_VSID << (SID_SHIFT - HW_PAGE_SHIFT) /* xVPN */
+
+#endif /* CONFIG_PPC_ISERIES */
+
+#ifdef CONFIG_PPC_PSERIES
+ . = 0x8000
+#endif /* CONFIG_PPC_PSERIES */
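
The access-permission mask that do_hash_page builds above packs several decisions into a short rotate-and-insert sequence. As a rough C restatement only (the helper name and the constant values below are illustrative stand-ins, not the kernel's _PAGE_* or DSISR definitions), the same logic reads as:

/* Illustrative sketch: stand-in constants, not the kernel's definitions. */
#define PG_PRESENT	0x001UL
#define PG_USER		0x002UL
#define PG_RW		0x004UL
#define PG_EXEC		0x008UL
#define MSR_PR_BIT	0x4000UL	/* problem-state (user mode) bit in the MSR */
#define DSISR_STORE_BIT	0x02000000UL	/* "fault was a store" bit in the DSISR */

static unsigned long hash_fault_access(unsigned long dar, unsigned long dsisr,
					unsigned long msr, unsigned long trap)
{
	unsigned long access = PG_PRESENT;

	if (dsisr & DSISR_STORE_BIT)		/* a store needs write permission */
		access |= PG_RW;
	/* _PAGE_USER if we faulted in user mode or on a user segment;
	 * kernel addresses are assumed to have the high bit set. */
	if ((msr & MSR_PR_BIT) || !(dar >> 63))
		access |= PG_USER;
	if (trap == 0x400)			/* instruction fault (ISI) */
		access |= PG_EXEC;
	return access;
}

hash_page() is then called with the faulting address, this access mask and the trap number, and returns 0 on success, matching the register interface documented in the comment before the bl .hash_page above.
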
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S
index c01467f952d..6437f905c56 100644
--- a/arch/powerpc/kernel/head_32.S
+++ b/arch/powerpc/kernel/head_32.S
@@ -743,101 +743,6 @@ PerformanceMonitor:
addi r3,r1,STACK_FRAME_OVERHEAD
EXC_XFER_STD(0xf00, performance_monitor_exception)
-#ifdef CONFIG_ALTIVEC
-/* Note that the AltiVec support is closely modeled after the FP
- * support. Changes to one are likely to be applicable to the
- * other! */
-load_up_altivec:
-/*
- * Disable AltiVec for the task which had AltiVec previously,
- * and save its AltiVec registers in its thread_struct.
- * Enables AltiVec for use in the kernel on return.
- * On SMP we know the AltiVec units are free, since we give it up every
- * switch. -- Kumar
- */
- mfmsr r5
- oris r5,r5,MSR_VEC@h
- MTMSRD(r5) /* enable use of AltiVec now */
- isync
-/*
- * For SMP, we don't do lazy AltiVec switching because it just gets too
- * horrendously complex, especially when a task switches from one CPU
- * to another. Instead we call giveup_altivec in switch_to.
- */
-#ifndef CONFIG_SMP
- tophys(r6,0)
- addis r3,r6,last_task_used_altivec@ha
- lwz r4,last_task_used_altivec@l(r3)
- cmpwi 0,r4,0
- beq 1f
- add r4,r4,r6
- addi r4,r4,THREAD /* want THREAD of last_task_used_altivec */
- SAVE_32VRS(0,r10,r4)
- mfvscr vr0
- li r10,THREAD_VSCR
- stvx vr0,r10,r4
- lwz r5,PT_REGS(r4)
- add r5,r5,r6
- lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5)
- lis r10,MSR_VEC@h
- andc r4,r4,r10 /* disable altivec for previous task */
- stw r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-1:
-#endif /* CONFIG_SMP */
- /* enable use of AltiVec after return */
- oris r9,r9,MSR_VEC@h
- mfspr r5,SPRN_SPRG3 /* current task's THREAD (phys) */
- li r4,1
- li r10,THREAD_VSCR
- stw r4,THREAD_USED_VR(r5)
- lvx vr0,r10,r5
- mtvscr vr0
- REST_32VRS(0,r10,r5)
-#ifndef CONFIG_SMP
- subi r4,r5,THREAD
- sub r4,r4,r6
- stw r4,last_task_used_altivec@l(r3)
-#endif /* CONFIG_SMP */
- /* restore registers and return */
- /* we haven't used ctr or xer or lr */
- b fast_exception_return
-
-/*
- * giveup_altivec(tsk)
- * Disable AltiVec for the task given as the argument,
- * and save the AltiVec registers in its thread_struct.
- * Enables AltiVec for use in the kernel on return.
- */
-
- .globl giveup_altivec
-giveup_altivec:
- mfmsr r5
- oris r5,r5,MSR_VEC@h
- SYNC
- MTMSRD(r5) /* enable use of AltiVec now */
- isync
- cmpwi 0,r3,0
- beqlr- /* if no previous owner, done */
- addi r3,r3,THREAD /* want THREAD of task */
- lwz r5,PT_REGS(r3)
- cmpwi 0,r5,0
- SAVE_32VRS(0, r4, r3)
- mfvscr vr0
- li r4,THREAD_VSCR
- stvx vr0,r4,r3
- beq 1f
- lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5)
- lis r3,MSR_VEC@h
- andc r4,r4,r3 /* disable AltiVec for previous task */
- stw r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-1:
-#ifndef CONFIG_SMP
- li r5,0
- lis r4,last_task_used_altivec@ha
- stw r5,last_task_used_altivec@l(r4)
-#endif /* CONFIG_SMP */
- blr
-#endif /* CONFIG_ALTIVEC */
/*
* This code is jumped to from the startup code to copy
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 50ef505b8fb..012505ebd9f 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -12,8 +12,9 @@
* Adapted for 64bit PowerPC by Dave Engebretsen, Peter Bergner, and
* Mike Corrigan {engebret|bergner|mikejc}@us.ibm.com
*
- * This file contains the low-level support and setup for the
- * PowerPC-64 platform, including trap and interrupt dispatch.
+ * This file contains the entry point for the 64-bit kernel along
+ * with some early initialization code common to all 64-bit powerpc
+ * variants.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -38,36 +39,25 @@
#include <asm/exception.h>
#include <asm/irqflags.h>
-/*
- * We layout physical memory as follows:
- * 0x0000 - 0x00ff : Secondary processor spin code
- * 0x0100 - 0x2fff : pSeries Interrupt prologs
- * 0x3000 - 0x5fff : interrupt support, iSeries and common interrupt prologs
- * 0x6000 - 0x6fff : Initial (CPU0) segment table
- * 0x7000 - 0x7fff : FWNMI data area
- * 0x8000 - : Early init and support code
- */
-
-/*
- * SPRG Usage
- *
- * Register Definition
- *
- * SPRG0 reserved for hypervisor
- * SPRG1 temp - used to save gpr
- * SPRG2 temp - used to save gpr
- * SPRG3 virt addr of paca
+/* The physical memory is laid out such that the secondary processor
+ * spin code sits at 0x0000...0x00ff. On server, the vectors follow
+ * using the layout described in exceptions-64s.S
*/
/*
* Entering into this code we make the following assumptions:
- * For pSeries:
+ *
+ * For pSeries or server processors:
* 1. The MMU is off & open firmware is running in real mode.
* 2. The kernel is entered at __start
*
* For iSeries:
* 1. The MMU is on (as it always is for iSeries)
* 2. The kernel is entered at system_reset_iSeries
+ *
+ * For Book3E processors:
+ * 1. The MMU is on, running in AS0 in a state defined in ePAPR
+ * 2. The kernel is entered at __start
*/
.text
@@ -166,1065 +156,14 @@ exception_marker:
.text
/*
- * This is the start of the interrupt handlers for pSeries
- * This code runs with relocation off.
- * Code from here to __end_interrupts gets copied down to real
- * address 0x100 when we are running a relocatable kernel.
- * Therefore any relative branches in this section must only
- * branch to labels in this section.
- */
- . = 0x100
- .globl __start_interrupts
-__start_interrupts:
-
- STD_EXCEPTION_PSERIES(0x100, system_reset)
-
- . = 0x200
-_machine_check_pSeries:
- HMT_MEDIUM
- mtspr SPRN_SPRG1,r13 /* save r13 */
- EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
-
- . = 0x300
- .globl data_access_pSeries
-data_access_pSeries:
- HMT_MEDIUM
- mtspr SPRN_SPRG1,r13
-BEGIN_FTR_SECTION
- mtspr SPRN_SPRG2,r12
- mfspr r13,SPRN_DAR
- mfspr r12,SPRN_DSISR
- srdi r13,r13,60
- rlwimi r13,r12,16,0x20
- mfcr r12
- cmpwi r13,0x2c
- beq do_stab_bolted_pSeries
- mtcrf 0x80,r12
- mfspr r12,SPRN_SPRG2
-END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
- EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common)
-
- . = 0x380
- .globl data_access_slb_pSeries
-data_access_slb_pSeries:
- HMT_MEDIUM
- mtspr SPRN_SPRG1,r13
- mfspr r13,SPRN_SPRG3 /* get paca address into r13 */
- std r3,PACA_EXSLB+EX_R3(r13)
- mfspr r3,SPRN_DAR
- std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */
- mfcr r9
-#ifdef __DISABLED__
- /* Keep that around for when we re-implement dynamic VSIDs */
- cmpdi r3,0
- bge slb_miss_user_pseries
-#endif /* __DISABLED__ */
- std r10,PACA_EXSLB+EX_R10(r13)
- std r11,PACA_EXSLB+EX_R11(r13)
- std r12,PACA_EXSLB+EX_R12(r13)
- mfspr r10,SPRN_SPRG1
- std r10,PACA_EXSLB+EX_R13(r13)
- mfspr r12,SPRN_SRR1 /* and SRR1 */
-#ifndef CONFIG_RELOCATABLE
- b .slb_miss_realmode
-#else
- /*
- * We can't just use a direct branch to .slb_miss_realmode
- * because the distance from here to there depends on where
- * the kernel ends up being put.
- */
- mfctr r11
- ld r10,PACAKBASE(r13)
- LOAD_HANDLER(r10, .slb_miss_realmode)
- mtctr r10
- bctr
-#endif
-
- STD_EXCEPTION_PSERIES(0x400, instruction_access)
-
- . = 0x480
- .globl instruction_access_slb_pSeries
-instruction_access_slb_pSeries:
- HMT_MEDIUM
- mtspr SPRN_SPRG1,r13
- mfspr r13,SPRN_SPRG3 /* get paca address into r13 */
- std r3,PACA_EXSLB+EX_R3(r13)
- mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */
- std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */
- mfcr r9
-#ifdef __DISABLED__
- /* Keep that around for when we re-implement dynamic VSIDs */
- cmpdi r3,0
- bge slb_miss_user_pseries
-#endif /* __DISABLED__ */
- std r10,PACA_EXSLB+EX_R10(r13)
- std r11,PACA_EXSLB+EX_R11(r13)
- std r12,PACA_EXSLB+EX_R12(r13)
- mfspr r10,SPRN_SPRG1
- std r10,PACA_EXSLB+EX_R13(r13)
- mfspr r12,SPRN_SRR1 /* and SRR1 */
-#ifndef CONFIG_RELOCATABLE
- b .slb_miss_realmode
-#else
- mfctr r11
- ld r10,PACAKBASE(r13)
- LOAD_HANDLER(r10, .slb_miss_realmode)
- mtctr r10
- bctr
-#endif
-
- MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt)
- STD_EXCEPTION_PSERIES(0x600, alignment)
- STD_EXCEPTION_PSERIES(0x700, program_check)
- STD_EXCEPTION_PSERIES(0x800, fp_unavailable)
- MASKABLE_EXCEPTION_PSERIES(0x900, decrementer)
- STD_EXCEPTION_PSERIES(0xa00, trap_0a)
- STD_EXCEPTION_PSERIES(0xb00, trap_0b)
-
- . = 0xc00
- .globl system_call_pSeries
-system_call_pSeries:
- HMT_MEDIUM
-BEGIN_FTR_SECTION
- cmpdi r0,0x1ebe
- beq- 1f
-END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
- mr r9,r13
- mfspr r13,SPRN_SPRG3
- mfspr r11,SPRN_SRR0
- ld r12,PACAKBASE(r13)
- ld r10,PACAKMSR(r13)
- LOAD_HANDLER(r12, system_call_entry)
- mtspr SPRN_SRR0,r12
- mfspr r12,SPRN_SRR1
- mtspr SPRN_SRR1,r10
- rfid
- b . /* prevent speculative execution */
-
-/* Fast LE/BE switch system call */
-1: mfspr r12,SPRN_SRR1
- xori r12,r12,MSR_LE
- mtspr SPRN_SRR1,r12
- rfid /* return to userspace */
- b .
-
- STD_EXCEPTION_PSERIES(0xd00, single_step)
- STD_EXCEPTION_PSERIES(0xe00, trap_0e)
-
- /* We need to deal with the Altivec unavailable exception
- * here which is at 0xf20, thus in the middle of the
- * prolog code of the PerformanceMonitor one. A little
- * trickery is thus necessary
- */
- . = 0xf00
- b performance_monitor_pSeries
-
- . = 0xf20
- b altivec_unavailable_pSeries
-
- . = 0xf40
- b vsx_unavailable_pSeries
-
-#ifdef CONFIG_CBE_RAS
- HSTD_EXCEPTION_PSERIES(0x1200, cbe_system_error)
-#endif /* CONFIG_CBE_RAS */
- STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint)
-#ifdef CONFIG_CBE_RAS
- HSTD_EXCEPTION_PSERIES(0x1600, cbe_maintenance)
-#endif /* CONFIG_CBE_RAS */
- STD_EXCEPTION_PSERIES(0x1700, altivec_assist)
-#ifdef CONFIG_CBE_RAS
- HSTD_EXCEPTION_PSERIES(0x1800, cbe_thermal)
-#endif /* CONFIG_CBE_RAS */
-
- . = 0x3000
-
-/*** pSeries interrupt support ***/
-
- /* moved from 0xf00 */
- STD_EXCEPTION_PSERIES(., performance_monitor)
- STD_EXCEPTION_PSERIES(., altivec_unavailable)
- STD_EXCEPTION_PSERIES(., vsx_unavailable)
-
-/*
- * An interrupt came in while soft-disabled; clear EE in SRR1,
- * clear paca->hard_enabled and return.
- */
-masked_interrupt:
- stb r10,PACAHARDIRQEN(r13)
- mtcrf 0x80,r9
- ld r9,PACA_EXGEN+EX_R9(r13)
- mfspr r10,SPRN_SRR1
- rldicl r10,r10,48,1 /* clear MSR_EE */
- rotldi r10,r10,16
- mtspr SPRN_SRR1,r10
- ld r10,PACA_EXGEN+EX_R10(r13)
- mfspr r13,SPRN_SPRG1
- rfid
- b .
-
- .align 7
-do_stab_bolted_pSeries:
- mtcrf 0x80,r12
- mfspr r12,SPRN_SPRG2
- EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted)
-
-#ifdef CONFIG_PPC_PSERIES
-/*
- * Vectors for the FWNMI option. Share common code.
- */
- .globl system_reset_fwnmi
- .align 7
-system_reset_fwnmi:
- HMT_MEDIUM
- mtspr SPRN_SPRG1,r13 /* save r13 */
- EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common)
-
- .globl machine_check_fwnmi
- .align 7
-machine_check_fwnmi:
- HMT_MEDIUM
- mtspr SPRN_SPRG1,r13 /* save r13 */
- EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
-
-#endif /* CONFIG_PPC_PSERIES */
-
-#ifdef __DISABLED__
-/*
- * This is used for when the SLB miss handler has to go virtual,
- * which doesn't happen for now anymore but will once we re-implement
- * dynamic VSIDs for shared page tables
- */
-slb_miss_user_pseries:
- std r10,PACA_EXGEN+EX_R10(r13)
- std r11,PACA_EXGEN+EX_R11(r13)
- std r12,PACA_EXGEN+EX_R12(r13)
- mfspr r10,SPRG1
- ld r11,PACA_EXSLB+EX_R9(r13)
- ld r12,PACA_EXSLB+EX_R3(r13)
- std r10,PACA_EXGEN+EX_R13(r13)
- std r11,PACA_EXGEN+EX_R9(r13)
- std r12,PACA_EXGEN+EX_R3(r13)
- clrrdi r12,r13,32
- mfmsr r10
- mfspr r11,SRR0 /* save SRR0 */
- ori r12,r12,slb_miss_user_common@l /* virt addr of handler */
- ori r10,r10,MSR_IR|MSR_DR|MSR_RI
- mtspr SRR0,r12
- mfspr r12,SRR1 /* and SRR1 */
- mtspr SRR1,r10
- rfid
- b . /* prevent spec. execution */
-#endif /* __DISABLED__ */
-
- .align 7
- .globl __end_interrupts
-__end_interrupts:
-
-/*
- * Code from here down to __end_handlers is invoked from the
- * exception prologs above. Because the prologs assemble the
- * addresses of these handlers using the LOAD_HANDLER macro,
- * which uses an addi instruction, these handlers must be in
- * the first 32k of the kernel image.
- */
-
-/*** Common interrupt handlers ***/
-
- STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception)
-
- /*
- * Machine check is different because we use a different
- * save area: PACA_EXMC instead of PACA_EXGEN.
- */
- .align 7
- .globl machine_check_common
-machine_check_common:
- EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
- FINISH_NAP
- DISABLE_INTS
- bl .save_nvgprs
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl .machine_check_exception
- b .ret_from_except
-
- STD_EXCEPTION_COMMON_LITE(0x900, decrementer, .timer_interrupt)
- STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception)
- STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
- STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
- STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
- STD_EXCEPTION_COMMON_IDLE(0xf00, performance_monitor, .performance_monitor_exception)
- STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
-#ifdef CONFIG_ALTIVEC
- STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
-#else
- STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception)
-#endif
-#ifdef CONFIG_CBE_RAS
- STD_EXCEPTION_COMMON(0x1200, cbe_system_error, .cbe_system_error_exception)
- STD_EXCEPTION_COMMON(0x1600, cbe_maintenance, .cbe_maintenance_exception)
- STD_EXCEPTION_COMMON(0x1800, cbe_thermal, .cbe_thermal_exception)
-#endif /* CONFIG_CBE_RAS */
-
- .align 7
-system_call_entry:
- b system_call_common
-
-/*
- * Here we have detected that the kernel stack pointer is bad.
- * R9 contains the saved CR, r13 points to the paca,
- * r10 contains the (bad) kernel stack pointer,
- * r11 and r12 contain the saved SRR0 and SRR1.
- * We switch to using an emergency stack, save the registers there,
- * and call kernel_bad_stack(), which panics.
- */
-bad_stack:
- ld r1,PACAEMERGSP(r13)
- subi r1,r1,64+INT_FRAME_SIZE
- std r9,_CCR(r1)
- std r10,GPR1(r1)
- std r11,_NIP(r1)
- std r12,_MSR(r1)
- mfspr r11,SPRN_DAR
- mfspr r12,SPRN_DSISR
- std r11,_DAR(r1)
- std r12,_DSISR(r1)
- mflr r10
- mfctr r11
- mfxer r12
- std r10,_LINK(r1)
- std r11,_CTR(r1)
- std r12,_XER(r1)
- SAVE_GPR(0,r1)
- SAVE_GPR(2,r1)
- SAVE_4GPRS(3,r1)
- SAVE_2GPRS(7,r1)
- SAVE_10GPRS(12,r1)
- SAVE_10GPRS(22,r1)
- lhz r12,PACA_TRAP_SAVE(r13)
- std r12,_TRAP(r1)
- addi r11,r1,INT_FRAME_SIZE
- std r11,0(r1)
- li r12,0
- std r12,0(r11)
- ld r2,PACATOC(r13)
-1: addi r3,r1,STACK_FRAME_OVERHEAD
- bl .kernel_bad_stack
- b 1b
-
-/*
- * Here r13 points to the paca, r9 contains the saved CR,
- * SRR0 and SRR1 are saved in r11 and r12,
- * r9 - r13 are saved in paca->exgen.
- */
- .align 7
- .globl data_access_common
-data_access_common:
- mfspr r10,SPRN_DAR
- std r10,PACA_EXGEN+EX_DAR(r13)
- mfspr r10,SPRN_DSISR
- stw r10,PACA_EXGEN+EX_DSISR(r13)
- EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
- ld r3,PACA_EXGEN+EX_DAR(r13)
- lwz r4,PACA_EXGEN+EX_DSISR(r13)
- li r5,0x300
- b .do_hash_page /* Try to handle as hpte fault */
-
- .align 7
- .globl instruction_access_common
-instruction_access_common:
- EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
- ld r3,_NIP(r1)
- andis. r4,r12,0x5820
- li r5,0x400
- b .do_hash_page /* Try to handle as hpte fault */
-
-/*
- * Here is the common SLB miss user that is used when going to virtual
- * mode for SLB misses, that is currently not used
- */
-#ifdef __DISABLED__
- .align 7
- .globl slb_miss_user_common
-slb_miss_user_common:
- mflr r10
- std r3,PACA_EXGEN+EX_DAR(r13)
- stw r9,PACA_EXGEN+EX_CCR(r13)
- std r10,PACA_EXGEN+EX_LR(r13)
- std r11,PACA_EXGEN+EX_SRR0(r13)
- bl .slb_allocate_user
-
- ld r10,PACA_EXGEN+EX_LR(r13)
- ld r3,PACA_EXGEN+EX_R3(r13)
- lwz r9,PACA_EXGEN+EX_CCR(r13)
- ld r11,PACA_EXGEN+EX_SRR0(r13)
- mtlr r10
- beq- slb_miss_fault
-
- andi. r10,r12,MSR_RI /* check for unrecoverable exception */
- beq- unrecov_user_slb
- mfmsr r10
-
-.machine push
-.machine "power4"
- mtcrf 0x80,r9
-.machine pop
-
- clrrdi r10,r10,2 /* clear RI before setting SRR0/1 */
- mtmsrd r10,1
-
- mtspr SRR0,r11
- mtspr SRR1,r12
-
- ld r9,PACA_EXGEN+EX_R9(r13)
- ld r10,PACA_EXGEN+EX_R10(r13)
- ld r11,PACA_EXGEN+EX_R11(r13)
- ld r12,PACA_EXGEN+EX_R12(r13)
- ld r13,PACA_EXGEN+EX_R13(r13)
- rfid
- b .
-
-slb_miss_fault:
- EXCEPTION_PROLOG_COMMON(0x380, PACA_EXGEN)
- ld r4,PACA_EXGEN+EX_DAR(r13)
- li r5,0
- std r4,_DAR(r1)
- std r5,_DSISR(r1)
- b handle_page_fault
-
-unrecov_user_slb:
- EXCEPTION_PROLOG_COMMON(0x4200, PACA_EXGEN)
- DISABLE_INTS
- bl .save_nvgprs
-1: addi r3,r1,STACK_FRAME_OVERHEAD
- bl .unrecoverable_exception
- b 1b
-
-#endif /* __DISABLED__ */
-
-
-/*
- * r13 points to the PACA, r9 contains the saved CR,
- * r12 contain the saved SRR1, SRR0 is still ready for return
- * r3 has the faulting address
- * r9 - r13 are saved in paca->exslb.
- * r3 is saved in paca->slb_r3
- * We assume we aren't going to take any exceptions during this procedure.
- */
-_GLOBAL(slb_miss_realmode)
- mflr r10
-#ifdef CONFIG_RELOCATABLE
- mtctr r11
-#endif
-
- stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
- std r10,PACA_EXSLB+EX_LR(r13) /* save LR */
-
- bl .slb_allocate_realmode
-
- /* All done -- return from exception. */
-
- ld r10,PACA_EXSLB+EX_LR(r13)
- ld r3,PACA_EXSLB+EX_R3(r13)
- lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
-#ifdef CONFIG_PPC_ISERIES
-BEGIN_FW_FTR_SECTION
- ld r11,PACALPPACAPTR(r13)
- ld r11,LPPACASRR0(r11) /* get SRR0 value */
-END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
-#endif /* CONFIG_PPC_ISERIES */
-
- mtlr r10
-
- andi. r10,r12,MSR_RI /* check for unrecoverable exception */
- beq- 2f
-
-.machine push
-.machine "power4"
- mtcrf 0x80,r9
- mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */
-.machine pop
-
-#ifdef CONFIG_PPC_ISERIES
-BEGIN_FW_FTR_SECTION
- mtspr SPRN_SRR0,r11
- mtspr SPRN_SRR1,r12
-END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
-#endif /* CONFIG_PPC_ISERIES */
- ld r9,PACA_EXSLB+EX_R9(r13)
- ld r10,PACA_EXSLB+EX_R10(r13)
- ld r11,PACA_EXSLB+EX_R11(r13)
- ld r12,PACA_EXSLB+EX_R12(r13)
- ld r13,PACA_EXSLB+EX_R13(r13)
- rfid
- b . /* prevent speculative execution */
-
-2:
-#ifdef CONFIG_PPC_ISERIES
-BEGIN_FW_FTR_SECTION
- b unrecov_slb
-END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
-#endif /* CONFIG_PPC_ISERIES */
- mfspr r11,SPRN_SRR0
- ld r10,PACAKBASE(r13)
- LOAD_HANDLER(r10,unrecov_slb)
- mtspr SPRN_SRR0,r10
- ld r10,PACAKMSR(r13)
- mtspr SPRN_SRR1,r10
- rfid
- b .
-
-unrecov_slb:
- EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
- DISABLE_INTS
- bl .save_nvgprs
-1: addi r3,r1,STACK_FRAME_OVERHEAD
- bl .unrecoverable_exception
- b 1b
-
- .align 7
- .globl hardware_interrupt_common
- .globl hardware_interrupt_entry
-hardware_interrupt_common:
- EXCEPTION_PROLOG_COMMON(0x500, PACA_EXGEN)
- FINISH_NAP
-hardware_interrupt_entry:
- DISABLE_INTS
-BEGIN_FTR_SECTION
- bl .ppc64_runlatch_on
-END_FTR_SECTION_IFSET(CPU_FTR_CTRL)
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl .do_IRQ
- b .ret_from_except_lite
-
-#ifdef CONFIG_PPC_970_NAP
-power4_fixup_nap:
- andc r9,r9,r10
- std r9,TI_LOCAL_FLAGS(r11)
- ld r10,_LINK(r1) /* make idle task do the */
- std r10,_NIP(r1) /* equivalent of a blr */
- blr
-#endif
-
- .align 7
- .globl alignment_common
-alignment_common:
- mfspr r10,SPRN_DAR
- std r10,PACA_EXGEN+EX_DAR(r13)
- mfspr r10,SPRN_DSISR
- stw r10,PACA_EXGEN+EX_DSISR(r13)
- EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
- ld r3,PACA_EXGEN+EX_DAR(r13)
- lwz r4,PACA_EXGEN+EX_DSISR(r13)
- std r3,_DAR(r1)
- std r4,_DSISR(r1)
- bl .save_nvgprs
- addi r3,r1,STACK_FRAME_OVERHEAD
- ENABLE_INTS
- bl .alignment_exception
- b .ret_from_except
-
- .align 7
- .globl program_check_common
-program_check_common:
- EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
- bl .save_nvgprs
- addi r3,r1,STACK_FRAME_OVERHEAD
- ENABLE_INTS
- bl .program_check_exception
- b .ret_from_except
-
- .align 7
- .globl fp_unavailable_common
-fp_unavailable_common:
- EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
- bne 1f /* if from user, just load it up */
- bl .save_nvgprs
- addi r3,r1,STACK_FRAME_OVERHEAD
- ENABLE_INTS
- bl .kernel_fp_unavailable_exception
- BUG_OPCODE
-1: bl .load_up_fpu
- b fast_exception_return
-
- .align 7
- .globl altivec_unavailable_common
-altivec_unavailable_common:
- EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
-#ifdef CONFIG_ALTIVEC
-BEGIN_FTR_SECTION
- beq 1f
- bl .load_up_altivec
- b fast_exception_return
-1:
-END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
-#endif
- bl .save_nvgprs
- addi r3,r1,STACK_FRAME_OVERHEAD
- ENABLE_INTS
- bl .altivec_unavailable_exception
- b .ret_from_except
-
- .align 7
- .globl vsx_unavailable_common
-vsx_unavailable_common:
- EXCEPTION_PROLOG_COMMON(0xf40, PACA_EXGEN)
-#ifdef CONFIG_VSX
-BEGIN_FTR_SECTION
- bne .load_up_vsx
-1:
-END_FTR_SECTION_IFSET(CPU_FTR_VSX)
-#endif
- bl .save_nvgprs
- addi r3,r1,STACK_FRAME_OVERHEAD
- ENABLE_INTS
- bl .vsx_unavailable_exception
- b .ret_from_except
-
- .align 7
- .globl __end_handlers
-__end_handlers:
-
-/*
- * Return from an exception with minimal checks.
- * The caller is assumed to have done EXCEPTION_PROLOG_COMMON.
- * If interrupts have been enabled, or anything has been
- * done that might have changed the scheduling status of
- * any task or sent any task a signal, you should use
- * ret_from_except or ret_from_except_lite instead of this.
+ * On server, we include the exception vectors code here as it
+ * relies on absolute addressing which is only possible within
+ * this compilation unit
*/
-fast_exc_return_irq: /* restores irq state too */
- ld r3,SOFTE(r1)
- TRACE_AND_RESTORE_IRQ(r3);
- ld r12,_MSR(r1)
- rldicl r4,r12,49,63 /* get MSR_EE to LSB */
- stb r4,PACAHARDIRQEN(r13) /* restore paca->hard_enabled */
- b 1f
-
- .globl fast_exception_return
-fast_exception_return:
- ld r12,_MSR(r1)
-1: ld r11,_NIP(r1)
- andi. r3,r12,MSR_RI /* check if RI is set */
- beq- unrecov_fer
-
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
- andi. r3,r12,MSR_PR
- beq 2f
- ACCOUNT_CPU_USER_EXIT(r3, r4)
-2:
+#ifdef CONFIG_PPC_BOOK3S
+#include "exceptions-64s.S"
#endif
- ld r3,_CCR(r1)
- ld r4,_LINK(r1)
- ld r5,_CTR(r1)
- ld r6,_XER(r1)
- mtcr r3
- mtlr r4
- mtctr r5
- mtxer r6
- REST_GPR(0, r1)
- REST_8GPRS(2, r1)
-
- mfmsr r10
- rldicl r10,r10,48,1 /* clear EE */
- rldicr r10,r10,16,61 /* clear RI (LE is 0 already) */
- mtmsrd r10,1
-
- mtspr SPRN_SRR1,r12
- mtspr SPRN_SRR0,r11
- REST_4GPRS(10, r1)
- ld r1,GPR1(r1)
- rfid
- b . /* prevent speculative execution */
-
-unrecov_fer:
- bl .save_nvgprs
-1: addi r3,r1,STACK_FRAME_OVERHEAD
- bl .unrecoverable_exception
- b 1b
-
-#ifdef CONFIG_ALTIVEC
-/*
- * load_up_altivec(unused, unused, tsk)
- * Disable VMX for the task which had it previously,
- * and save its vector registers in its thread_struct.
- * Enables the VMX for use in the kernel on return.
- * On SMP we know the VMX is free, since we give it up every
- * switch (ie, no lazy save of the vector registers).
- * On entry: r13 == 'current' && last_task_used_altivec != 'current'
- */
-_STATIC(load_up_altivec)
- mfmsr r5 /* grab the current MSR */
- oris r5,r5,MSR_VEC@h
- mtmsrd r5 /* enable use of VMX now */
- isync
-
-/*
- * For SMP, we don't do lazy VMX switching because it just gets too
- * horrendously complex, especially when a task switches from one CPU
- * to another. Instead we call giveup_altvec in switch_to.
- * VRSAVE isn't dealt with here, that is done in the normal context
- * switch code. Note that we could rely on vrsave value to eventually
- * avoid saving all of the VREGs here...
- */
-#ifndef CONFIG_SMP
- ld r3,last_task_used_altivec@got(r2)
- ld r4,0(r3)
- cmpdi 0,r4,0
- beq 1f
- /* Save VMX state to last_task_used_altivec's THREAD struct */
- addi r4,r4,THREAD
- SAVE_32VRS(0,r5,r4)
- mfvscr vr0
- li r10,THREAD_VSCR
- stvx vr0,r10,r4
- /* Disable VMX for last_task_used_altivec */
- ld r5,PT_REGS(r4)
- ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
- lis r6,MSR_VEC@h
- andc r4,r4,r6
- std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-1:
-#endif /* CONFIG_SMP */
- /* Hack: if we get an altivec unavailable trap with VRSAVE
- * set to all zeros, we assume this is a broken application
- * that fails to set it properly, and thus we switch it to
- * all 1's
- */
- mfspr r4,SPRN_VRSAVE
- cmpdi 0,r4,0
- bne+ 1f
- li r4,-1
- mtspr SPRN_VRSAVE,r4
-1:
- /* enable use of VMX after return */
- ld r4,PACACURRENT(r13)
- addi r5,r4,THREAD /* Get THREAD */
- oris r12,r12,MSR_VEC@h
- std r12,_MSR(r1)
- li r4,1
- li r10,THREAD_VSCR
- stw r4,THREAD_USED_VR(r5)
- lvx vr0,r10,r5
- mtvscr vr0
- REST_32VRS(0,r4,r5)
-#ifndef CONFIG_SMP
- /* Update last_task_used_math to 'current' */
- subi r4,r5,THREAD /* Back to 'current' */
- std r4,0(r3)
-#endif /* CONFIG_SMP */
- /* restore registers and return */
- blr
-#endif /* CONFIG_ALTIVEC */
-
-#ifdef CONFIG_VSX
-/*
- * load_up_vsx(unused, unused, tsk)
- * Disable VSX for the task which had it previously,
- * and save its vector registers in its thread_struct.
- * Reuse the fp and vsx saves, but first check to see if they have
- * been saved already.
- * On entry: r13 == 'current' && last_task_used_vsx != 'current'
- */
-_STATIC(load_up_vsx)
-/* Load FP and VSX registers if they haven't been done yet */
- andi. r5,r12,MSR_FP
- beql+ load_up_fpu /* skip if already loaded */
- andis. r5,r12,MSR_VEC@h
- beql+ load_up_altivec /* skip if already loaded */
-
-#ifndef CONFIG_SMP
- ld r3,last_task_used_vsx@got(r2)
- ld r4,0(r3)
- cmpdi 0,r4,0
- beq 1f
- /* Disable VSX for last_task_used_vsx */
- addi r4,r4,THREAD
- ld r5,PT_REGS(r4)
- ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
- lis r6,MSR_VSX@h
- andc r6,r4,r6
- std r6,_MSR-STACK_FRAME_OVERHEAD(r5)
-1:
-#endif /* CONFIG_SMP */
- ld r4,PACACURRENT(r13)
- addi r4,r4,THREAD /* Get THREAD */
- li r6,1
- stw r6,THREAD_USED_VSR(r4) /* ... also set thread used vsr */
- /* enable use of VSX after return */
- oris r12,r12,MSR_VSX@h
- std r12,_MSR(r1)
-#ifndef CONFIG_SMP
- /* Update last_task_used_math to 'current' */
- ld r4,PACACURRENT(r13)
- std r4,0(r3)
-#endif /* CONFIG_SMP */
- b fast_exception_return
-#endif /* CONFIG_VSX */
-
-/*
- * Hash table stuff
- */
- .align 7
-_STATIC(do_hash_page)
- std r3,_DAR(r1)
- std r4,_DSISR(r1)
-
- andis. r0,r4,0xa450 /* weird error? */
- bne- handle_page_fault /* if not, try to insert a HPTE */
-BEGIN_FTR_SECTION
- andis. r0,r4,0x0020 /* Is it a segment table fault? */
- bne- do_ste_alloc /* If so handle it */
-END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
-
- /*
- * On iSeries, we soft-disable interrupts here, then
- * hard-enable interrupts so that the hash_page code can spin on
- * the hash_table_lock without problems on a shared processor.
- */
- DISABLE_INTS
-
- /*
- * Currently, trace_hardirqs_off() will be called by DISABLE_INTS
- * and will clobber volatile registers when irq tracing is enabled
- * so we need to reload them. It may be possible to be smarter here
- * and move the irq tracing elsewhere but let's keep it simple for
- * now
- */
-#ifdef CONFIG_TRACE_IRQFLAGS
- ld r3,_DAR(r1)
- ld r4,_DSISR(r1)
- ld r5,_TRAP(r1)
- ld r12,_MSR(r1)
- clrrdi r5,r5,4
-#endif /* CONFIG_TRACE_IRQFLAGS */
- /*
- * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
- * accessing a userspace segment (even from the kernel). We assume
- * kernel addresses always have the high bit set.
- */
- rlwinm r4,r4,32-25+9,31-9,31-9 /* DSISR_STORE -> _PAGE_RW */
- rotldi r0,r3,15 /* Move high bit into MSR_PR posn */
- orc r0,r12,r0 /* MSR_PR | ~high_bit */
- rlwimi r4,r0,32-13,30,30 /* becomes _PAGE_USER access bit */
- ori r4,r4,1 /* add _PAGE_PRESENT */
- rlwimi r4,r5,22+2,31-2,31-2 /* Set _PAGE_EXEC if trap is 0x400 */
-
- /*
- * r3 contains the faulting address
- * r4 contains the required access permissions
- * r5 contains the trap number
- *
- * at return r3 = 0 for success
- */
- bl .hash_page /* build HPTE if possible */
- cmpdi r3,0 /* see if hash_page succeeded */
-
-BEGIN_FW_FTR_SECTION
- /*
- * If we had interrupts soft-enabled at the point where the
- * DSI/ISI occurred, and an interrupt came in during hash_page,
- * handle it now.
- * We jump to ret_from_except_lite rather than fast_exception_return
- * because ret_from_except_lite will check for and handle pending
- * interrupts if necessary.
- */
- beq 13f
-END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
-
-BEGIN_FW_FTR_SECTION
- /*
- * Here we have interrupts hard-disabled, so it is sufficient
- * to restore paca->{soft,hard}_enable and get out.
- */
- beq fast_exc_return_irq /* Return from exception on success */
-END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)
-
- /* For a hash failure, we don't bother re-enabling interrupts */
- ble- 12f
-
- /*
- * hash_page couldn't handle it, set soft interrupt enable back
- * to what it was before the trap. Note that .raw_local_irq_restore
- * handles any interrupts pending at this point.
- */
- ld r3,SOFTE(r1)
- TRACE_AND_RESTORE_IRQ_PARTIAL(r3, 11f)
- bl .raw_local_irq_restore
- b 11f
-
-/* Here we have a page fault that hash_page can't handle. */
-handle_page_fault:
- ENABLE_INTS
-11: ld r4,_DAR(r1)
- ld r5,_DSISR(r1)
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl .do_page_fault
- cmpdi r3,0
- beq+ 13f
- bl .save_nvgprs
- mr r5,r3
- addi r3,r1,STACK_FRAME_OVERHEAD
- lwz r4,_DAR(r1)
- bl .bad_page_fault
- b .ret_from_except
-
-13: b .ret_from_except_lite
-
-/* We have a page fault that hash_page could handle but HV refused
- * the PTE insertion
- */
-12: bl .save_nvgprs
- mr r5,r3
- addi r3,r1,STACK_FRAME_OVERHEAD
- ld r4,_DAR(r1)
- bl .low_hash_fault
- b .ret_from_except
-
- /* here we have a segment miss */
-do_ste_alloc:
- bl .ste_allocate /* try to insert stab entry */
- cmpdi r3,0
- bne- handle_page_fault
- b fast_exception_return
-
-/*
- * r13 points to the PACA, r9 contains the saved CR,
- * r11 and r12 contain the saved SRR0 and SRR1.
- * r9 - r13 are saved in paca->exslb.
- * We assume we aren't going to take any exceptions during this procedure.
- * We assume (DAR >> 60) == 0xc.
- */
- .align 7
-_GLOBAL(do_stab_bolted)
- stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
- std r11,PACA_EXSLB+EX_SRR0(r13) /* save SRR0 in exc. frame */
-
- /* Hash to the primary group */
- ld r10,PACASTABVIRT(r13)
- mfspr r11,SPRN_DAR
- srdi r11,r11,28
- rldimi r10,r11,7,52 /* r10 = first ste of the group */
-
- /* Calculate VSID */
- /* This is a kernel address, so protovsid = ESID */
- ASM_VSID_SCRAMBLE(r11, r9, 256M)
- rldic r9,r11,12,16 /* r9 = vsid << 12 */
-
- /* Search the primary group for a free entry */
-1: ld r11,0(r10) /* Test valid bit of the current ste */
- andi. r11,r11,0x80
- beq 2f
- addi r10,r10,16
- andi. r11,r10,0x70
- bne 1b
-
- /* Stick for only searching the primary group for now. */
- /* At least for now, we use a very simple random castout scheme */
- /* Use the TB as a random number ; OR in 1 to avoid entry 0 */
- mftb r11
- rldic r11,r11,4,57 /* r11 = (r11 << 4) & 0x70 */
- ori r11,r11,0x10
-
- /* r10 currently points to an ste one past the group of interest */
- /* make it point to the randomly selected entry */
- subi r10,r10,128
- or r10,r10,r11 /* r10 is the entry to invalidate */
-
- isync /* mark the entry invalid */
- ld r11,0(r10)
- rldicl r11,r11,56,1 /* clear the valid bit */
- rotldi r11,r11,8
- std r11,0(r10)
- sync
-
- clrrdi r11,r11,28 /* Get the esid part of the ste */
- slbie r11
-
-2: std r9,8(r10) /* Store the vsid part of the ste */
- eieio
-
- mfspr r11,SPRN_DAR /* Get the new esid */
- clrrdi r11,r11,28 /* Permits a full 32b of ESID */
- ori r11,r11,0x90 /* Turn on valid and kp */
- std r11,0(r10) /* Put new entry back into the stab */
-
- sync
-
- /* All done -- return from exception. */
- lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
- ld r11,PACA_EXSLB+EX_SRR0(r13) /* get saved SRR0 */
-
- andi. r10,r12,MSR_RI
- beq- unrecov_slb
-
- mtcrf 0x80,r9 /* restore CR */
-
- mfmsr r10
- clrrdi r10,r10,2
- mtmsrd r10,1
-
- mtspr SPRN_SRR0,r11
- mtspr SPRN_SRR1,r12
- ld r9,PACA_EXSLB+EX_R9(r13)
- ld r10,PACA_EXSLB+EX_R10(r13)
- ld r11,PACA_EXSLB+EX_R11(r13)
- ld r12,PACA_EXSLB+EX_R12(r13)
- ld r13,PACA_EXSLB+EX_R13(r13)
- rfid
- b . /* prevent speculative execution */
-
-/*
- * Space for CPU0's segment table.
- *
- * On iSeries, the hypervisor must fill in at least one entry before
- * we get control (with relocate on). The address is given to the hv
- * as a page number (see xLparMap below), so this must be at a
- * fixed address (the linker can't compute (u64)&initial_stab >>
- * PAGE_SHIFT).
- */
- . = STAB0_OFFSET /* 0x6000 */
- .globl initial_stab
-initial_stab:
- .space 4096
-
-#ifdef CONFIG_PPC_PSERIES
-/*
- * Data area reserved for FWNMI option.
- * This address (0x7000) is fixed by the RPA.
- */
- .= 0x7000
- .globl fwnmi_data_area
-fwnmi_data_area:
-#endif /* CONFIG_PPC_PSERIES */
-
- /* iSeries does not use the FWNMI stuff, so it is safe to put
- * this here, even if we later allow kernels that will boot on
- * both pSeries and iSeries */
-#ifdef CONFIG_PPC_ISERIES
- . = LPARMAP_PHYS
- .globl xLparMap
-xLparMap:
- .quad HvEsidsToMap /* xNumberEsids */
- .quad HvRangesToMap /* xNumberRanges */
- .quad STAB0_PAGE /* xSegmentTableOffs */
- .zero 40 /* xRsvd */
- /* xEsids (HvEsidsToMap entries of 2 quads) */
- .quad PAGE_OFFSET_ESID /* xKernelEsid */
- .quad PAGE_OFFSET_VSID /* xKernelVsid */
- .quad VMALLOC_START_ESID /* xKernelEsid */
- .quad VMALLOC_START_VSID /* xKernelVsid */
- /* xRanges (HvRangesToMap entries of 3 quads) */
- .quad HvPagesToMap /* xPages */
- .quad 0 /* xOffset */
- .quad PAGE_OFFSET_VSID << (SID_SHIFT - HW_PAGE_SHIFT) /* xVPN */
-
-#endif /* CONFIG_PPC_ISERIES */
-
-#ifdef CONFIG_PPC_PSERIES
- . = 0x8000
-#endif /* CONFIG_PPC_PSERIES */
/*
* On pSeries and most other platforms, secondary processors spin
diff --git a/arch/powerpc/kernel/head_booke.h b/arch/powerpc/kernel/head_booke.h
index 95f39f1e68d..5f9febc8d14 100644
--- a/arch/powerpc/kernel/head_booke.h
+++ b/arch/powerpc/kernel/head_booke.h
@@ -256,7 +256,7 @@ label:
* off DE in the DSRR1 value and clearing the debug status. \
*/ \
mfspr r10,SPRN_DBSR; /* check single-step/branch taken */ \
- andis. r10,r10,DBSR_IC@h; \
+ andis. r10,r10,(DBSR_IC|DBSR_BT)@h; \
beq+ 2f; \
\
lis r10,KERNELBASE@h; /* check if exception in vectors */ \
@@ -271,7 +271,7 @@ label:
\
/* here it looks like we got an inappropriate debug exception. */ \
1: rlwinm r9,r9,0,~MSR_DE; /* clear DE in the CDRR1 value */ \
- lis r10,DBSR_IC@h; /* clear the IC event */ \
+ lis r10,(DBSR_IC|DBSR_BT)@h; /* clear the IC event */ \
mtspr SPRN_DBSR,r10; \
/* restore state and get out */ \
lwz r10,_CCR(r11); \
@@ -309,7 +309,7 @@ label:
* off DE in the CSRR1 value and clearing the debug status. \
*/ \
mfspr r10,SPRN_DBSR; /* check single-step/branch taken */ \
- andis. r10,r10,DBSR_IC@h; \
+ andis. r10,r10,(DBSR_IC|DBSR_BT)@h; \
beq+ 2f; \
\
lis r10,KERNELBASE@h; /* check if exception in vectors */ \
@@ -317,14 +317,14 @@ label:
cmplw r12,r10; \
blt+ 2f; /* addr below exception vectors */ \
\
- lis r10,DebugCrit@h; \
+ lis r10,DebugCrit@h; \
ori r10,r10,DebugCrit@l; \
cmplw r12,r10; \
bgt+ 2f; /* addr above exception vectors */ \
\
/* here it looks like we got an inappropriate debug exception. */ \
1: rlwinm r9,r9,0,~MSR_DE; /* clear DE in the CSRR1 value */ \
- lis r10,DBSR_IC@h; /* clear the IC event */ \
+ lis r10,(DBSR_IC|DBSR_BT)@h; /* clear the IC event */ \
mtspr SPRN_DBSR,r10; \
/* restore state and get out */ \
lwz r10,_CCR(r11); \
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 7d46e5d5b20..8564a412e7a 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -117,6 +117,7 @@ notrace void raw_local_irq_restore(unsigned long en)
if (!en)
return;
+#ifdef CONFIG_PPC_STD_MMU_64
if (firmware_has_feature(FW_FEATURE_ISERIES)) {
/*
* Do we need to disable preemption here? Not really: in the
@@ -134,6 +135,7 @@ notrace void raw_local_irq_restore(unsigned long en)
if (local_paca->lppaca_ptr->int_dword.any_int)
iseries_handle_interrupts();
}
+#endif /* CONFIG_PPC_STD_MMU_64 */
/*
* if (get_paca()->hard_enabled) return;
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index b9530b2395a..a5cf9c1356a 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -457,98 +457,6 @@ _GLOBAL(disable_kernel_fp)
isync
blr
-#ifdef CONFIG_ALTIVEC
-
-#if 0 /* this has no callers for now */
-/*
- * disable_kernel_altivec()
- * Disable the VMX.
- */
-_GLOBAL(disable_kernel_altivec)
- mfmsr r3
- rldicl r0,r3,(63-MSR_VEC_LG),1
- rldicl r3,r0,(MSR_VEC_LG+1),0
- mtmsrd r3 /* disable use of VMX now */
- isync
- blr
-#endif /* 0 */
-
-/*
- * giveup_altivec(tsk)
- * Disable VMX for the task given as the argument,
- * and save the vector registers in its thread_struct.
- * Enables the VMX for use in the kernel on return.
- */
-_GLOBAL(giveup_altivec)
- mfmsr r5
- oris r5,r5,MSR_VEC@h
- mtmsrd r5 /* enable use of VMX now */
- isync
- cmpdi 0,r3,0
- beqlr- /* if no previous owner, done */
- addi r3,r3,THREAD /* want THREAD of task */
- ld r5,PT_REGS(r3)
- cmpdi 0,r5,0
- SAVE_32VRS(0,r4,r3)
- mfvscr vr0
- li r4,THREAD_VSCR
- stvx vr0,r4,r3
- beq 1f
- ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-#ifdef CONFIG_VSX
-BEGIN_FTR_SECTION
- lis r3,(MSR_VEC|MSR_VSX)@h
-FTR_SECTION_ELSE
- lis r3,MSR_VEC@h
-ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX)
-#else
- lis r3,MSR_VEC@h
-#endif
- andc r4,r4,r3 /* disable FP for previous task */
- std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-1:
-#ifndef CONFIG_SMP
- li r5,0
- ld r4,last_task_used_altivec@got(r2)
- std r5,0(r4)
-#endif /* CONFIG_SMP */
- blr
-
-#endif /* CONFIG_ALTIVEC */
-
-#ifdef CONFIG_VSX
-/*
- * __giveup_vsx(tsk)
- * Disable VSX for the task given as the argument.
- * Does NOT save vsx registers.
- * Enables the VSX for use in the kernel on return.
- */
-_GLOBAL(__giveup_vsx)
- mfmsr r5
- oris r5,r5,MSR_VSX@h
- mtmsrd r5 /* enable use of VSX now */
- isync
-
- cmpdi 0,r3,0
- beqlr- /* if no previous owner, done */
- addi r3,r3,THREAD /* want THREAD of task */
- ld r5,PT_REGS(r3)
- cmpdi 0,r5,0
- beq 1f
- ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
- lis r3,MSR_VSX@h
- andc r4,r4,r3 /* disable VSX for previous task */
- std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-1:
-#ifndef CONFIG_SMP
- li r5,0
- ld r4,last_task_used_vsx@got(r2)
- std r5,0(r4)
-#endif /* CONFIG_SMP */
- blr
-
-#endif /* CONFIG_VSX */
-
/* kexec_wait(phys_cpu)
*
* wait for the flag to change, indicating this kernel is going away but
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index c744b327bca..e9962c7f8a0 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -18,6 +18,8 @@
* field correctly */
extern unsigned long __toc_start;
+#ifdef CONFIG_PPC_BOOK3S
+
/*
* The structure which the hypervisor knows about - this structure
* should not cross a page boundary. The vpa_init/register_vpa call
@@ -41,6 +43,10 @@ struct lppaca lppaca[] = {
},
};
+#endif /* CONFIG_PPC_BOOK3S */
+
+#ifdef CONFIG_PPC_STD_MMU_64
+
/*
* 3 persistent SLBs are registered here. The buffer will be zero
 * initially, hence will all be invalid until we actually write them.
@@ -52,6 +58,8 @@ struct slb_shadow slb_shadow[] __cacheline_aligned = {
},
};
+#endif /* CONFIG_PPC_STD_MMU_64 */
+
/* The Paca is an array with one entry per processor. Each contains an
* lppaca, which contains the information shared between the
* hypervisor and Linux.
@@ -77,15 +85,19 @@ void __init initialise_pacas(void)
for (cpu = 0; cpu < NR_CPUS; cpu++) {
struct paca_struct *new_paca = &paca[cpu];
+#ifdef CONFIG_PPC_BOOK3S
new_paca->lppaca_ptr = &lppaca[cpu];
+#endif
new_paca->lock_token = 0x8000;
new_paca->paca_index = cpu;
new_paca->kernel_toc = kernel_toc;
new_paca->kernelbase = (unsigned long) _stext;
new_paca->kernel_msr = MSR_KERNEL;
new_paca->hw_cpu_id = 0xffff;
- new_paca->slb_shadow_ptr = &slb_shadow[cpu];
new_paca->__current = &init_task;
+#ifdef CONFIG_PPC_STD_MMU_64
+ new_paca->slb_shadow_ptr = &slb_shadow[cpu];
+#endif /* CONFIG_PPC_STD_MMU_64 */
}
}
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index dd6c7a3bf72..9e8902fa14c 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -420,6 +420,9 @@ int pcibios_unmap_io_space(struct pci_bus *bus)
* so flushing the hash table is the only sane way to make sure
* that no hash entries are covering that removed bridge area
* while still allowing other busses overlapping those pages
+ *
+ * Note: If we ever support P2P hotplug on Book3E, we'll have
+ * to do an appropriate TLB flush here too
*/
if (bus->self) {
struct resource *res = bus->resource[0];
@@ -427,8 +430,10 @@ int pcibios_unmap_io_space(struct pci_bus *bus)
pr_debug("IO unmapping for PCI-PCI bridge %s\n",
pci_name(bus->self));
+#ifdef CONFIG_PPC_STD_MMU_64
__flush_hash_table_range(&init_mm, res->start + _IO_BASE,
res->end + _IO_BASE + 1);
+#endif
return 0;
}
@@ -501,7 +506,7 @@ int __devinit pcibios_map_io_space(struct pci_bus *bus)
pr_debug("IO mapping for PHB %s\n", hose->dn->full_name);
pr_debug(" phys=0x%016llx, virt=0x%p (alloc=0x%p)\n",
hose->io_base_phys, hose->io_base_virt, hose->io_base_alloc);
- pr_debug(" size=0x%016lx (alloc=0x%016lx)\n",
+ pr_debug(" size=0x%016llx (alloc=0x%016lx)\n",
hose->pci_io_size, size_page);
/* Establish the mapping */
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 7b44a33f03c..3e7135bbe40 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -650,7 +650,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
p->thread.ksp_limit = (unsigned long)task_stack_page(p) +
_ALIGN_UP(sizeof(struct thread_info), 16);
-#ifdef CONFIG_PPC64
+#ifdef CONFIG_PPC_STD_MMU_64
if (cpu_has_feature(CPU_FTR_SLB)) {
unsigned long sp_vsid;
unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index ce01ff2474d..d4405b95bfa 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -585,7 +585,7 @@ static void __init check_cpu_pa_features(unsigned long node)
ibm_pa_features, ARRAY_SIZE(ibm_pa_features));
}
-#ifdef CONFIG_PPC64
+#ifdef CONFIG_PPC_STD_MMU_64
static void __init check_cpu_slb_size(unsigned long node)
{
u32 *slb_size_ptr;
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index 3635be61f89..9fa2c7dcd05 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -704,15 +704,34 @@ void user_enable_single_step(struct task_struct *task)
if (regs != NULL) {
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
+ task->thread.dbcr0 &= ~DBCR0_BT;
task->thread.dbcr0 |= DBCR0_IDM | DBCR0_IC;
regs->msr |= MSR_DE;
#else
+ regs->msr &= ~MSR_BE;
regs->msr |= MSR_SE;
#endif
}
set_tsk_thread_flag(task, TIF_SINGLESTEP);
}
+void user_enable_block_step(struct task_struct *task)
+{
+ struct pt_regs *regs = task->thread.regs;
+
+ if (regs != NULL) {
+#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
+ task->thread.dbcr0 &= ~DBCR0_IC;
+ task->thread.dbcr0 = DBCR0_IDM | DBCR0_BT;
+ regs->msr |= MSR_DE;
+#else
+ regs->msr &= ~MSR_SE;
+ regs->msr |= MSR_BE;
+#endif
+ }
+ set_tsk_thread_flag(task, TIF_SINGLESTEP);
+}
+
void user_disable_single_step(struct task_struct *task)
{
struct pt_regs *regs = task->thread.regs;
@@ -726,10 +745,10 @@ void user_disable_single_step(struct task_struct *task)
if (regs != NULL) {
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
- task->thread.dbcr0 &= ~(DBCR0_IC | DBCR0_IDM);
+ task->thread.dbcr0 &= ~(DBCR0_IC | DBCR0_BT | DBCR0_IDM);
regs->msr &= ~MSR_DE;
#else
- regs->msr &= ~MSR_SE;
+ regs->msr &= ~(MSR_SE | MSR_BE);
#endif
}
clear_tsk_thread_flag(task, TIF_SINGLESTEP);
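
The new user_enable_block_step() above is driven by the generic ptrace resume path, which calls it when a tracer issues the PTRACE_SINGLEBLOCK request. A minimal userspace sketch of how a debugger might exercise it follows; the PTRACE_SINGLEBLOCK value is an assumption here (it would normally come from the updated asm/ptrace.h), and error handling is kept to the bare minimum:

/* Hedged sketch: trace a child and resume it until its next taken branch. */
#include <signal.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

#ifndef PTRACE_SINGLEBLOCK
#define PTRACE_SINGLEBLOCK 0x100	/* assumed powerpc request number */
#endif

int main(void)
{
	pid_t child = fork();

	if (child == 0) {
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		raise(SIGSTOP);			/* hand control to the tracer */
		for (volatile int i = 0; i < 100; i++)
			;			/* loop full of taken branches */
		_exit(0);
	}

	waitpid(child, NULL, 0);		/* child stopped at SIGSTOP */
	/* Resume until the next taken branch; in the kernel this goes through
	 * ptrace_resume() -> user_enable_block_step(), which sets MSR_BE on
	 * server processors or DBCR0_BT on BookE. */
	if (ptrace(PTRACE_SINGLEBLOCK, child, NULL, NULL) == -1)
		perror("PTRACE_SINGLEBLOCK");
	waitpid(child, NULL, 0);		/* SIGTRAP after the branch */
	ptrace(PTRACE_CONT, child, NULL, NULL);
	waitpid(child, NULL, 0);
	return 0;
}

On server processors the reported stop is at the target of the taken branch, while BookE stops on the branch itself; the DebugException() change later in this patch re-arms a single step on BookE so userspace sees the same behaviour in both cases.
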
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 9e1ca745d8f..1d154248cf4 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -39,6 +39,7 @@
#include <asm/serial.h>
#include <asm/udbg.h>
#include <asm/mmu_context.h>
+#include <asm/swiotlb.h>
#include "setup.h"
@@ -332,6 +333,11 @@ void __init setup_arch(char **cmdline_p)
ppc_md.setup_arch();
if ( ppc_md.progress ) ppc_md.progress("arch: exit", 0x3eab);
+#ifdef CONFIG_SWIOTLB
+ if (ppc_swiotlb_enable)
+ swiotlb_init();
+#endif
+
paging_init();
/* Initialize the MMU context management stuff */
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index c410c606955..f46548e6604 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -61,6 +61,7 @@
#include <asm/xmon.h>
#include <asm/udbg.h>
#include <asm/kexec.h>
+#include <asm/swiotlb.h>
#include "setup.h"
@@ -417,9 +418,11 @@ void __init setup_system(void)
if (ppc64_caches.iline_size != 0x80)
printk("ppc64_caches.icache_line_size = 0x%x\n",
ppc64_caches.iline_size);
+#ifdef CONFIG_PPC_STD_MMU_64
if (htab_address)
printk("htab_address = 0x%p\n", htab_address);
printk("htab_hash_mask = 0x%lx\n", htab_hash_mask);
+#endif /* CONFIG_PPC_STD_MMU_64 */
if (PHYSICAL_START > 0)
printk("physical_start = 0x%lx\n",
PHYSICAL_START);
@@ -511,8 +514,9 @@ void __init setup_arch(char **cmdline_p)
irqstack_early_init();
emergency_stack_init();
+#ifdef CONFIG_PPC_STD_MMU_64
stabs_alloc();
-
+#endif
/* set up the bootmem stuff with available memory */
do_init_bootmem();
sparse_init();
@@ -524,6 +528,11 @@ void __init setup_arch(char **cmdline_p)
if (ppc_md.setup_arch)
ppc_md.setup_arch();
+#ifdef CONFIG_SWIOTLB
+ if (ppc_swiotlb_enable)
+ swiotlb_init();
+#endif
+
paging_init();
ppc64_boot_msg(0x15, "Setup Done");
}
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 6a5b2b731f4..6f0ae1a9bfa 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -1041,7 +1041,34 @@ void SoftwareEmulation(struct pt_regs *regs)
void __kprobes DebugException(struct pt_regs *regs, unsigned long debug_status)
{
- if (debug_status & DBSR_IC) { /* instruction completion */
+ /* Hack alert: On BookE, Branch Taken stops on the branch itself, while
+ * on server, it stops on the target of the branch. In order to simulate
+ * the server behaviour, we thus restart right away with a single step
+ * instead of stopping here when hitting a BT
+ */
+ if (debug_status & DBSR_BT) {
+ regs->msr &= ~MSR_DE;
+
+ /* Disable BT */
+ mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_BT);
+ /* Clear the BT event */
+ mtspr(SPRN_DBSR, DBSR_BT);
+
+ /* Do the single step trick only when coming from userspace */
+ if (user_mode(regs)) {
+ current->thread.dbcr0 &= ~DBCR0_BT;
+ current->thread.dbcr0 |= DBCR0_IDM | DBCR0_IC;
+ regs->msr |= MSR_DE;
+ return;
+ }
+
+ if (notify_die(DIE_SSTEP, "block_step", regs, 5,
+ 5, SIGTRAP) == NOTIFY_STOP) {
+ return;
+ }
+ if (debugger_sstep(regs))
+ return;
+ } else if (debug_status & DBSR_IC) { /* Instruction complete */
regs->msr &= ~MSR_DE;
/* Disable instruction completion */
@@ -1057,9 +1084,8 @@ void __kprobes DebugException(struct pt_regs *regs, unsigned long debug_status)
if (debugger_sstep(regs))
return;
- if (user_mode(regs)) {
- current->thread.dbcr0 &= ~DBCR0_IC;
- }
+ if (user_mode(regs))
+ current->thread.dbcr0 &= ~(DBCR0_IC);
_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
} else if (debug_status & (DBSR_DAC1R | DBSR_DAC1W)) {
diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S
index 49ac3d6e139..ef36cbbc588 100644
--- a/arch/powerpc/kernel/vector.S
+++ b/arch/powerpc/kernel/vector.S
@@ -1,5 +1,215 @@
+#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/reg.h>
+#include <asm/asm-offsets.h>
+#include <asm/cputable.h>
+#include <asm/thread_info.h>
+#include <asm/page.h>
+
+/*
+ * load_up_altivec(unused, unused, tsk)
+ * Disable VMX for the task which had it previously,
+ * and save its vector registers in its thread_struct.
+ * Enables the VMX for use in the kernel on return.
+ * On SMP we know the VMX is free, since we give it up every
+ * switch (ie, no lazy save of the vector registers).
+ */
+_GLOBAL(load_up_altivec)
+ mfmsr r5 /* grab the current MSR */
+ oris r5,r5,MSR_VEC@h
+ MTMSRD(r5) /* enable use of AltiVec now */
+ isync
+
+/*
+ * For SMP, we don't do lazy VMX switching because it just gets too
+ * horrendously complex, especially when a task switches from one CPU
+ * to another. Instead we call giveup_altivec in switch_to.
+ * VRSAVE isn't dealt with here, that is done in the normal context
+ * switch code. Note that we could rely on vrsave value to eventually
+ * avoid saving all of the VREGs here...
+ */
+#ifndef CONFIG_SMP
+ LOAD_REG_ADDRBASE(r3, last_task_used_altivec)
+ toreal(r3)
+ PPC_LL r4,ADDROFF(last_task_used_altivec)(r3)
+ PPC_LCMPI 0,r4,0
+ beq 1f
+
+ /* Save VMX state to last_task_used_altivec's THREAD struct */
+ toreal(r4)
+ addi r4,r4,THREAD
+ SAVE_32VRS(0,r5,r4)
+ mfvscr vr0
+ li r10,THREAD_VSCR
+ stvx vr0,r10,r4
+ /* Disable VMX for last_task_used_altivec */
+ PPC_LL r5,PT_REGS(r4)
+ toreal(r5)
+ PPC_LL r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+ lis r10,MSR_VEC@h
+ andc r4,r4,r10
+ PPC_STL r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+1:
+#endif /* CONFIG_SMP */
+
+ /* Hack: if we get an altivec unavailable trap with VRSAVE
+ * set to all zeros, we assume this is a broken application
+ * that fails to set it properly, and thus we switch it to
+ * all 1's
+ */
+ mfspr r4,SPRN_VRSAVE
+ cmpdi 0,r4,0
+ bne+ 1f
+ li r4,-1
+ mtspr SPRN_VRSAVE,r4
+1:
+ /* enable use of VMX after return */
+#ifdef CONFIG_PPC32
+ mfspr r5,SPRN_SPRG3 /* current task's THREAD (phys) */
+ oris r9,r9,MSR_VEC@h
+#else
+ ld r4,PACACURRENT(r13)
+ addi r5,r4,THREAD /* Get THREAD */
+ oris r12,r12,MSR_VEC@h
+ std r12,_MSR(r1)
+#endif
+ li r4,1
+ li r10,THREAD_VSCR
+ stw r4,THREAD_USED_VR(r5)
+ lvx vr0,r10,r5
+ mtvscr vr0
+ REST_32VRS(0,r4,r5)
+#ifndef CONFIG_SMP
+ /* Update last_task_used_altivec to 'current' */
+ subi r4,r5,THREAD /* Back to 'current' */
+ fromreal(r4)
+ PPC_STL r4,ADDROFF(last_task_used_altivec)(r3)
+#endif /* CONFIG_SMP */
+ /* restore registers and return */
+ blr
+
+/*
+ * giveup_altivec(tsk)
+ * Disable VMX for the task given as the argument,
+ * and save the vector registers in its thread_struct.
+ * Enables the VMX for use in the kernel on return.
+ */
+_GLOBAL(giveup_altivec)
+ mfmsr r5
+ oris r5,r5,MSR_VEC@h
+ SYNC
+ MTMSRD(r5) /* enable use of VMX now */
+ isync
+ PPC_LCMPI 0,r3,0
+ beqlr- /* if no previous owner, done */
+ addi r3,r3,THREAD /* want THREAD of task */
+ PPC_LL r5,PT_REGS(r3)
+ PPC_LCMPI 0,r5,0
+ SAVE_32VRS(0,r4,r3)
+ mfvscr vr0
+ li r4,THREAD_VSCR
+ stvx vr0,r4,r3
+ beq 1f
+ PPC_LL r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+#ifdef CONFIG_VSX
+BEGIN_FTR_SECTION
+ lis r3,(MSR_VEC|MSR_VSX)@h
+FTR_SECTION_ELSE
+ lis r3,MSR_VEC@h
+ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX)
+#else
+ lis r3,MSR_VEC@h
+#endif
+ andc r4,r4,r3 /* disable VMX for previous task */
+ PPC_STL r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+1:
+#ifndef CONFIG_SMP
+ li r5,0
+ LOAD_REG_ADDRBASE(r4,last_task_used_altivec)
+ PPC_STL r5,ADDROFF(last_task_used_altivec)(r4)
+#endif /* CONFIG_SMP */
+ blr
+
+#ifdef CONFIG_VSX
+
+#ifdef CONFIG_PPC32
+#error This asm code isn't ready for 32-bit kernels
+#endif
+
+/*
+ * load_up_vsx(unused, unused, tsk)
+ * Disable VSX for the task which had it previously,
+ * and save its vector registers in its thread_struct.
+ * Reuse the fp and vsx saves, but first check to see if they have
+ * been saved already.
+ */
+_GLOBAL(load_up_vsx)
+/* Load FP and VSX registers if they haven't been done yet */
+ andi. r5,r12,MSR_FP
+ beql+ load_up_fpu /* skip if already loaded */
+ andis. r5,r12,MSR_VEC@h
+ beql+ load_up_altivec /* skip if already loaded */
+
+#ifndef CONFIG_SMP
+ ld r3,last_task_used_vsx@got(r2)
+ ld r4,0(r3)
+ cmpdi 0,r4,0
+ beq 1f
+ /* Disable VSX for last_task_used_vsx */
+ addi r4,r4,THREAD
+ ld r5,PT_REGS(r4)
+ ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+ lis r6,MSR_VSX@h
+ andc r6,r4,r6
+ std r6,_MSR-STACK_FRAME_OVERHEAD(r5)
+1:
+#endif /* CONFIG_SMP */
+ ld r4,PACACURRENT(r13)
+ addi r4,r4,THREAD /* Get THREAD */
+ li r6,1
+ stw r6,THREAD_USED_VSR(r4) /* ... also set thread used vsr */
+ /* enable use of VSX after return */
+ oris r12,r12,MSR_VSX@h
+ std r12,_MSR(r1)
+#ifndef CONFIG_SMP
+ /* Update last_task_used_vsx to 'current' */
+ ld r4,PACACURRENT(r13)
+ std r4,0(r3)
+#endif /* CONFIG_SMP */
+ b fast_exception_return
+
+/*
+ * __giveup_vsx(tsk)
+ * Disable VSX for the task given as the argument.
+ * Does NOT save vsx registers.
+ * Enables the VSX for use in the kernel on return.
+ */
+_GLOBAL(__giveup_vsx)
+ mfmsr r5
+ oris r5,r5,MSR_VSX@h
+ mtmsrd r5 /* enable use of VSX now */
+ isync
+
+ cmpdi 0,r3,0
+ beqlr- /* if no previous owner, done */
+ addi r3,r3,THREAD /* want THREAD of task */
+ ld r5,PT_REGS(r3)
+ cmpdi 0,r5,0
+ beq 1f
+ ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+ lis r3,MSR_VSX@h
+ andc r4,r4,r3 /* disable VSX for previous task */
+ std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+1:
+#ifndef CONFIG_SMP
+ li r5,0
+ ld r4,last_task_used_vsx@got(r2)
+ std r5,0(r4)
+#endif /* CONFIG_SMP */
+ blr
+
+#endif /* CONFIG_VSX */
+
/*
* The routines below are in assembler so we can closely control the
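The assembly added above implements lazy Altivec state switching: on an "Altivec unavailable" trap the previous owner's vector registers are saved and MSR_VEC is cleared in its saved MSR, then the current task's state is loaded and it resumes with VMX enabled. The following is only a rough C sketch of that UP-only ownership scheme; save_vr_state()/load_vr_state() are illustrative stand-ins for the SAVE_32VRS/REST_32VRS plus VSCR sequences, not an in-tree interface.

    /* Illustrative sketch only -- the real logic is the assembly above. */
    void altivec_unavailable_lazy_switch(struct task_struct *tsk)
    {
            struct task_struct *last = last_task_used_altivec;

            if (last && last != tsk) {
                    save_vr_state(&last->thread);           /* SAVE_32VRS + VSCR */
                    last->thread.regs->msr &= ~MSR_VEC;     /* force a fresh trap */
            }

            if (tsk->thread.vrsave == 0)                    /* the VRSAVE hack */
                    tsk->thread.vrsave = ~0;

            load_vr_state(&tsk->thread);                    /* REST_32VRS + VSCR */
            tsk->thread.used_vr = 1;
            tsk->thread.regs->msr |= MSR_VEC;               /* resume with VMX on */
            last_task_used_altivec = tsk;
    }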
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
index b746f4ca420..c4bcf072cb3 100644
--- a/arch/powerpc/mm/Makefile
+++ b/arch/powerpc/mm/Makefile
@@ -11,10 +11,11 @@ obj-y := fault.o mem.o pgtable.o gup.o \
pgtable_$(CONFIG_WORD_SIZE).o
obj-$(CONFIG_PPC_MMU_NOHASH) += mmu_context_nohash.o tlb_nohash.o \
tlb_nohash_low.o
-hash-$(CONFIG_PPC_NATIVE) := hash_native_64.o
-obj-$(CONFIG_PPC64) += hash_utils_64.o \
+obj-$(CONFIG_PPC64) += mmap_64.o
+hash64-$(CONFIG_PPC_NATIVE) := hash_native_64.o
+obj-$(CONFIG_PPC_STD_MMU_64) += hash_utils_64.o \
slb_low.o slb.o stab.o \
- mmap_64.o $(hash-y)
+ mmap_64.o $(hash64-y)
obj-$(CONFIG_PPC_STD_MMU_32) += ppc_mmu_32.o
obj-$(CONFIG_PPC_STD_MMU) += hash_low_$(CONFIG_WORD_SIZE).o \
tlb_hash$(CONFIG_WORD_SIZE).o \
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 3e6a6543f53..68a821add28 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -66,6 +66,7 @@
#include "mmu_decl.h"
+#ifdef CONFIG_PPC_STD_MMU_64
#if PGTABLE_RANGE > USER_VSID_RANGE
#warning Limited user VSID range means pagetable space is wasted
#endif
@@ -73,6 +74,7 @@
#if (TASK_SIZE_USER64 < PGTABLE_RANGE) && (TASK_SIZE_USER64 < USER_VSID_RANGE)
#warning TASK_SIZE is smaller than it needs to be.
#endif
+#endif /* CONFIG_PPC_STD_MMU_64 */
phys_addr_t memstart_addr = ~0;
phys_addr_t kernstart_addr;
diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
index 030d0005b4d..8343986809c 100644
--- a/arch/powerpc/mm/mmu_context_nohash.c
+++ b/arch/powerpc/mm/mmu_context_nohash.c
@@ -46,7 +46,7 @@ static unsigned int next_context, nr_free_contexts;
static unsigned long *context_map;
static unsigned long *stale_map[NR_CPUS];
static struct mm_struct **context_mm;
-static spinlock_t context_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(context_lock);
#define CTX_MAP_SIZE \
(sizeof(unsigned long) * (last_context / BITS_PER_LONG + 1))
@@ -73,7 +73,6 @@ static unsigned int steal_context_smp(unsigned int id)
struct mm_struct *mm;
unsigned int cpu, max;
- again:
max = last_context - first_context;
/* Attempt to free next_context first and then loop until we manage */
@@ -108,7 +107,9 @@ static unsigned int steal_context_smp(unsigned int id)
spin_unlock(&context_lock);
cpu_relax();
spin_lock(&context_lock);
- goto again;
+
+ /* This will cause the caller to try again */
+ return MMU_NO_CONTEXT;
}
#endif /* CONFIG_SMP */
@@ -194,6 +195,8 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
WARN_ON(prev->context.active < 1);
prev->context.active--;
}
+
+ again:
#endif /* CONFIG_SMP */
/* If we already have a valid assigned context, skip all that */
@@ -212,7 +215,8 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
#ifdef CONFIG_SMP
if (num_online_cpus() > 1) {
id = steal_context_smp(id);
- goto stolen;
+ if (id == MMU_NO_CONTEXT)
+ goto again;
}
#endif /* CONFIG_SMP */
id = steal_context_up(id);
@@ -272,6 +276,7 @@ int init_new_context(struct task_struct *t, struct mm_struct *mm)
*/
void destroy_context(struct mm_struct *mm)
{
+ unsigned long flags;
unsigned int id;
if (mm->context.id == MMU_NO_CONTEXT)
@@ -279,18 +284,18 @@ void destroy_context(struct mm_struct *mm)
WARN_ON(mm->context.active != 0);
- spin_lock(&context_lock);
+ spin_lock_irqsave(&context_lock, flags);
id = mm->context.id;
if (id != MMU_NO_CONTEXT) {
__clear_bit(id, context_map);
mm->context.id = MMU_NO_CONTEXT;
#ifdef DEBUG_MAP_CONSISTENCY
mm->context.active = 0;
- context_mm[id] = NULL;
#endif
+ context_mm[id] = NULL;
nr_free_contexts++;
}
- spin_unlock(&context_lock);
+ spin_unlock_irqrestore(&context_lock, flags);
}
#ifdef CONFIG_SMP
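The two mmu_context_nohash.c changes above work together: steal_context_smp() no longer loops internally after dropping context_lock, it returns MMU_NO_CONTEXT so that switch_mmu_context() re-checks the context state and retries, and destroy_context() now disables interrupts around the lock. A minimal sketch of that retry shape, with find_free_context() as a hypothetical stand-in for the context_map bitmap search:

    static unsigned int alloc_context_id(void)
    {
            unsigned int id;

            spin_lock(&context_lock);
    again:
            id = find_free_context();               /* hypothetical bitmap search */
            if (id == MMU_NO_CONTEXT) {
                    id = steal_context_smp(id);     /* may drop/retake the lock */
                    if (id == MMU_NO_CONTEXT)
                            goto again;             /* state changed while unlocked */
            }
            spin_unlock(&context_lock);
            return id;
    }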
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 9047145095a..b037d95eead 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -981,6 +981,8 @@ void __init do_init_bootmem(void)
mark_reserved_regions_for_nid(nid);
sparse_memory_present_with_active_regions(nid);
}
+
+ init_bootmem_done = 1;
}
void __init paging_init(void)
diff --git a/arch/powerpc/platforms/40x/virtex.c b/arch/powerpc/platforms/40x/virtex.c
index fc7fb001276..d0fc6866b00 100644
--- a/arch/powerpc/platforms/40x/virtex.c
+++ b/arch/powerpc/platforms/40x/virtex.c
@@ -14,6 +14,7 @@
#include <asm/prom.h>
#include <asm/time.h>
#include <asm/xilinx_intc.h>
+#include <asm/xilinx_pci.h>
#include <asm/ppc4xx.h>
static struct of_device_id xilinx_of_bus_ids[] __initdata = {
@@ -47,6 +48,7 @@ static int __init virtex_probe(void)
define_machine(virtex) {
.name = "Xilinx Virtex",
.probe = virtex_probe,
+ .setup_arch = xilinx_pci_init,
.init_IRQ = xilinx_intc_init_tree,
.get_irq = xilinx_intc_get_irq,
.restart = ppc4xx_reset_system,
diff --git a/arch/powerpc/platforms/44x/Kconfig b/arch/powerpc/platforms/44x/Kconfig
index 0d83a6a0397..90e3192611a 100644
--- a/arch/powerpc/platforms/44x/Kconfig
+++ b/arch/powerpc/platforms/44x/Kconfig
@@ -156,7 +156,7 @@ config YOSEMITE
# This option enables support for the IBM PPC440GX evaluation board.
config XILINX_VIRTEX440_GENERIC_BOARD
- bool "Generic Xilinx Virtex 440 board"
+ bool "Generic Xilinx Virtex 5 FXT board support"
depends on 44x
default n
select XILINX_VIRTEX_5_FXT
@@ -171,6 +171,17 @@ config XILINX_VIRTEX440_GENERIC_BOARD
Most Virtex 5 designs should use this unless they need to do some
special configuration at board probe time.
+config XILINX_ML510
+ bool "Xilinx ML510 extra support"
+ depends on XILINX_VIRTEX440_GENERIC_BOARD
+ select PPC_PCI_CHOICE
+ select XILINX_PCI if PCI
+ select PPC_INDIRECT_PCI if PCI
+ select PPC_I8259 if PCI
+ help
+ This option enables extra support for features on the Xilinx ML510
+ board. The ML510 has a PCI bus with an ALi south bridge.
+
config PPC44x_SIMPLE
bool "Simple PowerPC 44x board support"
depends on 44x
diff --git a/arch/powerpc/platforms/44x/Makefile b/arch/powerpc/platforms/44x/Makefile
index 01f51daace1..ee6185aeaa3 100644
--- a/arch/powerpc/platforms/44x/Makefile
+++ b/arch/powerpc/platforms/44x/Makefile
@@ -4,3 +4,4 @@ obj-$(CONFIG_EBONY) += ebony.o
obj-$(CONFIG_SAM440EP) += sam440ep.o
obj-$(CONFIG_WARP) += warp.o
obj-$(CONFIG_XILINX_VIRTEX_5_FXT) += virtex.o
+obj-$(CONFIG_XILINX_ML510) += virtex_ml510.o
diff --git a/arch/powerpc/platforms/44x/virtex.c b/arch/powerpc/platforms/44x/virtex.c
index 68637faf70a..cf96ccaa760 100644
--- a/arch/powerpc/platforms/44x/virtex.c
+++ b/arch/powerpc/platforms/44x/virtex.c
@@ -16,6 +16,7 @@
#include <asm/prom.h>
#include <asm/time.h>
#include <asm/xilinx_intc.h>
+#include <asm/xilinx_pci.h>
#include <asm/reg.h>
#include <asm/ppc4xx.h>
#include "44x.h"
@@ -53,6 +54,7 @@ static int __init virtex_probe(void)
define_machine(virtex) {
.name = "Xilinx Virtex440",
.probe = virtex_probe,
+ .setup_arch = xilinx_pci_init,
.init_IRQ = xilinx_intc_init_tree,
.get_irq = xilinx_intc_get_irq,
.calibrate_decr = generic_calibrate_decr,
diff --git a/arch/powerpc/platforms/44x/virtex_ml510.c b/arch/powerpc/platforms/44x/virtex_ml510.c
new file mode 100644
index 00000000000..ba4a6e388a4
--- /dev/null
+++ b/arch/powerpc/platforms/44x/virtex_ml510.c
@@ -0,0 +1,29 @@
+#include <asm/i8259.h>
+#include <linux/pci.h>
+#include "44x.h"
+
+/**
+ * ml510_ali_quirk - ALi M1533 south bridge fixup for the Xilinx ML510
+ */
+static void __devinit ml510_ali_quirk(struct pci_dev *dev)
+{
+ /* Enable the IDE controller */
+ pci_write_config_byte(dev, 0x58, 0x4c);
+ /* Assign irq 14 to the primary ide channel */
+ pci_write_config_byte(dev, 0x44, 0x0d);
+ /* Assign irq 15 to the secondary ide channel */
+ pci_write_config_byte(dev, 0x75, 0x0f);
+ /* Set the ide controller in native mode */
+ pci_write_config_byte(dev, 0x09, 0xff);
+
+ /* INTB = disabled, INTA = disabled */
+ pci_write_config_byte(dev, 0x48, 0x00);
+ /* INTD = disabled, INTC = disabled */
+ pci_write_config_byte(dev, 0x4a, 0x00);
+ /* Audio = INT7, Modem = disabled. */
+ pci_write_config_byte(dev, 0x4b, 0x60);
+ /* USB = INT7 */
+ pci_write_config_byte(dev, 0x74, 0x06);
+}
+DECLARE_PCI_FIXUP_EARLY(0x10b9, 0x1533, ml510_ali_quirk);
+
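virtex_ml510.c relies on the kernel's PCI fixup machinery: DECLARE_PCI_FIXUP_EARLY() runs the quirk for every device matching the given vendor/device IDs (here the ALi M1533, 0x10b9:0x1533) before resources are assigned. A minimal, hypothetical quirk of the same shape; the register offset 0x40 and the device ID below are placeholders, not taken from the patch:

    static void __devinit example_early_quirk(struct pci_dev *dev)
    {
            u8 val;

            /* read-modify-write a vendor-specific config byte */
            pci_read_config_byte(dev, 0x40, &val);
            pci_write_config_byte(dev, 0x40, val | 0x01);
    }
    DECLARE_PCI_FIXUP_EARLY(0x10b9, 0x5229, example_early_quirk);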
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
index e3e87078d03..04a8061045c 100644
--- a/arch/powerpc/platforms/Kconfig
+++ b/arch/powerpc/platforms/Kconfig
@@ -329,4 +329,8 @@ config MCU_MPC8349EMITX
also register MCU GPIOs with the generic GPIO API, so you'll able
to use MCU pins as GPIOs.
+config XILINX_PCI
+ bool "Xilinx PCI host bridge support"
+ depends on PCI && XILINX_VIRTEX
+
endmenu
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index 915a7ac6a91..25f8a9c2ba1 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -9,7 +9,6 @@ menu "Processor support"
choice
prompt "Processor Type"
depends on PPC32
- default 6xx
help
There are five families of 32 bit PowerPC chips supported.
The most common ones are the desktop and server CPUs (601, 603,
@@ -21,7 +20,7 @@ choice
If unsure, select 52xx/6xx/7xx/74xx/82xx/83xx/86xx.
-config 6xx
+config PPC_BOOK3S
bool "512x/52xx/6xx/7xx/74xx/82xx/83xx/86xx"
select PPC_FPU
@@ -57,13 +56,11 @@ config E200
endchoice
-# Until we have a choice of exclusive CPU types on 64-bit, we always
-# use PPC_BOOK3S. On 32-bit, this is equivalent to 6xx which is
-# "classic" MMU
-
config PPC_BOOK3S
- def_bool y
- depends on PPC64 || 6xx
+ default y
+ depends on PPC64
+ select PPC_FPU
+
config POWER4_ONLY
bool "Optimize for POWER4"
@@ -74,6 +71,10 @@ config POWER4_ONLY
The resulting binary will not work on POWER3 or RS64 processors
when compiled with binutils 2.15 or later.
+config 6xx
+ def_bool y
+ depends on PPC32 && PPC_BOOK3S
+
config POWER3
bool
depends on PPC64 && PPC_BOOK3S
@@ -202,9 +203,8 @@ config SPE
If in doubt, say Y here.
config PPC_STD_MMU
- bool
- depends on 6xx || PPC64
- default y
+ def_bool y
+ depends on PPC_BOOK3S
config PPC_STD_MMU_32
def_bool y
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index 36b67006d6b..24b30b6909c 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -644,8 +644,6 @@ long spufs_create(struct nameidata *nd, unsigned int flags, mode_t mode,
fsnotify_mkdir(nd->path.dentry->d_inode, dentry);
return ret;
-out_dput:
- dput(dentry);
out_dir:
mutex_unlock(&nd->path.dentry->d_inode->i_mutex);
out:
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index 3ee01b4f425..661c8e02bcb 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -388,7 +388,7 @@ static void pci_dma_bus_setup_pSeries(struct pci_bus *bus)
while (pci->phb->dma_window_size * children > 0x80000000ul)
pci->phb->dma_window_size >>= 1;
- pr_debug("No ISA/IDE, window size is 0x%lx\n",
+ pr_debug("No ISA/IDE, window size is 0x%llx\n",
pci->phb->dma_window_size);
pci->phb->dma_window_base_cur = 0;
@@ -414,7 +414,7 @@ static void pci_dma_bus_setup_pSeries(struct pci_bus *bus)
while (pci->phb->dma_window_size * children > 0x70000000ul)
pci->phb->dma_window_size >>= 1;
- pr_debug("ISA/IDE, window size is 0x%lx\n", pci->phb->dma_window_size);
+ pr_debug("ISA/IDE, window size is 0x%llx\n", pci->phb->dma_window_size);
}
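The pr_debug() changes above only keep the format string in step with the field's type: passing a 64-bit value to %lx mismatches the argument and triggers format warnings, so %llx (or an explicit cast to unsigned long long) is used once dma_window_size is 64 bits wide. A one-line sketch of the portable pattern, assuming a u64-typed field:

    u64 window_size = pci->phb->dma_window_size;

    pr_debug("window size is 0x%llx\n", (unsigned long long)window_size);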
diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile
index b33b28a6fe1..2d1c87dd5d1 100644
--- a/arch/powerpc/sysdev/Makefile
+++ b/arch/powerpc/sysdev/Makefile
@@ -34,6 +34,7 @@ obj-$(CONFIG_IPIC) += ipic.o
obj-$(CONFIG_4xx) += uic.o
obj-$(CONFIG_4xx_SOC) += ppc4xx_soc.o
obj-$(CONFIG_XILINX_VIRTEX) += xilinx_intc.o
+obj-$(CONFIG_XILINX_PCI) += xilinx_pci.o
obj-$(CONFIG_OF_RTC) += of_rtc.o
ifeq ($(CONFIG_PCI),y)
obj-$(CONFIG_4xx) += ppc4xx_pci.o
diff --git a/arch/powerpc/sysdev/xilinx_intc.c b/arch/powerpc/sysdev/xilinx_intc.c
index c658b413c9b..3ee1fd37bbf 100644
--- a/arch/powerpc/sysdev/xilinx_intc.c
+++ b/arch/powerpc/sysdev/xilinx_intc.c
@@ -25,6 +25,7 @@
#include <linux/of.h>
#include <asm/io.h>
#include <asm/processor.h>
+#include <asm/i8259.h>
#include <asm/irq.h>
/*
@@ -191,20 +192,14 @@ struct irq_host * __init
xilinx_intc_init(struct device_node *np)
{
struct irq_host * irq;
- struct resource res;
void * regs;
- int rc;
/* Find and map the intc registers */
- rc = of_address_to_resource(np, 0, &res);
- if (rc) {
- printk(KERN_ERR __FILE__ ": of_address_to_resource() failed\n");
+ regs = of_iomap(np, 0);
+ if (!regs) {
+ pr_err("xilinx_intc: could not map registers\n");
return NULL;
}
- regs = ioremap(res.start, 32);
-
- printk(KERN_INFO "Xilinx intc at 0x%08llx mapped to 0x%p\n",
- (unsigned long long) res.start, regs);
/* Setup interrupt controller */
out_be32(regs + XINTC_IER, 0); /* disable all irqs */
@@ -217,6 +212,7 @@ xilinx_intc_init(struct device_node *np)
if (!irq)
panic(__FILE__ ": Cannot allocate IRQ host\n");
irq->host_data = regs;
+
return irq;
}
@@ -227,23 +223,70 @@ int xilinx_intc_get_irq(void)
return irq_linear_revmap(master_irqhost, in_be32(regs + XINTC_IVR));
}
+#if defined(CONFIG_PPC_I8259)
+/*
+ * Support code for cascading to 8259 interrupt controllers
+ */
+static void xilinx_i8259_cascade(unsigned int irq, struct irq_desc *desc)
+{
+ unsigned int cascade_irq = i8259_irq();
+ if (cascade_irq)
+ generic_handle_irq(cascade_irq);
+
+ /* Let xilinx_intc end the interrupt */
+ desc->chip->ack(irq);
+ desc->chip->unmask(irq);
+}
+
+static void __init xilinx_i8259_setup_cascade(void)
+{
+ struct device_node *cascade_node;
+ int cascade_irq;
+
+ /* Initialize i8259 controller */
+ cascade_node = of_find_compatible_node(NULL, NULL, "chrp,iic");
+ if (!cascade_node)
+ return;
+
+ cascade_irq = irq_of_parse_and_map(cascade_node, 0);
+ if (!cascade_irq) {
+ pr_err("virtex_ml510: Failed to map cascade interrupt\n");
+ goto out;
+ }
+
+ i8259_init(cascade_node, 0);
+ set_irq_chained_handler(cascade_irq, xilinx_i8259_cascade);
+
+ /* Program irq 7 (usb/audio), 14/15 (ide) to level sensitive */
+ /* This looks like a dirty hack to me --gcl */
+ outb(0xc0, 0x4d0);
+ outb(0xc0, 0x4d1);
+
+ out:
+ of_node_put(cascade_node);
+}
+#else
+static inline void xilinx_i8259_setup_cascade(void) { return; }
+#endif /* defined(CONFIG_PPC_I8259) */
+
+static struct of_device_id xilinx_intc_match[] __initconst = {
+ { .compatible = "xlnx,opb-intc-1.00.c", },
+ { .compatible = "xlnx,xps-intc-1.00.a", },
+ {}
+};
+
+/*
+ * Initialize master Xilinx interrupt controller
+ */
void __init xilinx_intc_init_tree(void)
{
struct device_node *np;
/* find top level interrupt controller */
- for_each_compatible_node(np, NULL, "xlnx,opb-intc-1.00.c") {
+ for_each_matching_node(np, xilinx_intc_match) {
if (!of_get_property(np, "interrupts", NULL))
break;
}
- if (!np) {
- for_each_compatible_node(np, NULL, "xlnx,xps-intc-1.00.a") {
- if (!of_get_property(np, "interrupts", NULL))
- break;
- }
- }
-
- /* xilinx interrupt controller needs to be top level */
BUG_ON(!np);
master_irqhost = xilinx_intc_init(np);
@@ -251,4 +294,6 @@ void __init xilinx_intc_init_tree(void)
irq_set_default_host(master_irqhost);
of_node_put(np);
+
+ xilinx_i8259_setup_cascade();
}
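Two independent things change in xilinx_intc.c above: the register mapping collapses of_address_to_resource() plus ioremap() into a single of_iomap() call, and an optional i8259 cascade is wired up when CONFIG_PPC_I8259 is set. For reference, the replaced two-step mapping is roughly equivalent to the sketch below; resource_size() is used for the length here, whereas the original hard-coded 32 bytes:

    static void __iomem * __init map_intc_regs(struct device_node *np)
    {
            struct resource res;

            if (of_address_to_resource(np, 0, &res))
                    return NULL;                    /* no usable "reg" property */

            return ioremap(res.start, resource_size(&res));
    }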
diff --git a/arch/powerpc/sysdev/xilinx_pci.c b/arch/powerpc/sysdev/xilinx_pci.c
new file mode 100644
index 00000000000..1453b0eed22
--- /dev/null
+++ b/arch/powerpc/sysdev/xilinx_pci.c
@@ -0,0 +1,132 @@
+/*
+ * PCI support for Xilinx plbv46_pci soft-core which can be used on
+ * Xilinx Virtex ML410 / ML510 boards.
+ *
+ * Copyright 2009 Roderick Colenbrander
+ * Copyright 2009 Secret Lab Technologies Ltd.
+ *
+ * The pci bridge fixup code was copied from ppc4xx_pci.c and was written
+ * by Benjamin Herrenschmidt.
+ * Copyright 2007 Ben. Herrenschmidt <benh@kernel.crashing.org>, IBM Corp.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/ioport.h>
+#include <linux/of.h>
+#include <linux/pci.h>
+#include <mm/mmu_decl.h>
+#include <asm/io.h>
+#include <asm/xilinx_pci.h>
+
+#define XPLB_PCI_ADDR 0x10c
+#define XPLB_PCI_DATA 0x110
+#define XPLB_PCI_BUS 0x114
+
+#define PCI_HOST_ENABLE_CMD (PCI_COMMAND_SERR | PCI_COMMAND_PARITY | PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY)
+
+static struct of_device_id xilinx_pci_match[] = {
+ { .compatible = "xlnx,plbv46-pci-1.03.a", },
+ {}
+};
+
+/**
+ * xilinx_pci_fixup_bridge - Block Xilinx PHB configuration.
+ */
+static void xilinx_pci_fixup_bridge(struct pci_dev *dev)
+{
+ struct pci_controller *hose;
+ int i;
+
+ if (dev->devfn || dev->bus->self)
+ return;
+
+ hose = pci_bus_to_host(dev->bus);
+ if (!hose)
+ return;
+
+ if (!of_match_node(xilinx_pci_match, hose->dn))
+ return;
+
+ /* Hide the PCI host BARs from the kernel as their content doesn't
+ * fit well in the resource management
+ */
+ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+ dev->resource[i].start = 0;
+ dev->resource[i].end = 0;
+ dev->resource[i].flags = 0;
+ }
+
+ dev_info(&dev->dev, "Hiding Xilinx plb-pci host bridge resources %s\n",
+ pci_name(dev));
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, xilinx_pci_fixup_bridge);
+
+/**
+ * xilinx_pci_exclude_device - Don't do config access for non-root bus
+ *
+ * This is a hack. Config access to any bus other than bus 0 does not
+ * currently work on the ML510 so we prevent it here.
+ */
+static int
+xilinx_pci_exclude_device(struct pci_controller *hose, u_char bus, u8 devfn)
+{
+ return (bus != 0);
+}
+
+/**
+ * xilinx_pci_init - Find and register a Xilinx PCI host bridge
+ */
+void __init xilinx_pci_init(void)
+{
+ struct pci_controller *hose;
+ struct resource r;
+ void __iomem *pci_reg;
+ struct device_node *pci_node;
+
+ pci_node = of_find_matching_node(NULL, xilinx_pci_match);
+ if (!pci_node)
+ return;
+
+ if (of_address_to_resource(pci_node, 0, &r)) {
+ pr_err("xilinx-pci: cannot resolve base address\n");
+ return;
+ }
+
+ hose = pcibios_alloc_controller(pci_node);
+ if (!hose) {
+ pr_err("xilinx-pci: pcibios_alloc_controller() failed\n");
+ return;
+ }
+
+ /* Setup config space */
+ setup_indirect_pci(hose, r.start + XPLB_PCI_ADDR,
+ r.start + XPLB_PCI_DATA,
+ PPC_INDIRECT_TYPE_SET_CFG_TYPE);
+
+ /* According to the xilinx plbv46_pci documentation the soft-core starts
+ * a self-init when the bus master enable bit is set. Without this bit
+ * set the pci bus can't be scanned.
+ */
+ early_write_config_word(hose, 0, 0, PCI_COMMAND, PCI_HOST_ENABLE_CMD);
+
+ /* Set the max latency timer to 255 */
+ early_write_config_byte(hose, 0, 0, PCI_LATENCY_TIMER, 0xff);
+
+ /* Set the max bus number to 255 */
+ pci_reg = of_iomap(pci_node, 0);
+ out_8(pci_reg + XPLB_PCI_BUS, 0xff);
+ iounmap(pci_reg);
+
+ /* Nothing past the root bridge is working right now. By default
+ * exclude config access to anything except bus 0 */
+ if (!ppc_md.pci_exclude_device)
+ ppc_md.pci_exclude_device = xilinx_pci_exclude_device;
+
+ /* Register the host bridge with the linux kernel! */
+ pci_process_bridge_OF_ranges(hose, pci_node, 1);
+
+ pr_info("xilinx-pci: Registered PCI host bridge\n");
+}
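xilinx_pci_init() above brings the plbv46 host bridge up in a fixed order: map the indirect config registers, set the bus-master bit so the core self-initialises, raise the latency timer and the subordinate bus number, install the bus-0-only exclude hook, then hand the device tree ranges to the generic PCI code. As a hedged sketch (illustrative, not part of the driver), a platform could read the command register back through the same early config accessors to confirm the enable took effect:

    static void __init xilinx_pci_check_cmd(struct pci_controller *hose)
    {
            u16 cmd = 0;

            early_read_config_word(hose, 0, 0, PCI_COMMAND, &cmd);
            if (!(cmd & PCI_COMMAND_MASTER))
                    pr_warning("xilinx-pci: bus master enable did not stick\n");
    }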
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 08121d3e47b..e1f33a81e5e 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -2130,7 +2130,7 @@ void
dump_log_buf(void)
{
const unsigned long size = 128;
- unsigned long i, end, addr;
+ unsigned long end, addr;
unsigned char buf[size + 1];
addr = 0;