Diffstat (limited to 'arch/x86_64')
-rw-r--r--  arch/x86_64/Kconfig | 90
-rw-r--r--  arch/x86_64/Kconfig.debug | 10
-rw-r--r--  arch/x86_64/Makefile | 12
-rw-r--r--  arch/x86_64/boot/.gitignore | 3
-rw-r--r--  arch/x86_64/boot/Makefile | 2
-rw-r--r--  arch/x86_64/boot/compressed/misc.c | 2
-rw-r--r--  arch/x86_64/boot/compressed/miscsetup.h | 39
-rw-r--r--  arch/x86_64/boot/install.sh | 40
-rw-r--r--  arch/x86_64/boot/tools/.gitignore | 1
-rw-r--r--  arch/x86_64/crypto/aes.c | 25
-rw-r--r--  arch/x86_64/defconfig | 132
-rw-r--r--  arch/x86_64/ia32/Makefile | 7
-rw-r--r--  arch/x86_64/ia32/ia32_binfmt.c | 25
-rw-r--r--  arch/x86_64/ia32/ia32_ioctl.c | 79
-rw-r--r--  arch/x86_64/ia32/ia32_signal.c | 26
-rw-r--r--  arch/x86_64/ia32/ia32entry.S | 56
-rw-r--r--  arch/x86_64/ia32/mmap32.c | 78
-rw-r--r--  arch/x86_64/ia32/ptrace32.c | 59
-rw-r--r--  arch/x86_64/ia32/sys_ia32.c | 19
-rw-r--r--  arch/x86_64/ia32/vsyscall-sigreturn.S | 1
-rw-r--r--  arch/x86_64/ia32/vsyscall-syscall.S | 1
-rw-r--r--  arch/x86_64/ia32/vsyscall-sysenter.S | 1
-rw-r--r--  arch/x86_64/kernel/Makefile | 9
-rw-r--r--  arch/x86_64/kernel/aperture.c | 3
-rw-r--r--  arch/x86_64/kernel/apic.c | 198
-rw-r--r--  arch/x86_64/kernel/asm-offsets.c | 4
-rw-r--r--  arch/x86_64/kernel/crash.c | 156
-rw-r--r--  arch/x86_64/kernel/crash_dump.c | 47
-rw-r--r--  arch/x86_64/kernel/e820.c | 21
-rw-r--r--  arch/x86_64/kernel/early_printk.c | 4
-rw-r--r--  arch/x86_64/kernel/entry.S | 42
-rw-r--r--  arch/x86_64/kernel/genapic_cluster.c | 5
-rw-r--r--  arch/x86_64/kernel/genapic_flat.c | 10
-rw-r--r--  arch/x86_64/kernel/head.S | 122
-rw-r--r--  arch/x86_64/kernel/head64.c | 5
-rw-r--r--  arch/x86_64/kernel/i387.c | 2
-rw-r--r--  arch/x86_64/kernel/i8259.c | 5
-rw-r--r--  arch/x86_64/kernel/init_task.c | 2
-rw-r--r--  arch/x86_64/kernel/io_apic.c | 177
-rw-r--r--  arch/x86_64/kernel/ioport.c | 1
-rw-r--r--  arch/x86_64/kernel/irq.c | 6
-rw-r--r--  arch/x86_64/kernel/kprobes.c | 18
-rw-r--r--  arch/x86_64/kernel/mce.c | 35
-rw-r--r--  arch/x86_64/kernel/mce_amd.c | 6
-rw-r--r--  arch/x86_64/kernel/mce_intel.c | 6
-rw-r--r--  arch/x86_64/kernel/mpparse.c | 8
-rw-r--r--  arch/x86_64/kernel/nmi.c | 8
-rw-r--r--  arch/x86_64/kernel/pci-dma.c | 286
-rw-r--r--  arch/x86_64/kernel/pci-gart.c | 413
-rw-r--r--  arch/x86_64/kernel/pci-nommu.c | 145
-rw-r--r--  arch/x86_64/kernel/pci-swiotlb.c | 42
-rw-r--r--  arch/x86_64/kernel/process.c | 87
-rw-r--r--  arch/x86_64/kernel/ptrace.c | 19
-rw-r--r--  arch/x86_64/kernel/reboot.c | 10
-rw-r--r--  arch/x86_64/kernel/setup.c | 116
-rw-r--r--  arch/x86_64/kernel/setup64.c | 48
-rw-r--r--  arch/x86_64/kernel/smp.c | 7
-rw-r--r--  arch/x86_64/kernel/smpboot.c | 63
-rw-r--r--  arch/x86_64/kernel/suspend.c | 2
-rw-r--r--  arch/x86_64/kernel/syscall.c | 2
-rw-r--r--  arch/x86_64/kernel/time.c | 127
-rw-r--r--  arch/x86_64/kernel/trampoline.S | 11
-rw-r--r--  arch/x86_64/kernel/traps.c | 164
-rw-r--r--  arch/x86_64/kernel/vmlinux.lds.S | 4
-rw-r--r--  arch/x86_64/kernel/vsmp.c | 45
-rw-r--r--  arch/x86_64/kernel/vsyscall.c | 14
-rw-r--r--  arch/x86_64/kernel/x8664_ksyms.c | 33
-rw-r--r--  arch/x86_64/lib/copy_user.S | 244
-rw-r--r--  arch/x86_64/lib/delay.c | 2
-rw-r--r--  arch/x86_64/lib/usercopy.c | 12
-rw-r--r--  arch/x86_64/mm/Makefile | 2
-rw-r--r--  arch/x86_64/mm/fault.c | 54
-rw-r--r--  arch/x86_64/mm/init.c | 208
-rw-r--r--  arch/x86_64/mm/ioremap.c | 37
-rw-r--r--  arch/x86_64/mm/mmap.c | 30
-rw-r--r--  arch/x86_64/mm/numa.c | 79
-rw-r--r--  arch/x86_64/mm/pageattr.c | 9
-rw-r--r--  arch/x86_64/mm/srat.c | 67
-rw-r--r--  arch/x86_64/pci/Makefile | 2
-rw-r--r--  arch/x86_64/pci/Makefile-BUS | 22
-rw-r--r--  arch/x86_64/pci/mmconfig.c | 65
81 files changed, 2347 insertions, 1732 deletions
diff --git a/arch/x86_64/Kconfig b/arch/x86_64/Kconfig
index 6ece645e4db..2f9deca31cc 100644
--- a/arch/x86_64/Kconfig
+++ b/arch/x86_64/Kconfig
@@ -69,12 +69,34 @@ config ARCH_MAY_HAVE_PC_FDC
bool
default y
+config DMI
+ bool
+ default y
+
source "init/Kconfig"
menu "Processor type and features"
choice
+ prompt "Subarchitecture Type"
+ default X86_PC
+
+config X86_PC
+ bool "PC-compatible"
+ help
+ Choose this option if your computer is a standard PC or compatible.
+
+config X86_VSMP
+ bool "Support for ScaleMP vSMP"
+ help
+ Support for ScaleMP vSMP systems. Say 'Y' here if this kernel is
+ supposed to run on these EM64T-based machines. Only choose this option
+ if you have one of these machines.
+
+endchoice
+
+choice
prompt "Processor family"
default MK8
@@ -283,7 +305,11 @@ config ARCH_DISCONTIGMEM_DEFAULT
config ARCH_SPARSEMEM_ENABLE
def_bool y
- depends on NUMA
+ depends on (NUMA || EXPERIMENTAL)
+
+config ARCH_MEMORY_PROBE
+ def_bool y
+ depends on MEMORY_HOTPLUG
config ARCH_FLATMEM_ENABLE
def_bool y
@@ -293,6 +319,7 @@ source "mm/Kconfig"
config HAVE_ARCH_EARLY_PFN_TO_NID
def_bool y
+ depends on NUMA
config NR_CPUS
int "Maximum number of CPUs (2-256)"
@@ -328,7 +355,7 @@ config HPET_TIMER
<http://www.intel.com/hardwaredesign/hpetspec.htm>.
config X86_PM_TIMER
- bool "PM timer"
+ bool "PM timer" if EMBEDDED
depends on ACPI
default y
help
@@ -347,32 +374,24 @@ config HPET_EMULATE_RTC
depends on HPET_TIMER && RTC=y
config GART_IOMMU
- bool "IOMMU support"
+ bool "K8 GART IOMMU support"
default y
+ select SWIOTLB
depends on PCI
help
Support the IOMMU. Needed to run systems with more than 3GB of memory
properly with 32-bit PCI devices that do not support DAC (Double Address
Cycle). The IOMMU can be turned off at runtime with the iommu=off parameter.
Normally the kernel will take the right choice by itself.
- This option includes a driver for the AMD Opteron/Athlon64 IOMMU
- and a software emulation used on some other systems.
+ This option includes a driver for the AMD Opteron/Athlon64 northbridge IOMMU
+ and a software emulation used on other systems.
If unsure, say Y.
# need this always enabled with GART_IOMMU for the VIA workaround
config SWIOTLB
- bool
- depends on GART_IOMMU
- default y
-
-config DUMMY_IOMMU
bool
- depends on !GART_IOMMU && !SWIOTLB
default y
- help
- Don't use IOMMU code. This will cause problems when you have more than 4GB
- of memory and any 32-bit devices. Don't turn on unless you know what you
- are doing.
+ depends on GART_IOMMU
config X86_MCE
bool "Machine check support" if EMBEDDED
@@ -399,17 +418,6 @@ config X86_MCE_AMD
Additional support for AMD specific MCE features such as
the DRAM Error Threshold.
-config PHYSICAL_START
- hex "Physical address where the kernel is loaded" if EMBEDDED
- default "0x100000"
- help
- This gives the physical address where the kernel is loaded.
- Primarily used in the case of kexec on panic where the
- fail safe kernel needs to run at a different address than
- the panic-ed kernel.
-
- Don't change this unless you know what you are doing.
-
config KEXEC
bool "kexec system call (EXPERIMENTAL)"
depends on EXPERIMENTAL
@@ -427,6 +435,31 @@ config KEXEC
support. As of this writing the exact hardware interface is
strongly in flux, so no good recommendation can be made.
+config CRASH_DUMP
+ bool "kernel crash dumps (EXPERIMENTAL)"
+ depends on EXPERIMENTAL
+ help
+ Generate crash dump after being started by kexec.
+
+config PHYSICAL_START
+ hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP)
+ default "0x1000000" if CRASH_DUMP
+ default "0x100000"
+ help
+ This gives the physical address where the kernel is loaded. Normally
+ for regular kernels this value is 0x100000 (1MB). But in the case
+ of kexec on panic the fail safe kernel needs to run at a different
+ address than the panic-ed kernel. This option is used to set the load
+ address for kernels used to capture crash dump on being kexec'ed
+ after panic. The default value for crash dump kernels is
+ 0x1000000 (16MB). This can also be set based on the "X" value as
+ specified in the "crashkernel=YM@XM" command line boot parameter
+ passed to the panic-ed kernel. Typically this parameter is set as
+ crashkernel=64M@16M. Please take a look at
+ Documentation/kdump/kdump.txt for more details about crash dumps.
+
+ Don't change this unless you know what you are doing.
+
config SECCOMP
bool "Enable seccomp to safely compute untrusted bytecode"
depends on PROC_FS
@@ -542,11 +575,6 @@ config SYSVIPC_COMPAT
depends on COMPAT && SYSVIPC
default y
-config UID16
- bool
- depends on IA32_EMULATION
- default y
-
endmenu
source "net/Kconfig"
diff --git a/arch/x86_64/Kconfig.debug b/arch/x86_64/Kconfig.debug
index e2c6e64a85e..fcb06a50fdd 100644
--- a/arch/x86_64/Kconfig.debug
+++ b/arch/x86_64/Kconfig.debug
@@ -9,6 +9,16 @@ config INIT_DEBUG
Fill __init and __initdata at the end of boot. This helps debugging
illegal uses of __init and __initdata after initialization.
+config DEBUG_RODATA
+ bool "Write protect kernel read-only data structures"
+ depends on DEBUG_KERNEL
+ help
+ Mark the kernel read-only data as write-protected in the pagetables,
+ in order to catch accidental (and incorrect) writes to such const data.
+ This option may have a slight performance impact because a portion
+ of the kernel code won't be covered by a 2MB TLB anymore.
+ If in doubt, say "N".
+
config IOMMU_DEBUG
depends on GART_IOMMU && DEBUG_KERNEL
bool "Enable IOMMU debugging"
diff --git a/arch/x86_64/Makefile b/arch/x86_64/Makefile
index a9cd42e6182..d7fd46479c5 100644
--- a/arch/x86_64/Makefile
+++ b/arch/x86_64/Makefile
@@ -31,6 +31,7 @@ cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8)
cflags-$(CONFIG_MPSC) += $(call cc-option,-march=nocona)
CFLAGS += $(cflags-y)
+CFLAGS += -m64
CFLAGS += -mno-red-zone
CFLAGS += -mcmodel=kernel
CFLAGS += -pipe
@@ -38,8 +39,10 @@ CFLAGS += -pipe
# actually it makes the kernel smaller too.
CFLAGS += -fno-reorder-blocks
CFLAGS += -Wno-sign-compare
-ifneq ($(CONFIG_DEBUG_INFO),y)
+ifneq ($(CONFIG_UNWIND_INFO),y)
CFLAGS += -fno-asynchronous-unwind-tables
+endif
+ifneq ($(CONFIG_DEBUG_INFO),y)
# -fweb shrinks the kernel a bit, but the difference is very small
# it also messes up debugging, so don't use it for now.
#CFLAGS += $(call cc-option,-fweb)
@@ -50,6 +53,8 @@ CFLAGS += $(call cc-option,-funit-at-a-time)
# prevent gcc from generating any FP code by mistake
CFLAGS += $(call cc-option,-mno-sse -mno-mmx -mno-sse2 -mno-3dnow,)
+AFLAGS += -m64
+
head-y := arch/x86_64/kernel/head.o arch/x86_64/kernel/head64.o arch/x86_64/kernel/init_task.o
libs-y += arch/x86_64/lib/
@@ -80,9 +85,12 @@ bzlilo: vmlinux
bzdisk: vmlinux
$(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(BOOTIMAGE) zdisk
-install fdimage fdimage144 fdimage288: vmlinux
+fdimage fdimage144 fdimage288: vmlinux
$(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(BOOTIMAGE) $@
+install:
+ $(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(BOOTIMAGE) $@
+
archclean:
$(Q)$(MAKE) $(clean)=$(boot)
diff --git a/arch/x86_64/boot/.gitignore b/arch/x86_64/boot/.gitignore
new file mode 100644
index 00000000000..495f20c085d
--- /dev/null
+++ b/arch/x86_64/boot/.gitignore
@@ -0,0 +1,3 @@
+bootsect
+bzImage
+setup
diff --git a/arch/x86_64/boot/Makefile b/arch/x86_64/boot/Makefile
index 18c6e915d69..29f8396ed15 100644
--- a/arch/x86_64/boot/Makefile
+++ b/arch/x86_64/boot/Makefile
@@ -98,5 +98,5 @@ zlilo: $(BOOTIMAGE)
cp System.map $(INSTALL_PATH)/
if [ -x /sbin/lilo ]; then /sbin/lilo; else /etc/lilo/install; fi
-install: $(BOOTIMAGE)
+install:
sh $(srctree)/$(src)/install.sh $(KERNELRELEASE) $(BOOTIMAGE) System.map "$(INSTALL_PATH)"
diff --git a/arch/x86_64/boot/compressed/misc.c b/arch/x86_64/boot/compressed/misc.c
index 0e10fd84c7c..cf4b88c416d 100644
--- a/arch/x86_64/boot/compressed/misc.c
+++ b/arch/x86_64/boot/compressed/misc.c
@@ -9,7 +9,7 @@
* High loaded stuff by Hans Lermen & Werner Almesberger, Feb. 1996
*/
-#include "miscsetup.h"
+#include <linux/screen_info.h>
#include <asm/io.h>
#include <asm/page.h>
diff --git a/arch/x86_64/boot/compressed/miscsetup.h b/arch/x86_64/boot/compressed/miscsetup.h
deleted file mode 100644
index bb162053170..00000000000
--- a/arch/x86_64/boot/compressed/miscsetup.h
+++ /dev/null
@@ -1,39 +0,0 @@
-#define NULL 0
-//typedef unsigned int size_t;
-
-
-struct screen_info {
- unsigned char orig_x; /* 0x00 */
- unsigned char orig_y; /* 0x01 */
- unsigned short dontuse1; /* 0x02 -- EXT_MEM_K sits here */
- unsigned short orig_video_page; /* 0x04 */
- unsigned char orig_video_mode; /* 0x06 */
- unsigned char orig_video_cols; /* 0x07 */
- unsigned short unused2; /* 0x08 */
- unsigned short orig_video_ega_bx; /* 0x0a */
- unsigned short unused3; /* 0x0c */
- unsigned char orig_video_lines; /* 0x0e */
- unsigned char orig_video_isVGA; /* 0x0f */
- unsigned short orig_video_points; /* 0x10 */
-
- /* VESA graphic mode -- linear frame buffer */
- unsigned short lfb_width; /* 0x12 */
- unsigned short lfb_height; /* 0x14 */
- unsigned short lfb_depth; /* 0x16 */
- unsigned long lfb_base; /* 0x18 */
- unsigned long lfb_size; /* 0x1c */
- unsigned short dontuse2, dontuse3; /* 0x20 -- CL_MAGIC and CL_OFFSET here */
- unsigned short lfb_linelength; /* 0x24 */
- unsigned char red_size; /* 0x26 */
- unsigned char red_pos; /* 0x27 */
- unsigned char green_size; /* 0x28 */
- unsigned char green_pos; /* 0x29 */
- unsigned char blue_size; /* 0x2a */
- unsigned char blue_pos; /* 0x2b */
- unsigned char rsvd_size; /* 0x2c */
- unsigned char rsvd_pos; /* 0x2d */
- unsigned short vesapm_seg; /* 0x2e */
- unsigned short vesapm_off; /* 0x30 */
- unsigned short pages; /* 0x32 */
- /* 0x34 -- 0x3f reserved for future expansion */
-};
diff --git a/arch/x86_64/boot/install.sh b/arch/x86_64/boot/install.sh
index 198af15a775..baaa2369bdb 100644
--- a/arch/x86_64/boot/install.sh
+++ b/arch/x86_64/boot/install.sh
@@ -1,40 +1,2 @@
#!/bin/sh
-#
-# arch/x86_64/boot/install.sh
-#
-# This file is subject to the terms and conditions of the GNU General Public
-# License. See the file "COPYING" in the main directory of this archive
-# for more details.
-#
-# Copyright (C) 1995 by Linus Torvalds
-#
-# Adapted from code in arch/i386/boot/Makefile by H. Peter Anvin
-#
-# "make install" script for i386 architecture
-#
-# Arguments:
-# $1 - kernel version
-# $2 - kernel image file
-# $3 - kernel map file
-# $4 - default install path (blank if root directory)
-#
-
-# User may have a custom install script
-
-if [ -x ~/bin/${CROSS_COMPILE}installkernel ]; then exec ~/bin/${CROSS_COMPILE}installkernel "$@"; fi
-if [ -x /sbin/${CROSS_COMPILE}installkernel ]; then exec /sbin/${CROSS_COMPILE}installkernel "$@"; fi
-
-# Default install - same as make zlilo
-
-if [ -f $4/vmlinuz ]; then
- mv $4/vmlinuz $4/vmlinuz.old
-fi
-
-if [ -f $4/System.map ]; then
- mv $4/System.map $4/System.old
-fi
-
-cat $2 > $4/vmlinuz
-cp $3 $4/System.map
-
-if [ -x /sbin/lilo ]; then /sbin/lilo; else /etc/lilo/install; fi
+. $srctree/arch/i386/boot/install.sh
diff --git a/arch/x86_64/boot/tools/.gitignore b/arch/x86_64/boot/tools/.gitignore
new file mode 100644
index 00000000000..378eac25d31
--- /dev/null
+++ b/arch/x86_64/boot/tools/.gitignore
@@ -0,0 +1 @@
+build
diff --git a/arch/x86_64/crypto/aes.c b/arch/x86_64/crypto/aes.c
index acfdaa28791..fb1b961a2e2 100644
--- a/arch/x86_64/crypto/aes.c
+++ b/arch/x86_64/crypto/aes.c
@@ -74,8 +74,6 @@ static inline u8 byte(const u32 x, const unsigned n)
return x >> (n << 3);
}
-#define u32_in(x) le32_to_cpu(*(const __le32 *)(x))
-
struct aes_ctx
{
u32 key_length;
@@ -234,6 +232,7 @@ static int aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len,
u32 *flags)
{
struct aes_ctx *ctx = ctx_arg;
+ const __le32 *key = (const __le32 *)in_key;
u32 i, j, t, u, v, w;
if (key_len != 16 && key_len != 24 && key_len != 32) {
@@ -243,10 +242,10 @@ static int aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len,
ctx->key_length = key_len;
- D_KEY[key_len + 24] = E_KEY[0] = u32_in(in_key);
- D_KEY[key_len + 25] = E_KEY[1] = u32_in(in_key + 4);
- D_KEY[key_len + 26] = E_KEY[2] = u32_in(in_key + 8);
- D_KEY[key_len + 27] = E_KEY[3] = u32_in(in_key + 12);
+ D_KEY[key_len + 24] = E_KEY[0] = le32_to_cpu(key[0]);
+ D_KEY[key_len + 25] = E_KEY[1] = le32_to_cpu(key[1]);
+ D_KEY[key_len + 26] = E_KEY[2] = le32_to_cpu(key[2]);
+ D_KEY[key_len + 27] = E_KEY[3] = le32_to_cpu(key[3]);
switch (key_len) {
case 16:
@@ -256,17 +255,17 @@ static int aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len,
break;
case 24:
- E_KEY[4] = u32_in(in_key + 16);
- t = E_KEY[5] = u32_in(in_key + 20);
+ E_KEY[4] = le32_to_cpu(key[4]);
+ t = E_KEY[5] = le32_to_cpu(key[5]);
for (i = 0; i < 8; ++i)
loop6 (i);
break;
case 32:
- E_KEY[4] = u32_in(in_key + 16);
- E_KEY[5] = u32_in(in_key + 20);
- E_KEY[6] = u32_in(in_key + 24);
- t = E_KEY[7] = u32_in(in_key + 28);
+ E_KEY[4] = le32_to_cpu(key[4]);
+ E_KEY[5] = le32_to_cpu(key[5]);
+ E_KEY[6] = le32_to_cpu(key[6]);
+ t = E_KEY[7] = le32_to_cpu(key[7]);
for (i = 0; i < 7; ++i)
loop8(i);
break;
@@ -290,6 +289,8 @@ extern void aes_decrypt(void *ctx_arg, u8 *out, const u8 *in);
static struct crypto_alg aes_alg = {
.cra_name = "aes",
+ .cra_driver_name = "aes-x86_64",
+ .cra_priority = 200,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct aes_ctx),
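
The aes.c hunks above replace the u32_in() pointer-cast macro with indexing a const __le32 * and converting through le32_to_cpu(), so each load carries an endianness annotation that sparse can check. A standalone sketch of the two idioms, assuming a little-endian host (the stubbed le32_to_cpu is an assumption, not the kernel's definition):

#include <stdint.h>

#define le32_to_cpu(x) (x)      /* assumption: little-endian host;
                                   the kernel macro byte-swaps on BE */

/* old idiom: bare pointer cast, invisible to sparse */
static uint32_t key_word_old(const uint8_t *in_key, int i)
{
        return *(const uint32_t *)(in_key + 4 * i);
}

/* new idiom: typed little-endian array, checkable conversion */
static uint32_t key_word_new(const uint8_t *in_key, int i)
{
        const uint32_t *key = (const uint32_t *)in_key;
        return le32_to_cpu(key[i]);
}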
diff --git a/arch/x86_64/defconfig b/arch/x86_64/defconfig
index 5d56542fb68..09a3eb74331 100644
--- a/arch/x86_64/defconfig
+++ b/arch/x86_64/defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.14-git7
-# Sat Nov 5 15:55:50 2005
+# Linux kernel version: 2.6.15-git12
+# Mon Jan 16 13:09:08 2006
#
CONFIG_X86_64=y
CONFIG_64BIT=y
@@ -15,6 +15,7 @@ CONFIG_EARLY_PRINTK=y
CONFIG_GENERIC_ISA_DMA=y
CONFIG_GENERIC_IOMAP=y
CONFIG_ARCH_MAY_HAVE_PC_FDC=y
+CONFIG_DMI=y
#
# Code maturity level options
@@ -35,18 +36,21 @@ CONFIG_POSIX_MQUEUE=y
# CONFIG_BSD_PROCESS_ACCT is not set
CONFIG_SYSCTL=y
# CONFIG_AUDIT is not set
-CONFIG_HOTPLUG=y
-CONFIG_KOBJECT_UEVENT=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
# CONFIG_CPUSETS is not set
CONFIG_INITRAMFS_SOURCE=""
+CONFIG_UID16=y
+CONFIG_VM86=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
# CONFIG_EMBEDDED is not set
CONFIG_KALLSYMS=y
CONFIG_KALLSYMS_ALL=y
# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_HOTPLUG=y
CONFIG_PRINTK=y
CONFIG_BUG=y
+CONFIG_ELF_CORE=y
CONFIG_BASE_FULL=y
CONFIG_FUTEX=y
CONFIG_EPOLL=y
@@ -55,8 +59,10 @@ CONFIG_CC_ALIGN_FUNCTIONS=0
CONFIG_CC_ALIGN_LABELS=0
CONFIG_CC_ALIGN_LOOPS=0
CONFIG_CC_ALIGN_JUMPS=0
+CONFIG_SLAB=y
# CONFIG_TINY_SHMEM is not set
CONFIG_BASE_SMALL=0
+# CONFIG_SLOB is not set
#
# Loadable module support
@@ -71,8 +77,28 @@ CONFIG_OBSOLETE_MODPARM=y
CONFIG_STOP_MACHINE=y
#
+# Block layer
+#
+CONFIG_LBD=y
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+# CONFIG_IOSCHED_AS is not set
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+# CONFIG_DEFAULT_AS is not set
+# CONFIG_DEFAULT_DEADLINE is not set
+CONFIG_DEFAULT_CFQ=y
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="cfq"
+
+#
# Processor type and features
#
+CONFIG_X86_PC=y
+# CONFIG_X86_VSMP is not set
# CONFIG_MK8 is not set
# CONFIG_MPSC is not set
CONFIG_GENERIC_CPU=y
@@ -89,14 +115,14 @@ CONFIG_X86_LOCAL_APIC=y
CONFIG_MTRR=y
CONFIG_SMP=y
CONFIG_SCHED_SMT=y
-CONFIG_PREEMPT_NONE=y
-# CONFIG_PREEMPT_VOLUNTARY is not set
+# CONFIG_PREEMPT_NONE is not set
+CONFIG_PREEMPT_VOLUNTARY=y
# CONFIG_PREEMPT is not set
CONFIG_PREEMPT_BKL=y
CONFIG_NUMA=y
CONFIG_K8_NUMA=y
CONFIG_X86_64_ACPI_NUMA=y
-# CONFIG_NUMA_EMU is not set
+CONFIG_NUMA_EMU=y
CONFIG_ARCH_DISCONTIGMEM_ENABLE=y
CONFIG_ARCH_DISCONTIGMEM_DEFAULT=y
CONFIG_ARCH_SPARSEMEM_ENABLE=y
@@ -109,6 +135,7 @@ CONFIG_FLAT_NODE_MEM_MAP=y
CONFIG_NEED_MULTIPLE_NODES=y
# CONFIG_SPARSEMEM_STATIC is not set
CONFIG_SPLIT_PTLOCK_CPUS=4
+CONFIG_MIGRATION=y
CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID=y
CONFIG_NR_CPUS=32
CONFIG_HOTPLUG_CPU=y
@@ -120,8 +147,9 @@ CONFIG_SWIOTLB=y
CONFIG_X86_MCE=y
CONFIG_X86_MCE_INTEL=y
CONFIG_X86_MCE_AMD=y
-CONFIG_PHYSICAL_START=0x100000
# CONFIG_KEXEC is not set
+# CONFIG_CRASH_DUMP is not set
+CONFIG_PHYSICAL_START=0x100000
CONFIG_SECCOMP=y
# CONFIG_HZ_100 is not set
CONFIG_HZ_250=y
@@ -136,6 +164,7 @@ CONFIG_GENERIC_PENDING_IRQ=y
# Power management options
#
CONFIG_PM=y
+# CONFIG_PM_LEGACY is not set
# CONFIG_PM_DEBUG is not set
CONFIG_SOFTWARE_SUSPEND=y
CONFIG_PM_STD_PARTITION=""
@@ -152,7 +181,7 @@ CONFIG_ACPI_AC=y
CONFIG_ACPI_BATTERY=y
CONFIG_ACPI_BUTTON=y
# CONFIG_ACPI_VIDEO is not set
-CONFIG_ACPI_HOTKEY=m
+# CONFIG_ACPI_HOTKEY is not set
CONFIG_ACPI_FAN=y
CONFIG_ACPI_PROCESSOR=y
CONFIG_ACPI_HOTPLUG_CPU=y
@@ -205,7 +234,7 @@ CONFIG_PCI=y
CONFIG_PCI_DIRECT=y
CONFIG_PCI_MMCONFIG=y
CONFIG_UNORDERED_IO=y
-# CONFIG_PCIEPORTBUS is not set
+CONFIG_PCIEPORTBUS=y
CONFIG_PCI_MSI=y
# CONFIG_PCI_LEGACY_PROC is not set
# CONFIG_PCI_DEBUG is not set
@@ -229,7 +258,6 @@ CONFIG_IA32_EMULATION=y
CONFIG_IA32_AOUT=y
CONFIG_COMPAT=y
CONFIG_SYSVIPC_COMPAT=y
-CONFIG_UID16=y
#
# Networking
@@ -291,11 +319,19 @@ CONFIG_IPV6=y
# CONFIG_ATALK is not set
# CONFIG_X25 is not set
# CONFIG_LAPB is not set
+
+#
+# TIPC Configuration (EXPERIMENTAL)
+#
+# CONFIG_TIPC is not set
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+
+#
+# QoS and/or fair queueing
+#
# CONFIG_NET_SCHED is not set
-# CONFIG_NET_CLS_ROUTE is not set
#
# Network testing
@@ -315,7 +351,7 @@ CONFIG_IPV6=y
#
CONFIG_STANDALONE=y
CONFIG_PREVENT_FIRMWARE_BUILD=y
-# CONFIG_FW_LOADER is not set
+CONFIG_FW_LOADER=y
# CONFIG_DEBUG_DRIVER is not set
#
@@ -356,21 +392,7 @@ CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_COUNT=16
CONFIG_BLK_DEV_RAM_SIZE=4096
CONFIG_BLK_DEV_INITRD=y
-CONFIG_LBD=y
# CONFIG_CDROM_PKTCDVD is not set
-
-#
-# IO Schedulers
-#
-CONFIG_IOSCHED_NOOP=y
-# CONFIG_IOSCHED_AS is not set
-CONFIG_IOSCHED_DEADLINE=y
-CONFIG_IOSCHED_CFQ=y
-# CONFIG_DEFAULT_AS is not set
-CONFIG_DEFAULT_DEADLINE=y
-# CONFIG_DEFAULT_CFQ is not set
-# CONFIG_DEFAULT_NOOP is not set
-CONFIG_DEFAULT_IOSCHED="cfq"
# CONFIG_ATA_OVER_ETH is not set
#
@@ -410,7 +432,7 @@ CONFIG_IDEDMA_PCI_AUTO=y
# CONFIG_BLK_DEV_AEC62XX is not set
# CONFIG_BLK_DEV_ALI15X3 is not set
CONFIG_BLK_DEV_AMD74XX=y
-# CONFIG_BLK_DEV_ATIIXP is not set
+CONFIG_BLK_DEV_ATIIXP=y
# CONFIG_BLK_DEV_CMD64X is not set
# CONFIG_BLK_DEV_TRIFLEX is not set
# CONFIG_BLK_DEV_CY82C693 is not set
@@ -458,20 +480,21 @@ CONFIG_BLK_DEV_SD=y
# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
#
# CONFIG_SCSI_MULTI_LUN is not set
-# CONFIG_SCSI_CONSTANTS is not set
+CONFIG_SCSI_CONSTANTS=y
# CONFIG_SCSI_LOGGING is not set
#
# SCSI Transport Attributes
#
CONFIG_SCSI_SPI_ATTRS=y
-# CONFIG_SCSI_FC_ATTRS is not set
+CONFIG_SCSI_FC_ATTRS=y
# CONFIG_SCSI_ISCSI_ATTRS is not set
# CONFIG_SCSI_SAS_ATTRS is not set
#
# SCSI low-level drivers
#
+# CONFIG_ISCSI_TCP is not set
# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
# CONFIG_SCSI_3W_9XXX is not set
# CONFIG_SCSI_ACARD is not set
@@ -485,11 +508,13 @@ CONFIG_AIC79XX_RESET_DELAY_MS=4000
# CONFIG_AIC79XX_DEBUG_ENABLE is not set
CONFIG_AIC79XX_DEBUG_MASK=0
# CONFIG_AIC79XX_REG_PRETTY_PRINT is not set
-# CONFIG_MEGARAID_NEWGEN is not set
+CONFIG_MEGARAID_NEWGEN=y
+CONFIG_MEGARAID_MM=y
+CONFIG_MEGARAID_MAILBOX=y
# CONFIG_MEGARAID_LEGACY is not set
-# CONFIG_MEGARAID_SAS is not set
+CONFIG_MEGARAID_SAS=y
CONFIG_SCSI_SATA=y
-# CONFIG_SCSI_SATA_AHCI is not set
+CONFIG_SCSI_SATA_AHCI=y
# CONFIG_SCSI_SATA_SVW is not set
CONFIG_SCSI_ATA_PIIX=y
# CONFIG_SCSI_SATA_MV is not set
@@ -498,7 +523,7 @@ CONFIG_SCSI_SATA_NV=y
# CONFIG_SCSI_SATA_QSTOR is not set
# CONFIG_SCSI_SATA_PROMISE is not set
# CONFIG_SCSI_SATA_SX4 is not set
-# CONFIG_SCSI_SATA_SIL is not set
+CONFIG_SCSI_SATA_SIL=y
# CONFIG_SCSI_SATA_SIL24 is not set
# CONFIG_SCSI_SATA_SIS is not set
# CONFIG_SCSI_SATA_ULI is not set
@@ -517,13 +542,7 @@ CONFIG_SCSI_SATA_INTEL_COMBINED=y
# CONFIG_SCSI_IPR is not set
# CONFIG_SCSI_QLOGIC_FC is not set
# CONFIG_SCSI_QLOGIC_1280 is not set
-CONFIG_SCSI_QLA2XXX=y
-# CONFIG_SCSI_QLA21XX is not set
-# CONFIG_SCSI_QLA22XX is not set
-# CONFIG_SCSI_QLA2300 is not set
-# CONFIG_SCSI_QLA2322 is not set
-# CONFIG_SCSI_QLA6312 is not set
-# CONFIG_SCSI_QLA24XX is not set
+# CONFIG_SCSI_QLA_FC is not set
# CONFIG_SCSI_LPFC is not set
# CONFIG_SCSI_DC395x is not set
# CONFIG_SCSI_DC390T is not set
@@ -627,12 +646,14 @@ CONFIG_8139TOO=y
# CONFIG_DL2K is not set
CONFIG_E1000=y
# CONFIG_E1000_NAPI is not set
+# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
# CONFIG_NS83820 is not set
# CONFIG_HAMACHI is not set
# CONFIG_YELLOWFIN is not set
# CONFIG_R8169 is not set
# CONFIG_SIS190 is not set
# CONFIG_SKGE is not set
+# CONFIG_SKY2 is not set
# CONFIG_SK98LIN is not set
# CONFIG_VIA_VELOCITY is not set
CONFIG_TIGON3=y
@@ -645,7 +666,6 @@ CONFIG_TIGON3=y
# CONFIG_IXGB is not set
CONFIG_S2IO=m
# CONFIG_S2IO_NAPI is not set
-# CONFIG_2BUFF_MODE is not set
#
# Token Ring devices
@@ -744,6 +764,7 @@ CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
# CONFIG_SERIAL_8250_ACPI is not set
CONFIG_SERIAL_8250_NR_UARTS=4
+CONFIG_SERIAL_8250_RUNTIME_UARTS=4
# CONFIG_SERIAL_8250_EXTENDED is not set
#
@@ -751,7 +772,6 @@ CONFIG_SERIAL_8250_NR_UARTS=4
#
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
-# CONFIG_SERIAL_JSM is not set
CONFIG_UNIX98_PTYS=y
CONFIG_LEGACY_PTYS=y
CONFIG_LEGACY_PTY_COUNT=256
@@ -790,6 +810,7 @@ CONFIG_SOFT_WATCHDOG=y
# CONFIG_W83877F_WDT is not set
# CONFIG_W83977F_WDT is not set
# CONFIG_MACHZ_WDT is not set
+# CONFIG_SBC_EPX_C3_WATCHDOG is not set
#
# PCI-based Watchdog Cards
@@ -817,10 +838,10 @@ CONFIG_AGP_INTEL=y
# CONFIG_DRM is not set
# CONFIG_MWAVE is not set
CONFIG_RAW_DRIVER=y
+CONFIG_MAX_RAW_DEVS=256
CONFIG_HPET=y
# CONFIG_HPET_RTC_IRQ is not set
CONFIG_HPET_MMAP=y
-CONFIG_MAX_RAW_DEVS=256
# CONFIG_HANGCHECK_TIMER is not set
#
@@ -835,6 +856,12 @@ CONFIG_MAX_RAW_DEVS=256
# CONFIG_I2C is not set
#
+# SPI support
+#
+# CONFIG_SPI is not set
+# CONFIG_SPI_MASTER is not set
+
+#
# Dallas's 1-wire bus
#
# CONFIG_W1 is not set
@@ -892,6 +919,7 @@ CONFIG_SOUND=y
# Open Sound System
#
CONFIG_SOUND_PRIME=y
+CONFIG_OBSOLETE_OSS_DRIVER=y
# CONFIG_SOUND_BT878 is not set
# CONFIG_SOUND_CMPCI is not set
# CONFIG_SOUND_EMU10K1 is not set
@@ -968,13 +996,15 @@ CONFIG_USB_STORAGE=y
# CONFIG_USB_STORAGE_SDDR09 is not set
# CONFIG_USB_STORAGE_SDDR55 is not set
# CONFIG_USB_STORAGE_JUMPSHOT is not set
-# CONFIG_USB_STORAGE_ONETOUCH is not set
+# CONFIG_USB_STORAGE_ALAUDA is not set
+# CONFIG_USB_LIBUSUAL is not set
#
# USB Input Devices
#
CONFIG_USB_HID=y
CONFIG_USB_HIDINPUT=y
+# CONFIG_USB_HIDINPUT_POWERBOOK is not set
# CONFIG_HID_FF is not set
# CONFIG_USB_HIDDEV is not set
# CONFIG_USB_AIPTEK is not set
@@ -988,6 +1018,7 @@ CONFIG_USB_HIDINPUT=y
# CONFIG_USB_YEALINK is not set
# CONFIG_USB_XPAD is not set
# CONFIG_USB_ATI_REMOTE is not set
+# CONFIG_USB_ATI_REMOTE2 is not set
# CONFIG_USB_KEYSPAN_REMOTE is not set
# CONFIG_USB_APPLETOUCH is not set
@@ -1097,6 +1128,7 @@ CONFIG_REISERFS_FS_POSIX_ACL=y
# CONFIG_JFS_FS is not set
CONFIG_FS_POSIX_ACL=y
# CONFIG_XFS_FS is not set
+# CONFIG_OCFS2_FS is not set
# CONFIG_MINIX_FS is not set
# CONFIG_ROMFS_FS is not set
CONFIG_INOTIFY=y
@@ -1135,6 +1167,7 @@ CONFIG_HUGETLBFS=y
CONFIG_HUGETLB_PAGE=y
CONFIG_RAMFS=y
CONFIG_RELAYFS_FS=y
+# CONFIG_CONFIGFS_FS is not set
#
# Miscellaneous filesystems
@@ -1232,21 +1265,23 @@ CONFIG_NLS_ISO8859_15=y
CONFIG_NLS_UTF8=y
#
-# Profiling support
+# Instrumentation Support
#
CONFIG_PROFILING=y
CONFIG_OPROFILE=y
+CONFIG_KPROBES=y
#
# Kernel hacking
#
# CONFIG_PRINTK_TIME is not set
-CONFIG_DEBUG_KERNEL=y
CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_KERNEL=y
CONFIG_LOG_BUF_SHIFT=18
CONFIG_DETECT_SOFTLOCKUP=y
# CONFIG_SCHEDSTATS is not set
# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_MUTEXES is not set
# CONFIG_DEBUG_SPINLOCK is not set
# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
# CONFIG_DEBUG_KOBJECT is not set
@@ -1254,10 +1289,11 @@ CONFIG_DETECT_SOFTLOCKUP=y
CONFIG_DEBUG_FS=y
# CONFIG_DEBUG_VM is not set
# CONFIG_FRAME_POINTER is not set
+# CONFIG_FORCED_INLINING is not set
# CONFIG_RCU_TORTURE_TEST is not set
CONFIG_INIT_DEBUG=y
+# CONFIG_DEBUG_RODATA is not set
# CONFIG_IOMMU_DEBUG is not set
-CONFIG_KPROBES=y
#
# Security options
diff --git a/arch/x86_64/ia32/Makefile b/arch/x86_64/ia32/Makefile
index f76217d8f57..929e6b0771f 100644
--- a/arch/x86_64/ia32/Makefile
+++ b/arch/x86_64/ia32/Makefile
@@ -2,9 +2,9 @@
# Makefile for the ia32 kernel emulation subsystem.
#
-obj-$(CONFIG_IA32_EMULATION) := ia32entry.o sys_ia32.o ia32_ioctl.o \
- ia32_signal.o tls32.o \
- ia32_binfmt.o fpu32.o ptrace32.o syscall32.o syscall32_syscall.o
+obj-$(CONFIG_IA32_EMULATION) := ia32entry.o sys_ia32.o ia32_signal.o tls32.o \
+ ia32_binfmt.o fpu32.o ptrace32.o syscall32.o syscall32_syscall.o \
+ mmap32.o
sysv-$(CONFIG_SYSVIPC) := ipc32.o
obj-$(CONFIG_IA32_EMULATION) += $(sysv-y)
@@ -29,4 +29,3 @@ $(obj)/vsyscall-%.so: $(src)/vsyscall.lds $(obj)/vsyscall-%.o FORCE
AFLAGS_vsyscall-sysenter.o = -m32
AFLAGS_vsyscall-syscall.o = -m32
-CFLAGS_ia32_ioctl.o += -Ifs/
diff --git a/arch/x86_64/ia32/ia32_binfmt.c b/arch/x86_64/ia32/ia32_binfmt.c
index 830feb272ec..572b3b28772 100644
--- a/arch/x86_64/ia32/ia32_binfmt.c
+++ b/arch/x86_64/ia32/ia32_binfmt.c
@@ -197,8 +197,7 @@ static inline void elf_core_copy_regs(elf_gregset_t *elfregs, struct pt_regs *re
static inline int elf_core_copy_task_regs(struct task_struct *t, elf_gregset_t* elfregs)
{
- struct pt_regs *pp = (struct pt_regs *)(t->thread.rsp0);
- --pp;
+ struct pt_regs *pp = task_pt_regs(t);
ELF_CORE_COPY_REGS((*elfregs), pp);
/* fix wrong segments */
(*elfregs)[7] = t->thread.ds;
@@ -217,8 +216,7 @@ elf_core_copy_task_fpregs(struct task_struct *tsk, struct pt_regs *regs, elf_fpr
if (!tsk_used_math(tsk))
return 0;
if (!regs)
- regs = (struct pt_regs *)tsk->thread.rsp0;
- --regs;
+ regs = task_pt_regs(tsk);
if (tsk == current)
unlazy_fpu(tsk);
set_fs(KERNEL_DS);
@@ -234,7 +232,7 @@ elf_core_copy_task_fpregs(struct task_struct *tsk, struct pt_regs *regs, elf_fpr
static inline int
elf_core_copy_task_xfpregs(struct task_struct *t, elf_fpxregset_t *xfpu)
{
- struct pt_regs *regs = ((struct pt_regs *)(t->thread.rsp0))-1;
+ struct pt_regs *regs = task_pt_regs(t);
if (!tsk_used_math(t))
return 0;
if (t == current)
@@ -295,8 +293,6 @@ int ia32_setup_arg_pages(struct linux_binprm *bprm, unsigned long stack_top, int
} while(0)
-#define elf_map elf32_map
-
#include <linux/module.h>
MODULE_DESCRIPTION("Binary format loader for compatibility with IA32 ELF binaries.");
@@ -392,21 +388,6 @@ int ia32_setup_arg_pages(struct linux_binprm *bprm, unsigned long stack_top,
}
EXPORT_SYMBOL(ia32_setup_arg_pages);
-static unsigned long
-elf32_map (struct file *filep, unsigned long addr, struct elf_phdr *eppnt, int prot, int type)
-{
- unsigned long map_addr;
- struct task_struct *me = current;
-
- down_write(&me->mm->mmap_sem);
- map_addr = do_mmap(filep, ELF_PAGESTART(addr),
- eppnt->p_filesz + ELF_PAGEOFFSET(eppnt->p_vaddr), prot,
- type,
- eppnt->p_offset - ELF_PAGEOFFSET(eppnt->p_vaddr));
- up_write(&me->mm->mmap_sem);
- return(map_addr);
-}
-
#ifdef CONFIG_SYSCTL
/* Register vsyscall32 into the ABI table */
#include <linux/sysctl.h>
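
Several hunks above (and more in ptrace32.c further down) replace the open-coded "(struct pt_regs *)(t->thread.rsp0); --pp;" computation with task_pt_regs(). As a sketch, the helper amounts to the following; the real definition lives in the x86-64 processor.h of this era, so treat this as a paraphrase rather than a quote:

/* A task's saved user-mode registers sit at the top of its kernel
 * stack, immediately below thread.rsp0, i.e. one pt_regs below it. */
#define task_pt_regs(tsk) \
        ((struct pt_regs *)(tsk)->thread.rsp0 - 1)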
diff --git a/arch/x86_64/ia32/ia32_ioctl.c b/arch/x86_64/ia32/ia32_ioctl.c
deleted file mode 100644
index e335bd0b637..00000000000
--- a/arch/x86_64/ia32/ia32_ioctl.c
+++ /dev/null
@@ -1,79 +0,0 @@
-/* $Id: ia32_ioctl.c,v 1.25 2002/10/11 07:17:06 ak Exp $
- * ioctl32.c: Conversion between 32bit and 64bit native ioctls.
- *
- * Copyright (C) 1997-2000 Jakub Jelinek (jakub@redhat.com)
- * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be)
- * Copyright (C) 2001,2002 Andi Kleen, SuSE Labs
- *
- * These routines maintain argument size conversion between 32bit and 64bit
- * ioctls.
- */
-
-#define INCLUDES
-#include <linux/syscalls.h>
-#include "compat_ioctl.c"
-#include <asm/ia32.h>
-
-#define CODE
-#include "compat_ioctl.c"
-
-#define RTC_IRQP_READ32 _IOR('p', 0x0b, unsigned int) /* Read IRQ rate */
-#define RTC_IRQP_SET32 _IOW('p', 0x0c, unsigned int) /* Set IRQ rate */
-#define RTC_EPOCH_READ32 _IOR('p', 0x0d, unsigned) /* Read epoch */
-#define RTC_EPOCH_SET32 _IOW('p', 0x0e, unsigned) /* Set epoch */
-
-static int rtc32_ioctl(unsigned fd, unsigned cmd, unsigned long arg)
-{
- unsigned long val;
- mm_segment_t oldfs = get_fs();
- int ret;
-
- switch (cmd) {
- case RTC_IRQP_READ32:
- set_fs(KERNEL_DS);
- ret = sys_ioctl(fd, RTC_IRQP_READ, (unsigned long)&val);
- set_fs(oldfs);
- if (!ret)
- ret = put_user(val, (unsigned int __user *) arg);
- return ret;
-
- case RTC_IRQP_SET32:
- cmd = RTC_IRQP_SET;
- break;
-
- case RTC_EPOCH_READ32:
- set_fs(KERNEL_DS);
- ret = sys_ioctl(fd, RTC_EPOCH_READ, (unsigned long) &val);
- set_fs(oldfs);
- if (!ret)
- ret = put_user(val, (unsigned int __user *) arg);
- return ret;
-
- case RTC_EPOCH_SET32:
- cmd = RTC_EPOCH_SET;
- break;
- }
- return sys_ioctl(fd,cmd,arg);
-}
-
-
-#define HANDLE_IOCTL(cmd,handler) { (cmd), (ioctl_trans_handler_t)(handler) },
-#define COMPATIBLE_IOCTL(cmd) HANDLE_IOCTL(cmd,sys_ioctl)
-
-struct ioctl_trans ioctl_start[] = {
-#include <linux/compat_ioctl.h>
-#define DECLARES
-#include "compat_ioctl.c"
-
-/* And these ioctls need translation */
-/* realtime device */
-HANDLE_IOCTL(RTC_IRQP_READ, rtc32_ioctl)
-HANDLE_IOCTL(RTC_IRQP_READ32,rtc32_ioctl)
-HANDLE_IOCTL(RTC_IRQP_SET32, rtc32_ioctl)
-HANDLE_IOCTL(RTC_EPOCH_READ32, rtc32_ioctl)
-HANDLE_IOCTL(RTC_EPOCH_SET32, rtc32_ioctl)
-/* take care of sizeof(sizeof()) breakage */
-};
-
-int ioctl_table_size = ARRAY_SIZE(ioctl_start);
-
diff --git a/arch/x86_64/ia32/ia32_signal.c b/arch/x86_64/ia32/ia32_signal.c
index 0903cc1faef..e0a92439f63 100644
--- a/arch/x86_64/ia32/ia32_signal.c
+++ b/arch/x86_64/ia32/ia32_signal.c
@@ -353,7 +353,6 @@ ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc, struct _fpstate_ia32 __
struct pt_regs *regs, unsigned int mask)
{
int tmp, err = 0;
- u32 eflags;
tmp = 0;
__asm__("movl %%gs,%0" : "=r"(tmp): "0"(tmp));
@@ -378,10 +377,7 @@ ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc, struct _fpstate_ia32 __
err |= __put_user(current->thread.trap_no, &sc->trapno);
err |= __put_user(current->thread.error_code, &sc->err);
err |= __put_user((u32)regs->rip, &sc->eip);
- eflags = regs->eflags;
- if (current->ptrace & PT_PTRACED)
- eflags &= ~TF_MASK;
- err |= __put_user((u32)eflags, &sc->eflags);
+ err |= __put_user((u32)regs->eflags, &sc->eflags);
err |= __put_user((u32)regs->rsp, &sc->esp_at_signal);
tmp = save_i387_ia32(current, fpstate, regs, 0);
@@ -505,13 +501,9 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
regs->ss = __USER32_DS;
set_fs(USER_DS);
- if (regs->eflags & TF_MASK) {
- if (current->ptrace & PT_PTRACED) {
- ptrace_notify(SIGTRAP);
- } else {
- regs->eflags &= ~TF_MASK;
- }
- }
+ regs->eflags &= ~TF_MASK;
+ if (test_thread_flag(TIF_SINGLESTEP))
+ ptrace_notify(SIGTRAP);
#if DEBUG_SIG
printk("SIG deliver (%s:%d): sp=%p pc=%p ra=%p\n",
@@ -605,13 +597,9 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
regs->ss = __USER32_DS;
set_fs(USER_DS);
- if (regs->eflags & TF_MASK) {
- if (current->ptrace & PT_PTRACED) {
- ptrace_notify(SIGTRAP);
- } else {
- regs->eflags &= ~TF_MASK;
- }
- }
+ regs->eflags &= ~TF_MASK;
+ if (test_thread_flag(TIF_SINGLESTEP))
+ ptrace_notify(SIGTRAP);
#if DEBUG_SIG
printk("SIG deliver (%s:%d): sp=%p pc=%p ra=%p\n",
diff --git a/arch/x86_64/ia32/ia32entry.S b/arch/x86_64/ia32/ia32entry.S
index e0eb0c712fe..f05c2a80248 100644
--- a/arch/x86_64/ia32/ia32entry.S
+++ b/arch/x86_64/ia32/ia32entry.S
@@ -35,6 +35,18 @@
movq %rax,R8(%rsp)
.endm
+ .macro CFI_STARTPROC32 simple
+ CFI_STARTPROC \simple
+ CFI_UNDEFINED r8
+ CFI_UNDEFINED r9
+ CFI_UNDEFINED r10
+ CFI_UNDEFINED r11
+ CFI_UNDEFINED r12
+ CFI_UNDEFINED r13
+ CFI_UNDEFINED r14
+ CFI_UNDEFINED r15
+ .endm
+
/*
* 32bit SYSENTER instruction entry.
*
@@ -55,7 +67,7 @@
* with the int 0x80 path.
*/
ENTRY(ia32_sysenter_target)
- CFI_STARTPROC simple
+ CFI_STARTPROC32 simple
CFI_DEF_CFA rsp,0
CFI_REGISTER rsp,rbp
swapgs
@@ -92,6 +104,7 @@ ENTRY(ia32_sysenter_target)
.quad 1b,ia32_badarg
.previous
GET_THREAD_INFO(%r10)
+ orl $TS_COMPAT,threadinfo_status(%r10)
testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%r10)
CFI_REMEMBER_STATE
jnz sysenter_tracesys
@@ -105,6 +118,7 @@ sysenter_do_call:
cli
testl $_TIF_ALLWORK_MASK,threadinfo_flags(%r10)
jnz int_ret_from_sys_call
+ andl $~TS_COMPAT,threadinfo_status(%r10)
/* clear IF, that popfq doesn't enable interrupts early */
andl $~0x200,EFLAGS-R11(%rsp)
RESTORE_ARGS 1,24,1,1,1,1
@@ -161,7 +175,7 @@ sysenter_tracesys:
* with the int 0x80 path.
*/
ENTRY(ia32_cstar_target)
- CFI_STARTPROC simple
+ CFI_STARTPROC32 simple
CFI_DEF_CFA rsp,0
CFI_REGISTER rip,rcx
/*CFI_REGISTER rflags,r11*/
@@ -191,6 +205,7 @@ ENTRY(ia32_cstar_target)
.quad 1b,ia32_badarg
.previous
GET_THREAD_INFO(%r10)
+ orl $TS_COMPAT,threadinfo_status(%r10)
testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%r10)
CFI_REMEMBER_STATE
jnz cstar_tracesys
@@ -204,6 +219,7 @@ cstar_do_call:
cli
testl $_TIF_ALLWORK_MASK,threadinfo_flags(%r10)
jnz int_ret_from_sys_call
+ andl $~TS_COMPAT,threadinfo_status(%r10)
RESTORE_ARGS 1,-ARG_SKIP,1,1,1
movl RIP-ARGOFFSET(%rsp),%ecx
CFI_REGISTER rip,rcx
@@ -276,6 +292,7 @@ ENTRY(ia32_syscall)
this could be a problem. */
SAVE_ARGS 0,0,1
GET_THREAD_INFO(%r10)
+ orl $TS_COMPAT,threadinfo_status(%r10)
testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%r10)
jnz ia32_tracesys
ia32_do_syscall:
@@ -318,7 +335,7 @@ quiet_ni_syscall:
jmp ia32_ptregs_common
.endm
- CFI_STARTPROC
+ CFI_STARTPROC32
PTREGSCALL stub32_rt_sigreturn, sys32_rt_sigreturn, %rdi
PTREGSCALL stub32_sigreturn, sys32_sigreturn, %rdi
@@ -333,15 +350,26 @@ quiet_ni_syscall:
ENTRY(ia32_ptregs_common)
popq %r11
- CFI_ADJUST_CFA_OFFSET -8
- CFI_REGISTER rip, r11
+ CFI_ENDPROC
+ CFI_STARTPROC32 simple
+ CFI_DEF_CFA rsp,SS+8-ARGOFFSET
+ CFI_REL_OFFSET rax,RAX-ARGOFFSET
+ CFI_REL_OFFSET rcx,RCX-ARGOFFSET
+ CFI_REL_OFFSET rdx,RDX-ARGOFFSET
+ CFI_REL_OFFSET rsi,RSI-ARGOFFSET
+ CFI_REL_OFFSET rdi,RDI-ARGOFFSET
+ CFI_REL_OFFSET rip,RIP-ARGOFFSET
+/* CFI_REL_OFFSET cs,CS-ARGOFFSET*/
+/* CFI_REL_OFFSET rflags,EFLAGS-ARGOFFSET*/
+ CFI_REL_OFFSET rsp,RSP-ARGOFFSET
+/* CFI_REL_OFFSET ss,SS-ARGOFFSET*/
SAVE_REST
call *%rax
RESTORE_REST
jmp ia32_sysret /* misbalances the return cache */
CFI_ENDPROC
- .data
+ .section .rodata,"a"
.align 8
.globl ia32_sys_call_table
ia32_sys_call_table:
@@ -608,7 +636,7 @@ ia32_sys_call_table:
.quad sys_epoll_wait
.quad sys_remap_file_pages
.quad sys_set_tid_address
- .quad sys32_timer_create
+ .quad compat_sys_timer_create
.quad compat_sys_timer_settime /* 260 */
.quad compat_sys_timer_gettime
.quad sys_timer_getoverrun
@@ -643,6 +671,20 @@ ia32_sys_call_table:
.quad sys_inotify_init
.quad sys_inotify_add_watch
.quad sys_inotify_rm_watch
+ .quad sys_migrate_pages
+ .quad compat_sys_openat /* 295 */
+ .quad sys_mkdirat
+ .quad sys_mknodat
+ .quad sys_fchownat
+ .quad sys_futimesat
+ .quad compat_sys_newfstatat /* 300 */
+ .quad sys_unlinkat
+ .quad sys_renameat
+ .quad sys_linkat
+ .quad sys_symlinkat
+ .quad sys_readlinkat /* 305 */
+ .quad sys_fchmodat
+ .quad sys_faccessat
ia32_syscall_end:
.rept IA32_NR_syscalls-(ia32_syscall_end-ia32_sys_call_table)/8
.quad ni_syscall
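
All three 32-bit entry paths above now set TS_COMPAT in threadinfo_status on the way in, and the fast-return paths clear it, so the rest of the kernel can tell that the syscall in progress arrived through a 32-bit path. A hypothetical consumer (the helper name is illustrative; only the status bit itself comes from this patch):

/* Illustrative only: e.g. signal or seccomp code could ask whether the
 * current syscall entered via ia32_sysenter_target, ia32_cstar_target
 * or ia32_syscall by testing the bit these stubs set. */
static inline int in_ia32_syscall(void)
{
        return current_thread_info()->status & TS_COMPAT;
}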
diff --git a/arch/x86_64/ia32/mmap32.c b/arch/x86_64/ia32/mmap32.c
new file mode 100644
index 00000000000..079f4132575
--- /dev/null
+++ b/arch/x86_64/ia32/mmap32.c
@@ -0,0 +1,78 @@
+/*
+ * linux/arch/x86_64/ia32/mm/mmap.c
+ *
+ * flexible mmap layout support
+ *
+ * Based on the i386 version which was
+ *
+ * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ *
+ * Started by Ingo Molnar <mingo@elte.hu>
+ */
+
+#include <linux/personality.h>
+#include <linux/mm.h>
+#include <linux/random.h>
+
+/*
+ * Top of mmap area (just below the process stack).
+ *
+ * Leave an at least ~128 MB hole.
+ */
+#define MIN_GAP (128*1024*1024)
+#define MAX_GAP (TASK_SIZE/6*5)
+
+static inline unsigned long mmap_base(struct mm_struct *mm)
+{
+ unsigned long gap = current->signal->rlim[RLIMIT_STACK].rlim_cur;
+ unsigned long random_factor = 0;
+
+ if (current->flags & PF_RANDOMIZE)
+ random_factor = get_random_int() % (1024*1024);
+
+ if (gap < MIN_GAP)
+ gap = MIN_GAP;
+ else if (gap > MAX_GAP)
+ gap = MAX_GAP;
+
+ return PAGE_ALIGN(TASK_SIZE - gap - random_factor);
+}
+
+/*
+ * This function, called very early during the creation of a new
+ * process VM image, sets up which VM layout function to use:
+ */
+void ia32_pick_mmap_layout(struct mm_struct *mm)
+{
+ /*
+ * Fall back to the standard layout if the personality
+ * bit is set, or if the expected stack growth is unlimited:
+ */
+ if (sysctl_legacy_va_layout ||
+ (current->personality & ADDR_COMPAT_LAYOUT) ||
+ current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY) {
+ mm->mmap_base = TASK_UNMAPPED_BASE;
+ mm->get_unmapped_area = arch_get_unmapped_area;
+ mm->unmap_area = arch_unmap_area;
+ } else {
+ mm->mmap_base = mmap_base(mm);
+ mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+ mm->unmap_area = arch_unmap_area_topdown;
+ }
+}
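
To make the new mmap_base() concrete, here is a standalone worked example with assumed inputs (TASK_SIZE = 3 GB as for an ia32 task, an 8 MB RLIMIT_STACK, randomization off); the small stack limit clamps up to MIN_GAP:

#include <stdio.h>

#define PAGE_SIZE     4096UL
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
#define TASK_SIZE     0xC0000000UL              /* assumption: ia32 task */
#define MIN_GAP       (128 * 1024 * 1024UL)
#define MAX_GAP       (TASK_SIZE / 6 * 5)

int main(void)
{
        unsigned long gap = 8 * 1024 * 1024UL;  /* 8 MB stack rlimit */

        if (gap < MIN_GAP)
                gap = MIN_GAP;                  /* taken: 8 MB < 128 MB */
        else if (gap > MAX_GAP)
                gap = MAX_GAP;

        /* prints b8000000: the compat mmap area grows down from
         * 128 MB below the 3 GB task limit */
        printf("mmap_base = %lx\n", PAGE_ALIGN(TASK_SIZE - gap));
        return 0;
}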
diff --git a/arch/x86_64/ia32/ptrace32.c b/arch/x86_64/ia32/ptrace32.c
index 2a925e2af39..23a4515a73b 100644
--- a/arch/x86_64/ia32/ptrace32.c
+++ b/arch/x86_64/ia32/ptrace32.c
@@ -28,9 +28,12 @@
#include <asm/i387.h>
#include <asm/fpu32.h>
-/* determines which flags the user has access to. */
-/* 1 = access 0 = no access */
-#define FLAG_MASK 0x44dd5UL
+/*
+ * Determines which flags the user has access to [1 = access, 0 = no access].
+ * Prohibits changing ID(21), VIP(20), VIF(19), VM(17), IOPL(12-13), IF(9).
+ * Also masks reserved bits (31-22, 15, 5, 3, 1).
+ */
+#define FLAG_MASK 0x54dd5UL
#define R32(l,q) \
case offsetof(struct user32, regs.l): stack[offsetof(struct pt_regs, q)/8] = val; break
@@ -38,7 +41,7 @@
static int putreg32(struct task_struct *child, unsigned regno, u32 val)
{
int i;
- __u64 *stack = (__u64 *)(child->thread.rsp0 - sizeof(struct pt_regs));
+ __u64 *stack = (__u64 *)task_pt_regs(child);
switch (regno) {
case offsetof(struct user32, regs.fs):
@@ -134,7 +137,7 @@ static int putreg32(struct task_struct *child, unsigned regno, u32 val)
static int getreg32(struct task_struct *child, unsigned regno, u32 *val)
{
- __u64 *stack = (__u64 *)(child->thread.rsp0 - sizeof(struct pt_regs));
+ __u64 *stack = (__u64 *)task_pt_regs(child);
switch (regno) {
case offsetof(struct user32, regs.fs):
@@ -196,36 +199,6 @@ static int getreg32(struct task_struct *child, unsigned regno, u32 *val)
#undef R32
-static struct task_struct *find_target(int request, int pid, int *err)
-{
- struct task_struct *child;
-
- *err = -EPERM;
- if (pid == 1)
- return NULL;
-
- *err = -ESRCH;
- read_lock(&tasklist_lock);
- child = find_task_by_pid(pid);
- if (child)
- get_task_struct(child);
- read_unlock(&tasklist_lock);
- if (child) {
- *err = -EPERM;
- if (child->pid == 1)
- goto out;
- *err = ptrace_check_attach(child, request == PTRACE_KILL);
- if (*err < 0)
- goto out;
- return child;
- }
- out:
- if (child)
- put_task_struct(child);
- return NULL;
-
-}
-
asmlinkage long sys32_ptrace(long request, u32 pid, u32 addr, u32 data)
{
struct task_struct *child;
@@ -254,11 +227,18 @@ asmlinkage long sys32_ptrace(long request, u32 pid, u32 addr, u32 data)
break;
}
- child = find_target(request, pid, &ret);
- if (!child)
- return ret;
+ if (request == PTRACE_TRACEME)
+ return ptrace_traceme();
+
+ child = ptrace_get_task_struct(pid);
+ if (IS_ERR(child))
+ return PTR_ERR(child);
- childregs = (struct pt_regs *)(child->thread.rsp0 - sizeof(struct pt_regs));
+ ret = ptrace_check_attach(child, request == PTRACE_KILL);
+ if (ret < 0)
+ goto out;
+
+ childregs = task_pt_regs(child);
switch (request) {
case PTRACE_PEEKDATA:
@@ -373,6 +353,7 @@ asmlinkage long sys32_ptrace(long request, u32 pid, u32 addr, u32 data)
break;
}
+ out:
put_task_struct(child);
return ret;
}
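
The widened FLAG_MASK above takes effect when a 32-bit debugger pokes the tracee's eflags: user-supplied bits are accepted only where the mask is 1, and IOPL, IF, VM, VIF/VIP, ID plus the reserved bits keep their old values. A minimal sketch of that masked update, mirroring what putreg32's eflags case does (not copied from it):

#define FLAG_MASK 0x54dd5UL     /* value introduced by this patch */

static unsigned long masked_eflags_write(unsigned long old, unsigned long val)
{
        /* writable bits come from the debugger, the rest from the tracee */
        return (old & ~FLAG_MASK) | (val & FLAG_MASK);
}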
diff --git a/arch/x86_64/ia32/sys_ia32.c b/arch/x86_64/ia32/sys_ia32.c
index 5389df610e7..54481af5344 100644
--- a/arch/x86_64/ia32/sys_ia32.c
+++ b/arch/x86_64/ia32/sys_ia32.c
@@ -969,25 +969,6 @@ long sys32_kill(int pid, int sig)
return sys_kill(pid, sig);
}
-extern asmlinkage long
-sys_timer_create(clockid_t which_clock,
- struct sigevent __user *timer_event_spec,
- timer_t __user * created_timer_id);
-
-long
-sys32_timer_create(u32 clock, struct compat_sigevent __user *se32, timer_t __user *timer_id)
-{
- struct sigevent __user *p = NULL;
- if (se32) {
- struct sigevent se;
- p = compat_alloc_user_space(sizeof(struct sigevent));
- if (get_compat_sigevent(&se, se32) ||
- copy_to_user(p, &se, sizeof(se)))
- return -EFAULT;
- }
- return sys_timer_create(clock, p, timer_id);
-}
-
long sys32_fadvise64_64(int fd, __u32 offset_low, __u32 offset_high,
__u32 len_low, __u32 len_high, int advice)
{
diff --git a/arch/x86_64/ia32/vsyscall-sigreturn.S b/arch/x86_64/ia32/vsyscall-sigreturn.S
index 8b5a4b060bb..d90321fe9bb 100644
--- a/arch/x86_64/ia32/vsyscall-sigreturn.S
+++ b/arch/x86_64/ia32/vsyscall-sigreturn.S
@@ -7,6 +7,7 @@
* by doing ".balign 32" must match in both versions of the page.
*/
+ .code32
.section .text.sigreturn,"ax"
.balign 32
.globl __kernel_sigreturn
diff --git a/arch/x86_64/ia32/vsyscall-syscall.S b/arch/x86_64/ia32/vsyscall-syscall.S
index b024965bb68..cf9ef678de3 100644
--- a/arch/x86_64/ia32/vsyscall-syscall.S
+++ b/arch/x86_64/ia32/vsyscall-syscall.S
@@ -6,6 +6,7 @@
#include <asm/asm-offsets.h>
#include <asm/segment.h>
+ .code32
.text
.section .text.vsyscall,"ax"
.globl __kernel_vsyscall
diff --git a/arch/x86_64/ia32/vsyscall-sysenter.S b/arch/x86_64/ia32/vsyscall-sysenter.S
index 71f3de586b5..ae056e553d1 100644
--- a/arch/x86_64/ia32/vsyscall-sysenter.S
+++ b/arch/x86_64/ia32/vsyscall-sysenter.S
@@ -5,6 +5,7 @@
#include <asm/ia32_unistd.h>
#include <asm/asm-offsets.h>
+ .code32
.text
.section .text.vsyscall,"ax"
.globl __kernel_vsyscall
diff --git a/arch/x86_64/kernel/Makefile b/arch/x86_64/kernel/Makefile
index fe4cbd1c4b2..72fe60c20d3 100644
--- a/arch/x86_64/kernel/Makefile
+++ b/arch/x86_64/kernel/Makefile
@@ -7,7 +7,8 @@ EXTRA_AFLAGS := -traditional
obj-y := process.o signal.o entry.o traps.o irq.o \
ptrace.o time.o ioport.o ldt.o setup.o i8259.o sys_x86_64.o \
x8664_ksyms.o i387.o syscall.o vsyscall.o \
- setup64.o bootflag.o e820.o reboot.o quirks.o i8237.o
+ setup64.o bootflag.o e820.o reboot.o quirks.o i8237.o \
+ dmi_scan.o pci-dma.o pci-nommu.o
obj-$(CONFIG_X86_MCE) += mce.o
obj-$(CONFIG_X86_MCE_INTEL) += mce_intel.o
@@ -22,14 +23,16 @@ obj-$(CONFIG_X86_LOCAL_APIC) += apic.o nmi.o
obj-$(CONFIG_X86_IO_APIC) += io_apic.o mpparse.o \
genapic.o genapic_cluster.o genapic_flat.o
obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o crash.o
+obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
obj-$(CONFIG_PM) += suspend.o
obj-$(CONFIG_SOFTWARE_SUSPEND) += suspend_asm.o
obj-$(CONFIG_CPU_FREQ) += cpufreq/
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
obj-$(CONFIG_GART_IOMMU) += pci-gart.o aperture.o
-obj-$(CONFIG_DUMMY_IOMMU) += pci-nommu.o pci-dma.o
+obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o
obj-$(CONFIG_KPROBES) += kprobes.o
obj-$(CONFIG_X86_PM_TIMER) += pmtimer.o
+obj-$(CONFIG_X86_VSMP) += vsmp.o
obj-$(CONFIG_MODULES) += module.o
@@ -46,3 +49,5 @@ intel_cacheinfo-y += ../../i386/kernel/cpu/intel_cacheinfo.o
quirks-y += ../../i386/kernel/quirks.o
i8237-y += ../../i386/kernel/i8237.o
msr-$(subst m,y,$(CONFIG_X86_MSR)) += ../../i386/kernel/msr.o
+dmi_scan-y += ../../i386/kernel/dmi_scan.o
+
diff --git a/arch/x86_64/kernel/aperture.c b/arch/x86_64/kernel/aperture.c
index c7f4fdd20f0..e4e2b7d01f8 100644
--- a/arch/x86_64/kernel/aperture.c
+++ b/arch/x86_64/kernel/aperture.c
@@ -23,6 +23,7 @@
#include <asm/io.h>
#include <asm/proto.h>
#include <asm/pci-direct.h>
+#include <asm/dma.h>
int iommu_aperture;
int iommu_aperture_disabled __initdata = 0;
@@ -247,7 +248,7 @@ void __init iommu_hole_init(void)
/* Got the aperture from the AGP bridge */
} else if (swiotlb && !valid_agp) {
/* Do nothing */
- } else if ((!no_iommu && end_pfn >= 0xffffffff>>PAGE_SHIFT) ||
+ } else if ((!no_iommu && end_pfn >= MAX_DMA32_PFN) ||
force_iommu ||
valid_agp ||
fallback_aper_force) {
diff --git a/arch/x86_64/kernel/apic.c b/arch/x86_64/kernel/apic.c
index 18691ce4c75..5d3c5b07b8d 100644
--- a/arch/x86_64/kernel/apic.c
+++ b/arch/x86_64/kernel/apic.c
@@ -25,6 +25,7 @@
#include <linux/mc146818rtc.h>
#include <linux/kernel_stat.h>
#include <linux/sysdev.h>
+#include <linux/module.h>
#include <asm/atomic.h>
#include <asm/smp.h>
@@ -33,36 +34,36 @@
#include <asm/pgalloc.h>
#include <asm/mach_apic.h>
#include <asm/nmi.h>
+#include <asm/idle.h>
int apic_verbosity;
int disable_apic_timer __initdata;
+/*
+ * cpu_mask that denotes the CPUs that needs timer interrupt coming in as
+ * IPIs in place of local APIC timers
+ */
+static cpumask_t timer_interrupt_broadcast_ipi_mask;
+
/* Using APIC to generate smp_local_timer_interrupt? */
int using_apic_timer = 0;
-static DEFINE_PER_CPU(int, prof_multiplier) = 1;
-static DEFINE_PER_CPU(int, prof_old_multiplier) = 1;
-static DEFINE_PER_CPU(int, prof_counter) = 1;
-
static void apic_pm_activate(void);
void enable_NMI_through_LVT0 (void * dummy)
{
- unsigned int v, ver;
+ unsigned int v;
- ver = apic_read(APIC_LVR);
- ver = GET_APIC_VERSION(ver);
v = APIC_DM_NMI; /* unmask and set to NMI */
- apic_write_around(APIC_LVT0, v);
+ apic_write(APIC_LVT0, v);
}
int get_maxlvt(void)
{
- unsigned int v, ver, maxlvt;
+ unsigned int v, maxlvt;
v = apic_read(APIC_LVR);
- ver = GET_APIC_VERSION(v);
maxlvt = GET_APIC_MAXLVT(v);
return maxlvt;
}
@@ -80,33 +81,33 @@ void clear_local_APIC(void)
*/
if (maxlvt >= 3) {
v = ERROR_APIC_VECTOR; /* any non-zero vector will do */
- apic_write_around(APIC_LVTERR, v | APIC_LVT_MASKED);
+ apic_write(APIC_LVTERR, v | APIC_LVT_MASKED);
}
/*
* Careful: we have to set masks only first to deassert
* any level-triggered sources.
*/
v = apic_read(APIC_LVTT);
- apic_write_around(APIC_LVTT, v | APIC_LVT_MASKED);
+ apic_write(APIC_LVTT, v | APIC_LVT_MASKED);
v = apic_read(APIC_LVT0);
- apic_write_around(APIC_LVT0, v | APIC_LVT_MASKED);
+ apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
v = apic_read(APIC_LVT1);
- apic_write_around(APIC_LVT1, v | APIC_LVT_MASKED);
+ apic_write(APIC_LVT1, v | APIC_LVT_MASKED);
if (maxlvt >= 4) {
v = apic_read(APIC_LVTPC);
- apic_write_around(APIC_LVTPC, v | APIC_LVT_MASKED);
+ apic_write(APIC_LVTPC, v | APIC_LVT_MASKED);
}
/*
* Clean APIC state for other OSs:
*/
- apic_write_around(APIC_LVTT, APIC_LVT_MASKED);
- apic_write_around(APIC_LVT0, APIC_LVT_MASKED);
- apic_write_around(APIC_LVT1, APIC_LVT_MASKED);
+ apic_write(APIC_LVTT, APIC_LVT_MASKED);
+ apic_write(APIC_LVT0, APIC_LVT_MASKED);
+ apic_write(APIC_LVT1, APIC_LVT_MASKED);
if (maxlvt >= 3)
- apic_write_around(APIC_LVTERR, APIC_LVT_MASKED);
+ apic_write(APIC_LVTERR, APIC_LVT_MASKED);
if (maxlvt >= 4)
- apic_write_around(APIC_LVTPC, APIC_LVT_MASKED);
+ apic_write(APIC_LVTPC, APIC_LVT_MASKED);
v = GET_APIC_VERSION(apic_read(APIC_LVR));
apic_write(APIC_ESR, 0);
apic_read(APIC_ESR);
@@ -151,7 +152,7 @@ void disconnect_bsp_APIC(int virt_wire_setup)
value &= ~APIC_VECTOR_MASK;
value |= APIC_SPIV_APIC_ENABLED;
value |= 0xf;
- apic_write_around(APIC_SPIV, value);
+ apic_write(APIC_SPIV, value);
if (!virt_wire_setup) {
/* For LVT0 make it edge triggered, active high, external and enabled */
@@ -161,11 +162,11 @@ void disconnect_bsp_APIC(int virt_wire_setup)
APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED );
value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING;
value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_EXTINT);
- apic_write_around(APIC_LVT0, value);
+ apic_write(APIC_LVT0, value);
}
else {
/* Disable LVT0 */
- apic_write_around(APIC_LVT0, APIC_LVT_MASKED);
+ apic_write(APIC_LVT0, APIC_LVT_MASKED);
}
/* For LVT1 make it edge triggered, active high, nmi and enabled */
@@ -176,7 +177,7 @@ void disconnect_bsp_APIC(int virt_wire_setup)
APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED);
value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING;
value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_NMI);
- apic_write_around(APIC_LVT1, value);
+ apic_write(APIC_LVT1, value);
}
}
@@ -192,7 +193,7 @@ void disable_local_APIC(void)
*/
value = apic_read(APIC_SPIV);
value &= ~APIC_SPIV_APIC_ENABLED;
- apic_write_around(APIC_SPIV, value);
+ apic_write(APIC_SPIV, value);
}
/*
@@ -269,7 +270,7 @@ void __init sync_Arb_IDs(void)
apic_wait_icr_idle();
apic_printk(APIC_DEBUG, "Synchronizing Arb IDs.\n");
- apic_write_around(APIC_ICR, APIC_DEST_ALLINC | APIC_INT_LEVELTRIG
+ apic_write(APIC_ICR, APIC_DEST_ALLINC | APIC_INT_LEVELTRIG
| APIC_DM_INIT);
}
@@ -280,7 +281,7 @@ extern void __error_in_apic_c (void);
*/
void __init init_bsp_APIC(void)
{
- unsigned int value, ver;
+ unsigned int value;
/*
* Don't do the setup now if we have a SMP BIOS as the
@@ -290,7 +291,6 @@ void __init init_bsp_APIC(void)
return;
value = apic_read(APIC_LVR);
- ver = GET_APIC_VERSION(value);
/*
* Do not trust the local APIC being empty at bootup.
@@ -305,22 +305,21 @@ void __init init_bsp_APIC(void)
value |= APIC_SPIV_APIC_ENABLED;
value |= APIC_SPIV_FOCUS_DISABLED;
value |= SPURIOUS_APIC_VECTOR;
- apic_write_around(APIC_SPIV, value);
+ apic_write(APIC_SPIV, value);
/*
* Set up the virtual wire mode.
*/
- apic_write_around(APIC_LVT0, APIC_DM_EXTINT);
+ apic_write(APIC_LVT0, APIC_DM_EXTINT);
value = APIC_DM_NMI;
- apic_write_around(APIC_LVT1, value);
+ apic_write(APIC_LVT1, value);
}
void __cpuinit setup_local_APIC (void)
{
- unsigned int value, ver, maxlvt;
+ unsigned int value, maxlvt;
value = apic_read(APIC_LVR);
- ver = GET_APIC_VERSION(value);
if ((SPURIOUS_APIC_VECTOR & 0x0f) != 0x0f)
__error_in_apic_c();
@@ -345,7 +344,7 @@ void __cpuinit setup_local_APIC (void)
*/
value = apic_read(APIC_TASKPRI);
value &= ~APIC_TPRI_MASK;
- apic_write_around(APIC_TASKPRI, value);
+ apic_write(APIC_TASKPRI, value);
/*
* Now that we are all set up, enable the APIC
@@ -387,7 +386,7 @@ void __cpuinit setup_local_APIC (void)
* Set spurious IRQ vector
*/
value |= SPURIOUS_APIC_VECTOR;
- apic_write_around(APIC_SPIV, value);
+ apic_write(APIC_SPIV, value);
/*
* Set up LVT0, LVT1:
@@ -407,7 +406,7 @@ void __cpuinit setup_local_APIC (void)
value = APIC_DM_EXTINT | APIC_LVT_MASKED;
apic_printk(APIC_VERBOSE, "masked ExtINT on CPU#%d\n", smp_processor_id());
}
- apic_write_around(APIC_LVT0, value);
+ apic_write(APIC_LVT0, value);
/*
* only the BP should see the LINT1 NMI signal, obviously.
@@ -416,14 +415,14 @@ void __cpuinit setup_local_APIC (void)
value = APIC_DM_NMI;
else
value = APIC_DM_NMI | APIC_LVT_MASKED;
- apic_write_around(APIC_LVT1, value);
+ apic_write(APIC_LVT1, value);
{
unsigned oldvalue;
maxlvt = get_maxlvt();
oldvalue = apic_read(APIC_ESR);
value = ERROR_APIC_VECTOR; // enables sending errors
- apic_write_around(APIC_LVTERR, value);
+ apic_write(APIC_LVTERR, value);
/*
* spec says clear errors after enabling vector.
*/
@@ -500,13 +499,10 @@ static int lapic_resume(struct sys_device *dev)
if (!apic_pm_state.active)
return 0;
- /* XXX: Pavel needs this for S3 resume, but can't explain why */
- set_fixmap_nocache(FIX_APIC_BASE, APIC_DEFAULT_PHYS_BASE);
-
local_irq_save(flags);
rdmsr(MSR_IA32_APICBASE, l, h);
l &= ~MSR_IA32_APICBASE_BASE;
- l |= MSR_IA32_APICBASE_ENABLE | APIC_DEFAULT_PHYS_BASE;
+ l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr;
wrmsr(MSR_IA32_APICBASE, l, h);
apic_write(APIC_LVTERR, ERROR_APIC_VECTOR | APIC_LVT_MASKED);
apic_write(APIC_ID, apic_pm_state.apic_id);
@@ -660,20 +656,25 @@ void __init init_apic_mappings(void)
static void __setup_APIC_LVTT(unsigned int clocks)
{
unsigned int lvtt_value, tmp_value, ver;
+ int cpu = smp_processor_id();
ver = GET_APIC_VERSION(apic_read(APIC_LVR));
lvtt_value = APIC_LVT_TIMER_PERIODIC | LOCAL_TIMER_VECTOR;
- apic_write_around(APIC_LVTT, lvtt_value);
+
+ if (cpu_isset(cpu, timer_interrupt_broadcast_ipi_mask))
+ lvtt_value |= APIC_LVT_MASKED;
+
+ apic_write(APIC_LVTT, lvtt_value);
/*
* Divide PICLK by 16
*/
tmp_value = apic_read(APIC_TDCR);
- apic_write_around(APIC_TDCR, (tmp_value
+ apic_write(APIC_TDCR, (tmp_value
& ~(APIC_TDR_DIV_1 | APIC_TDR_DIV_TMBASE))
| APIC_TDR_DIV_16);
- apic_write_around(APIC_TMICT, clocks/APIC_DIVISOR);
+ apic_write(APIC_TMICT, clocks/APIC_DIVISOR);
}
static void setup_APIC_timer(unsigned int clocks)
@@ -682,12 +683,6 @@ static void setup_APIC_timer(unsigned int clocks)
local_irq_save(flags);
- /* For some reasons this doesn't work on Simics, so fake it for now */
- if (!strstr(boot_cpu_data.x86_model_id, "Screwdriver")) {
- __setup_APIC_LVTT(clocks);
- return;
- }
-
/* wait for irq slice */
if (vxtime.hpet_address) {
int trigger = hpet_readl(HPET_T0_CMP);
@@ -700,7 +695,7 @@ static void setup_APIC_timer(unsigned int clocks)
outb_p(0x00, 0x43);
c2 = inb_p(0x40);
c2 |= inb_p(0x40) << 8;
- do {
+ do {
c1 = c2;
outb_p(0x00, 0x43);
c2 = inb_p(0x40);
@@ -785,52 +780,68 @@ void __cpuinit setup_secondary_APIC_clock(void)
local_irq_enable();
}
-void __cpuinit disable_APIC_timer(void)
+void disable_APIC_timer(void)
{
if (using_apic_timer) {
unsigned long v;
v = apic_read(APIC_LVTT);
- apic_write_around(APIC_LVTT, v | APIC_LVT_MASKED);
+ apic_write(APIC_LVTT, v | APIC_LVT_MASKED);
}
}
void enable_APIC_timer(void)
{
- if (using_apic_timer) {
+ int cpu = smp_processor_id();
+
+ if (using_apic_timer &&
+ !cpu_isset(cpu, timer_interrupt_broadcast_ipi_mask)) {
unsigned long v;
v = apic_read(APIC_LVTT);
- apic_write_around(APIC_LVTT, v & ~APIC_LVT_MASKED);
+ apic_write(APIC_LVTT, v & ~APIC_LVT_MASKED);
}
}
-/*
- * the frequency of the profiling timer can be changed
- * by writing a multiplier value into /proc/profile.
- */
-int setup_profiling_timer(unsigned int multiplier)
+void switch_APIC_timer_to_ipi(void *cpumask)
{
- int i;
+ cpumask_t mask = *(cpumask_t *)cpumask;
+ int cpu = smp_processor_id();
- /*
- * Sanity check. [at least 500 APIC cycles should be
- * between APIC interrupts as a rule of thumb, to avoid
- * irqs flooding us]
- */
- if ( (!multiplier) || (calibration_result/multiplier < 500))
- return -EINVAL;
-
- /*
- * Set the new multiplier for each CPU. CPUs don't start using the
- * new values until the next timer interrupt in which they do process
- * accounting. At that time they also adjust their APIC timers
- * accordingly.
- */
- for (i = 0; i < NR_CPUS; ++i)
- per_cpu(prof_multiplier, i) = multiplier;
+ if (cpu_isset(cpu, mask) &&
+ !cpu_isset(cpu, timer_interrupt_broadcast_ipi_mask)) {
+ disable_APIC_timer();
+ cpu_set(cpu, timer_interrupt_broadcast_ipi_mask);
+ }
+}
+EXPORT_SYMBOL(switch_APIC_timer_to_ipi);
- return 0;
+void smp_send_timer_broadcast_ipi(void)
+{
+ cpumask_t mask;
+
+ cpus_and(mask, cpu_online_map, timer_interrupt_broadcast_ipi_mask);
+ if (!cpus_empty(mask)) {
+ send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
+ }
+}
+
+void switch_ipi_to_APIC_timer(void *cpumask)
+{
+ cpumask_t mask = *(cpumask_t *)cpumask;
+ int cpu = smp_processor_id();
+
+ if (cpu_isset(cpu, mask) &&
+ cpu_isset(cpu, timer_interrupt_broadcast_ipi_mask)) {
+ cpu_clear(cpu, timer_interrupt_broadcast_ipi_mask);
+ enable_APIC_timer();
+ }
+}
+EXPORT_SYMBOL(switch_ipi_to_APIC_timer);
+
+int setup_profiling_timer(unsigned int multiplier)
+{
+ return -EINVAL;
}
#ifdef CONFIG_X86_MCE_AMD
@@ -857,32 +868,10 @@ void setup_threshold_lvt(unsigned long lvt_off)
void smp_local_timer_interrupt(struct pt_regs *regs)
{
- int cpu = smp_processor_id();
-
profile_tick(CPU_PROFILING, regs);
- if (--per_cpu(prof_counter, cpu) <= 0) {
- /*
- * The multiplier may have changed since the last time we got
- * to this point as a result of the user writing to
- * /proc/profile. In this case we need to adjust the APIC
- * timer accordingly.
- *
- * Interrupts are already masked off at this point.
- */
- per_cpu(prof_counter, cpu) = per_cpu(prof_multiplier, cpu);
- if (per_cpu(prof_counter, cpu) !=
- per_cpu(prof_old_multiplier, cpu)) {
- __setup_APIC_LVTT(calibration_result/
- per_cpu(prof_counter, cpu));
- per_cpu(prof_old_multiplier, cpu) =
- per_cpu(prof_counter, cpu);
- }
-
#ifdef CONFIG_SMP
- update_process_times(user_mode(regs));
+ update_process_times(user_mode(regs));
#endif
- }
-
/*
* We take the 'long' return path, and there every subsystem
* grabs the appropriate locks (kernel lock/ irq lock).
@@ -920,6 +909,7 @@ void smp_apic_timer_interrupt(struct pt_regs *regs)
* Besides, if we don't timer interrupts ignore the global
* interrupt lock, which is the WrongThing (tm) to do.
*/
+ exit_idle();
irq_enter();
smp_local_timer_interrupt(regs);
irq_exit();
@@ -979,6 +969,7 @@ __init int oem_force_hpet_timer(void)
asmlinkage void smp_spurious_interrupt(void)
{
unsigned int v;
+ exit_idle();
irq_enter();
/*
* Check if this really is a spurious interrupt and ACK it
@@ -1014,6 +1005,7 @@ asmlinkage void smp_error_interrupt(void)
{
unsigned int v, v1;
+ exit_idle();
irq_enter();
/* First tickle the hardware, only then report what went on. -- REW */
v = apic_read(APIC_ESR);
@@ -1060,7 +1052,7 @@ int __init APIC_init_uniprocessor (void)
connect_bsp_APIC();
phys_cpu_present_map = physid_mask_of_physid(boot_cpu_id);
- apic_write_around(APIC_ID, boot_cpu_id);
+ apic_write(APIC_ID, SET_APIC_ID(boot_cpu_id));
setup_local_APIC();
diff --git a/arch/x86_64/kernel/asm-offsets.c b/arch/x86_64/kernel/asm-offsets.c
index aaa6d383351..38834bbbae1 100644
--- a/arch/x86_64/kernel/asm-offsets.c
+++ b/arch/x86_64/kernel/asm-offsets.c
@@ -33,6 +33,7 @@ int main(void)
ENTRY(flags);
ENTRY(addr_limit);
ENTRY(preempt_count);
+ ENTRY(status);
BLANK();
#undef ENTRY
#define ENTRY(entry) DEFINE(pda_ ## entry, offsetof(struct x8664_pda, entry))
@@ -42,6 +43,7 @@ int main(void)
ENTRY(irqcount);
ENTRY(cpunumber);
ENTRY(irqstackptr);
+ ENTRY(data_offset);
BLANK();
#undef ENTRY
#ifdef CONFIG_IA32_EMULATION
@@ -64,5 +66,7 @@ int main(void)
DEFINE(pbe_address, offsetof(struct pbe, address));
DEFINE(pbe_orig_address, offsetof(struct pbe, orig_address));
DEFINE(pbe_next, offsetof(struct pbe, next));
+ BLANK();
+ DEFINE(TSS_ist, offsetof(struct tss_struct, ist));
return 0;
}
diff --git a/arch/x86_64/kernel/crash.c b/arch/x86_64/kernel/crash.c
index 535e0446607..4e6c3b729e3 100644
--- a/arch/x86_64/kernel/crash.c
+++ b/arch/x86_64/kernel/crash.c
@@ -11,19 +11,156 @@
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/smp.h>
+#include <linux/irq.h>
#include <linux/reboot.h>
#include <linux/kexec.h>
+#include <linux/delay.h>
+#include <linux/elf.h>
+#include <linux/elfcore.h>
#include <asm/processor.h>
#include <asm/hardirq.h>
#include <asm/nmi.h>
#include <asm/hw_irq.h>
+#include <asm/mach_apic.h>
-note_buf_t crash_notes[NR_CPUS];
+/* This keeps track of which cpu is the crashing one. */
+static int crashing_cpu;
+
+static u32 *append_elf_note(u32 *buf, char *name, unsigned type,
+ void *data, size_t data_len)
+{
+ struct elf_note note;
+
+ note.n_namesz = strlen(name) + 1;
+ note.n_descsz = data_len;
+ note.n_type = type;
+ memcpy(buf, &note, sizeof(note));
+	buf += (sizeof(note) + 3)/4;
+ memcpy(buf, name, note.n_namesz);
+ buf += (note.n_namesz + 3)/4;
+ memcpy(buf, data, note.n_descsz);
+ buf += (note.n_descsz + 3)/4;
+
+ return buf;
+}
+
+static void final_note(u32 *buf)
+{
+ struct elf_note note;
+
+ note.n_namesz = 0;
+ note.n_descsz = 0;
+ note.n_type = 0;
+ memcpy(buf, &note, sizeof(note));
+}
+
+static void crash_save_this_cpu(struct pt_regs *regs, int cpu)
+{
+ struct elf_prstatus prstatus;
+ u32 *buf;
+
+ if ((cpu < 0) || (cpu >= NR_CPUS))
+ return;
+
+	/* Using ELF notes here is opportunistic.
+	 * I need a well-defined structure format
+	 * for the data I pass, and I need tags
+	 * on the data to indicate what information I have
+	 * squirrelled away.  ELF notes happen to provide
+	 * all of that, so there is no need to invent something new.
+	 */
+
+ buf = (u32*)per_cpu_ptr(crash_notes, cpu);
+
+ if (!buf)
+ return;
+
+ memset(&prstatus, 0, sizeof(prstatus));
+ prstatus.pr_pid = current->pid;
+ elf_core_copy_regs(&prstatus.pr_reg, regs);
+ buf = append_elf_note(buf, "CORE", NT_PRSTATUS, &prstatus,
+ sizeof(prstatus));
+ final_note(buf);
+}
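The records written above follow the standard ELF note layout: three 4-byte header words, then the name and the descriptor, each padded to a 4-byte boundary, which is why the buffer pointer advances in (len + 3)/4 u32 steps. A minimal sketch of the size arithmetic, as ordinary C and not part of the patch:

	#include <stddef.h>
	#include <string.h>

	/* Bytes one ELF note occupies in the buffer written by
	 * append_elf_note(): header + padded name + padded descriptor. */
	static size_t elf_note_bytes(const char *name, size_t desc_len)
	{
		size_t namesz = strlen(name) + 1;	/* including the NUL */

		return 3 * sizeof(unsigned int)		/* n_namesz, n_descsz, n_type */
			+ ((namesz + 3) & ~(size_t)3)	/* name, 4-byte aligned */
			+ ((desc_len + 3) & ~(size_t)3);/* desc, 4-byte aligned */
	}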
+
+static void crash_save_self(struct pt_regs *regs)
+{
+ int cpu;
+
+ cpu = smp_processor_id();
+ crash_save_this_cpu(regs, cpu);
+}
+
+#ifdef CONFIG_SMP
+static atomic_t waiting_for_crash_ipi;
+
+static int crash_nmi_callback(struct pt_regs *regs, int cpu)
+{
+	/*
+	 * Don't do anything if this handler is invoked on the crashing cpu;
+	 * otherwise the system will hang completely.  The crashing cpu can
+	 * get an NMI if the system was booted with the nmi_watchdog parameter.
+	 */
+ if (cpu == crashing_cpu)
+ return 1;
+ local_irq_disable();
+
+ crash_save_this_cpu(regs, cpu);
+ disable_local_APIC();
+ atomic_dec(&waiting_for_crash_ipi);
+ /* Assume hlt works */
+ for(;;)
+ asm("hlt");
+
+ return 1;
+}
+
+static void smp_send_nmi_allbutself(void)
+{
+ send_IPI_allbutself(APIC_DM_NMI);
+}
+
+/*
+ * This code is a best-effort heuristic for getting the
+ * other cpus to stop executing, so races with
+ * cpu hotplug shouldn't matter.
+ */
+
+static void nmi_shootdown_cpus(void)
+{
+ unsigned long msecs;
+
+ atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
+ set_nmi_callback(crash_nmi_callback);
+
+ /*
+ * Ensure the new callback function is set before sending
+ * out the NMI
+ */
+ wmb();
+
+ smp_send_nmi_allbutself();
+
+ msecs = 1000; /* Wait at most a second for the other cpus to stop */
+ while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) {
+ mdelay(1);
+ msecs--;
+ }
+ /* Leave the nmi callback set */
+ disable_local_APIC();
+}
+#else
+static void nmi_shootdown_cpus(void)
+{
+ /* There are no cpus to shootdown */
+}
+#endif
void machine_crash_shutdown(struct pt_regs *regs)
{
- /* This function is only called after the system
+ /*
+ * This function is only called after the system
* has paniced or is otherwise in a critical state.
* The minimum amount of code to allow a kexec'd kernel
* to run successfully needs to happen here.
@@ -31,4 +168,19 @@ void machine_crash_shutdown(struct pt_regs *regs)
* In practice this means shooting down the other cpus in
* an SMP system.
*/
+	/* The kernel is broken, so disable interrupts */
+ local_irq_disable();
+
+	/* Make a note of the crashing cpu; it will be used in the NMI callback. */
+ crashing_cpu = smp_processor_id();
+ nmi_shootdown_cpus();
+
+	if (cpu_has_apic)
+ disable_local_APIC();
+
+#if defined(CONFIG_X86_IO_APIC)
+ disable_IO_APIC();
+#endif
+
+ crash_save_self(regs);
}
diff --git a/arch/x86_64/kernel/crash_dump.c b/arch/x86_64/kernel/crash_dump.c
new file mode 100644
index 00000000000..942deac4d43
--- /dev/null
+++ b/arch/x86_64/kernel/crash_dump.c
@@ -0,0 +1,47 @@
+/*
+ * kernel/crash_dump.c - Memory-preserving reboot related code.
+ *
+ * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
+ * Copyright (C) IBM Corporation, 2004. All rights reserved
+ */
+
+#include <linux/errno.h>
+#include <linux/crash_dump.h>
+
+#include <asm/uaccess.h>
+#include <asm/io.h>
+
+/**
+ * copy_oldmem_page - copy one page from "oldmem"
+ * @pfn: page frame number to be copied
+ * @buf: target memory address for the copy; this can be in kernel address
+ * space or user address space (see @userbuf)
+ * @csize: number of bytes to copy
+ * @offset: offset in bytes into the page (based on pfn) to begin the copy
+ * @userbuf: if set, @buf is in user address space, use copy_to_user(),
+ * otherwise @buf is in kernel address space, use memcpy().
+ *
+ * Copy a page from "oldmem". For this page, there is no pte mapped
+ * in the current kernel. We stitch up a pte, similar to kmap_atomic.
+ */
+ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
+ size_t csize, unsigned long offset, int userbuf)
+{
+ void *vaddr;
+
+ if (!csize)
+ return 0;
+
+ vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
+
+ if (userbuf) {
+ if (copy_to_user(buf, (vaddr + offset), csize)) {
+ iounmap(vaddr);
+ return -EFAULT;
+ }
+ } else
+ memcpy(buf, (vaddr + offset), csize);
+
+ iounmap(vaddr);
+ return csize;
+}
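A hedged usage sketch of the kernel-buffer path (the wrapper name is made up; only the copy_oldmem_page() call and its arguments come from this file):

	/* Read one whole page of the crashed kernel's memory into a
	 * kernel buffer; userbuf == 0 selects the memcpy() path above. */
	static ssize_t read_oldmem_kernel_page(unsigned long pfn, char *kbuf)
	{
		return copy_oldmem_page(pfn, kbuf, PAGE_SIZE, 0, 0);
	}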
diff --git a/arch/x86_64/kernel/e820.c b/arch/x86_64/kernel/e820.c
index 17579a1a174..293cd71a266 100644
--- a/arch/x86_64/kernel/e820.c
+++ b/arch/x86_64/kernel/e820.c
@@ -559,6 +559,27 @@ void __init parse_memopt(char *p, char **from)
end_user_pfn >>= PAGE_SHIFT;
}
+void __init parse_memmapopt(char *p, char **from)
+{
+ unsigned long long start_at, mem_size;
+
+ mem_size = memparse(p, from);
+ p = *from;
+ if (*p == '@') {
+ start_at = memparse(p+1, from);
+ add_memory_region(start_at, mem_size, E820_RAM);
+ } else if (*p == '#') {
+ start_at = memparse(p+1, from);
+ add_memory_region(start_at, mem_size, E820_ACPI);
+ } else if (*p == '$') {
+ start_at = memparse(p+1, from);
+ add_memory_region(start_at, mem_size, E820_RESERVED);
+ } else {
+ end_user_pfn = (mem_size >> PAGE_SHIFT);
+ }
+ p = *from;
+}
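Read straight off the branches above, the accepted forms are size-first with an optional type-tagged start address (the addresses below are made-up examples, and the '$' form may need escaping in the boot loader configuration):

	memmap=64M@16M		add 64MB of E820_RAM starting at 16MB
	memmap=32M#0x3e000000	mark 32MB at 0x3e000000 as E820_ACPI data
	memmap=16M$0x3c000000	reserve 16MB at 0x3c000000 (E820_RESERVED)
	memmap=512M		no tag: clamp usable memory, like mem=512M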
+
unsigned long pci_mem_start = 0xaeedbabe;
/*
diff --git a/arch/x86_64/kernel/early_printk.c b/arch/x86_64/kernel/early_printk.c
index 9cd968dd0f5..6dffb498ccd 100644
--- a/arch/x86_64/kernel/early_printk.c
+++ b/arch/x86_64/kernel/early_printk.c
@@ -216,7 +216,7 @@ void early_printk(const char *fmt, ...)
va_end(ap);
}
-static int keep_early;
+static int __initdata keep_early;
int __init setup_early_printk(char *opt)
{
@@ -226,8 +226,6 @@ int __init setup_early_printk(char *opt)
if (early_console_initialized)
return -1;
- opt = strchr(opt, '=') + 1;
-
strlcpy(buf,opt,sizeof(buf));
space = strchr(buf, ' ');
if (space)
diff --git a/arch/x86_64/kernel/entry.S b/arch/x86_64/kernel/entry.S
index 9ff42041bb6..dbdba56e8fa 100644
--- a/arch/x86_64/kernel/entry.S
+++ b/arch/x86_64/kernel/entry.S
@@ -41,6 +41,7 @@
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/hw_irq.h>
+#include <asm/page.h>
.code64
@@ -313,6 +314,7 @@ int_with_check:
movl threadinfo_flags(%rcx),%edx
andl %edi,%edx
jnz int_careful
+ andl $~TS_COMPAT,threadinfo_status(%rcx)
jmp retint_swapgs
/* Either reschedule or signal or syscall exit tracking needed. */
@@ -673,7 +675,7 @@ ENTRY(spurious_interrupt)
/* error code is on the stack already */
/* handle NMI like exceptions that can happen everywhere */
- .macro paranoidentry sym
+ .macro paranoidentry sym, ist=0
SAVE_ALL
cld
movl $1,%ebx
@@ -683,10 +685,20 @@ ENTRY(spurious_interrupt)
js 1f
swapgs
xorl %ebx,%ebx
-1: movq %rsp,%rdi
+1:
+ .if \ist
+ movq %gs:pda_data_offset, %rbp
+ .endif
+ movq %rsp,%rdi
movq ORIG_RAX(%rsp),%rsi
movq $-1,ORIG_RAX(%rsp)
+ .if \ist
+ subq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
+ .endif
call \sym
+ .if \ist
+ addq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
+ .endif
cli
.endm
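The \ist argument implements a simple form of IST stack nesting: while the handler runs, the per-CPU TSS IST slot is lowered by one exception-stack size so a recursive #DB/#BP gets a fresh stack instead of clobbering the live frame. A C model of the idea (illustrative only; the ist[] array and EXCEPTION_STKSZ are the names this patch uses, everything else is made up):

	struct tss_model {
		unsigned long ist[7];		/* stands in for tss_struct.ist */
	};

	static void run_on_shifted_ist(struct tss_model *tss, int ist,
				       void (*handler)(void))
	{
		tss->ist[ist - 1] -= 4096;	/* EXCEPTION_STKSZ stand-in */
		handler();			/* nested faults get a fresh stack */
		tss->ist[ist - 1] += 4096;	/* restored on the way out */
	}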
@@ -754,7 +766,7 @@ error_exit:
jnz retint_careful
swapgs
RESTORE_ARGS 0,8,0
- iretq
+ jmp iret_label
CFI_ENDPROC
error_kernelspace:
@@ -904,7 +916,7 @@ KPROBE_ENTRY(debug)
INTR_FRAME
pushq $0
CFI_ADJUST_CFA_OFFSET 8
- paranoidentry do_debug
+ paranoidentry do_debug, DEBUG_STACK
jmp paranoid_exit
CFI_ENDPROC
.previous .text
@@ -959,7 +971,12 @@ paranoid_schedule:
CFI_ENDPROC
KPROBE_ENTRY(int3)
- zeroentry do_int3
+ INTR_FRAME
+ pushq $0
+ CFI_ADJUST_CFA_OFFSET 8
+ paranoidentry do_int3, DEBUG_STACK
+ jmp paranoid_exit
+ CFI_ENDPROC
.previous .text
ENTRY(overflow)
@@ -1021,23 +1038,18 @@ ENTRY(machine_check)
CFI_ENDPROC
#endif
-ENTRY(call_debug)
- zeroentry do_call_debug
-
ENTRY(call_softirq)
CFI_STARTPROC
movq %gs:pda_irqstackptr,%rax
- pushq %r15
- CFI_ADJUST_CFA_OFFSET 8
- movq %rsp,%r15
- CFI_DEF_CFA_REGISTER r15
+ movq %rsp,%rdx
+ CFI_DEF_CFA_REGISTER rdx
incl %gs:pda_irqcount
cmove %rax,%rsp
+ pushq %rdx
+	/* TODO: CFI_DEF_CFA_EXPRESSION ... */
call __do_softirq
- movq %r15,%rsp
+ popq %rsp
CFI_DEF_CFA_REGISTER rsp
decl %gs:pda_irqcount
- popq %r15
- CFI_ADJUST_CFA_OFFSET -8
ret
CFI_ENDPROC
diff --git a/arch/x86_64/kernel/genapic_cluster.c b/arch/x86_64/kernel/genapic_cluster.c
index a472d62f899..43fcf62fef0 100644
--- a/arch/x86_64/kernel/genapic_cluster.c
+++ b/arch/x86_64/kernel/genapic_cluster.c
@@ -72,14 +72,11 @@ static void cluster_send_IPI_mask(cpumask_t mask, int vector)
static void cluster_send_IPI_allbutself(int vector)
{
cpumask_t mask = cpu_online_map;
- int me = get_cpu(); /* Ensure we are not preempted when we clear */
- cpu_clear(me, mask);
+ cpu_clear(smp_processor_id(), mask);
if (!cpus_empty(mask))
cluster_send_IPI_mask(mask, vector);
-
- put_cpu();
}
static void cluster_send_IPI_all(int vector)
diff --git a/arch/x86_64/kernel/genapic_flat.c b/arch/x86_64/kernel/genapic_flat.c
index 9da3edb799e..1a2ab825be9 100644
--- a/arch/x86_64/kernel/genapic_flat.c
+++ b/arch/x86_64/kernel/genapic_flat.c
@@ -83,12 +83,11 @@ static void flat_send_IPI_allbutself(int vector)
__send_IPI_shortcut(APIC_DEST_ALLBUT, vector,APIC_DEST_LOGICAL);
#else
cpumask_t allbutme = cpu_online_map;
- int me = get_cpu(); /* Ensure we are not preempted when we clear */
- cpu_clear(me, allbutme);
+
+ cpu_clear(smp_processor_id(), allbutme);
if (!cpus_empty(allbutme))
flat_send_IPI_mask(allbutme, vector);
- put_cpu();
#endif
}
@@ -149,10 +148,9 @@ static void physflat_send_IPI_mask(cpumask_t cpumask, int vector)
static void physflat_send_IPI_allbutself(int vector)
{
cpumask_t allbutme = cpu_online_map;
- int me = get_cpu();
- cpu_clear(me, allbutme);
+
+ cpu_clear(smp_processor_id(), allbutme);
physflat_send_IPI_mask(allbutme, vector);
- put_cpu();
}
static void physflat_send_IPI_all(int vector)
diff --git a/arch/x86_64/kernel/head.S b/arch/x86_64/kernel/head.S
index 15290968e49..692c737fedd 100644
--- a/arch/x86_64/kernel/head.S
+++ b/arch/x86_64/kernel/head.S
@@ -241,104 +241,70 @@ ljumpvector:
ENTRY(stext)
ENTRY(_stext)
-.org 0x1000
-ENTRY(init_level4_pgt)
+ $page = 0
+#define NEXT_PAGE(name) \
+ $page = $page + 1; \
+ .org $page * 0x1000; \
+ phys_/**/name = $page * 0x1000 + __PHYSICAL_START; \
+ENTRY(name)
+
+NEXT_PAGE(init_level4_pgt)
/* This gets initialized in x86_64_start_kernel */
.fill 512,8,0
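For reference, this first invocation expands to the same layout the removed .org directives produced ($page counts up from 0, and the /**/ in the macro pastes the phys_ prefix onto the name):

	$page = 1
	.org 0x1000
	phys_init_level4_pgt = 0x1000 + __PHYSICAL_START
	ENTRY(init_level4_pgt)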
-.org 0x2000
-ENTRY(level3_ident_pgt)
- .quad 0x0000000000004007 + __PHYSICAL_START
+NEXT_PAGE(level3_ident_pgt)
+ .quad phys_level2_ident_pgt | 0x007
.fill 511,8,0
-.org 0x3000
-ENTRY(level3_kernel_pgt)
+NEXT_PAGE(level3_kernel_pgt)
.fill 510,8,0
/* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
- .quad 0x0000000000005007 + __PHYSICAL_START /* -> level2_kernel_pgt */
+ .quad phys_level2_kernel_pgt | 0x007
.fill 1,8,0
-.org 0x4000
-ENTRY(level2_ident_pgt)
+NEXT_PAGE(level2_ident_pgt)
/* 40MB for bootup. */
- .quad 0x0000000000000083
- .quad 0x0000000000200083
- .quad 0x0000000000400083
- .quad 0x0000000000600083
- .quad 0x0000000000800083
- .quad 0x0000000000A00083
- .quad 0x0000000000C00083
- .quad 0x0000000000E00083
- .quad 0x0000000001000083
- .quad 0x0000000001200083
- .quad 0x0000000001400083
- .quad 0x0000000001600083
- .quad 0x0000000001800083
- .quad 0x0000000001A00083
- .quad 0x0000000001C00083
- .quad 0x0000000001E00083
- .quad 0x0000000002000083
- .quad 0x0000000002200083
- .quad 0x0000000002400083
- .quad 0x0000000002600083
+ i = 0
+ .rept 20
+ .quad i << 21 | 0x083
+ i = i + 1
+ .endr
/* Temporary mappings for the super early allocator in arch/x86_64/mm/init.c */
.globl temp_boot_pmds
temp_boot_pmds:
.fill 492,8,0
-.org 0x5000
-ENTRY(level2_kernel_pgt)
+NEXT_PAGE(level2_kernel_pgt)
/* 40MB kernel mapping. The kernel code cannot be bigger than that.
When you change this change KERNEL_TEXT_SIZE in page.h too. */
/* (2^48-(2*1024*1024*1024)-((2^39)*511)-((2^30)*510)) = 0 */
- .quad 0x0000000000000183
- .quad 0x0000000000200183
- .quad 0x0000000000400183
- .quad 0x0000000000600183
- .quad 0x0000000000800183
- .quad 0x0000000000A00183
- .quad 0x0000000000C00183
- .quad 0x0000000000E00183
- .quad 0x0000000001000183
- .quad 0x0000000001200183
- .quad 0x0000000001400183
- .quad 0x0000000001600183
- .quad 0x0000000001800183
- .quad 0x0000000001A00183
- .quad 0x0000000001C00183
- .quad 0x0000000001E00183
- .quad 0x0000000002000183
- .quad 0x0000000002200183
- .quad 0x0000000002400183
- .quad 0x0000000002600183
+ i = 0
+ .rept 20
+ .quad i << 21 | 0x183
+ i = i + 1
+ .endr
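As a spot check on the .rept form: each entry maps one 2MB page, so entry i is (i << 21) | flags, where 0x083 is Present|RW|PSE for the identity map and 0x183 adds the Global bit for the kernel map. For i = 19 this gives (19 << 21) | 0x183 = 0x2600000 | 0x183 = 0x0000000002600183, matching the last quad removed above.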
/* Module mapping starts here */
.fill 492,8,0
-.org 0x6000
-ENTRY(empty_zero_page)
+NEXT_PAGE(empty_zero_page)
-.org 0x7000
-ENTRY(empty_bad_page)
-
-.org 0x8000
-ENTRY(empty_bad_pte_table)
+NEXT_PAGE(level3_physmem_pgt)
+ .quad phys_level2_kernel_pgt | 0x007 /* so that __va works even before pagetable_init */
+ .fill 511,8,0
-.org 0x9000
-ENTRY(empty_bad_pmd_table)
+#undef NEXT_PAGE
-.org 0xa000
-ENTRY(level3_physmem_pgt)
- .quad 0x0000000000005007 + __PHYSICAL_START /* -> level2_kernel_pgt (so that __va works even before pagetable_init) */
+ .data
- .org 0xb000
#ifdef CONFIG_ACPI_SLEEP
+ .align PAGE_SIZE
ENTRY(wakeup_level4_pgt)
- .quad 0x0000000000002007 + __PHYSICAL_START /* -> level3_ident_pgt */
+ .quad phys_level3_ident_pgt | 0x007
.fill 255,8,0
- .quad 0x000000000000a007 + __PHYSICAL_START
+ .quad phys_level3_physmem_pgt | 0x007
.fill 254,8,0
/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
- .quad 0x0000000000003007 + __PHYSICAL_START /* -> level3_kernel_pgt */
+ .quad phys_level3_kernel_pgt | 0x007
#endif
#ifndef CONFIG_HOTPLUG_CPU
@@ -352,12 +318,12 @@ ENTRY(wakeup_level4_pgt)
*/
.align PAGE_SIZE
ENTRY(boot_level4_pgt)
- .quad 0x0000000000002007 + __PHYSICAL_START /* -> level3_ident_pgt */
+ .quad phys_level3_ident_pgt | 0x007
.fill 255,8,0
- .quad 0x000000000000a007 + __PHYSICAL_START
+ .quad phys_level3_physmem_pgt | 0x007
.fill 254,8,0
/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
- .quad 0x0000000000003007 + __PHYSICAL_START /* -> level3_kernel_pgt */
+ .quad phys_level3_kernel_pgt | 0x007
.data
@@ -379,14 +345,14 @@ gdt:
* Also sysret mandates a special GDT layout
*/
-.align L1_CACHE_BYTES
+.align PAGE_SIZE
/* The TLS descriptors are currently at a different place compared to i386.
Hopefully nobody expects them at a fixed place (Wine?) */
ENTRY(cpu_gdt_table)
.quad 0x0000000000000000 /* NULL descriptor */
- .quad 0x008f9a000000ffff /* __KERNEL_COMPAT32_CS */
+ .quad 0x0 /* unused */
.quad 0x00af9a000000ffff /* __KERNEL_CS */
.quad 0x00cf92000000ffff /* __KERNEL_DS */
.quad 0x00cffa000000ffff /* __USER32_CS */
@@ -396,15 +362,15 @@ ENTRY(cpu_gdt_table)
.quad 0,0 /* TSS */
.quad 0,0 /* LDT */
.quad 0,0,0 /* three TLS descriptors */
- .quad 0x00009a000000ffff /* __KERNEL16_CS - 16bit PM for S3 wakeup. */
- /* base must be patched for real base address. */
+ .quad 0 /* unused */
gdt_end:
/* asm/segment.h:GDT_ENTRIES must match this */
/* This should be a multiple of the cache line size */
- /* GDTs of other CPUs: */
- .fill (GDT_SIZE * NR_CPUS) - (gdt_end - cpu_gdt_table)
+ /* GDTs of other CPUs are now dynamically allocated */
+
+ /* zero the remaining page */
+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
- .align L1_CACHE_BYTES
ENTRY(idt_table)
.rept 256
.quad 0
diff --git a/arch/x86_64/kernel/head64.c b/arch/x86_64/kernel/head64.c
index b675c5add01..cea20a66c15 100644
--- a/arch/x86_64/kernel/head64.c
+++ b/arch/x86_64/kernel/head64.c
@@ -92,6 +92,9 @@ void __init x86_64_start_kernel(char * real_mode_data)
memcpy(init_level4_pgt, boot_level4_pgt, PTRS_PER_PGD*sizeof(pgd_t));
asm volatile("movq %0,%%cr3" :: "r" (__pa_symbol(&init_level4_pgt)));
+ for (i = 0; i < NR_CPUS; i++)
+ cpu_pda(i) = &boot_cpu_pda[i];
+
pda_init(0);
copy_bootdata(real_mode_data);
#ifdef CONFIG_SMP
@@ -99,7 +102,7 @@ void __init x86_64_start_kernel(char * real_mode_data)
#endif
s = strstr(saved_command_line, "earlyprintk=");
if (s != NULL)
- setup_early_printk(s);
+ setup_early_printk(strchr(s, '=') + 1);
#ifdef CONFIG_NUMA
s = strstr(saved_command_line, "numa=");
if (s != NULL)
diff --git a/arch/x86_64/kernel/i387.c b/arch/x86_64/kernel/i387.c
index d9b22b633e3..a5d7e16b928 100644
--- a/arch/x86_64/kernel/i387.c
+++ b/arch/x86_64/kernel/i387.c
@@ -95,7 +95,7 @@ int save_i387(struct _fpstate __user *buf)
if (!used_math())
return 0;
clear_used_math(); /* trigger finit */
- if (tsk->thread_info->status & TS_USEDFPU) {
+ if (task_thread_info(tsk)->status & TS_USEDFPU) {
err = save_i387_checking((struct i387_fxsave_struct __user *)buf);
if (err) return err;
stts();
diff --git a/arch/x86_64/kernel/i8259.c b/arch/x86_64/kernel/i8259.c
index 6e5101ad3d1..5ecd34ab8c2 100644
--- a/arch/x86_64/kernel/i8259.c
+++ b/arch/x86_64/kernel/i8259.c
@@ -133,7 +133,7 @@ static void end_8259A_irq (unsigned int irq)
{
if (irq > 256) {
char var;
- printk("return %p stack %p ti %p\n", __builtin_return_address(0), &var, current->thread_info);
+ printk("return %p stack %p ti %p\n", __builtin_return_address(0), &var, task_thread_info(current));
BUG();
}
@@ -549,10 +549,9 @@ void __init init_IRQ(void)
int vector = FIRST_EXTERNAL_VECTOR + i;
if (i >= NR_IRQS)
break;
- if (vector != IA32_SYSCALL_VECTOR && vector != KDB_VECTOR) {
+ if (vector != IA32_SYSCALL_VECTOR)
set_intr_gate(vector, interrupt[i]);
}
- }
#ifdef CONFIG_SMP
/*
diff --git a/arch/x86_64/kernel/init_task.c b/arch/x86_64/kernel/init_task.c
index e0ba5c1043f..ce31d904d60 100644
--- a/arch/x86_64/kernel/init_task.c
+++ b/arch/x86_64/kernel/init_task.c
@@ -44,6 +44,6 @@ EXPORT_SYMBOL(init_task);
* section. Since TSS's are completely CPU-local, we want them
* on exact cacheline boundaries, to eliminate cacheline ping-pong.
*/
-DEFINE_PER_CPU(struct tss_struct, init_tss) ____cacheline_maxaligned_in_smp = INIT_TSS;
+DEFINE_PER_CPU(struct tss_struct, init_tss) ____cacheline_internodealigned_in_smp = INIT_TSS;
#define ALIGN_TO_4K __attribute__((section(".data.init_task")))
diff --git a/arch/x86_64/kernel/io_apic.c b/arch/x86_64/kernel/io_apic.c
index 97154ab058b..1a5060b434b 100644
--- a/arch/x86_64/kernel/io_apic.c
+++ b/arch/x86_64/kernel/io_apic.c
@@ -37,6 +37,7 @@
#include <asm/proto.h>
#include <asm/mach_apic.h>
#include <asm/acpi.h>
+#include <asm/dma.h>
#define __apicdebuginit __init
@@ -46,6 +47,9 @@ static int no_timer_check;
int disable_timer_pin_1 __initdata;
+/* Where, if anywhere, the i8259 is connected in ExtINT mode */
+static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
+
static DEFINE_SPINLOCK(ioapic_lock);
/*
@@ -260,9 +264,6 @@ __setup("apic", enable_ioapic_setup);
void __init check_ioapic(void)
{
int num,slot,func;
- if (ioapic_force)
- return;
-
/* Poor man's PCI discovery */
for (num = 0; num < 32; num++) {
for (slot = 0; slot < 32; slot++) {
@@ -284,7 +285,7 @@ void __init check_ioapic(void)
switch (vendor) {
case PCI_VENDOR_ID_VIA:
#ifdef CONFIG_GART_IOMMU
- if ((end_pfn >= (0xffffffff>>PAGE_SHIFT) ||
+ if ((end_pfn > MAX_DMA32_PFN ||
force_iommu) &&
!iommu_aperture_allowed) {
printk(KERN_INFO
@@ -363,7 +364,7 @@ static int find_irq_entry(int apic, int pin, int type)
/*
* Find the pin to which IRQ[irq] (ISA) is connected
*/
-static int find_isa_irq_pin(int irq, int type)
+static int __init find_isa_irq_pin(int irq, int type)
{
int i;
@@ -381,6 +382,31 @@ static int find_isa_irq_pin(int irq, int type)
return -1;
}
+static int __init find_isa_irq_apic(int irq, int type)
+{
+ int i;
+
+ for (i = 0; i < mp_irq_entries; i++) {
+ int lbus = mp_irqs[i].mpc_srcbus;
+
+ if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA ||
+ mp_bus_id_to_type[lbus] == MP_BUS_EISA ||
+ mp_bus_id_to_type[lbus] == MP_BUS_MCA) &&
+ (mp_irqs[i].mpc_irqtype == type) &&
+ (mp_irqs[i].mpc_srcbusirq == irq))
+ break;
+ }
+ if (i < mp_irq_entries) {
+ int apic;
+ for(apic = 0; apic < nr_ioapics; apic++) {
+ if (mp_ioapics[apic].mpc_apicid == mp_irqs[i].mpc_dstapic)
+ return apic;
+ }
+ }
+
+ return -1;
+}
+
/*
* Find a specific PCI IRQ entry.
* Not an __init, possibly needed by modules
@@ -874,7 +900,7 @@ static void __init setup_IO_APIC_irqs(void)
* Set up the 8259A-master output pin as broadcast to all
* CPUs.
*/
-static void __init setup_ExtINT_IRQ0_pin(unsigned int pin, int vector)
+static void __init setup_ExtINT_IRQ0_pin(unsigned int apic, unsigned int pin, int vector)
{
struct IO_APIC_route_entry entry;
unsigned long flags;
@@ -884,7 +910,7 @@ static void __init setup_ExtINT_IRQ0_pin(unsigned int pin, int vector)
disable_8259A_irq(0);
/* mask LVT0 */
- apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
+ apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
/*
* We use logical delivery to get the timer IRQ
@@ -908,8 +934,8 @@ static void __init setup_ExtINT_IRQ0_pin(unsigned int pin, int vector)
* Add it to the IO-APIC irq-routing table:
*/
spin_lock_irqsave(&ioapic_lock, flags);
- io_apic_write(0, 0x11+2*pin, *(((int *)&entry)+1));
- io_apic_write(0, 0x10+2*pin, *(((int *)&entry)+0));
+ io_apic_write(apic, 0x11+2*pin, *(((int *)&entry)+1));
+ io_apic_write(apic, 0x10+2*pin, *(((int *)&entry)+0));
spin_unlock_irqrestore(&ioapic_lock, flags);
enable_8259A_irq(0);
@@ -1188,7 +1214,8 @@ void __apicdebuginit print_PIC(void)
static void __init enable_IO_APIC(void)
{
union IO_APIC_reg_01 reg_01;
- int i;
+ int i8259_apic, i8259_pin;
+ int i, apic;
unsigned long flags;
for (i = 0; i < PIN_MAP_SIZE; i++) {
@@ -1202,11 +1229,48 @@ static void __init enable_IO_APIC(void)
/*
* The number of IO-APIC IRQ registers (== #pins):
*/
- for (i = 0; i < nr_ioapics; i++) {
+ for (apic = 0; apic < nr_ioapics; apic++) {
spin_lock_irqsave(&ioapic_lock, flags);
- reg_01.raw = io_apic_read(i, 1);
+ reg_01.raw = io_apic_read(apic, 1);
spin_unlock_irqrestore(&ioapic_lock, flags);
- nr_ioapic_registers[i] = reg_01.bits.entries+1;
+ nr_ioapic_registers[apic] = reg_01.bits.entries+1;
+ }
+ for(apic = 0; apic < nr_ioapics; apic++) {
+ int pin;
+ /* See if any of the pins is in ExtINT mode */
+ for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
+ struct IO_APIC_route_entry entry;
+ spin_lock_irqsave(&ioapic_lock, flags);
+ *(((int *)&entry) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
+ *(((int *)&entry) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
+ spin_unlock_irqrestore(&ioapic_lock, flags);
+
+
+			/* If the interrupt line is enabled and in ExtINT mode,
+			 * we have found the pin where the i8259 is connected.
+			 */
+ if ((entry.mask == 0) && (entry.delivery_mode == dest_ExtINT)) {
+ ioapic_i8259.apic = apic;
+ ioapic_i8259.pin = pin;
+ goto found_i8259;
+ }
+ }
+ }
+ found_i8259:
+	/* See whether the MP table has reported the ExtINT pin */
+ i8259_pin = find_isa_irq_pin(0, mp_ExtINT);
+ i8259_apic = find_isa_irq_apic(0, mp_ExtINT);
+	/* Trust the MP table if nothing is set up in the hardware */
+ if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) {
+ printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n");
+ ioapic_i8259.pin = i8259_pin;
+ ioapic_i8259.apic = i8259_apic;
+ }
+ /* Complain if the MP table and the hardware disagree */
+ if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) &&
+ (i8259_pin >= 0) && (ioapic_i8259.pin >= 0))
+ {
+ printk(KERN_WARNING "ExtINT in hardware and MP table differ\n");
}
/*
@@ -1220,7 +1284,6 @@ static void __init enable_IO_APIC(void)
*/
void disable_IO_APIC(void)
{
- int pin;
/*
* Clear the IO-APIC before rebooting:
*/
@@ -1231,8 +1294,7 @@ void disable_IO_APIC(void)
* Put that IOAPIC in virtual wire mode
* so legacy interrupts can be delivered.
*/
- pin = find_isa_irq_pin(0, mp_ExtINT);
- if (pin != -1) {
+ if (ioapic_i8259.pin != -1) {
struct IO_APIC_route_entry entry;
unsigned long flags;
@@ -1243,21 +1305,23 @@ void disable_IO_APIC(void)
entry.polarity = 0; /* High */
entry.delivery_status = 0;
entry.dest_mode = 0; /* Physical */
- entry.delivery_mode = 7; /* ExtInt */
+ entry.delivery_mode = dest_ExtINT; /* ExtInt */
entry.vector = 0;
- entry.dest.physical.physical_dest = 0;
-
+ entry.dest.physical.physical_dest =
+ GET_APIC_ID(apic_read(APIC_ID));
/*
* Add it to the IO-APIC irq-routing table:
*/
spin_lock_irqsave(&ioapic_lock, flags);
- io_apic_write(0, 0x11+2*pin, *(((int *)&entry)+1));
- io_apic_write(0, 0x10+2*pin, *(((int *)&entry)+0));
+ io_apic_write(ioapic_i8259.apic, 0x11+2*ioapic_i8259.pin,
+ *(((int *)&entry)+1));
+ io_apic_write(ioapic_i8259.apic, 0x10+2*ioapic_i8259.pin,
+ *(((int *)&entry)+0));
spin_unlock_irqrestore(&ioapic_lock, flags);
}
- disconnect_bsp_APIC(pin != -1);
+ disconnect_bsp_APIC(ioapic_i8259.pin != -1);
}
/*
@@ -1571,7 +1635,7 @@ static void enable_lapic_irq (unsigned int irq)
unsigned long v;
v = apic_read(APIC_LVT0);
- apic_write_around(APIC_LVT0, v & ~APIC_LVT_MASKED);
+ apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED);
}
static void disable_lapic_irq (unsigned int irq)
@@ -1579,7 +1643,7 @@ static void disable_lapic_irq (unsigned int irq)
unsigned long v;
v = apic_read(APIC_LVT0);
- apic_write_around(APIC_LVT0, v | APIC_LVT_MASKED);
+ apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
}
static void ack_lapic_irq (unsigned int irq)
@@ -1626,20 +1690,21 @@ static void setup_nmi (void)
*/
static inline void unlock_ExtINT_logic(void)
{
- int pin, i;
+ int apic, pin, i;
struct IO_APIC_route_entry entry0, entry1;
unsigned char save_control, save_freq_select;
unsigned long flags;
- pin = find_isa_irq_pin(8, mp_INT);
+ pin = find_isa_irq_pin(8, mp_INT);
+ apic = find_isa_irq_apic(8, mp_INT);
if (pin == -1)
return;
spin_lock_irqsave(&ioapic_lock, flags);
- *(((int *)&entry0) + 1) = io_apic_read(0, 0x11 + 2 * pin);
- *(((int *)&entry0) + 0) = io_apic_read(0, 0x10 + 2 * pin);
+ *(((int *)&entry0) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
+ *(((int *)&entry0) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
spin_unlock_irqrestore(&ioapic_lock, flags);
- clear_IO_APIC_pin(0, pin);
+ clear_IO_APIC_pin(apic, pin);
memset(&entry1, 0, sizeof(entry1));
@@ -1652,8 +1717,8 @@ static inline void unlock_ExtINT_logic(void)
entry1.vector = 0;
spin_lock_irqsave(&ioapic_lock, flags);
- io_apic_write(0, 0x11 + 2 * pin, *(((int *)&entry1) + 1));
- io_apic_write(0, 0x10 + 2 * pin, *(((int *)&entry1) + 0));
+ io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry1) + 1));
+ io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry1) + 0));
spin_unlock_irqrestore(&ioapic_lock, flags);
save_control = CMOS_READ(RTC_CONTROL);
@@ -1671,11 +1736,11 @@ static inline void unlock_ExtINT_logic(void)
CMOS_WRITE(save_control, RTC_CONTROL);
CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
- clear_IO_APIC_pin(0, pin);
+ clear_IO_APIC_pin(apic, pin);
spin_lock_irqsave(&ioapic_lock, flags);
- io_apic_write(0, 0x11 + 2 * pin, *(((int *)&entry0) + 1));
- io_apic_write(0, 0x10 + 2 * pin, *(((int *)&entry0) + 0));
+ io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry0) + 1));
+ io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry0) + 0));
spin_unlock_irqrestore(&ioapic_lock, flags);
}
@@ -1687,7 +1752,7 @@ static inline void unlock_ExtINT_logic(void)
*/
static inline void check_timer(void)
{
- int pin1, pin2;
+ int apic1, pin1, apic2, pin2;
int vector;
/*
@@ -1704,14 +1769,17 @@ static inline void check_timer(void)
* the 8259A which implies the virtual wire has to be
* disabled in the local APIC.
*/
- apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
+ apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
init_8259A(1);
enable_8259A_irq(0);
- pin1 = find_isa_irq_pin(0, mp_INT);
- pin2 = find_isa_irq_pin(0, mp_ExtINT);
+ pin1 = find_isa_irq_pin(0, mp_INT);
+ apic1 = find_isa_irq_apic(0, mp_INT);
+ pin2 = ioapic_i8259.pin;
+ apic2 = ioapic_i8259.apic;
- apic_printk(APIC_VERBOSE,KERN_INFO "..TIMER: vector=0x%02X pin1=%d pin2=%d\n", vector, pin1, pin2);
+ apic_printk(APIC_VERBOSE,KERN_INFO "..TIMER: vector=0x%02X apic1=%d pin1=%d apic2=%d pin2=%d\n",
+ vector, apic1, pin1, apic2, pin2);
if (pin1 != -1) {
/*
@@ -1729,17 +1797,20 @@ static inline void check_timer(void)
clear_IO_APIC_pin(0, pin1);
return;
}
- clear_IO_APIC_pin(0, pin1);
- apic_printk(APIC_QUIET,KERN_ERR "..MP-BIOS bug: 8254 timer not connected to IO-APIC\n");
+ clear_IO_APIC_pin(apic1, pin1);
+ apic_printk(APIC_QUIET,KERN_ERR "..MP-BIOS bug: 8254 timer not "
+ "connected to IO-APIC\n");
}
- apic_printk(APIC_VERBOSE,KERN_INFO "...trying to set up timer (IRQ0) through the 8259A ... ");
+ apic_printk(APIC_VERBOSE,KERN_INFO "...trying to set up timer (IRQ0) "
+ "through the 8259A ... ");
if (pin2 != -1) {
- apic_printk(APIC_VERBOSE,"\n..... (found pin %d) ...", pin2);
+ apic_printk(APIC_VERBOSE,"\n..... (found apic %d pin %d) ...",
+ apic2, pin2);
/*
* legacy devices should be connected to IO APIC #0
*/
- setup_ExtINT_IRQ0_pin(pin2, vector);
+ setup_ExtINT_IRQ0_pin(apic2, pin2, vector);
if (timer_irq_works()) {
printk("works.\n");
nmi_watchdog_default();
@@ -1751,7 +1822,7 @@ static inline void check_timer(void)
/*
* Cleanup, just in case ...
*/
- clear_IO_APIC_pin(0, pin2);
+ clear_IO_APIC_pin(apic2, pin2);
}
printk(" failed.\n");
@@ -1764,21 +1835,21 @@ static inline void check_timer(void)
disable_8259A_irq(0);
irq_desc[0].handler = &lapic_irq_type;
- apic_write_around(APIC_LVT0, APIC_DM_FIXED | vector); /* Fixed mode */
+ apic_write(APIC_LVT0, APIC_DM_FIXED | vector); /* Fixed mode */
enable_8259A_irq(0);
if (timer_irq_works()) {
apic_printk(APIC_QUIET, " works.\n");
return;
}
- apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | vector);
+ apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | vector);
apic_printk(APIC_VERBOSE," failed.\n");
apic_printk(APIC_VERBOSE, KERN_INFO "...trying to set up timer as ExtINT IRQ...");
init_8259A(0);
make_8259A_irq(0);
- apic_write_around(APIC_LVT0, APIC_DM_EXTINT);
+ apic_write(APIC_LVT0, APIC_DM_EXTINT);
unlock_ExtINT_logic();
@@ -1956,7 +2027,7 @@ int __init io_apic_get_redir_entries (int ioapic)
}
-int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int active_high_low)
+int io_apic_set_pci_routing (int ioapic, int pin, int irq, int triggering, int polarity)
{
struct IO_APIC_route_entry entry;
unsigned long flags;
@@ -1978,8 +2049,8 @@ int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int a
entry.delivery_mode = INT_DELIVERY_MODE;
entry.dest_mode = INT_DEST_MODE;
entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
- entry.trigger = edge_level;
- entry.polarity = active_high_low;
+ entry.trigger = triggering;
+ entry.polarity = polarity;
entry.mask = 1; /* Disabled (masked) */
irq = gsi_irq_sharing(irq);
@@ -1994,9 +2065,9 @@ int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int a
apic_printk(APIC_VERBOSE,KERN_DEBUG "IOAPIC[%d]: Set PCI routing entry (%d-%d -> 0x%x -> "
"IRQ %d Mode:%i Active:%i)\n", ioapic,
mp_ioapics[ioapic].mpc_apicid, pin, entry.vector, irq,
- edge_level, active_high_low);
+ triggering, polarity);
- ioapic_register_intr(irq, entry.vector, edge_level);
+ ioapic_register_intr(irq, entry.vector, triggering);
if (!ioapic && (irq < 16))
disable_8259A_irq(irq);
diff --git a/arch/x86_64/kernel/ioport.c b/arch/x86_64/kernel/ioport.c
index cde0e868efe..b81614970ec 100644
--- a/arch/x86_64/kernel/ioport.c
+++ b/arch/x86_64/kernel/ioport.c
@@ -7,6 +7,7 @@
#include <linux/sched.h>
#include <linux/kernel.h>
+#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/ioport.h>
diff --git a/arch/x86_64/kernel/irq.c b/arch/x86_64/kernel/irq.c
index d6a04a8320a..30d2a1e545f 100644
--- a/arch/x86_64/kernel/irq.c
+++ b/arch/x86_64/kernel/irq.c
@@ -17,6 +17,7 @@
#include <linux/delay.h>
#include <asm/uaccess.h>
#include <asm/io_apic.h>
+#include <asm/idle.h>
atomic_t irq_err_count;
#ifdef CONFIG_X86_IO_APIC
@@ -69,13 +70,13 @@ skip:
seq_printf(p, "NMI: ");
for (j = 0; j < NR_CPUS; j++)
if (cpu_online(j))
- seq_printf(p, "%10u ", cpu_pda[j].__nmi_count);
+ seq_printf(p, "%10u ", cpu_pda(j)->__nmi_count);
seq_putc(p, '\n');
#ifdef CONFIG_X86_LOCAL_APIC
seq_printf(p, "LOC: ");
for (j = 0; j < NR_CPUS; j++)
if (cpu_online(j))
- seq_printf(p, "%10u ", cpu_pda[j].apic_timer_irqs);
+ seq_printf(p, "%10u ", cpu_pda(j)->apic_timer_irqs);
seq_putc(p, '\n');
#endif
seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
@@ -98,6 +99,7 @@ asmlinkage unsigned int do_IRQ(struct pt_regs *regs)
/* high bits used in ret_from_ code */
unsigned irq = regs->orig_rax & 0xff;
+ exit_idle();
irq_enter();
__do_IRQ(irq, regs);
diff --git a/arch/x86_64/kernel/kprobes.c b/arch/x86_64/kernel/kprobes.c
index dddeb678b44..8b866a8572c 100644
--- a/arch/x86_64/kernel/kprobes.c
+++ b/arch/x86_64/kernel/kprobes.c
@@ -42,8 +42,8 @@
#include <asm/pgtable.h>
#include <asm/kdebug.h>
-static DECLARE_MUTEX(kprobe_mutex);
void jprobe_return_end(void);
+static void __kprobes arch_copy_kprobe(struct kprobe *p);
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
@@ -69,12 +69,11 @@ static inline int is_IF_modifier(kprobe_opcode_t *insn)
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
/* insn: must be on special executable page on x86_64. */
- down(&kprobe_mutex);
p->ainsn.insn = get_insn_slot();
- up(&kprobe_mutex);
if (!p->ainsn.insn) {
return -ENOMEM;
}
+ arch_copy_kprobe(p);
return 0;
}
@@ -181,7 +180,7 @@ static inline s32 *is_riprel(u8 *insn)
return NULL;
}
-void __kprobes arch_copy_kprobe(struct kprobe *p)
+static void __kprobes arch_copy_kprobe(struct kprobe *p)
{
s32 *ripdisp;
memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE);
@@ -329,12 +328,21 @@ int __kprobes kprobe_handler(struct pt_regs *regs)
*/
save_previous_kprobe(kcb);
set_current_kprobe(p, regs, kcb);
- p->nmissed++;
+ kprobes_inc_nmissed_count(p);
prepare_singlestep(p, regs);
kcb->kprobe_status = KPROBE_REENTER;
return 1;
}
} else {
+ if (*addr != BREAKPOINT_INSTRUCTION) {
+			/* The breakpoint instruction was removed by
+			 * another cpu right after we hit it; no further
+			 * handling of this interrupt is appropriate.
+			 */
+ regs->rip = (unsigned long)addr;
+ ret = 1;
+ goto no_kprobe;
+ }
p = __get_cpu_var(current_kprobe);
if (p->break_handler && p->break_handler(p, regs)) {
goto ss_probe;
diff --git a/arch/x86_64/kernel/mce.c b/arch/x86_64/kernel/mce.c
index 183dc610542..13a2eada6c9 100644
--- a/arch/x86_64/kernel/mce.c
+++ b/arch/x86_64/kernel/mce.c
@@ -15,6 +15,7 @@
#include <linux/sysdev.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
+#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/percpu.h>
#include <linux/ctype.h>
@@ -23,9 +24,10 @@
#include <asm/mce.h>
#include <asm/kdebug.h>
#include <asm/uaccess.h>
+#include <asm/smp.h>
#define MISC_MCELOG_MINOR 227
-#define NR_BANKS 5
+#define NR_BANKS 6
static int mce_dont_init;
@@ -91,6 +93,7 @@ void mce_log(struct mce *mce)
static void print_mce(struct mce *m)
{
printk(KERN_EMERG "\n"
+ KERN_EMERG "HARDWARE ERROR\n"
KERN_EMERG
"CPU %d: Machine Check Exception: %16Lx Bank %d: %016Lx\n",
m->cpu, m->mcgstatus, m->bank, m->status);
@@ -109,6 +112,9 @@ static void print_mce(struct mce *m)
if (m->misc)
printk("MISC %Lx ", m->misc);
printk("\n");
+ printk(KERN_EMERG "This is not a software problem!\n");
+ printk(KERN_EMERG
+ "Run through mcelog --ascii to decode and contact your hardware vendor\n");
}
static void mce_panic(char *msg, struct mce *backup, unsigned long start)
@@ -168,12 +174,12 @@ void do_machine_check(struct pt_regs * regs, long error_code)
int panicm_found = 0;
if (regs)
- notify_die(DIE_NMI, "machine check", regs, error_code, 255, SIGKILL);
+ notify_die(DIE_NMI, "machine check", regs, error_code, 18, SIGKILL);
if (!banks)
return;
memset(&m, 0, sizeof(struct mce));
- m.cpu = hard_smp_processor_id();
+ m.cpu = safe_smp_processor_id();
rdmsrl(MSR_IA32_MCG_STATUS, m.mcgstatus);
if (!(m.mcgstatus & MCG_STATUS_RIPV))
kill_it = 1;
@@ -573,6 +579,10 @@ ACCESSOR(bank1ctl,bank[1],mce_restart())
ACCESSOR(bank2ctl,bank[2],mce_restart())
ACCESSOR(bank3ctl,bank[3],mce_restart())
ACCESSOR(bank4ctl,bank[4],mce_restart())
+ACCESSOR(bank5ctl,bank[5],mce_restart())
+static struct sysdev_attribute * bank_attributes[NR_BANKS] = {
+ &attr_bank0ctl, &attr_bank1ctl, &attr_bank2ctl,
+ &attr_bank3ctl, &attr_bank4ctl, &attr_bank5ctl};
ACCESSOR(tolerant,tolerant,)
ACCESSOR(check_interval,check_interval,mce_restart())
@@ -580,6 +590,7 @@ ACCESSOR(check_interval,check_interval,mce_restart())
static __cpuinit int mce_create_device(unsigned int cpu)
{
int err;
+ int i;
if (!mce_available(&cpu_data[cpu]))
return -EIO;
@@ -589,11 +600,9 @@ static __cpuinit int mce_create_device(unsigned int cpu)
err = sysdev_register(&per_cpu(device_mce,cpu));
if (!err) {
- sysdev_create_file(&per_cpu(device_mce,cpu), &attr_bank0ctl);
- sysdev_create_file(&per_cpu(device_mce,cpu), &attr_bank1ctl);
- sysdev_create_file(&per_cpu(device_mce,cpu), &attr_bank2ctl);
- sysdev_create_file(&per_cpu(device_mce,cpu), &attr_bank3ctl);
- sysdev_create_file(&per_cpu(device_mce,cpu), &attr_bank4ctl);
+ for (i = 0; i < banks; i++)
+ sysdev_create_file(&per_cpu(device_mce,cpu),
+ bank_attributes[i]);
sysdev_create_file(&per_cpu(device_mce,cpu), &attr_tolerant);
sysdev_create_file(&per_cpu(device_mce,cpu), &attr_check_interval);
}
@@ -603,11 +612,11 @@ static __cpuinit int mce_create_device(unsigned int cpu)
#ifdef CONFIG_HOTPLUG_CPU
static __cpuinit void mce_remove_device(unsigned int cpu)
{
- sysdev_remove_file(&per_cpu(device_mce,cpu), &attr_bank0ctl);
- sysdev_remove_file(&per_cpu(device_mce,cpu), &attr_bank1ctl);
- sysdev_remove_file(&per_cpu(device_mce,cpu), &attr_bank2ctl);
- sysdev_remove_file(&per_cpu(device_mce,cpu), &attr_bank3ctl);
- sysdev_remove_file(&per_cpu(device_mce,cpu), &attr_bank4ctl);
+ int i;
+
+ for (i = 0; i < banks; i++)
+ sysdev_remove_file(&per_cpu(device_mce,cpu),
+ bank_attributes[i]);
sysdev_remove_file(&per_cpu(device_mce,cpu), &attr_tolerant);
sysdev_remove_file(&per_cpu(device_mce,cpu), &attr_check_interval);
sysdev_unregister(&per_cpu(device_mce,cpu));
diff --git a/arch/x86_64/kernel/mce_amd.c b/arch/x86_64/kernel/mce_amd.c
index 1f76175ace0..d3ad7d81266 100644
--- a/arch/x86_64/kernel/mce_amd.c
+++ b/arch/x86_64/kernel/mce_amd.c
@@ -27,6 +27,7 @@
#include <asm/mce.h>
#include <asm/msr.h>
#include <asm/percpu.h>
+#include <asm/idle.h>
#define PFX "mce_threshold: "
#define VERSION "version 1.00.9"
@@ -140,6 +141,7 @@ asmlinkage void mce_threshold_interrupt(void)
struct mce m;
ack_APIC_irq();
+ exit_idle();
irq_enter();
memset(&m, 0, sizeof(m));
@@ -318,7 +320,7 @@ static struct kobj_type threshold_ktype = {
static __cpuinit int threshold_create_bank(unsigned int cpu, int bank)
{
int err = 0;
- struct threshold_bank *b = 0;
+ struct threshold_bank *b = NULL;
#ifdef CONFIG_SMP
if (cpu_core_id[cpu] && shared_bank[bank]) { /* symlink */
@@ -407,7 +409,7 @@ static __cpuinit void threshold_remove_bank(unsigned int cpu, int bank)
if (shared_bank[bank] && atomic_read(&b->kobj.kref.refcount) > 2) {
sprintf(name, "bank%i", bank);
sysfs_remove_link(&per_cpu(device_threshold, cpu).kobj, name);
- per_cpu(threshold_banks, cpu)[bank] = 0;
+ per_cpu(threshold_banks, cpu)[bank] = NULL;
} else {
kobject_unregister(&b->kobj);
kfree(per_cpu(threshold_banks, cpu)[bank]);
diff --git a/arch/x86_64/kernel/mce_intel.c b/arch/x86_64/kernel/mce_intel.c
index 0be0a795981..8f533d2c40c 100644
--- a/arch/x86_64/kernel/mce_intel.c
+++ b/arch/x86_64/kernel/mce_intel.c
@@ -10,6 +10,7 @@
#include <asm/msr.h>
#include <asm/mce.h>
#include <asm/hw_irq.h>
+#include <asm/idle.h>
static DEFINE_PER_CPU(unsigned long, next_check);
@@ -19,6 +20,7 @@ asmlinkage void smp_thermal_interrupt(void)
ack_APIC_irq();
+ exit_idle();
irq_enter();
if (time_before(jiffies, __get_cpu_var(next_check)))
goto done;
@@ -78,7 +80,7 @@ static void __cpuinit intel_init_thermal(struct cpuinfo_x86 *c)
h = THERMAL_APIC_VECTOR;
h |= (APIC_DM_FIXED | APIC_LVT_MASKED);
- apic_write_around(APIC_LVTTHMR, h);
+ apic_write(APIC_LVTTHMR, h);
rdmsr(MSR_IA32_THERM_INTERRUPT, l, h);
wrmsr(MSR_IA32_THERM_INTERRUPT, l | 0x03, h);
@@ -87,7 +89,7 @@ static void __cpuinit intel_init_thermal(struct cpuinfo_x86 *c)
wrmsr(MSR_IA32_MISC_ENABLE, l | (1 << 3), h);
l = apic_read(APIC_LVTTHMR);
- apic_write_around(APIC_LVTTHMR, l & ~APIC_LVT_MASKED);
+ apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED);
printk(KERN_INFO "CPU%d: Thermal monitoring enabled (%s)\n",
cpu, tm2 ? "TM2" : "TM1");
return;
diff --git a/arch/x86_64/kernel/mpparse.c b/arch/x86_64/kernel/mpparse.c
index 1105250bf02..dc49bfb6db0 100644
--- a/arch/x86_64/kernel/mpparse.c
+++ b/arch/x86_64/kernel/mpparse.c
@@ -915,7 +915,7 @@ void __init mp_config_acpi_legacy_irqs (void)
#define MAX_GSI_NUM 4096
-int mp_register_gsi(u32 gsi, int edge_level, int active_high_low)
+int mp_register_gsi(u32 gsi, int triggering, int polarity)
{
int ioapic = -1;
int ioapic_pin = 0;
@@ -964,7 +964,7 @@ int mp_register_gsi(u32 gsi, int edge_level, int active_high_low)
mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1<<bit);
- if (edge_level) {
+ if (triggering == ACPI_LEVEL_SENSITIVE) {
/*
* For PCI devices assign IRQs in order, avoiding gaps
* due to unused I/O APIC pins.
@@ -986,8 +986,8 @@ int mp_register_gsi(u32 gsi, int edge_level, int active_high_low)
}
io_apic_set_pci_routing(ioapic, ioapic_pin, gsi,
- edge_level == ACPI_EDGE_SENSITIVE ? 0 : 1,
- active_high_low == ACPI_ACTIVE_HIGH ? 0 : 1);
+ triggering == ACPI_EDGE_SENSITIVE ? 0 : 1,
+ polarity == ACPI_ACTIVE_HIGH ? 0 : 1);
return gsi;
}
diff --git a/arch/x86_64/kernel/nmi.c b/arch/x86_64/kernel/nmi.c
index 39d445e16f2..5fae6f0cd99 100644
--- a/arch/x86_64/kernel/nmi.c
+++ b/arch/x86_64/kernel/nmi.c
@@ -151,23 +151,25 @@ int __init check_nmi_watchdog (void)
printk(KERN_INFO "testing NMI watchdog ... ");
+#ifdef CONFIG_SMP
if (nmi_watchdog == NMI_LOCAL_APIC)
smp_call_function(nmi_cpu_busy, (void *)&endflag, 0, 0);
+#endif
for (cpu = 0; cpu < NR_CPUS; cpu++)
- counts[cpu] = cpu_pda[cpu].__nmi_count;
+ counts[cpu] = cpu_pda(cpu)->__nmi_count;
local_irq_enable();
mdelay((10*1000)/nmi_hz); // wait 10 ticks
for (cpu = 0; cpu < NR_CPUS; cpu++) {
if (!cpu_online(cpu))
continue;
- if (cpu_pda[cpu].__nmi_count - counts[cpu] <= 5) {
+ if (cpu_pda(cpu)->__nmi_count - counts[cpu] <= 5) {
endflag = 1;
printk("CPU#%d: NMI appears to be stuck (%d->%d)!\n",
cpu,
counts[cpu],
- cpu_pda[cpu].__nmi_count);
+ cpu_pda(cpu)->__nmi_count);
nmi_active = 0;
lapic_nmi_owner &= ~LAPIC_NMI_WATCHDOG;
nmi_perfctr_msr = 0;
diff --git a/arch/x86_64/kernel/pci-dma.c b/arch/x86_64/kernel/pci-dma.c
index cab471cf3ed..2f5d8328e2b 100644
--- a/arch/x86_64/kernel/pci-dma.c
+++ b/arch/x86_64/kernel/pci-dma.c
@@ -8,53 +8,259 @@
#include <linux/pci.h>
#include <linux/module.h>
#include <asm/io.h>
+#include <asm/proto.h>
-/* Map a set of buffers described by scatterlist in streaming
- * mode for DMA. This is the scatter-gather version of the
- * above pci_map_single interface. Here the scatter gather list
- * elements are each tagged with the appropriate dma address
- * and length. They are obtained via sg_dma_{address,length}(SG).
- *
- * NOTE: An implementation may be able to use a smaller number of
- * DMA address/length pairs than there are SG table elements.
- * (for example via virtual mapping capabilities)
- * The routine returns the number of addr/length pairs actually
- * used, at most nents.
- *
- * Device ownership issues as mentioned above for pci_map_single are
- * the same here.
+int iommu_merge __read_mostly = 0;
+EXPORT_SYMBOL(iommu_merge);
+
+dma_addr_t bad_dma_address __read_mostly;
+EXPORT_SYMBOL(bad_dma_address);
+
+/* This tells the BIO block layer to assume merging. Default to off
+ because we cannot guarantee merging later. */
+int iommu_bio_merge __read_mostly = 0;
+EXPORT_SYMBOL(iommu_bio_merge);
+
+int iommu_sac_force __read_mostly = 0;
+EXPORT_SYMBOL(iommu_sac_force);
+
+int no_iommu __read_mostly;
+#ifdef CONFIG_IOMMU_DEBUG
+int panic_on_overflow __read_mostly = 1;
+int force_iommu __read_mostly = 1;
+#else
+int panic_on_overflow __read_mostly = 0;
+int force_iommu __read_mostly= 0;
+#endif
+
+/* Dummy device used for NULL arguments (normally ISA).  A smaller
+   DMA mask would probably be better, but this is kept bug-to-bug
+   compatible with i386. */
+struct device fallback_dev = {
+ .bus_id = "fallback device",
+ .coherent_dma_mask = 0xffffffff,
+ .dma_mask = &fallback_dev.coherent_dma_mask,
+};
+
+/* Allocate DMA memory on node near device */
+noinline static void *
+dma_alloc_pages(struct device *dev, gfp_t gfp, unsigned order)
+{
+ struct page *page;
+ int node;
+ if (dev->bus == &pci_bus_type)
+ node = pcibus_to_node(to_pci_dev(dev)->bus);
+ else
+ node = numa_node_id();
+ page = alloc_pages_node(node, gfp, order);
+ return page ? page_address(page) : NULL;
+}
+
+/*
+ * Allocate memory for a coherent mapping.
*/
-int dma_map_sg(struct device *hwdev, struct scatterlist *sg,
- int nents, int direction)
+void *
+dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
+ gfp_t gfp)
{
- int i;
-
- BUG_ON(direction == DMA_NONE);
- for (i = 0; i < nents; i++ ) {
- struct scatterlist *s = &sg[i];
- BUG_ON(!s->page);
- s->dma_address = virt_to_bus(page_address(s->page) +s->offset);
- s->dma_length = s->length;
+ void *memory;
+ unsigned long dma_mask = 0;
+ u64 bus;
+
+ if (!dev)
+ dev = &fallback_dev;
+ dma_mask = dev->coherent_dma_mask;
+ if (dma_mask == 0)
+ dma_mask = 0xffffffff;
+
+ /* Kludge to make it bug-to-bug compatible with i386. i386
+ uses the normal dma_mask for alloc_coherent. */
+ dma_mask &= *dev->dma_mask;
+
+	/* Why <=?  Even when the mask is smaller than 4GB it is often
+	   larger than 16MB, and in this case we have a chance of
+	   finding fitting memory in the next higher zone first.  If
+	   not, retry with true GFP_DMA. -AK */
+ if (dma_mask <= 0xffffffff)
+ gfp |= GFP_DMA32;
+
+ again:
+ memory = dma_alloc_pages(dev, gfp, get_order(size));
+ if (memory == NULL)
+ return NULL;
+
+ {
+ int high, mmu;
+ bus = virt_to_bus(memory);
+ high = (bus + size) >= dma_mask;
+ mmu = high;
+ if (force_iommu && !(gfp & GFP_DMA))
+ mmu = 1;
+ else if (high) {
+ free_pages((unsigned long)memory,
+ get_order(size));
+
+ /* Don't use the 16MB ZONE_DMA unless absolutely
+ needed. It's better to use remapping first. */
+ if (dma_mask < 0xffffffff && !(gfp & GFP_DMA)) {
+ gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
+ goto again;
+ }
+
+ if (dma_ops->alloc_coherent)
+ return dma_ops->alloc_coherent(dev, size,
+ dma_handle, gfp);
+ return NULL;
+ }
+
+ memset(memory, 0, size);
+ if (!mmu) {
+ *dma_handle = virt_to_bus(memory);
+ return memory;
+ }
+ }
+
+ if (dma_ops->alloc_coherent) {
+ free_pages((unsigned long)memory, get_order(size));
+ gfp &= ~(GFP_DMA|GFP_DMA32);
+ return dma_ops->alloc_coherent(dev, size, dma_handle, gfp);
+ }
+
+ if (dma_ops->map_simple) {
+ *dma_handle = dma_ops->map_simple(dev, memory,
+ size,
+ PCI_DMA_BIDIRECTIONAL);
+ if (*dma_handle != bad_dma_address)
+ return memory;
}
- return nents;
-}
-EXPORT_SYMBOL(dma_map_sg);
+ if (panic_on_overflow)
+ panic("dma_alloc_coherent: IOMMU overflow by %lu bytes\n",size);
+ free_pages((unsigned long)memory, get_order(size));
+ return NULL;
+}
+EXPORT_SYMBOL(dma_alloc_coherent);
-/* Unmap a set of streaming mode DMA translations.
- * Again, cpu read rules concerning calls here are the same as for
- * pci_unmap_single() above.
+/*
+ * Unmap coherent memory.
+ * The caller must ensure that the device has finished accessing the mapping.
*/
-void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
- int nents, int dir)
+void dma_free_coherent(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t bus)
+{
+ if (dma_ops->unmap_single)
+ dma_ops->unmap_single(dev, bus, size, 0);
+ free_pages((unsigned long)vaddr, get_order(size));
+}
+EXPORT_SYMBOL(dma_free_coherent);
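A hedged driver-side sketch of the pair above (mydev is hypothetical; the two calls and their signatures come from this file):

	dma_addr_t bus;
	void *cpu = dma_alloc_coherent(&mydev->dev, 4096, &bus, GFP_KERNEL);

	if (!cpu)
		return -ENOMEM;
	/* ... the device DMAs to/from 'bus', the CPU touches 'cpu' ... */
	dma_free_coherent(&mydev->dev, 4096, cpu, bus);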
+
+int dma_supported(struct device *dev, u64 mask)
+{
+ if (dma_ops->dma_supported)
+ return dma_ops->dma_supported(dev, mask);
+
+ /* Copied from i386. Doesn't make much sense, because it will
+ only work for pci_alloc_coherent.
+ The caller just has to use GFP_DMA in this case. */
+ if (mask < 0x00ffffff)
+ return 0;
+
+	/* Tell the device to use SAC when IOMMU force is on.  This
+	   allows the driver to use cheaper accesses in some cases.
+
+	   The problem with this is that if we overflow the IOMMU area
+	   and return DAC as a fallback address, the device may not
+	   handle it correctly.
+
+	   As a special case some controllers have a 39-bit address
+	   mode that is as efficient as 32-bit (aic79xx).  Don't force
+	   SAC for these.  Assume all masks <= 40 bits are of this
+	   type.  Normally this doesn't make any difference, but it
+	   gives gentler handling of IOMMU overflow. */
+ if (iommu_sac_force && (mask >= 0xffffffffffULL)) {
+ printk(KERN_INFO "%s: Force SAC with mask %Lx\n", dev->bus_id,mask);
+ return 0;
+ }
+
+ return 1;
+}
+EXPORT_SYMBOL(dma_supported);
+
+int dma_set_mask(struct device *dev, u64 mask)
{
- int i;
- for (i = 0; i < nents; i++) {
- struct scatterlist *s = &sg[i];
- BUG_ON(s->page == NULL);
- BUG_ON(s->dma_address == 0);
- dma_unmap_single(dev, s->dma_address, s->dma_length, dir);
- }
+ if (!dev->dma_mask || !dma_supported(dev, mask))
+ return -EIO;
+ *dev->dma_mask = mask;
+ return 0;
}
+EXPORT_SYMBOL(dma_set_mask);
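Typical probe-time use of dma_set_mask(), as a sketch (pdev is a hypothetical struct pci_dev *):

	/* Ask for a 32-bit DMA mask; bail out if the platform or the
	 * IOMMU setup cannot honour it (dma_set_mask() returns -EIO). */
	if (dma_set_mask(&pdev->dev, 0xffffffffULL))
		return -EIO;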
+
+/* iommu=[size][,noagp][,off][,force][,noforce][,leak][,memaper[=order]][,merge]
+	[,forcesac][,fullflush][,nomerge][,biomerge]
+   size		set size of iommu (in bytes)
+   noagp	don't initialize the AGP driver and use full aperture.
+   off		don't use the IOMMU
+   leak		turn on simple iommu leak tracing (only when CONFIG_IOMMU_LEAK is on)
+   memaper[=order]	allocate its own aperture over RAM with size 32MB<<order.
+   noforce	don't force IOMMU usage. Default.
+   force	Force IOMMU.
+   merge	Do lazy merging. This may improve performance on some block devices.
+		Implies force (experimental).
+   biomerge	Do merging at the BIO layer. This is more efficient than merge,
+		but should only be done with very big IOMMUs. Implies merge,force.
+   nomerge	Don't do SG merging.
+   forcesac	Force SAC mode for masks <40 bits (experimental).
+   fullflush	Flush IOMMU on each allocation (default).
+   nofullflush	Don't use IOMMU fullflush.
+   allowed	Override iommu-off workarounds for specific chipsets.
+   soft		Use software bounce buffering (default for Intel machines).
+   noaperture	Don't touch the aperture for AGP.
+*/
+__init int iommu_setup(char *p)
+{
+ iommu_merge = 1;
-EXPORT_SYMBOL(dma_unmap_sg);
+ while (*p) {
+ if (!strncmp(p,"off",3))
+ no_iommu = 1;
+ /* gart_parse_options has more force support */
+ if (!strncmp(p,"force",5))
+ force_iommu = 1;
+ if (!strncmp(p,"noforce",7)) {
+ iommu_merge = 0;
+ force_iommu = 0;
+ }
+
+ if (!strncmp(p, "biomerge",8)) {
+ iommu_bio_merge = 4096;
+ iommu_merge = 1;
+ force_iommu = 1;
+ }
+ if (!strncmp(p, "panic",5))
+ panic_on_overflow = 1;
+ if (!strncmp(p, "nopanic",7))
+ panic_on_overflow = 0;
+ if (!strncmp(p, "merge",5)) {
+ iommu_merge = 1;
+ force_iommu = 1;
+ }
+ if (!strncmp(p, "nomerge",7))
+ iommu_merge = 0;
+ if (!strncmp(p, "forcesac",8))
+ iommu_sac_force = 1;
+
+#ifdef CONFIG_SWIOTLB
+ if (!strncmp(p, "soft",4))
+ swiotlb = 1;
+#endif
+
+#ifdef CONFIG_GART_IOMMU
+ gart_parse_options(p);
+#endif
+
+ p += strcspn(p, ",");
+ if (*p == ',')
+ ++p;
+ }
+ return 1;
+}
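The rewritten pci-dma.c no longer contains the mapping implementations themselves; every entry point dispatches through the dma_ops pointer that one of pci-nommu.c, pci-swiotlb.c, or pci-gart.c installs at boot. A minimal sketch of that dispatch pattern, assuming a dma_mapping_ops shaped like the tables registered below (the kernel's real inline wrappers live in the asm-x86_64 headers, which are not part of this diff):

/* Sketch only: generic entry points dispatch through the ops table
 * installed at boot by whichever backend wins. */
struct dma_mapping_ops {
        dma_addr_t (*map_single)(struct device *dev, void *ptr,
                                 size_t size, int direction);
        void (*unmap_single)(struct device *dev, dma_addr_t addr,
                             size_t size, int direction);
        /* sg, sync and coherent hooks elided */
};

extern struct dma_mapping_ops *dma_ops;

static inline dma_addr_t dma_map_single(struct device *dev, void *ptr,
                                        size_t size, int direction)
{
        return dma_ops->map_single(dev, ptr, size, direction);
}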
diff --git a/arch/x86_64/kernel/pci-gart.c b/arch/x86_64/kernel/pci-gart.c
index 2e28e855ec3..c37fc7726ba 100644
--- a/arch/x86_64/kernel/pci-gart.c
+++ b/arch/x86_64/kernel/pci-gart.c
@@ -30,8 +30,8 @@
#include <asm/proto.h>
#include <asm/cacheflush.h>
#include <asm/kdebug.h>
-
-dma_addr_t bad_dma_address;
+#include <asm/swiotlb.h>
+#include <asm/dma.h>
unsigned long iommu_bus_base; /* GART remapping area (physical) */
static unsigned long iommu_size; /* size of remapping area bytes */
@@ -39,18 +39,6 @@ static unsigned long iommu_pages; /* .. and in pages */
u32 *iommu_gatt_base; /* Remapping table */
-int no_iommu;
-static int no_agp;
-#ifdef CONFIG_IOMMU_DEBUG
-int panic_on_overflow = 1;
-int force_iommu = 1;
-#else
-int panic_on_overflow = 0;
-int force_iommu = 0;
-#endif
-int iommu_merge = 1;
-int iommu_sac_force = 0;
-
/* If this is disabled the IOMMU will use an optimized flushing strategy
of only flushing when a mapping is reused. With it true the GART is flushed
for every mapping. Problem is that doing the lazy flush seems to trigger
@@ -58,10 +46,6 @@ int iommu_sac_force = 0;
also seen with Qlogic at least). */
int iommu_fullflush = 1;
-/* This tells the BIO block layer to assume merging. Default to off
- because we cannot guarantee merging later. */
-int iommu_bio_merge = 0;
-
#define MAX_NB 8
/* Allocation bitmap for the remapping area */
@@ -102,16 +86,6 @@ AGPEXTERN __u32 *agp_gatt_table;
static unsigned long next_bit; /* protected by iommu_bitmap_lock */
static int need_flush; /* global flush state. set for each gart wrap */
-static dma_addr_t dma_map_area(struct device *dev, unsigned long phys_mem,
- size_t size, int dir, int do_panic);
-
-/* Dummy device used for NULL arguments (normally ISA). Better would
- be probably a smaller DMA mask, but this is bug-to-bug compatible to i386. */
-static struct device fallback_dev = {
- .bus_id = "fallback device",
- .coherent_dma_mask = 0xffffffff,
- .dma_mask = &fallback_dev.coherent_dma_mask,
-};
static unsigned long alloc_iommu(int size)
{
@@ -185,114 +159,7 @@ static void flush_gart(struct device *dev)
spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}
-/* Allocate DMA memory on node near device */
-noinline
-static void *dma_alloc_pages(struct device *dev, gfp_t gfp, unsigned order)
-{
- struct page *page;
- int node;
- if (dev->bus == &pci_bus_type)
- node = pcibus_to_node(to_pci_dev(dev)->bus);
- else
- node = numa_node_id();
- page = alloc_pages_node(node, gfp, order);
- return page ? page_address(page) : NULL;
-}
-
-/*
- * Allocate memory for a coherent mapping.
- */
-void *
-dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
- gfp_t gfp)
-{
- void *memory;
- unsigned long dma_mask = 0;
- u64 bus;
- if (!dev)
- dev = &fallback_dev;
- dma_mask = dev->coherent_dma_mask;
- if (dma_mask == 0)
- dma_mask = 0xffffffff;
-
- /* Kludge to make it bug-to-bug compatible with i386. i386
- uses the normal dma_mask for alloc_coherent. */
- dma_mask &= *dev->dma_mask;
-
- /* Why <=? Even when the mask is smaller than 4GB it is often larger
- than 16MB and in this case we have a chance of finding fitting memory
- in the next higher zone first. If not retry with true GFP_DMA. -AK */
- if (dma_mask <= 0xffffffff)
- gfp |= GFP_DMA32;
-
- again:
- memory = dma_alloc_pages(dev, gfp, get_order(size));
- if (memory == NULL)
- return NULL;
-
- {
- int high, mmu;
- bus = virt_to_bus(memory);
- high = (bus + size) >= dma_mask;
- mmu = high;
- if (force_iommu && !(gfp & GFP_DMA))
- mmu = 1;
- if (no_iommu || dma_mask < 0xffffffffUL) {
- if (high) {
- free_pages((unsigned long)memory,
- get_order(size));
-
- if (swiotlb) {
- return
- swiotlb_alloc_coherent(dev, size,
- dma_handle,
- gfp);
- }
-
- if (!(gfp & GFP_DMA)) {
- gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
- goto again;
- }
- return NULL;
- }
- mmu = 0;
- }
- memset(memory, 0, size);
- if (!mmu) {
- *dma_handle = virt_to_bus(memory);
- return memory;
- }
- }
-
- *dma_handle = dma_map_area(dev, bus, size, PCI_DMA_BIDIRECTIONAL, 0);
- if (*dma_handle == bad_dma_address)
- goto error;
- flush_gart(dev);
- return memory;
-
-error:
- if (panic_on_overflow)
- panic("dma_alloc_coherent: IOMMU overflow by %lu bytes\n", size);
- free_pages((unsigned long)memory, get_order(size));
- return NULL;
-}
-
-/*
- * Unmap coherent memory.
- * The caller must ensure that the device has finished accessing the mapping.
- */
-void dma_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t bus)
-{
- if (swiotlb) {
- swiotlb_free_coherent(dev, size, vaddr, bus);
- return;
- }
-
- dma_unmap_single(dev, bus, size, 0);
- free_pages((unsigned long)vaddr, get_order(size));
-}
#ifdef CONFIG_IOMMU_LEAK
@@ -326,7 +193,7 @@ void dump_leak(void)
#define CLEAR_LEAK(x)
#endif
-static void iommu_full(struct device *dev, size_t size, int dir, int do_panic)
+static void iommu_full(struct device *dev, size_t size, int dir)
{
/*
* Ran out of IOMMU space for this operation. This is very bad.
@@ -342,11 +209,11 @@ static void iommu_full(struct device *dev, size_t size, int dir, int do_panic)
"PCI-DMA: Out of IOMMU space for %lu bytes at device %s\n",
size, dev->bus_id);
- if (size > PAGE_SIZE*EMERGENCY_PAGES && do_panic) {
+ if (size > PAGE_SIZE*EMERGENCY_PAGES) {
if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
panic("PCI-DMA: Memory would be corrupted\n");
if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
- panic("PCI-DMA: Random memory would be DMAed\n");
+ panic(KERN_ERR "PCI-DMA: Random memory would be DMAed\n");
}
#ifdef CONFIG_IOMMU_LEAK
@@ -385,8 +252,8 @@ static inline int nonforced_iommu(struct device *dev, unsigned long addr, size_t
/* Map a single contiguous physical area into the IOMMU.
* Caller needs to check if the iommu is needed and flush.
*/
-static dma_addr_t dma_map_area(struct device *dev, unsigned long phys_mem,
- size_t size, int dir, int do_panic)
+static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
+ size_t size, int dir)
{
unsigned long npages = to_pages(phys_mem, size);
unsigned long iommu_page = alloc_iommu(npages);
@@ -396,7 +263,7 @@ static dma_addr_t dma_map_area(struct device *dev, unsigned long phys_mem,
return phys_mem;
if (panic_on_overflow)
panic("dma_map_area overflow %lu bytes\n", size);
- iommu_full(dev, size, dir, do_panic);
+ iommu_full(dev, size, dir);
return bad_dma_address;
}
@@ -408,15 +275,21 @@ static dma_addr_t dma_map_area(struct device *dev, unsigned long phys_mem,
return iommu_bus_base + iommu_page*PAGE_SIZE + (phys_mem & ~PAGE_MASK);
}
+static dma_addr_t gart_map_simple(struct device *dev, char *buf,
+ size_t size, int dir)
+{
+ dma_addr_t map = dma_map_area(dev, virt_to_bus(buf), size, dir);
+ flush_gart(dev);
+ return map;
+}
+
/* Map a single area into the IOMMU */
-dma_addr_t dma_map_single(struct device *dev, void *addr, size_t size, int dir)
+dma_addr_t gart_map_single(struct device *dev, void *addr, size_t size, int dir)
{
unsigned long phys_mem, bus;
BUG_ON(dir == DMA_NONE);
- if (swiotlb)
- return swiotlb_map_single(dev,addr,size,dir);
if (!dev)
dev = &fallback_dev;
@@ -424,10 +297,24 @@ dma_addr_t dma_map_single(struct device *dev, void *addr, size_t size, int dir)
if (!need_iommu(dev, phys_mem, size))
return phys_mem;
- bus = dma_map_area(dev, phys_mem, size, dir, 1);
- flush_gart(dev);
+ bus = gart_map_simple(dev, addr, size, dir);
return bus;
-}
+}
+
+/*
+ * Wrapper for pci_unmap_single working with scatterlists.
+ */
+void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
+{
+ int i;
+
+ for (i = 0; i < nents; i++) {
+ struct scatterlist *s = &sg[i];
+ if (!s->dma_length || !s->length)
+ break;
+ dma_unmap_single(dev, s->dma_address, s->dma_length, dir);
+ }
+}
/* Fallback for dma_map_sg in case of overflow */
static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
@@ -443,10 +330,10 @@ static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
struct scatterlist *s = &sg[i];
unsigned long addr = page_to_phys(s->page) + s->offset;
if (nonforced_iommu(dev, addr, s->length)) {
- addr = dma_map_area(dev, addr, s->length, dir, 0);
+ addr = dma_map_area(dev, addr, s->length, dir);
if (addr == bad_dma_address) {
if (i > 0)
- dma_unmap_sg(dev, sg, i, dir);
+ gart_unmap_sg(dev, sg, i, dir);
nents = 0;
sg[0].dma_length = 0;
break;
@@ -515,7 +402,7 @@ static inline int dma_map_cont(struct scatterlist *sg, int start, int stopat,
* DMA map all entries in a scatterlist.
* Merge chunks that have page-aligned sizes into a contiguous mapping.
*/
-int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
+int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
{
int i;
int out;
@@ -527,8 +414,6 @@ int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
if (nents == 0)
return 0;
- if (swiotlb)
- return swiotlb_map_sg(dev,sg,nents,dir);
if (!dev)
dev = &fallback_dev;
@@ -571,13 +456,13 @@ int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
error:
flush_gart(NULL);
- dma_unmap_sg(dev, sg, nents, dir);
+ gart_unmap_sg(dev, sg, nents, dir);
/* When it was forced try again unforced */
if (force_iommu)
return dma_map_sg_nonforce(dev, sg, nents, dir);
if (panic_on_overflow)
panic("dma_map_sg: overflow on %lu pages\n", pages);
- iommu_full(dev, pages << PAGE_SHIFT, dir, 0);
+ iommu_full(dev, pages << PAGE_SHIFT, dir);
for (i = 0; i < nents; i++)
sg[i].dma_address = bad_dma_address;
return 0;
@@ -586,18 +471,13 @@ error:
/*
* Free a DMA mapping.
*/
-void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
+void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
size_t size, int direction)
{
unsigned long iommu_page;
int npages;
int i;
- if (swiotlb) {
- swiotlb_unmap_single(dev,dma_addr,size,direction);
- return;
- }
-
if (dma_addr < iommu_bus_base + EMERGENCY_PAGES*PAGE_SIZE ||
dma_addr >= iommu_bus_base + iommu_size)
return;
@@ -610,68 +490,7 @@ void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
free_iommu(iommu_page, npages);
}
-/*
- * Wrapper for pci_unmap_single working with scatterlists.
- */
-void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
-{
- int i;
- if (swiotlb) {
- swiotlb_unmap_sg(dev,sg,nents,dir);
- return;
- }
- for (i = 0; i < nents; i++) {
- struct scatterlist *s = &sg[i];
- if (!s->dma_length || !s->length)
- break;
- dma_unmap_single(dev, s->dma_address, s->dma_length, dir);
- }
-}
-
-int dma_supported(struct device *dev, u64 mask)
-{
- /* Copied from i386. Doesn't make much sense, because it will
- only work for pci_alloc_coherent.
- The caller just has to use GFP_DMA in this case. */
- if (mask < 0x00ffffff)
- return 0;
-
- /* Tell the device to use SAC when IOMMU force is on.
- This allows the driver to use cheaper accesses in some cases.
-
- Problem with this is that if we overflow the IOMMU area
- and return DAC as fallback address the device may not handle it correctly.
-
- As a special case some controllers have a 39bit address mode
- that is as efficient as 32bit (aic79xx). Don't force SAC for these.
- Assume all masks <= 40 bits are of this type. Normally this doesn't
- make any difference, but gives more gentle handling of IOMMU overflow. */
- if (iommu_sac_force && (mask >= 0xffffffffffULL)) {
- printk(KERN_INFO "%s: Force SAC with mask %Lx\n", dev->bus_id,mask);
- return 0;
- }
-
- return 1;
-}
-
-int dma_get_cache_alignment(void)
-{
- return boot_cpu_data.x86_clflush_size;
-}
-
-EXPORT_SYMBOL(dma_unmap_sg);
-EXPORT_SYMBOL(dma_map_sg);
-EXPORT_SYMBOL(dma_map_single);
-EXPORT_SYMBOL(dma_unmap_single);
-EXPORT_SYMBOL(dma_supported);
-EXPORT_SYMBOL(no_iommu);
-EXPORT_SYMBOL(force_iommu);
-EXPORT_SYMBOL(bad_dma_address);
-EXPORT_SYMBOL(iommu_bio_merge);
-EXPORT_SYMBOL(iommu_sac_force);
-EXPORT_SYMBOL(dma_get_cache_alignment);
-EXPORT_SYMBOL(dma_alloc_coherent);
-EXPORT_SYMBOL(dma_free_coherent);
+static int no_agp;
static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
{
@@ -772,12 +591,27 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
nommu:
/* Should not happen anymore */
printk(KERN_ERR "PCI-DMA: More than 4GB of RAM and no IOMMU\n"
- KERN_ERR "PCI-DMA: 32bit PCI IO may malfunction.");
+ KERN_ERR "PCI-DMA: 32bit PCI IO may malfunction.\n");
return -1;
}
extern int agp_amd64_init(void);
+static struct dma_mapping_ops gart_dma_ops = {
+ .mapping_error = NULL,
+ .map_single = gart_map_single,
+ .map_simple = gart_map_simple,
+ .unmap_single = gart_unmap_single,
+ .sync_single_for_cpu = NULL,
+ .sync_single_for_device = NULL,
+ .sync_single_range_for_cpu = NULL,
+ .sync_single_range_for_device = NULL,
+ .sync_sg_for_cpu = NULL,
+ .sync_sg_for_device = NULL,
+ .map_sg = gart_map_sg,
+ .unmap_sg = gart_unmap_sg,
+};
+
static int __init pci_iommu_init(void)
{
struct agp_kern_info info;
@@ -799,16 +633,15 @@ static int __init pci_iommu_init(void)
if (swiotlb) {
no_iommu = 1;
- printk(KERN_INFO "PCI-DMA: Using software bounce buffering for IO (SWIOTLB)\n");
return -1;
}
if (no_iommu ||
- (!force_iommu && end_pfn < 0xffffffff>>PAGE_SHIFT) ||
+ (!force_iommu && end_pfn <= MAX_DMA32_PFN) ||
!iommu_aperture ||
(no_agp && init_k8_gatt(&info) < 0)) {
- printk(KERN_INFO "PCI-DMA: Disabling IOMMU.\n");
no_iommu = 1;
+ no_iommu_init();
return -1;
}
@@ -885,100 +718,50 @@ static int __init pci_iommu_init(void)
flush_gart(NULL);
+ printk(KERN_INFO "PCI-DMA: using GART IOMMU.\n");
+ dma_ops = &gart_dma_ops;
+
return 0;
}
/* Must execute after PCI subsystem */
fs_initcall(pci_iommu_init);
-/* iommu=[size][,noagp][,off][,force][,noforce][,leak][,memaper[=order]][,merge]
- [,forcesac][,fullflush][,nomerge][,biomerge]
- size set size of iommu (in bytes)
- noagp don't initialize the AGP driver and use full aperture.
- off don't use the IOMMU
- leak turn on simple iommu leak tracing (only when CONFIG_IOMMU_LEAK is on)
- memaper[=order] allocate an own aperture over RAM with size 32MB^order.
- noforce don't force IOMMU usage. Default.
- force Force IOMMU.
- merge Do lazy merging. This may improve performance on some block devices.
- Implies force (experimental)
- biomerge Do merging at the BIO layer. This is more efficient than merge,
- but should be only done with very big IOMMUs. Implies merge,force.
- nomerge Don't do SG merging.
- forcesac For SAC mode for masks <40bits (experimental)
- fullflush Flush IOMMU on each allocation (default)
- nofullflush Don't use IOMMU fullflush
- allowed overwrite iommu off workarounds for specific chipsets.
- soft Use software bounce buffering (default for Intel machines)
- noaperture Don't touch the aperture for AGP.
-*/
-__init int iommu_setup(char *p)
-{
- int arg;
-
- while (*p) {
- if (!strncmp(p,"noagp",5))
- no_agp = 1;
- if (!strncmp(p,"off",3))
- no_iommu = 1;
- if (!strncmp(p,"force",5)) {
- force_iommu = 1;
- iommu_aperture_allowed = 1;
- }
- if (!strncmp(p,"allowed",7))
- iommu_aperture_allowed = 1;
- if (!strncmp(p,"noforce",7)) {
- iommu_merge = 0;
- force_iommu = 0;
- }
- if (!strncmp(p, "memaper", 7)) {
- fallback_aper_force = 1;
- p += 7;
- if (*p == '=') {
- ++p;
- if (get_option(&p, &arg))
- fallback_aper_order = arg;
- }
- }
- if (!strncmp(p, "biomerge",8)) {
- iommu_bio_merge = 4096;
- iommu_merge = 1;
- force_iommu = 1;
- }
- if (!strncmp(p, "panic",5))
- panic_on_overflow = 1;
- if (!strncmp(p, "nopanic",7))
- panic_on_overflow = 0;
- if (!strncmp(p, "merge",5)) {
- iommu_merge = 1;
- force_iommu = 1;
- }
- if (!strncmp(p, "nomerge",7))
- iommu_merge = 0;
- if (!strncmp(p, "forcesac",8))
- iommu_sac_force = 1;
- if (!strncmp(p, "fullflush",8))
- iommu_fullflush = 1;
- if (!strncmp(p, "nofullflush",11))
- iommu_fullflush = 0;
- if (!strncmp(p, "soft",4))
- swiotlb = 1;
- if (!strncmp(p, "noaperture",10))
- fix_aperture = 0;
+void gart_parse_options(char *p)
+{
+ int arg;
+
#ifdef CONFIG_IOMMU_LEAK
- if (!strncmp(p,"leak",4)) {
- leak_trace = 1;
- p += 4;
- if (*p == '=') ++p;
- if (isdigit(*p) && get_option(&p, &arg))
- iommu_leak_pages = arg;
- } else
+ if (!strncmp(p,"leak",4)) {
+ leak_trace = 1;
+ p += 4;
+ if (*p == '=') ++p;
+ if (isdigit(*p) && get_option(&p, &arg))
+ iommu_leak_pages = arg;
+ }
#endif
- if (isdigit(*p) && get_option(&p, &arg))
- iommu_size = arg;
- p += strcspn(p, ",");
- if (*p == ',')
- ++p;
- }
- return 1;
-}
+ if (isdigit(*p) && get_option(&p, &arg))
+ iommu_size = arg;
+ if (!strncmp(p, "fullflush",8))
+ iommu_fullflush = 1;
+ if (!strncmp(p, "nofullflush",11))
+ iommu_fullflush = 0;
+ if (!strncmp(p,"noagp",5))
+ no_agp = 1;
+ if (!strncmp(p, "noaperture",10))
+ fix_aperture = 0;
+ /* duplicated from pci-dma.c */
+ if (!strncmp(p,"force",5))
+ iommu_aperture_allowed = 1;
+ if (!strncmp(p,"allowed",7))
+ iommu_aperture_allowed = 1;
+ if (!strncmp(p, "memaper", 7)) {
+ fallback_aper_force = 1;
+ p += 7;
+ if (*p == '=') {
+ ++p;
+ if (get_option(&p, &arg))
+ fallback_aper_order = arg;
+ }
+ }
+}
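Note that iommu_setup() above (and gart_parse_options() here) prefix-match tokens with strncmp, so "force" also fires on "forcesac"; only the strcspn() walk at the bottom of the loop advances the cursor. A standalone sketch of that walk, with hypothetical option names and no kernel dependencies:

#include <stdio.h>
#include <string.h>

static void parse(const char *p)
{
        while (*p) {
                if (!strncmp(p, "force", 5))    /* also matches "forcesac" */
                        printf("force\n");
                if (!strncmp(p, "noforce", 7))
                        printf("noforce\n");
                p += strcspn(p, ",");           /* jump to the next comma */
                if (*p == ',')
                        ++p;
        }
}

int main(void)
{
        parse("noforce,forcesac");              /* prints "noforce", then "force" */
        return 0;
}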
diff --git a/arch/x86_64/kernel/pci-nommu.c b/arch/x86_64/kernel/pci-nommu.c
index 5a981dca87f..e4156497519 100644
--- a/arch/x86_64/kernel/pci-nommu.c
+++ b/arch/x86_64/kernel/pci-nommu.c
@@ -6,89 +6,94 @@
#include <linux/string.h>
#include <asm/proto.h>
#include <asm/processor.h>
+#include <asm/dma.h>
-int iommu_merge = 0;
-EXPORT_SYMBOL(iommu_merge);
-
-dma_addr_t bad_dma_address;
-EXPORT_SYMBOL(bad_dma_address);
-
-int iommu_bio_merge = 0;
-EXPORT_SYMBOL(iommu_bio_merge);
-
-int iommu_sac_force = 0;
-EXPORT_SYMBOL(iommu_sac_force);
-
-/*
- * Dummy IO MMU functions
- */
-
-void *dma_alloc_coherent(struct device *hwdev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp)
+static int
+check_addr(char *name, struct device *hwdev, dma_addr_t bus, size_t size)
{
- void *ret;
- u64 mask;
- int order = get_order(size);
-
- if (hwdev)
- mask = hwdev->coherent_dma_mask & *hwdev->dma_mask;
- else
- mask = 0xffffffff;
- for (;;) {
- ret = (void *)__get_free_pages(gfp, order);
- if (ret == NULL)
- return NULL;
- *dma_handle = virt_to_bus(ret);
- if ((*dma_handle & ~mask) == 0)
- break;
- free_pages((unsigned long)ret, order);
- if (gfp & GFP_DMA)
- return NULL;
- gfp |= GFP_DMA;
+ if (hwdev && bus + size > *hwdev->dma_mask) {
+ printk(KERN_ERR
+ "nommu_%s: overflow %Lx+%lu of device mask %Lx\n",
+ name, (long long)bus, size, (long long)*hwdev->dma_mask);
+ return 0;
}
+ return 1;
+}
- memset(ret, 0, size);
- return ret;
+static dma_addr_t
+nommu_map_single(struct device *hwdev, void *ptr, size_t size,
+ int direction)
+{
+ dma_addr_t bus = virt_to_bus(ptr);
+ if (!check_addr("map_single", hwdev, bus, size))
+ return bad_dma_address;
+ return bus;
}
-EXPORT_SYMBOL(dma_alloc_coherent);
-void dma_free_coherent(struct device *hwdev, size_t size,
- void *vaddr, dma_addr_t dma_handle)
+void nommu_unmap_single(struct device *dev, dma_addr_t addr,size_t size,
+ int direction)
{
- free_pages((unsigned long)vaddr, get_order(size));
}
-EXPORT_SYMBOL(dma_free_coherent);
-int dma_supported(struct device *hwdev, u64 mask)
+/* Map a set of buffers described by scatterlist in streaming
+ * mode for DMA. This is the scatter-gather version of the
+ * above pci_map_single interface. Here the scatter gather list
+ * elements are each tagged with the appropriate dma address
+ * and length. They are obtained via sg_dma_{address,length}(SG).
+ *
+ * NOTE: An implementation may be able to use a smaller number of
+ * DMA address/length pairs than there are SG table elements.
+ * (for example via virtual mapping capabilities)
+ * The routine returns the number of addr/length pairs actually
+ * used, at most nents.
+ *
+ * Device ownership issues as mentioned above for pci_map_single are
+ * the same here.
+ */
+int nommu_map_sg(struct device *hwdev, struct scatterlist *sg,
+ int nents, int direction)
{
- /*
- * we fall back to GFP_DMA when the mask isn't all 1s,
- * so we can't guarantee allocations that must be
- * within a tighter range than GFP_DMA..
- * RED-PEN this won't work for pci_map_single. Caller has to
- * use GFP_DMA in the first place.
- */
- if (mask < 0x00ffffff)
- return 0;
+ int i;
- return 1;
-}
-EXPORT_SYMBOL(dma_supported);
+ BUG_ON(direction == DMA_NONE);
+ for (i = 0; i < nents; i++ ) {
+ struct scatterlist *s = &sg[i];
+ BUG_ON(!s->page);
+ s->dma_address = virt_to_bus(page_address(s->page) +s->offset);
+ if (!check_addr("map_sg", hwdev, s->dma_address, s->length))
+ return 0;
+ s->dma_length = s->length;
+ }
+ return nents;
+}
-int dma_get_cache_alignment(void)
+/* Unmap a set of streaming mode DMA translations.
+ * Again, cpu read rules concerning calls here are the same as for
+ * pci_unmap_single() above.
+ */
+void nommu_unmap_sg(struct device *dev, struct scatterlist *sg,
+ int nents, int dir)
{
- return boot_cpu_data.x86_clflush_size;
}
-EXPORT_SYMBOL(dma_get_cache_alignment);
-static int __init check_ram(void)
-{
- if (end_pfn >= 0xffffffff>>PAGE_SHIFT) {
- printk(
- KERN_ERR "WARNING more than 4GB of memory but IOMMU not compiled in.\n"
- KERN_ERR "WARNING 32bit PCI may malfunction.\n");
- }
- return 0;
-}
-__initcall(check_ram);
+struct dma_mapping_ops nommu_dma_ops = {
+ .map_single = nommu_map_single,
+ .unmap_single = nommu_unmap_single,
+ .map_sg = nommu_map_sg,
+ .unmap_sg = nommu_unmap_sg,
+ .is_phys = 1,
+};
+void __init no_iommu_init(void)
+{
+ if (dma_ops)
+ return;
+ printk(KERN_INFO "PCI-DMA: Disabling IOMMU.\n");
+ dma_ops = &nommu_dma_ops;
+ if (end_pfn > MAX_DMA32_PFN) {
+ printk(KERN_ERR
+ "WARNING more than 4GB of memory but IOMMU not compiled in.\n"
+ KERN_ERR "WARNING 32bit PCI may malfunction.\n"
+ KERN_ERR "You might want to enable CONFIG_GART_IOMMU\n");
+ }
+}
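Since nommu_map_single() cannot remap anything, a buffer above the device's mask comes back as bad_dma_address and the driver has to cope. A hypothetical driver-side sketch, assuming the dma_map_single()/dma_mapping_error() wrappers that sit on top of these ops (the wrappers themselves are outside this diff):

/* Hypothetical driver fragment; kernel context assumed. */
static int my_driver_map(struct device *dev, void *buf, size_t len)
{
        dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

        if (dma_mapping_error(handle)) {
                /* No IOMMU and the buffer is above dma_mask: fail,
                 * or retry with a GFP_DMA bounce copy. */
                return -ENOMEM;
        }
        /* ... hand 'handle' to the device ... */
        return 0;
}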
diff --git a/arch/x86_64/kernel/pci-swiotlb.c b/arch/x86_64/kernel/pci-swiotlb.c
new file mode 100644
index 00000000000..3569a25ad7f
--- /dev/null
+++ b/arch/x86_64/kernel/pci-swiotlb.c
@@ -0,0 +1,42 @@
+/* Glue code to lib/swiotlb.c */
+
+#include <linux/pci.h>
+#include <linux/cache.h>
+#include <linux/module.h>
+#include <asm/dma-mapping.h>
+#include <asm/proto.h>
+#include <asm/swiotlb.h>
+#include <asm/dma.h>
+
+int swiotlb __read_mostly;
+EXPORT_SYMBOL(swiotlb);
+
+struct dma_mapping_ops swiotlb_dma_ops = {
+ .mapping_error = swiotlb_dma_mapping_error,
+ .alloc_coherent = swiotlb_alloc_coherent,
+ .free_coherent = swiotlb_free_coherent,
+ .map_single = swiotlb_map_single,
+ .unmap_single = swiotlb_unmap_single,
+ .sync_single_for_cpu = swiotlb_sync_single_for_cpu,
+ .sync_single_for_device = swiotlb_sync_single_for_device,
+ .sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu,
+ .sync_single_range_for_device = swiotlb_sync_single_range_for_device,
+ .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
+ .sync_sg_for_device = swiotlb_sync_sg_for_device,
+ .map_sg = swiotlb_map_sg,
+ .unmap_sg = swiotlb_unmap_sg,
+ .dma_supported = NULL,
+};
+
+void pci_swiotlb_init(void)
+{
+ /* don't initialize swiotlb if iommu=off (no_iommu=1) */
+ if (!iommu_aperture && !no_iommu &&
+ (end_pfn > MAX_DMA32_PFN || force_iommu))
+ swiotlb = 1;
+ if (swiotlb) {
+ swiotlb_init();
+ printk(KERN_INFO "PCI-DMA: Using software bounce buffering for IO (SWIOTLB)\n");
+ dma_ops = &swiotlb_dma_ops;
+ }
+}
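The swiotlb decision keys on end_pfn versus MAX_DMA32_PFN, i.e. whether any RAM sits above what a 32-bit DMA mask can reach. A sketch of what that constant amounts to, assuming the usual 4KB pages (the real definition lives in the asm-x86_64 headers, not in this diff):

#define PAGE_SHIFT      12                      /* assumption: 4KB pages */

/* First page frame past the 4GB boundary: 4GB / 4KB = 0x100000. */
#define MAX_DMA32_PFN   (1UL << (32 - PAGE_SHIFT))

/* end_pfn > MAX_DMA32_PFN means some memory is unreachable with a
 * 32-bit mask, so bouncing (swiotlb) or remapping (GART) is needed. */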
diff --git a/arch/x86_64/kernel/process.c b/arch/x86_64/kernel/process.c
index 7519fc520eb..8ded407e4a9 100644
--- a/arch/x86_64/kernel/process.c
+++ b/arch/x86_64/kernel/process.c
@@ -36,6 +36,7 @@
#include <linux/utsname.h>
#include <linux/random.h>
#include <linux/kprobes.h>
+#include <linux/notifier.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
@@ -50,13 +51,12 @@
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/ia32.h>
+#include <asm/idle.h>
asmlinkage extern void ret_from_fork(void);
unsigned long kernel_thread_flags = CLONE_VM | CLONE_UNTRACED;
-static atomic_t hlt_counter = ATOMIC_INIT(0);
-
unsigned long boot_option_idle_override = 0;
EXPORT_SYMBOL(boot_option_idle_override);
@@ -66,19 +66,49 @@ EXPORT_SYMBOL(boot_option_idle_override);
void (*pm_idle)(void);
static DEFINE_PER_CPU(unsigned int, cpu_idle_state);
-void disable_hlt(void)
+static struct notifier_block *idle_notifier;
+static DEFINE_SPINLOCK(idle_notifier_lock);
+
+void idle_notifier_register(struct notifier_block *n)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&idle_notifier_lock, flags);
+ notifier_chain_register(&idle_notifier, n);
+ spin_unlock_irqrestore(&idle_notifier_lock, flags);
+}
+EXPORT_SYMBOL_GPL(idle_notifier_register);
+
+void idle_notifier_unregister(struct notifier_block *n)
{
- atomic_inc(&hlt_counter);
+ unsigned long flags;
+ spin_lock_irqsave(&idle_notifier_lock, flags);
+ notifier_chain_unregister(&idle_notifier, n);
+ spin_unlock_irqrestore(&idle_notifier_lock, flags);
}
+EXPORT_SYMBOL(idle_notifier_unregister);
-EXPORT_SYMBOL(disable_hlt);
+enum idle_state { CPU_IDLE, CPU_NOT_IDLE };
+static DEFINE_PER_CPU(enum idle_state, idle_state) = CPU_NOT_IDLE;
-void enable_hlt(void)
+void enter_idle(void)
{
- atomic_dec(&hlt_counter);
+ __get_cpu_var(idle_state) = CPU_IDLE;
+ notifier_call_chain(&idle_notifier, IDLE_START, NULL);
}
-EXPORT_SYMBOL(enable_hlt);
+static void __exit_idle(void)
+{
+ __get_cpu_var(idle_state) = CPU_NOT_IDLE;
+ notifier_call_chain(&idle_notifier, IDLE_END, NULL);
+}
+
+/* Called from interrupts to signify idle end */
+void exit_idle(void)
+{
+ if (current->pid | read_pda(irqcount))
+ return;
+ __exit_idle();
+}
/*
* We use this if we don't have any better
@@ -88,21 +118,16 @@ void default_idle(void)
{
local_irq_enable();
- if (!atomic_read(&hlt_counter)) {
- clear_thread_flag(TIF_POLLING_NRFLAG);
- smp_mb__after_clear_bit();
- while (!need_resched()) {
- local_irq_disable();
- if (!need_resched())
- safe_halt();
- else
- local_irq_enable();
- }
- set_thread_flag(TIF_POLLING_NRFLAG);
- } else {
- while (!need_resched())
- cpu_relax();
+ clear_thread_flag(TIF_POLLING_NRFLAG);
+ smp_mb__after_clear_bit();
+ while (!need_resched()) {
+ local_irq_disable();
+ if (!need_resched())
+ safe_halt();
+ else
+ local_irq_enable();
}
+ set_thread_flag(TIF_POLLING_NRFLAG);
}
/*
@@ -157,7 +182,7 @@ EXPORT_SYMBOL_GPL(cpu_idle_wait);
DECLARE_PER_CPU(int, cpu_state);
#include <asm/nmi.h>
-/* We don't actually take CPU down, just spin without interrupts. */
+/* We halt the CPU with physical CPU hotplug */
static inline void play_dead(void)
{
idle_task_exit();
@@ -166,8 +191,9 @@ static inline void play_dead(void)
/* Ack it */
__get_cpu_var(cpu_state) = CPU_DEAD;
+ local_irq_disable();
while (1)
- safe_halt();
+ halt();
}
#else
static inline void play_dead(void)
@@ -200,7 +226,9 @@ void cpu_idle (void)
idle = default_idle;
if (cpu_is_offline(smp_processor_id()))
play_dead();
+ enter_idle();
idle();
+ __exit_idle();
}
preempt_enable_no_resched();
@@ -423,7 +451,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long rsp,
struct task_struct *me = current;
childregs = ((struct pt_regs *)
- (THREAD_SIZE + (unsigned long) p->thread_info)) - 1;
+ (THREAD_SIZE + task_stack_page(p))) - 1;
*childregs = *regs;
childregs->rax = 0;
@@ -435,7 +463,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long rsp,
p->thread.rsp0 = (unsigned long) (childregs+1);
p->thread.userrsp = me->thread.userrsp;
- set_ti_thread_flag(p->thread_info, TIF_FORK);
+ set_tsk_thread_flag(p, TIF_FORK);
p->thread.fs = me->thread.fs;
p->thread.gs = me->thread.gs;
@@ -562,7 +590,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
write_pda(oldrsp, next->userrsp);
write_pda(pcurrent, next_p);
write_pda(kernelstack,
- (unsigned long)next_p->thread_info + THREAD_SIZE - PDA_STACKOFFSET);
+ task_stack_page(next_p) + THREAD_SIZE - PDA_STACKOFFSET);
/*
* Now maybe reload the debug registers
@@ -676,7 +704,7 @@ unsigned long get_wchan(struct task_struct *p)
if (!p || p == current || p->state==TASK_RUNNING)
return 0;
- stack = (unsigned long)p->thread_info;
+ stack = (unsigned long)task_stack_page(p);
if (p->thread.rsp < stack || p->thread.rsp > stack+THREAD_SIZE)
return 0;
fp = *(u64 *)(p->thread.rsp);
@@ -794,8 +822,7 @@ int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
{
struct pt_regs *pp, ptregs;
- pp = (struct pt_regs *)(tsk->thread.rsp0);
- --pp;
+ pp = task_pt_regs(tsk);
ptregs = *pp;
ptregs.cs &= 0xffff;
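process.c trades the old hlt_counter scheme for an idle notifier chain: enter_idle() and __exit_idle() broadcast IDLE_START and IDLE_END around the idle loop, and interrupt paths call exit_idle(). A hedged sketch of a consumer, using only the API added above (the IDLE_START/IDLE_END constants come from asm/idle.h, outside this diff):

#include <linux/notifier.h>

static int my_idle_hook(struct notifier_block *nb,
                        unsigned long action, void *unused)
{
        switch (action) {
        case IDLE_START:        /* CPU is entering idle */
                break;
        case IDLE_END:          /* CPU left idle, e.g. via an interrupt */
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block my_idle_nb = {
        .notifier_call = my_idle_hook,
};

/* from some init path: idle_notifier_register(&my_idle_nb); */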
diff --git a/arch/x86_64/kernel/ptrace.c b/arch/x86_64/kernel/ptrace.c
index a87b6cebe80..53205622351 100644
--- a/arch/x86_64/kernel/ptrace.c
+++ b/arch/x86_64/kernel/ptrace.c
@@ -36,9 +36,12 @@
* in exit.c or in signal.c.
*/
-/* determines which flags the user has access to. */
-/* 1 = access 0 = no access */
-#define FLAG_MASK 0x44dd5UL
+/*
+ * Determines which flags the user has access to [1 = access, 0 = no access].
+ * Prohibits changing ID(21), VIP(20), VIF(19), VM(17), IOPL(12-13), IF(9).
+ * Also masks reserved bits (63-22, 15, 5, 3, 1).
+ */
+#define FLAG_MASK 0x54dd5UL
/* set's the trap flag. */
#define TRAP_FLAG 0x100UL
@@ -64,12 +67,6 @@ static inline unsigned long get_stack_long(struct task_struct *task, int offset)
return (*((unsigned long *)stack));
}
-static inline struct pt_regs *get_child_regs(struct task_struct *task)
-{
- struct pt_regs *regs = (void *)task->thread.rsp0;
- return regs - 1;
-}
-
/*
* this routine will put a word on the processes privileged stack.
* the offset is how far from the base addr as stored in the TSS.
@@ -167,7 +164,7 @@ static int is_at_popf(struct task_struct *child, struct pt_regs *regs)
static void set_singlestep(struct task_struct *child)
{
- struct pt_regs *regs = get_child_regs(child);
+ struct pt_regs *regs = task_pt_regs(child);
/*
* Always set TIF_SINGLESTEP - this guarantees that
@@ -205,7 +202,7 @@ static void clear_singlestep(struct task_struct *child)
/* But touch TF only if it was set by us.. */
if (child->ptrace & PT_DTRACE) {
- struct pt_regs *regs = get_child_regs(child);
+ struct pt_regs *regs = task_pt_regs(child);
regs->eflags &= ~TRAP_FLAG;
child->ptrace &= ~PT_DTRACE;
}
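The widened FLAG_MASK documents which eflags bits a tracer may hand to the tracee. The masking pattern ptrace applies when writing eflags (sketched here; the putreg() caller is not part of this hunk) keeps every kernel-owned bit and accepts only whitelisted bits from userspace:

#define FLAG_MASK 0x54dd5UL     /* 1 = user-writable bit */

/* Sketch of the eflags merge a putreg()-style writer performs. */
static unsigned long merge_eflags(unsigned long kernel_val,
                                  unsigned long user_val)
{
        return (kernel_val & ~FLAG_MASK) | (user_val & FLAG_MASK);
}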
diff --git a/arch/x86_64/kernel/reboot.c b/arch/x86_64/kernel/reboot.c
index 75235ed2b31..57117b8beb2 100644
--- a/arch/x86_64/kernel/reboot.c
+++ b/arch/x86_64/kernel/reboot.c
@@ -6,6 +6,7 @@
#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/string.h>
+#include <linux/pm.h>
#include <asm/io.h>
#include <asm/kdebug.h>
#include <asm/delay.h>
@@ -154,10 +155,11 @@ void machine_halt(void)
void machine_power_off(void)
{
- if (!reboot_force) {
- machine_shutdown();
- }
- if (pm_power_off)
+ if (pm_power_off) {
+ if (!reboot_force) {
+ machine_shutdown();
+ }
pm_power_off();
+ }
}
diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c
index 750e01dcbdf..28895c03cb1 100644
--- a/arch/x86_64/kernel/setup.c
+++ b/arch/x86_64/kernel/setup.c
@@ -35,6 +35,7 @@
#include <asm/processor.h>
#include <linux/console.h>
#include <linux/seq_file.h>
+#include <linux/crash_dump.h>
#include <linux/root_dev.h>
#include <linux/pci.h>
#include <linux/acpi.h>
@@ -42,6 +43,9 @@
#include <linux/edd.h>
#include <linux/mmzone.h>
#include <linux/kexec.h>
+#include <linux/cpufreq.h>
+#include <linux/dmi.h>
+#include <linux/dma-mapping.h>
#include <asm/mtrr.h>
#include <asm/uaccess.h>
@@ -60,7 +64,9 @@
#include <asm/setup.h>
#include <asm/mach_apic.h>
#include <asm/numa.h>
+#include <asm/swiotlb.h>
#include <asm/sections.h>
+#include <asm/gart-mapping.h>
/*
* Machine setup..
@@ -85,11 +91,6 @@ int bootloader_type;
unsigned long saved_video_mode;
-#ifdef CONFIG_SWIOTLB
-int swiotlb;
-EXPORT_SYMBOL(swiotlb);
-#endif
-
/*
* Setup options
*/
@@ -274,10 +275,7 @@ static __init void parse_cmdline_early (char ** cmdline_p)
{
char c = ' ', *to = command_line, *from = COMMAND_LINE;
int len = 0;
-
- /* Save unparsed command line copy for /proc/cmdline */
- memcpy(saved_command_line, COMMAND_LINE, COMMAND_LINE_SIZE);
- saved_command_line[COMMAND_LINE_SIZE-1] = '\0';
+ int userdef = 0;
for (;;) {
if (c != ' ')
@@ -345,10 +343,14 @@ static __init void parse_cmdline_early (char ** cmdline_p)
!memcmp(from, "disableapic", 11))
disable_apic = 1;
- if (!memcmp(from, "noapic", 6))
+ /* Don't confuse with noapictimer */
+ if (!memcmp(from, "noapic", 6) &&
+ (from[6] == ' ' || from[6] == 0))
skip_ioapic_setup = 1;
- if (!memcmp(from, "apic", 4)) {
+ /* Make sure to not confuse with apic= */
+ if (!memcmp(from, "apic", 4) &&
+ (from[4] == ' ' || from[4] == 0)) {
skip_ioapic_setup = 0;
ioapic_force = 1;
}
@@ -356,16 +358,36 @@ static __init void parse_cmdline_early (char ** cmdline_p)
if (!memcmp(from, "mem=", 4))
parse_memopt(from+4, &from);
+ if (!memcmp(from, "memmap=", 7)) {
+ /* exactmap option is for used defined memory */
+ if (!memcmp(from+7, "exactmap", 8)) {
+#ifdef CONFIG_CRASH_DUMP
+ /* If we are doing a crash dump, we
+ * still need to know the real mem
+ * size before original memory map is
+ * reset.
+ */
+ saved_max_pfn = e820_end_of_ram();
+#endif
+ from += 8+7;
+ end_pfn_map = 0;
+ e820.nr_map = 0;
+ userdef = 1;
+ }
+ else {
+ parse_memmapopt(from+7, &from);
+ userdef = 1;
+ }
+ }
+
#ifdef CONFIG_NUMA
if (!memcmp(from, "numa=", 5))
numa_setup(from+5);
#endif
-#ifdef CONFIG_GART_IOMMU
if (!memcmp(from,"iommu=",6)) {
iommu_setup(from+6);
}
-#endif
if (!memcmp(from,"oops=panic", 10))
panic_on_oops = 1;
@@ -394,6 +416,14 @@ static __init void parse_cmdline_early (char ** cmdline_p)
}
#endif
+#ifdef CONFIG_PROC_VMCORE
+ /* elfcorehdr= specifies the location of the ELF core header
+ * stored by the crashed kernel. This option is passed by the
+ * kexec loader to the capture kernel.
+ */
+ else if(!memcmp(from, "elfcorehdr=", 11))
+ elfcorehdr_addr = memparse(from+11, &from);
+#endif
next_char:
c = *(from++);
if (!c)
@@ -402,6 +432,10 @@ static __init void parse_cmdline_early (char ** cmdline_p)
break;
*(to++) = c;
}
+ if (userdef) {
+ printk(KERN_INFO "user-defined physical RAM map:\n");
+ e820_print_map("user");
+ }
*to = '\0';
*cmdline_p = command_line;
}
@@ -442,6 +476,8 @@ static unsigned char *k8_nops[ASM_NOP_MAX+1] = {
k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
+extern char __vsyscall_0;
+
/* Replace instructions with better alternatives for this CPU type.
This runs before SMP is initialized to avoid SMP problems with
@@ -453,11 +489,17 @@ void apply_alternatives(void *start, void *end)
struct alt_instr *a;
int diff, i, k;
for (a = start; (void *)a < end; a++) {
+ u8 *instr;
+
if (!boot_cpu_has(a->cpuid))
continue;
BUG_ON(a->replacementlen > a->instrlen);
- __inline_memcpy(a->instr, a->replacement, a->replacementlen);
+ instr = a->instr;
+ /* vsyscall code is not mapped yet. resolve it manually. */
+ if (instr >= (u8 *)VSYSCALL_START && instr < (u8*)VSYSCALL_END)
+ instr = __va(instr - (u8*)VSYSCALL_START + (u8*)__pa_symbol(&__vsyscall_0));
+ __inline_memcpy(instr, a->replacement, a->replacementlen);
diff = a->instrlen - a->replacementlen;
/* Pad the rest with nops */
@@ -465,7 +507,7 @@ void apply_alternatives(void *start, void *end)
k = diff;
if (k > ASM_NOP_MAX)
k = ASM_NOP_MAX;
- __inline_memcpy(a->instr + i, k8_nops[k], k);
+ __inline_memcpy(instr + i, k8_nops[k], k);
}
}
}
@@ -669,6 +711,8 @@ void __init setup_arch(char **cmdline_p)
acpi_boot_init();
#endif
+ init_cpu_to_node();
+
#ifdef CONFIG_X86_LOCAL_APIC
/*
* get boot-time SMP configuration:
@@ -833,7 +877,6 @@ static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
static int __init init_amd(struct cpuinfo_x86 *c)
{
int r;
- int level;
#ifdef CONFIG_SMP
unsigned long value;
@@ -856,11 +899,6 @@ static int __init init_amd(struct cpuinfo_x86 *c)
3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
clear_bit(0*32+31, &c->x86_capability);
- /* C-stepping K8? */
- level = cpuid_eax(1);
- if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
- set_bit(X86_FEATURE_K8_C, &c->x86_capability);
-
r = get_model_name(c);
if (!r) {
switch (c->x86) {
@@ -873,6 +911,10 @@ static int __init init_amd(struct cpuinfo_x86 *c)
}
display_cacheinfo(c);
+ /* c->x86_power is 8000_0007 edx. Bit 8 is constant TSC */
+ if (c->x86_power & (1<<8))
+ set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
+
if (c->extended_cpuid_level >= 0x80000008) {
c->x86_max_cores = (cpuid_ecx(0x80000008) & 0xff) + 1;
if (c->x86_max_cores & (c->x86_max_cores - 1))
@@ -991,8 +1033,10 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
if (c->x86 == 15)
c->x86_cache_alignment = c->x86_clflush_size * 2;
- if (c->x86 >= 15)
+ if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
+ (c->x86 == 0x6 && c->x86_model >= 0x0e))
set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
+ set_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
c->x86_max_cores = intel_num_cpu_cores(c);
srat_detect_node();
@@ -1191,7 +1235,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, "nx", NULL, "mmxext", NULL,
- NULL, "fxsr_opt", NULL, NULL, NULL, "lm", "3dnowext", "3dnow",
+ NULL, "fxsr_opt", "rdtscp", NULL, NULL, "lm", "3dnowext", "3dnow",
/* Transmeta-defined */
"recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
@@ -1219,7 +1263,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
/* AMD-defined (#2) */
- "lahf_lm", "cmp_legacy", NULL, NULL, NULL, NULL, NULL, NULL,
+ "lahf_lm", "cmp_legacy", "svm", NULL, "cr8_legacy", NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
@@ -1230,7 +1274,9 @@ static int show_cpuinfo(struct seq_file *m, void *v)
"vid", /* voltage id control */
"ttp", /* thermal trip */
"tm",
- "stc"
+ "stc",
+ NULL,
+ /* nothing */ /* constant_tsc - moved to flags */
};
@@ -1256,8 +1302,11 @@ static int show_cpuinfo(struct seq_file *m, void *v)
seq_printf(m, "stepping\t: unknown\n");
if (cpu_has(c,X86_FEATURE_TSC)) {
+ unsigned int freq = cpufreq_quick_get((unsigned)(c-cpu_data));
+ if (!freq)
+ freq = cpu_khz;
seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
- cpu_khz / 1000, (cpu_khz % 1000));
+ freq / 1000, (freq % 1000));
}
/* Cache size */
@@ -1307,8 +1356,11 @@ static int show_cpuinfo(struct seq_file *m, void *v)
unsigned i;
for (i = 0; i < 32; i++)
if (c->x86_power & (1 << i)) {
- if (i < ARRAY_SIZE(x86_power_flags))
- seq_printf(m, " %s", x86_power_flags[i]);
+ if (i < ARRAY_SIZE(x86_power_flags) &&
+ x86_power_flags[i])
+ seq_printf(m, "%s%s",
+ x86_power_flags[i][0]?" ":"",
+ x86_power_flags[i]);
else
seq_printf(m, " [%d]", i);
}
@@ -1340,3 +1392,11 @@ struct seq_operations cpuinfo_op = {
.stop = c_stop,
.show = show_cpuinfo,
};
+
+static int __init run_dmi_scan(void)
+{
+ dmi_scan_machine();
+ return 0;
+}
+core_initcall(run_dmi_scan);
+
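The "noapic"/"apic" fix above shows why a bare memcmp prefix test is unsafe on the command line: "noapic" would also fire on "noapictimer". A small standalone sketch of the whole-word test now used:

#include <string.h>

/* Match an option only when the prefix ends the word. */
static int option_is(const char *from, const char *opt)
{
        size_t n = strlen(opt);

        return !memcmp(from, opt, n) &&
               (from[n] == ' ' || from[n] == '\0');
}

/* option_is("noapictimer idle=poll", "noapic") == 0
 * option_is("noapic idle=poll",      "noapic") == 1 */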
diff --git a/arch/x86_64/kernel/setup64.c b/arch/x86_64/kernel/setup64.c
index 06dc354375c..70f1bb808a2 100644
--- a/arch/x86_64/kernel/setup64.c
+++ b/arch/x86_64/kernel/setup64.c
@@ -30,14 +30,15 @@ char x86_boot_params[BOOT_PARAM_SIZE] __initdata = {0,};
cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;
-struct x8664_pda cpu_pda[NR_CPUS] __cacheline_aligned;
+struct x8664_pda *_cpu_pda[NR_CPUS] __read_mostly;
+struct x8664_pda boot_cpu_pda[NR_CPUS] __cacheline_aligned;
struct desc_ptr idt_descr = { 256 * 16, (unsigned long) idt_table };
char boot_cpu_stack[IRQSTACKSIZE] __attribute__((section(".bss.page_aligned")));
unsigned long __supported_pte_mask __read_mostly = ~0UL;
-static int do_not_nx __initdata = 0;
+static int do_not_nx __cpuinitdata = 0;
/* noexec=on|off
Control non executable mappings for 64bit processes.
@@ -110,18 +111,18 @@ void __init setup_per_cpu_areas(void)
}
if (!ptr)
panic("Cannot allocate cpu data for CPU %d\n", i);
- cpu_pda[i].data_offset = ptr - __per_cpu_start;
+ cpu_pda(i)->data_offset = ptr - __per_cpu_start;
memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
}
}
void pda_init(int cpu)
{
- struct x8664_pda *pda = &cpu_pda[cpu];
+ struct x8664_pda *pda = cpu_pda(cpu);
/* Setup up data that may be needed in __get_free_pages early */
asm volatile("movl %0,%%fs ; movl %0,%%gs" :: "r" (0));
- wrmsrl(MSR_GS_BASE, cpu_pda + cpu);
+ wrmsrl(MSR_GS_BASE, pda);
pda->cpunumber = cpu;
pda->irqcount = -1;
@@ -145,7 +146,7 @@ void pda_init(int cpu)
pda->irqstackptr += IRQSTACKSIZE-64;
}
-char boot_exception_stacks[N_EXCEPTION_STACKS * EXCEPTION_STKSZ]
+char boot_exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]
__attribute__((section(".bss.page_aligned")));
/* May not be marked __init: used by software suspend */
@@ -213,23 +214,14 @@ void __cpuinit cpu_init (void)
* Initialize the per-CPU GDT with the boot GDT,
* and set up the GDT descriptor:
*/
- if (cpu) {
- memcpy(cpu_gdt_table[cpu], cpu_gdt_table[0], GDT_SIZE);
- }
+ if (cpu)
+ memcpy(cpu_gdt(cpu), cpu_gdt_table, GDT_SIZE);
cpu_gdt_descr[cpu].size = GDT_SIZE;
- cpu_gdt_descr[cpu].address = (unsigned long)cpu_gdt_table[cpu];
asm volatile("lgdt %0" :: "m" (cpu_gdt_descr[cpu]));
asm volatile("lidt %0" :: "m" (idt_descr));
- memcpy(me->thread.tls_array, cpu_gdt_table[cpu], GDT_ENTRY_TLS_ENTRIES * 8);
-
- /*
- * Delete NT
- */
-
- asm volatile("pushfq ; popq %%rax ; btr $14,%%rax ; pushq %%rax ; popfq" ::: "eax");
-
+ memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
syscall_init();
wrmsrl(MSR_FS_BASE, 0);
@@ -243,13 +235,27 @@ void __cpuinit cpu_init (void)
*/
for (v = 0; v < N_EXCEPTION_STACKS; v++) {
if (cpu) {
- estacks = (char *)__get_free_pages(GFP_ATOMIC,
- EXCEPTION_STACK_ORDER);
+ static const unsigned int order[N_EXCEPTION_STACKS] = {
+ [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STACK_ORDER,
+ [DEBUG_STACK - 1] = DEBUG_STACK_ORDER
+ };
+
+ estacks = (char *)__get_free_pages(GFP_ATOMIC, order[v]);
if (!estacks)
panic("Cannot allocate exception stack %ld %d\n",
v, cpu);
}
- estacks += EXCEPTION_STKSZ;
+ switch (v + 1) {
+#if DEBUG_STKSZ > EXCEPTION_STKSZ
+ case DEBUG_STACK:
+ cpu_pda[cpu].debugstack = (unsigned long)estacks;
+ estacks += DEBUG_STKSZ;
+ break;
+#endif
+ default:
+ estacks += EXCEPTION_STKSZ;
+ break;
+ }
t->ist[v] = (unsigned long)estacks;
}
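setup64.c replaces the flat cpu_pda[NR_CPUS] array with an array of pointers (_cpu_pda[]) bootstrapped from boot_cpu_pda[], so smpboot.c below can retarget an entry at node-local memory. A sketch of the accessor shape this implies (the real cpu_pda() macro lives in the asm-x86_64 headers, outside this diff):

/* Sketch, not the header's exact text. */
extern struct x8664_pda *_cpu_pda[];    /* initially -> boot_cpu_pda[i] */
#define cpu_pda(i) (_cpu_pda[i])

/* Every cpu_pda(cpu)->field access reads through the pointer, so
 * swapping in a kmalloc_node() copy retargets all users at once. */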
diff --git a/arch/x86_64/kernel/smp.c b/arch/x86_64/kernel/smp.c
index cfc3d9dccbd..19ef012b1f1 100644
--- a/arch/x86_64/kernel/smp.c
+++ b/arch/x86_64/kernel/smp.c
@@ -27,6 +27,7 @@
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/apicdef.h>
+#include <asm/idle.h>
/*
* Smarter SMP flushing macros.
@@ -280,11 +281,6 @@ void flush_tlb_all(void)
on_each_cpu(do_flush_tlb_all, NULL, 1, 1);
}
-void smp_kdb_stop(void)
-{
- send_IPI_allbutself(KDB_VECTOR);
-}
-
/*
* this function sends a 'reschedule' IPI to another CPU.
* it goes straight through and wastes no time serializing
@@ -512,6 +508,7 @@ asmlinkage void smp_call_function_interrupt(void)
/*
* At this point the info structure may be out of scope unless wait==1
*/
+ exit_idle();
irq_enter();
(*func)(info);
irq_exit();
diff --git a/arch/x86_64/kernel/smpboot.c b/arch/x86_64/kernel/smpboot.c
index 683c33f7b96..a28756ef7ce 100644
--- a/arch/x86_64/kernel/smpboot.c
+++ b/arch/x86_64/kernel/smpboot.c
@@ -335,7 +335,13 @@ static __cpuinit void sync_tsc(unsigned int master)
static void __cpuinit tsc_sync_wait(void)
{
- if (notscsync || !cpu_has_tsc)
+ /*
+ * When the CPU has synchronized TSCs, assume the BIOS
+ * or the hardware already synced them. Otherwise we could
+ * mess up a possible perfect synchronization with a
+ * not-quite-perfect algorithm.
+ */
+ if (notscsync || !cpu_has_tsc || !unsynchronized_tsc())
return;
sync_tsc(0);
}
@@ -646,6 +652,7 @@ static int __cpuinit wakeup_secondary_via_INIT(int phys_apicid, unsigned int sta
send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
} while (send_status && (timeout++ < 1000));
+ mb();
atomic_set(&init_deasserted, 1);
num_starts = 2;
@@ -659,7 +666,6 @@ static int __cpuinit wakeup_secondary_via_INIT(int phys_apicid, unsigned int sta
for (j = 1; j <= num_starts; j++) {
Dprintk("Sending STARTUP #%d.\n",j);
- apic_read_around(APIC_SPIV);
apic_write(APIC_ESR, 0);
apic_read(APIC_ESR);
Dprintk("After apic_write.\n");
@@ -698,7 +704,6 @@ static int __cpuinit wakeup_secondary_via_INIT(int phys_apicid, unsigned int sta
* Due to the Pentium erratum 3AP.
*/
if (maxlvt > 3) {
- apic_read_around(APIC_SPIV);
apic_write(APIC_ESR, 0);
}
accept_status = (apic_read(APIC_ESR) & 0xEF);
@@ -743,11 +748,35 @@ static int __cpuinit do_boot_cpu(int cpu, int apicid)
};
DECLARE_WORK(work, do_fork_idle, &c_idle);
+ /* allocate memory for the GDTs of secondary CPUs; accounts for CPU hotplug */
+ if (!cpu_gdt_descr[cpu].address &&
+ !(cpu_gdt_descr[cpu].address = get_zeroed_page(GFP_KERNEL))) {
+ printk(KERN_ERR "Failed to allocate GDT for CPU %d\n", cpu);
+ return -1;
+ }
+
+ /* Allocate node local memory for AP pdas */
+ if (cpu_pda(cpu) == &boot_cpu_pda[cpu]) {
+ struct x8664_pda *newpda, *pda;
+ int node = cpu_to_node(cpu);
+ pda = cpu_pda(cpu);
+ newpda = kmalloc_node(sizeof (struct x8664_pda), GFP_ATOMIC,
+ node);
+ if (newpda) {
+ memcpy(newpda, pda, sizeof (struct x8664_pda));
+ cpu_pda(cpu) = newpda;
+ } else
+ printk(KERN_ERR
+ "Could not allocate node local PDA for CPU %d on node %d\n",
+ cpu, node);
+ }
+
+
c_idle.idle = get_idle_for_cpu(cpu);
if (c_idle.idle) {
c_idle.idle->thread.rsp = (unsigned long) (((struct pt_regs *)
- (THREAD_SIZE + (unsigned long) c_idle.idle->thread_info)) - 1);
+ (THREAD_SIZE + task_stack_page(c_idle.idle))) - 1);
init_idle(c_idle.idle, cpu);
goto do_rest;
}
@@ -778,14 +807,14 @@ static int __cpuinit do_boot_cpu(int cpu, int apicid)
do_rest:
- cpu_pda[cpu].pcurrent = c_idle.idle;
+ cpu_pda(cpu)->pcurrent = c_idle.idle;
start_rip = setup_trampoline();
init_rsp = c_idle.idle->thread.rsp;
per_cpu(init_tss,cpu).rsp0 = init_rsp;
initial_code = start_secondary;
- clear_ti_thread_flag(c_idle.idle->thread_info, TIF_FORK);
+ clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
printk(KERN_INFO "Booting processor %d/%d APIC 0x%x\n", cpu,
cpus_weight(cpu_present_map),
@@ -811,11 +840,8 @@ do_rest:
/*
* Be paranoid about clearing APIC errors.
*/
- if (APIC_INTEGRATED(apic_version[apicid])) {
- apic_read_around(APIC_SPIV);
- apic_write(APIC_ESR, 0);
- apic_read(APIC_ESR);
- }
+ apic_write(APIC_ESR, 0);
+ apic_read(APIC_ESR);
/*
* Status is now clean
@@ -927,8 +953,8 @@ int additional_cpus __initdata = -1;
*
* Three ways to find out the number of additional hotplug CPUs:
* - If the BIOS specified disabled CPUs in ACPI/mptables use that.
- * - otherwise use half of the available CPUs or 2, whatever is more.
* - The user can override it with additional_cpus=NUM
+ * - Otherwise don't reserve additional CPUs.
* We do this because additional CPUs waste a lot of memory.
* -AK
*/
@@ -938,13 +964,10 @@ __init void prefill_possible_map(void)
int possible;
if (additional_cpus == -1) {
- if (disabled_cpus > 0) {
+ if (disabled_cpus > 0)
additional_cpus = disabled_cpus;
- } else {
- additional_cpus = num_processors / 2;
- if (additional_cpus == 0)
- additional_cpus = 2;
- }
+ else
+ additional_cpus = 0;
}
possible = num_processors + additional_cpus;
if (possible > NR_CPUS)
@@ -996,7 +1019,7 @@ static int __init smp_sanity_check(unsigned max_cpus)
/*
* If we couldn't find a local APIC, then get out of here now!
*/
- if (APIC_INTEGRATED(apic_version[boot_cpu_id]) && !cpu_has_apic) {
+ if (!cpu_has_apic) {
printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
boot_cpu_id);
printk(KERN_ERR "... forcing use of dummy APIC emulation. (tell your hw vendor)\n");
@@ -1181,7 +1204,7 @@ int __cpu_disable(void)
if (cpu == 0)
return -EBUSY;
- disable_APIC_timer();
+ clear_local_APIC();
/*
* HACK:
diff --git a/arch/x86_64/kernel/suspend.c b/arch/x86_64/kernel/suspend.c
index fd2bef78088..ecbd34c1093 100644
--- a/arch/x86_64/kernel/suspend.c
+++ b/arch/x86_64/kernel/suspend.c
@@ -120,7 +120,7 @@ void fix_processor_context(void)
set_tss_desc(cpu,t); /* This just modifies memory; should not be necessary. But... This is necessary, because 386 hardware has the concept of a busy TSS or some similar stupidity. */
- cpu_gdt_table[cpu][GDT_ENTRY_TSS].type = 9;
+ cpu_gdt(cpu)[GDT_ENTRY_TSS].type = 9;
syscall_init(); /* This sets MSR_*STAR and related */
load_TR_desc(); /* This does ltr */
diff --git a/arch/x86_64/kernel/syscall.c b/arch/x86_64/kernel/syscall.c
index e263685f864..7c176b3edde 100644
--- a/arch/x86_64/kernel/syscall.c
+++ b/arch/x86_64/kernel/syscall.c
@@ -19,7 +19,7 @@ typedef void (*sys_call_ptr_t)(void);
extern void sys_ni_syscall(void);
-sys_call_ptr_t sys_call_table[__NR_syscall_max+1] __cacheline_aligned = {
+const sys_call_ptr_t sys_call_table[__NR_syscall_max+1] = {
/* Smells like a compiler bug -- it doesn't work when the & below is removed. */
[0 ... __NR_syscall_max] = &sys_ni_syscall,
#include <asm-x86_64/unistd.h>
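sys_call_table leans on a GCC extension: the range designator fills every slot with sys_ni_syscall, and the designated initializers pulled in from unistd.h then override individual entries, since the later initializer wins. A standalone sketch of the pattern:

#include <stdio.h>

/* GCC range designators; a later designated initializer overrides an
 * earlier one for the same index (GCC warns with -Woverride-init). */
static const int table[8] = {
        [0 ... 7] = -1,         /* default: "not implemented" */
        [3]       = 42,         /* override one slot */
};

int main(void)
{
        printf("%d %d\n", table[2], table[3]);  /* prints: -1 42 */
        return 0;
}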
diff --git a/arch/x86_64/kernel/time.c b/arch/x86_64/kernel/time.c
index fdaddc4e528..f8c47c68844 100644
--- a/arch/x86_64/kernel/time.c
+++ b/arch/x86_64/kernel/time.c
@@ -59,7 +59,7 @@ static int notsc __initdata = 0;
unsigned int cpu_khz; /* TSC clocks / usec, not used here */
static unsigned long hpet_period; /* fsecs / HPET clock */
unsigned long hpet_tick; /* HPET clocks / interrupt */
-static int hpet_use_timer;
+static int hpet_use_timer; /* Use the HPET counter for timekeeping; otherwise use the PIT */
unsigned long vxtime_hz = PIT_TICK_RATE;
int report_lost_ticks; /* command line option */
unsigned long long monotonic_base;
@@ -71,14 +71,6 @@ unsigned long __wall_jiffies __section_wall_jiffies = INITIAL_JIFFIES;
struct timespec __xtime __section_xtime;
struct timezone __sys_tz __section_sys_tz;
-static inline void rdtscll_sync(unsigned long *tsc)
-{
-#ifdef CONFIG_SMP
- sync_core();
-#endif
- rdtscll(*tsc);
-}
-
/*
* do_gettimeoffset() returns microseconds since last timer interrupt was
* triggered by hardware. A memory read of HPET is slower than a register read
@@ -93,7 +85,7 @@ static inline unsigned int do_gettimeoffset_tsc(void)
{
unsigned long t;
unsigned long x;
- rdtscll_sync(&t);
+ t = get_cycles_sync();
if (t < vxtime.last_tsc) t = vxtime.last_tsc; /* hack */
x = ((t - vxtime.last_tsc) * vxtime.tsc_quot) >> 32;
return x;
@@ -259,8 +251,8 @@ static void set_rtc_mmss(unsigned long nowtime)
#endif
{
- BIN_TO_BCD(real_seconds);
- BIN_TO_BCD(real_minutes);
+ BIN_TO_BCD(real_seconds);
+ BIN_TO_BCD(real_minutes);
CMOS_WRITE(real_seconds, RTC_SECONDS);
CMOS_WRITE(real_minutes, RTC_MINUTES);
}
@@ -297,25 +289,21 @@ unsigned long long monotonic_clock(void)
last_offset = vxtime.last;
base = monotonic_base;
this_offset = hpet_readl(HPET_COUNTER);
-
} while (read_seqretry(&xtime_lock, seq));
offset = (this_offset - last_offset);
offset *=(NSEC_PER_SEC/HZ)/hpet_tick;
return base + offset;
- }else{
+ } else {
do {
seq = read_seqbegin(&xtime_lock);
last_offset = vxtime.last_tsc;
base = monotonic_base;
} while (read_seqretry(&xtime_lock, seq));
- sync_core();
- rdtscll(this_offset);
+ this_offset = get_cycles_sync();
offset = (this_offset - last_offset)*1000/cpu_khz;
return base + offset;
}
-
-
}
EXPORT_SYMBOL(monotonic_clock);
@@ -391,7 +379,7 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
delay = LATCH - 1 - delay;
}
- rdtscll_sync(&tsc);
+ tsc = get_cycles_sync();
if (vxtime.mode == VXTIME_HPET) {
if (offset - vxtime.last > hpet_tick) {
@@ -471,6 +459,11 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
write_sequnlock(&xtime_lock);
+#ifdef CONFIG_X86_LOCAL_APIC
+ if (using_apic_timer)
+ smp_send_timer_broadcast_ipi();
+#endif
+
return IRQ_HANDLED;
}
@@ -509,10 +502,10 @@ unsigned long long sched_clock(void)
return cycles_2_ns(a);
}
-unsigned long get_cmos_time(void)
+static unsigned long get_cmos_time(void)
{
- unsigned int timeout, year, mon, day, hour, min, sec;
- unsigned char last, this;
+ unsigned int timeout = 1000000, year, mon, day, hour, min, sec;
+ unsigned char uip = 0, this = 0;
unsigned long flags;
/*
@@ -525,45 +518,41 @@ unsigned long get_cmos_time(void)
spin_lock_irqsave(&rtc_lock, flags);
- timeout = 1000000;
- last = this = 0;
-
- while (timeout && last && !this) {
- last = this;
+ while (timeout && (!uip || this)) {
+ uip |= this;
this = CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP;
timeout--;
}
-/*
- * Here we are safe to assume the registers won't change for a whole second, so
- * we just go ahead and read them.
- */
-
- sec = CMOS_READ(RTC_SECONDS);
- min = CMOS_READ(RTC_MINUTES);
- hour = CMOS_READ(RTC_HOURS);
- day = CMOS_READ(RTC_DAY_OF_MONTH);
- mon = CMOS_READ(RTC_MONTH);
- year = CMOS_READ(RTC_YEAR);
+ /*
+ * Here we are safe to assume the registers won't change for a whole
+ * second, so we just go ahead and read them.
+ */
+ sec = CMOS_READ(RTC_SECONDS);
+ min = CMOS_READ(RTC_MINUTES);
+ hour = CMOS_READ(RTC_HOURS);
+ day = CMOS_READ(RTC_DAY_OF_MONTH);
+ mon = CMOS_READ(RTC_MONTH);
+ year = CMOS_READ(RTC_YEAR);
spin_unlock_irqrestore(&rtc_lock, flags);
-/*
- * We know that x86-64 always uses BCD format, no need to check the config
- * register.
- */
+ /*
+ * We know that x86-64 always uses BCD format, no need to check the
+ * config register.
+ */
- BCD_TO_BIN(sec);
- BCD_TO_BIN(min);
- BCD_TO_BIN(hour);
- BCD_TO_BIN(day);
- BCD_TO_BIN(mon);
- BCD_TO_BIN(year);
+ BCD_TO_BIN(sec);
+ BCD_TO_BIN(min);
+ BCD_TO_BIN(hour);
+ BCD_TO_BIN(day);
+ BCD_TO_BIN(mon);
+ BCD_TO_BIN(year);
-/*
- * x86-64 systems only exists since 2002.
- * This will work up to Dec 31, 2100
- */
+ /*
+ * x86-64 systems have only existed since 2002.
+ * This will work up to Dec 31, 2100
+ */
year += 2000;
return mktime(year, mon, day, hour, min, sec);
@@ -695,8 +684,7 @@ static unsigned int __init hpet_calibrate_tsc(void)
do {
local_irq_disable();
hpet_now = hpet_readl(HPET_COUNTER);
- sync_core();
- rdtscl(tsc_now);
+ tsc_now = get_cycles_sync();
local_irq_restore(flags);
} while ((tsc_now - tsc_start) < TICK_COUNT &&
(hpet_now - hpet_start) < TICK_COUNT);
@@ -726,11 +714,9 @@ static unsigned int __init pit_calibrate_tsc(void)
outb(0xb0, 0x43);
outb((PIT_TICK_RATE / (1000 / 50)) & 0xff, 0x42);
outb((PIT_TICK_RATE / (1000 / 50)) >> 8, 0x42);
- rdtscll(start);
- sync_core();
+ start = get_cycles_sync();
while ((inb(0x61) & 0x20) == 0);
- sync_core();
- rdtscll(end);
+ end = get_cycles_sync();
spin_unlock_irqrestore(&i8253_lock, flags);
@@ -744,7 +730,7 @@ static __init int late_hpet_init(void)
unsigned int ntimer;
if (!vxtime.hpet_address)
- return -1;
+ return -1;
memset(&hd, 0, sizeof (hd));
@@ -878,8 +864,6 @@ static struct irqaction irq0 = {
timer_interrupt, SA_INTERRUPT, CPU_MASK_NONE, "timer", NULL, NULL
};
-extern void __init config_acpi_tables(void);
-
void __init time_init(void)
{
char *timename;
@@ -908,12 +892,14 @@ void __init time_init(void)
if (!hpet_init())
vxtime_hz = (1000000000000000L + hpet_period / 2) /
hpet_period;
+ else
+ vxtime.hpet_address = 0;
if (hpet_use_timer) {
cpu_khz = hpet_calibrate_tsc();
timename = "HPET";
#ifdef CONFIG_X86_PM_TIMER
- } else if (pmtmr_ioport) {
+ } else if (pmtmr_ioport && !vxtime.hpet_address) {
vxtime_hz = PM_TIMER_FREQUENCY;
timename = "PM";
pit_init();
@@ -932,7 +918,7 @@ void __init time_init(void)
vxtime.mode = VXTIME_TSC;
vxtime.quot = (1000000L << 32) / vxtime_hz;
vxtime.tsc_quot = (1000L << 32) / cpu_khz;
- rdtscll_sync(&vxtime.last_tsc);
+ vxtime.last_tsc = get_cycles_sync();
setup_irq(0, &irq0);
set_cyc2ns_scale(cpu_khz);
@@ -946,7 +932,7 @@ void __init time_init(void)
* Make an educated guess if the TSC is trustworthy and synchronized
* over all CPUs.
*/
-static __init int unsynchronized_tsc(void)
+__init int unsynchronized_tsc(void)
{
#ifdef CONFIG_SMP
if (oem_force_hpet_timer())
@@ -957,7 +943,7 @@ static __init int unsynchronized_tsc(void)
return 0;
#endif
/* Assume multi socket systems are not synchronized */
- return num_online_cpus() > 1;
+ return num_present_cpus() > 1;
}
/*
@@ -997,6 +983,10 @@ __setup("report_lost_ticks", time_setup);
static long clock_cmos_diff;
static unsigned long sleep_start;
+/*
+ * sysfs support for the timer.
+ */
+
static int timer_suspend(struct sys_device *dev, pm_message_t state)
{
/*
@@ -1039,7 +1029,6 @@ static struct sysdev_class timer_sysclass = {
set_kset_name("timer"),
};
-
/* XXX this driverfs stuff should probably go elsewhere later -john */
static struct sys_device device_timer = {
.id = 0,
@@ -1073,8 +1062,6 @@ device_initcall(time_init_device);
*/
#include <linux/rtc.h>
-extern irqreturn_t rtc_interrupt(int irq, void *dev_id, struct pt_regs *regs);
-
#define DEFAULT_RTC_INT_FREQ 64
#define RTC_NUM_INTS 1
@@ -1283,8 +1270,6 @@ irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id, struct pt_regs *regs)
}
#endif
-
-
static int __init nohpet_setup(char *s)
{
nohpet = 1;
@@ -1301,5 +1286,3 @@ static int __init notsc_setup(char *s)
}
__setup("notsc", notsc_setup);
-
-
diff --git a/arch/x86_64/kernel/trampoline.S b/arch/x86_64/kernel/trampoline.S
index 6d9c9a8e7d0..23a03eb91fc 100644
--- a/arch/x86_64/kernel/trampoline.S
+++ b/arch/x86_64/kernel/trampoline.S
@@ -42,8 +42,15 @@ r_base = .
movl $0xA5A5A5A5, trampoline_data - r_base
# write marker so the master knows we're running
- lidt idt_48 - r_base # load idt with 0, 0
- lgdt gdt_48 - r_base # load gdt with whatever is appropriate
+ /*
+ * With the kernel at a non-default location the GDT tables can be
+ * beyond 16MB, and lgdt cannot load such an address because the
+ * default operand size in real mode is 16 bit. Use lgdtl instead
+ * to force a 32-bit operand size.
+ */
+
+ lidtl idt_48 - r_base # load idt with 0, 0
+ lgdtl gdt_48 - r_base # load gdt with whatever is appropriate
xor %ax, %ax
inc %ax # protected mode (PE) bit
diff --git a/arch/x86_64/kernel/traps.c b/arch/x86_64/kernel/traps.c
index bf337f49318..8bb0aeda78b 100644
--- a/arch/x86_64/kernel/traps.c
+++ b/arch/x86_64/kernel/traps.c
@@ -70,7 +70,6 @@ asmlinkage void reserved(void);
asmlinkage void alignment_check(void);
asmlinkage void machine_check(void);
asmlinkage void spurious_interrupt_bug(void);
-asmlinkage void call_debug(void);
struct notifier_block *die_chain;
static DEFINE_SPINLOCK(die_notifier_lock);
@@ -121,19 +120,31 @@ int printk_address(unsigned long address)
static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
unsigned *usedp, const char **idp)
{
- static const char ids[N_EXCEPTION_STACKS][8] = {
+ static char ids[][8] = {
[DEBUG_STACK - 1] = "#DB",
[NMI_STACK - 1] = "NMI",
[DOUBLEFAULT_STACK - 1] = "#DF",
[STACKFAULT_STACK - 1] = "#SS",
[MCE_STACK - 1] = "#MC",
+#if DEBUG_STKSZ > EXCEPTION_STKSZ
+ [N_EXCEPTION_STACKS ... N_EXCEPTION_STACKS + DEBUG_STKSZ / EXCEPTION_STKSZ - 2] = "#DB[?]"
+#endif
};
unsigned k;
for (k = 0; k < N_EXCEPTION_STACKS; k++) {
unsigned long end;
- end = per_cpu(init_tss, cpu).ist[k];
+ switch (k + 1) {
+#if DEBUG_STKSZ > EXCEPTION_STKSZ
+ case DEBUG_STACK:
+ end = cpu_pda(cpu)->debugstack + DEBUG_STKSZ;
+ break;
+#endif
+ default:
+ end = per_cpu(init_tss, cpu).ist[k];
+ break;
+ }
if (stack >= end)
continue;
if (stack >= end - EXCEPTION_STKSZ) {
@@ -143,6 +154,22 @@ static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
*idp = ids[k];
return (unsigned long *)end;
}
+#if DEBUG_STKSZ > EXCEPTION_STKSZ
+ if (k == DEBUG_STACK - 1 && stack >= end - DEBUG_STKSZ) {
+ unsigned j = N_EXCEPTION_STACKS - 1;
+
+ do {
+ ++j;
+ end -= EXCEPTION_STKSZ;
+ ids[j][4] = '1' + (j - N_EXCEPTION_STACKS);
+ } while (stack < end - EXCEPTION_STKSZ);
+ if (*usedp & (1U << j))
+ break;
+ *usedp |= 1U << j;
+ *idp = ids[j];
+ return (unsigned long *)end;
+ }
+#endif
}
return NULL;
}
@@ -156,9 +183,8 @@ static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
void show_trace(unsigned long *stack)
{
- unsigned long addr;
const unsigned cpu = safe_smp_processor_id();
- unsigned long *irqstack_end = (unsigned long *)cpu_pda[cpu].irqstackptr;
+ unsigned long *irqstack_end = (unsigned long *)cpu_pda(cpu)->irqstackptr;
int i;
unsigned used = 0;
@@ -166,8 +192,14 @@ void show_trace(unsigned long *stack)
#define HANDLE_STACK(cond) \
do while (cond) { \
- addr = *stack++; \
+ unsigned long addr = *stack++; \
if (kernel_text_address(addr)) { \
+ if (i > 50) { \
+ printk("\n "); \
+ i = 0; \
+ } \
+ else \
+ i += printk(" "); \
/* \
* If the address is either in the text segment of the \
* kernel, or in the region which contains vmalloc'ed \
@@ -177,25 +209,19 @@ void show_trace(unsigned long *stack)
* out the call path that was taken. \
*/ \
i += printk_address(addr); \
- if (i > 50) { \
- printk("\n "); \
- i = 0; \
- } \
- else \
- i += printk(" "); \
} \
} while (0)
- for(i = 0; ; ) {
+ for(i = 11; ; ) {
const char *id;
unsigned long *estack_end;
estack_end = in_exception_stack(cpu, (unsigned long)stack,
&used, &id);
if (estack_end) {
- i += printk(" <%s> ", id);
+ i += printk(" <%s>", id);
HANDLE_STACK (stack < estack_end);
- i += printk(" <EOE> ");
+ i += printk(" <EOE>");
stack = (unsigned long *) estack_end[-2];
continue;
}
@@ -205,11 +231,11 @@ void show_trace(unsigned long *stack)
(IRQSTACKSIZE - 64) / sizeof(*irqstack);
if (stack >= irqstack && stack < irqstack_end) {
- i += printk(" <IRQ> ");
+ i += printk(" <IRQ>");
HANDLE_STACK (stack < irqstack_end);
stack = (unsigned long *) (irqstack_end[-1]);
irqstack_end = NULL;
- i += printk(" <EOI> ");
+ i += printk(" <EOI>");
continue;
}
}
@@ -226,8 +252,8 @@ void show_stack(struct task_struct *tsk, unsigned long * rsp)
unsigned long *stack;
int i;
const int cpu = safe_smp_processor_id();
- unsigned long *irqstack_end = (unsigned long *) (cpu_pda[cpu].irqstackptr);
- unsigned long *irqstack = (unsigned long *) (cpu_pda[cpu].irqstackptr - IRQSTACKSIZE);
+ unsigned long *irqstack_end = (unsigned long *) (cpu_pda(cpu)->irqstackptr);
+ unsigned long *irqstack = (unsigned long *) (cpu_pda(cpu)->irqstackptr - IRQSTACKSIZE);
// debugging aid: "show_stack(NULL, NULL);" prints the
// back trace for this cpu.
@@ -275,14 +301,14 @@ void show_registers(struct pt_regs *regs)
int in_kernel = !user_mode(regs);
unsigned long rsp;
const int cpu = safe_smp_processor_id();
- struct task_struct *cur = cpu_pda[cpu].pcurrent;
+ struct task_struct *cur = cpu_pda(cpu)->pcurrent;
rsp = regs->rsp;
printk("CPU %d ", cpu);
__show_regs(regs);
printk("Process %s (pid: %d, threadinfo %p, task %p)\n",
- cur->comm, cur->pid, cur->thread_info, cur);
+ cur->comm, cur->pid, task_thread_info(cur), cur);
/*
* When in-kernel, we also print out the stack and code at the
@@ -314,20 +340,26 @@ bad:
void handle_BUG(struct pt_regs *regs)
{
struct bug_frame f;
- char tmp;
+ long len;
+ const char *prefix = "";
if (user_mode(regs))
return;
- if (__copy_from_user(&f, (struct bug_frame *) regs->rip,
+ if (__copy_from_user(&f, (const void __user *) regs->rip,
sizeof(struct bug_frame)))
return;
if (f.filename >= 0 ||
f.ud2[0] != 0x0f || f.ud2[1] != 0x0b)
return;
- if (__get_user(tmp, (char *)(long)f.filename))
+ len = __strnlen_user((char *)(long)f.filename, PATH_MAX) - 1;
+ if (len < 0 || len >= PATH_MAX)
f.filename = (int)(long)"unmapped filename";
+ else if (len > 50) {
+ f.filename += len - 50;
+ prefix = "...";
+ }
printk("----------- [cut here ] --------- [please bite here ] ---------\n");
- printk(KERN_ALERT "Kernel BUG at %.50s:%d\n", (char *)(long)f.filename, f.line);
+ printk(KERN_ALERT "Kernel BUG at %s%.50s:%d\n", prefix, (char *)(long)f.filename, f.line);
}
#ifdef CONFIG_BUG
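The reworked handle_BUG() keeps the tail of an over-long filename and marks the cut with a "..." prefix, so the interesting end of the path survives the %.50s limit. The same logic as a small user-space sketch:

#include <stdio.h>
#include <string.h>

/* Keep the last 50 characters of a long path, prefixed with
 * "..." when truncated, as the reworked handle_BUG() does. */
static void print_bug_location(const char *filename, int line)
{
        const char *prefix = "";
        size_t len = strlen(filename);

        if (len > 50) {
                filename += len - 50;
                prefix = "...";
        }
        printf("Kernel BUG at %s%.50s:%d\n", prefix, filename, line);
}

int main(void)
{
        print_bug_location("drivers/with/a/very/deeply/nested/and/rather"
                           "/long/path/to/some/file.c", 42);
        return 0;
}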
@@ -382,7 +414,7 @@ void __die(const char * str, struct pt_regs * regs, long err)
printk("DEBUG_PAGEALLOC");
#endif
printk("\n");
- notify_die(DIE_OOPS, (char *)str, regs, err, 255, SIGSEGV);
+ notify_die(DIE_OOPS, str, regs, err, current->thread.trap_no, SIGSEGV);
show_registers(regs);
/* Executive summary in case the oops scrolled away */
printk(KERN_ALERT "RIP ");
@@ -399,11 +431,6 @@ void die(const char * str, struct pt_regs * regs, long err)
oops_end(flags);
do_exit(SIGSEGV);
}
-static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err)
-{
- if (!(regs->eflags & VM_MASK) && (regs->cs == __KERNEL_CS))
- die(str, regs, err);
-}
void die_nmi(char *str, struct pt_regs *regs)
{
@@ -426,19 +453,20 @@ static void __kprobes do_trap(int trapnr, int signr, char *str,
struct pt_regs * regs, long error_code,
siginfo_t *info)
{
+ struct task_struct *tsk = current;
+
conditional_sti(regs);
- if (user_mode(regs)) {
- struct task_struct *tsk = current;
+ tsk->thread.error_code = error_code;
+ tsk->thread.trap_no = trapnr;
+ if (user_mode(regs)) {
if (exception_trace && unhandled_signal(tsk, signr))
printk(KERN_INFO
"%s[%d] trap %s rip:%lx rsp:%lx error:%lx\n",
tsk->comm, tsk->pid, str,
regs->rip,regs->rsp,error_code);
- tsk->thread.error_code = error_code;
- tsk->thread.trap_no = trapnr;
if (info)
force_sig_info(signr, info, tsk);
else
@@ -485,7 +513,7 @@ asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
DO_ERROR_INFO( 0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->rip)
DO_ERROR( 4, SIGSEGV, "overflow", overflow)
DO_ERROR( 5, SIGSEGV, "bounds", bounds)
-DO_ERROR_INFO( 6, SIGILL, "invalid operand", invalid_op, ILL_ILLOPN, regs->rip)
+DO_ERROR_INFO( 6, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->rip)
DO_ERROR( 7, SIGSEGV, "device not available", device_not_available)
DO_ERROR( 9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun)
DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
@@ -493,24 +521,41 @@ DO_ERROR(11, SIGBUS, "segment not present", segment_not_present)
DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
DO_ERROR(18, SIGSEGV, "reserved", reserved)
DO_ERROR(12, SIGBUS, "stack segment", stack_segment)
-DO_ERROR( 8, SIGSEGV, "double fault", double_fault)
+
+asmlinkage void do_double_fault(struct pt_regs * regs, long error_code)
+{
+ static const char str[] = "double fault";
+ struct task_struct *tsk = current;
+
+ /* Return not checked because a double fault cannot be ignored */
+ notify_die(DIE_TRAP, str, regs, error_code, 8, SIGSEGV);
+
+ tsk->thread.error_code = error_code;
+ tsk->thread.trap_no = 8;
+
+ /* This is always a kernel trap and never fixable (and thus must
+ never return). */
+ for (;;)
+ die(str, regs, error_code);
+}
asmlinkage void __kprobes do_general_protection(struct pt_regs * regs,
long error_code)
{
+ struct task_struct *tsk = current;
+
conditional_sti(regs);
- if (user_mode(regs)) {
- struct task_struct *tsk = current;
+ tsk->thread.error_code = error_code;
+ tsk->thread.trap_no = 13;
+ if (user_mode(regs)) {
if (exception_trace && unhandled_signal(tsk, SIGSEGV))
printk(KERN_INFO
"%s[%d] general protection rip:%lx rsp:%lx error:%lx\n",
tsk->comm, tsk->pid,
regs->rip,regs->rsp,error_code);
- tsk->thread.error_code = error_code;
- tsk->thread.trap_no = 13;
force_sig(SIGSEGV, tsk);
return;
}
@@ -573,7 +618,7 @@ asmlinkage void default_do_nmi(struct pt_regs *regs)
reason = get_nmi_reason();
if (!(reason & 0xc0)) {
- if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 0, SIGINT)
+ if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 2, SIGINT)
== NOTIFY_STOP)
return;
#ifdef CONFIG_X86_LOCAL_APIC
@@ -589,7 +634,7 @@ asmlinkage void default_do_nmi(struct pt_regs *regs)
unknown_nmi_error(reason, regs);
return;
}
- if (notify_die(DIE_NMI, "nmi", regs, reason, 0, SIGINT) == NOTIFY_STOP)
+ if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP)
return;
/* AK: following checks seem to be broken on modern chipsets. FIXME */
@@ -600,6 +645,7 @@ asmlinkage void default_do_nmi(struct pt_regs *regs)
io_check_error(reason, regs);
}
+/* runs on IST stack. */
asmlinkage void __kprobes do_int3(struct pt_regs * regs, long error_code)
{
if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP) == NOTIFY_STOP) {
@@ -620,7 +666,7 @@ asmlinkage struct pt_regs *sync_regs(struct pt_regs *eregs)
;
/* Exception from user space */
else if (user_mode(eregs))
- regs = ((struct pt_regs *)current->thread.rsp0) - 1;
+ regs = task_pt_regs(current);
/* Exception from kernel and interrupts are enabled. Move to
kernel process stack. */
else if (eregs->eflags & X86_EFLAGS_IF)
@@ -684,11 +730,9 @@ asmlinkage void __kprobes do_debug(struct pt_regs * regs,
info.si_signo = SIGTRAP;
info.si_errno = 0;
info.si_code = TRAP_BRKPT;
- if (!user_mode(regs))
- goto clear_dr7;
+ info.si_addr = user_mode(regs) ? (void __user *)regs->rip : NULL;
+ force_sig_info(SIGTRAP, &info, tsk);
- info.si_addr = (void __user *)regs->rip;
- force_sig_info(SIGTRAP, &info, tsk);
clear_dr7:
set_debugreg(0UL, 7);
return;
@@ -698,7 +742,7 @@ clear_TF_reenable:
regs->eflags &= ~TF_MASK;
}
-static int kernel_math_error(struct pt_regs *regs, char *str)
+static int kernel_math_error(struct pt_regs *regs, const char *str, int trapnr)
{
const struct exception_table_entry *fixup;
fixup = search_exception_tables(regs->rip);
@@ -706,8 +750,9 @@ static int kernel_math_error(struct pt_regs *regs, char *str)
regs->rip = fixup->fixup;
return 1;
}
- notify_die(DIE_GPF, str, regs, 0, 16, SIGFPE);
+ notify_die(DIE_GPF, str, regs, 0, trapnr, SIGFPE);
/* Illegal floating point operation in the kernel */
+ current->thread.trap_no = trapnr;
die(str, regs, 0);
return 0;
}
@@ -726,7 +771,7 @@ asmlinkage void do_coprocessor_error(struct pt_regs *regs)
conditional_sti(regs);
if (!user_mode(regs) &&
- kernel_math_error(regs, "kernel x87 math error"))
+ kernel_math_error(regs, "kernel x87 math error", 16))
return;
/*
@@ -795,7 +840,7 @@ asmlinkage void do_simd_coprocessor_error(struct pt_regs *regs)
conditional_sti(regs);
if (!user_mode(regs) &&
- kernel_math_error(regs, "kernel simd math error"))
+ kernel_math_error(regs, "kernel simd math error", 19))
return;
/*
@@ -867,12 +912,7 @@ asmlinkage void math_state_restore(void)
if (!used_math())
init_fpu(me);
restore_fpu_checking(&me->thread.i387.fxsave);
- me->thread_info->status |= TS_USEDFPU;
-}
-
-void do_call_debug(struct pt_regs *regs)
-{
- notify_die(DIE_CALL, "debug call", regs, 0, 255, SIGINT);
+ task_thread_info(me)->status |= TS_USEDFPU;
}
void __init trap_init(void)
@@ -880,9 +920,9 @@ void __init trap_init(void)
set_intr_gate(0,&divide_error);
set_intr_gate_ist(1,&debug,DEBUG_STACK);
set_intr_gate_ist(2,&nmi,NMI_STACK);
- set_system_gate(3,&int3);
- set_system_gate(4,&overflow); /* int4-5 can be called from all */
- set_system_gate(5,&bounds);
+ set_system_gate_ist(3,&int3,DEBUG_STACK); /* int3 can be called from all */
+ set_system_gate(4,&overflow); /* int4 can be called from all */
+ set_intr_gate(5,&bounds);
set_intr_gate(6,&invalid_op);
set_intr_gate(7,&device_not_available);
set_intr_gate_ist(8,&double_fault, DOUBLEFAULT_STACK);
@@ -904,8 +944,6 @@ void __init trap_init(void)
set_system_gate(IA32_SYSCALL_VECTOR, ia32_syscall);
#endif
- set_intr_gate(KDB_VECTOR, call_debug);
-
/*
* Should be a barrier for any external CPU state.
*/
diff --git a/arch/x86_64/kernel/vmlinux.lds.S b/arch/x86_64/kernel/vmlinux.lds.S
index 58b19215b4b..b0eed1faf74 100644
--- a/arch/x86_64/kernel/vmlinux.lds.S
+++ b/arch/x86_64/kernel/vmlinux.lds.S
@@ -8,6 +8,8 @@
#include <asm/page.h>
#include <linux/config.h>
+#undef i386 /* in case the preprocessor is a 32bit one */
+
OUTPUT_FORMAT("elf64-x86-64", "elf64-x86-64", "elf64-x86-64")
OUTPUT_ARCH(i386:x86-64)
ENTRY(phys_startup_64)
@@ -189,7 +191,7 @@ SECTIONS
/* Sections to be discarded */
/DISCARD/ : {
*(.exitcall.exit)
-#ifndef CONFIG_DEBUG_INFO
+#ifndef CONFIG_UNWIND_INFO
*(.eh_frame)
#endif
}
diff --git a/arch/x86_64/kernel/vsmp.c b/arch/x86_64/kernel/vsmp.c
new file mode 100644
index 00000000000..92f70c74965
--- /dev/null
+++ b/arch/x86_64/kernel/vsmp.c
@@ -0,0 +1,45 @@
+/*
+ * vSMPowered(tm) systems specific initialization
+ * Copyright (C) 2005 ScaleMP Inc.
+ *
+ * Use of this code is subject to the terms and conditions of the
+ * GNU general public license version 2. See "COPYING" or
+ * http://www.gnu.org/licenses/gpl.html
+ *
+ * Ravikiran Thirumalai <kiran@scalemp.com>,
+ * Shai Fultheim <shai@scalemp.com>
+ */
+
+#include <linux/init.h>
+#include <linux/pci_ids.h>
+#include <linux/pci_regs.h>
+#include <asm/pci-direct.h>
+
+static int __init vsmp_init(void)
+{
+ void *address;
+ unsigned int cap, ctl;
+
+ /* Check if we are running on a ScaleMP vSMP box */
+ if ((read_pci_config_16(0, 0x1f, 0, PCI_VENDOR_ID) != PCI_VENDOR_ID_SCALEMP) ||
+ (read_pci_config_16(0, 0x1f, 0, PCI_DEVICE_ID) != PCI_DEVICE_ID_SCALEMP_VSMP_CTL))
+ return 0;
+
+ /* set vSMP magic bits to indicate vSMP capable kernel */
+ address = ioremap(read_pci_config(0, 0x1f, 0, PCI_BASE_ADDRESS_0), 8);
+ cap = readl(address);
+ ctl = readl(address + 4);
+ printk("vSMP CTL: capabilities:0x%08x control:0x%08x\n", cap, ctl);
+ if (cap & ctl & (1 << 4)) {
+ /* Turn on vSMP IRQ fastpath handling (see system.h) */
+ ctl &= ~(1 << 4);
+ writel(ctl, address + 4);
+ ctl = readl(address + 4);
+ printk("vSMP CTL: control set to:0x%08x\n", ctl);
+ }
+
+ iounmap(address);
+ return 0;
+}
+
+core_initcall(vsmp_init);
diff --git a/arch/x86_64/kernel/vsyscall.c b/arch/x86_64/kernel/vsyscall.c
index 70a0bd16085..9468fb20b0b 100644
--- a/arch/x86_64/kernel/vsyscall.c
+++ b/arch/x86_64/kernel/vsyscall.c
@@ -35,14 +35,13 @@
#include <asm/io.h>
#define __vsyscall(nr) __attribute__ ((unused,__section__(".vsyscall_" #nr)))
-#define force_inline __attribute__((always_inline)) inline
int __sysctl_vsyscall __section_sysctl_vsyscall = 1;
seqlock_t __xtime_lock __section_xtime_lock = SEQLOCK_UNLOCKED;
#include <asm/unistd.h>
-static force_inline void timeval_normalize(struct timeval * tv)
+static __always_inline void timeval_normalize(struct timeval * tv)
{
time_t __sec;
@@ -53,7 +52,7 @@ static force_inline void timeval_normalize(struct timeval * tv)
}
}
-static force_inline void do_vgettimeofday(struct timeval * tv)
+static __always_inline void do_vgettimeofday(struct timeval * tv)
{
long sequence, t;
unsigned long sec, usec;
@@ -66,8 +65,7 @@ static force_inline void do_vgettimeofday(struct timeval * tv)
(__jiffies - __wall_jiffies) * (1000000 / HZ);
if (__vxtime.mode != VXTIME_HPET) {
- sync_core();
- rdtscll(t);
+ t = get_cycles_sync();
if (t < __vxtime.last_tsc)
t = __vxtime.last_tsc;
usec += ((t - __vxtime.last_tsc) *
@@ -84,12 +82,12 @@ static force_inline void do_vgettimeofday(struct timeval * tv)
}
/* RED-PEN may want to readd seq locking, but then the variable should be write-once. */
-static force_inline void do_get_tz(struct timezone * tz)
+static __always_inline void do_get_tz(struct timezone * tz)
{
*tz = __sys_tz;
}
-static force_inline int gettimeofday(struct timeval *tv, struct timezone *tz)
+static __always_inline int gettimeofday(struct timeval *tv, struct timezone *tz)
{
int ret;
asm volatile("vsysc2: syscall"
@@ -98,7 +96,7 @@ static force_inline int gettimeofday(struct timeval *tv, struct timezone *tz)
return ret;
}
-static force_inline long time_syscall(long *t)
+static __always_inline long time_syscall(long *t)
{
long secs;
asm volatile("vsysc1: syscall"
diff --git a/arch/x86_64/kernel/x8664_ksyms.c b/arch/x86_64/kernel/x8664_ksyms.c
index 4a54221e10b..b614d54d2ae 100644
--- a/arch/x86_64/kernel/x8664_ksyms.c
+++ b/arch/x86_64/kernel/x8664_ksyms.c
@@ -13,7 +13,6 @@
#include <linux/string.h>
#include <linux/syscalls.h>
#include <linux/tty.h>
-#include <linux/ioctl32.h>
#include <asm/semaphore.h>
#include <asm/processor.h>
@@ -45,22 +44,15 @@ extern struct drive_info_struct drive_info;
EXPORT_SYMBOL(drive_info);
#endif
-extern unsigned long get_cmos_time(void);
-
/* platform dependent support */
EXPORT_SYMBOL(boot_cpu_data);
//EXPORT_SYMBOL(dump_fpu);
EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(ioremap_nocache);
EXPORT_SYMBOL(iounmap);
-EXPORT_SYMBOL(enable_irq);
-EXPORT_SYMBOL(disable_irq);
-EXPORT_SYMBOL(disable_irq_nosync);
-EXPORT_SYMBOL(probe_irq_mask);
EXPORT_SYMBOL(kernel_thread);
EXPORT_SYMBOL(pm_idle);
EXPORT_SYMBOL(pm_power_off);
-EXPORT_SYMBOL(get_cmos_time);
EXPORT_SYMBOL(__down_failed);
EXPORT_SYMBOL(__down_failed_interruptible);
@@ -84,9 +76,6 @@ EXPORT_SYMBOL(__put_user_2);
EXPORT_SYMBOL(__put_user_4);
EXPORT_SYMBOL(__put_user_8);
-EXPORT_SYMBOL(strpbrk);
-EXPORT_SYMBOL(strstr);
-
EXPORT_SYMBOL(strncpy_from_user);
EXPORT_SYMBOL(__strncpy_from_user);
EXPORT_SYMBOL(clear_user);
@@ -98,25 +87,18 @@ EXPORT_SYMBOL(copy_in_user);
EXPORT_SYMBOL(strnlen_user);
#ifdef CONFIG_PCI
-EXPORT_SYMBOL(pci_alloc_consistent);
-EXPORT_SYMBOL(pci_free_consistent);
-#endif
-
-#ifdef CONFIG_PCI
EXPORT_SYMBOL(pci_mem_start);
#endif
EXPORT_SYMBOL(copy_page);
EXPORT_SYMBOL(clear_page);
-EXPORT_SYMBOL(cpu_pda);
+EXPORT_SYMBOL(_cpu_pda);
#ifdef CONFIG_SMP
EXPORT_SYMBOL(cpu_data);
-EXPORT_SYMBOL(cpu_online_map);
EXPORT_SYMBOL(__write_lock_failed);
EXPORT_SYMBOL(__read_lock_failed);
-EXPORT_SYMBOL(synchronize_irq);
EXPORT_SYMBOL(smp_call_function);
EXPORT_SYMBOL(cpu_callout_map);
#endif
@@ -137,30 +119,17 @@ EXPORT_SYMBOL_GPL(unset_nmi_callback);
#undef memcpy
#undef memset
#undef memmove
-#undef memchr
#undef strlen
-#undef strncmp
-#undef strncpy
-#undef strchr
extern void * memset(void *,int,__kernel_size_t);
extern size_t strlen(const char *);
extern void * memmove(void * dest,const void *src,size_t count);
-extern void *memchr(const void *s, int c, size_t n);
extern void * memcpy(void *,const void *,__kernel_size_t);
extern void * __memcpy(void *,const void *,__kernel_size_t);
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(strlen);
EXPORT_SYMBOL(memmove);
-EXPORT_SYMBOL(strncmp);
-EXPORT_SYMBOL(strncpy);
-EXPORT_SYMBOL(strchr);
-EXPORT_SYMBOL(strncat);
-EXPORT_SYMBOL(memchr);
-EXPORT_SYMBOL(strrchr);
-EXPORT_SYMBOL(strnlen);
-EXPORT_SYMBOL(memscan);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(__memcpy);
diff --git a/arch/x86_64/lib/copy_user.S b/arch/x86_64/lib/copy_user.S
index dfa358b05c8..79422b6559c 100644
--- a/arch/x86_64/lib/copy_user.S
+++ b/arch/x86_64/lib/copy_user.S
@@ -4,12 +4,9 @@
* Functions to copy from and to user space.
*/
-#define FIX_ALIGNMENT 1
-
#include <asm/current.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
- #include <asm/cpufeature.h>
/* Standard copy_to_user with segment limit checking */
.globl copy_to_user
@@ -21,23 +18,7 @@ copy_to_user:
jc bad_to_user
cmpq threadinfo_addr_limit(%rax),%rcx
jae bad_to_user
-2:
- .byte 0xe9 /* 32bit jump */
- .long .Lcug-1f
-1:
-
- .section .altinstr_replacement,"ax"
-3: .byte 0xe9 /* replacement jmp with 8 bit immediate */
- .long copy_user_generic_c-1b /* offset */
- .previous
- .section .altinstructions,"a"
- .align 8
- .quad 2b
- .quad 3b
- .byte X86_FEATURE_K8_C
- .byte 5
- .byte 5
- .previous
+ jmp copy_user_generic
/* Standard copy_from_user with segment limit checking */
.globl copy_from_user
@@ -72,223 +53,44 @@ bad_to_user:
* rsi source
* rdx count
*
+ * Only 4GB of copy is supported. This shouldn't be a problem
+ * because the kernel normally only writes from/to page sized chunks
+ * even if user space passed a longer buffer.
+ * And more would be dangerous because both Intel and AMD have
+ * errata with rep movsq > 4GB. If someone feels the need to fix
+ * this, please keep those errata in mind.
+ *
* Output:
* eax uncopied bytes or 0 if successful.
*/
- .globl copy_user_generic
- .p2align 4
-copy_user_generic:
- .byte 0x66,0x66,0x90 /* 5 byte nop for replacement jump */
- .byte 0x66,0x90
-1:
- .section .altinstr_replacement,"ax"
-2: .byte 0xe9 /* near jump with 32bit immediate */
- .long copy_user_generic_c-1b /* offset */
- .previous
- .section .altinstructions,"a"
- .align 8
- .quad copy_user_generic
- .quad 2b
- .byte X86_FEATURE_K8_C
- .byte 5
- .byte 5
- .previous
-.Lcug:
- pushq %rbx
- xorl %eax,%eax /*zero for the exception handler */
-
-#ifdef FIX_ALIGNMENT
- /* check for bad alignment of destination */
- movl %edi,%ecx
- andl $7,%ecx
- jnz .Lbad_alignment
-.Lafter_bad_alignment:
-#endif
- movq %rdx,%rcx
-
- movl $64,%ebx
- shrq $6,%rdx
- decq %rdx
- js .Lhandle_tail
-
- .p2align 4
-.Lloop:
-.Ls1: movq (%rsi),%r11
-.Ls2: movq 1*8(%rsi),%r8
-.Ls3: movq 2*8(%rsi),%r9
-.Ls4: movq 3*8(%rsi),%r10
-.Ld1: movq %r11,(%rdi)
-.Ld2: movq %r8,1*8(%rdi)
-.Ld3: movq %r9,2*8(%rdi)
-.Ld4: movq %r10,3*8(%rdi)
-
-.Ls5: movq 4*8(%rsi),%r11
-.Ls6: movq 5*8(%rsi),%r8
-.Ls7: movq 6*8(%rsi),%r9
-.Ls8: movq 7*8(%rsi),%r10
-.Ld5: movq %r11,4*8(%rdi)
-.Ld6: movq %r8,5*8(%rdi)
-.Ld7: movq %r9,6*8(%rdi)
-.Ld8: movq %r10,7*8(%rdi)
-
- decq %rdx
-
- leaq 64(%rsi),%rsi
- leaq 64(%rdi),%rdi
-
- jns .Lloop
-
- .p2align 4
-.Lhandle_tail:
- movl %ecx,%edx
- andl $63,%ecx
- shrl $3,%ecx
- jz .Lhandle_7
- movl $8,%ebx
- .p2align 4
-.Lloop_8:
-.Ls9: movq (%rsi),%r8
-.Ld9: movq %r8,(%rdi)
- decl %ecx
- leaq 8(%rdi),%rdi
- leaq 8(%rsi),%rsi
- jnz .Lloop_8
-
-.Lhandle_7:
- movl %edx,%ecx
- andl $7,%ecx
- jz .Lende
- .p2align 4
-.Lloop_1:
-.Ls10: movb (%rsi),%bl
-.Ld10: movb %bl,(%rdi)
- incq %rdi
- incq %rsi
- decl %ecx
- jnz .Lloop_1
-
-.Lende:
- popq %rbx
- ret
-
-#ifdef FIX_ALIGNMENT
- /* align destination */
- .p2align 4
-.Lbad_alignment:
- movl $8,%r9d
- subl %ecx,%r9d
- movl %r9d,%ecx
- cmpq %r9,%rdx
- jz .Lhandle_7
- js .Lhandle_7
-.Lalign_1:
-.Ls11: movb (%rsi),%bl
-.Ld11: movb %bl,(%rdi)
- incq %rsi
- incq %rdi
- decl %ecx
- jnz .Lalign_1
- subq %r9,%rdx
- jmp .Lafter_bad_alignment
-#endif
-
- /* table sorted by exception address */
- .section __ex_table,"a"
- .align 8
- .quad .Ls1,.Ls1e
- .quad .Ls2,.Ls2e
- .quad .Ls3,.Ls3e
- .quad .Ls4,.Ls4e
- .quad .Ld1,.Ls1e
- .quad .Ld2,.Ls2e
- .quad .Ld3,.Ls3e
- .quad .Ld4,.Ls4e
- .quad .Ls5,.Ls5e
- .quad .Ls6,.Ls6e
- .quad .Ls7,.Ls7e
- .quad .Ls8,.Ls8e
- .quad .Ld5,.Ls5e
- .quad .Ld6,.Ls6e
- .quad .Ld7,.Ls7e
- .quad .Ld8,.Ls8e
- .quad .Ls9,.Le_quad
- .quad .Ld9,.Le_quad
- .quad .Ls10,.Le_byte
- .quad .Ld10,.Le_byte
-#ifdef FIX_ALIGNMENT
- .quad .Ls11,.Lzero_rest
- .quad .Ld11,.Lzero_rest
-#endif
- .quad .Le5,.Le_zero
- .previous
-
- /* compute 64-offset for main loop. 8 bytes accuracy with error on the
- pessimistic side. this is gross. it would be better to fix the
- interface. */
- /* eax: zero, ebx: 64 */
-.Ls1e: addl $8,%eax
-.Ls2e: addl $8,%eax
-.Ls3e: addl $8,%eax
-.Ls4e: addl $8,%eax
-.Ls5e: addl $8,%eax
-.Ls6e: addl $8,%eax
-.Ls7e: addl $8,%eax
-.Ls8e: addl $8,%eax
- addq %rbx,%rdi /* +64 */
- subq %rax,%rdi /* correct destination with computed offset */
-
- shlq $6,%rdx /* loop counter * 64 (stride length) */
- addq %rax,%rdx /* add offset to loopcnt */
- andl $63,%ecx /* remaining bytes */
- addq %rcx,%rdx /* add them */
- jmp .Lzero_rest
-
- /* exception on quad word loop in tail handling */
- /* ecx: loopcnt/8, %edx: length, rdi: correct */
-.Le_quad:
- shll $3,%ecx
- andl $7,%edx
- addl %ecx,%edx
- /* edx: bytes to zero, rdi: dest, eax:zero */
-.Lzero_rest:
- movq %rdx,%rcx
-.Le_byte:
- xorl %eax,%eax
-.Le5: rep
- stosb
- /* when there is another exception while zeroing the rest just return */
-.Le_zero:
- movq %rdx,%rax
- jmp .Lende
-
- /* C stepping K8 run faster using the string copy instructions.
- This is also a lot simpler. Use them when possible.
- Patch in jmps to this code instead of copying it fully
- to avoid unwanted aliasing in the exception tables. */
-
- /* rdi destination
- * rsi source
- * rdx count
- *
- * Output:
- * eax uncopied bytes or 0 if successfull.
- */
-copy_user_generic_c:
+ .globl copy_user_generic
+copy_user_generic:
movl %edx,%ecx
shrl $3,%ecx
andl $7,%edx
+ jz 5f
1: rep
movsq
movl %edx,%ecx
+ xor %eax,%eax
2: rep
movsb
-4: movl %ecx,%eax
ret
+ /* align here? */
+5: xorl %eax,%eax
+6: rep movsq
+ ret
+
+ .section .fixup,"ax"
3: lea (%rdx,%rcx,8),%rax
ret
-
+4: movl %ecx,%eax
+ ret
+ .previous
+
.section __ex_table,"a"
.quad 1b,3b
.quad 2b,4b
+ .quad 6b,4b
.previous
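The replacement copy_user_generic is essentially a word copy plus a byte tail: count/8 quadwords via rep movsq, then count%8 bytes via rep movsb, with the fixup section turning a fault into a count of uncopied bytes. A C model of the split (faults ignored for clarity):

#include <stddef.h>
#include <string.h>

/* C model of the simplified copy_user_generic(): move the bulk
 * as 8-byte quadwords (rep movsq), then the 0..7 byte tail
 * (rep movsb). The real routine adds exception fixups that
 * report the number of uncopied bytes; this sketch ignores
 * faults entirely. */
static void copy_quads_then_tail(void *dst, const void *src, size_t len)
{
        size_t quads = len / 8, tail = len % 8;

        memcpy(dst, src, quads * 8);                    /* rep movsq */
        memcpy((char *)dst + quads * 8,
               (const char *)src + quads * 8, tail);    /* rep movsb */
}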
diff --git a/arch/x86_64/lib/delay.c b/arch/x86_64/lib/delay.c
index 841bd738a18..03c460cbdd1 100644
--- a/arch/x86_64/lib/delay.c
+++ b/arch/x86_64/lib/delay.c
@@ -39,7 +39,7 @@ void __delay(unsigned long loops)
inline void __const_udelay(unsigned long xloops)
{
- __delay(((xloops * cpu_data[raw_smp_processor_id()].loops_per_jiffy) >> 32) * HZ);
+ __delay((xloops * HZ * cpu_data[raw_smp_processor_id()].loops_per_jiffy) >> 32);
}
void __udelay(unsigned long usecs)
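The delay fix is about truncation order: xloops is a 32.32 fixed-point fraction, and shifting right by 32 before multiplying by HZ discards fractional loops that HZ then amplifies. A small demonstration with invented numbers:

#include <stdio.h>

/* Why the operand order matters: shifting before the multiply by
 * HZ truncates the fractional part first. The constants below are
 * made up for illustration (0x10c7 is ~2^32/10^6, i.e. 1us). */
int main(void)
{
        unsigned long xloops = 0x10c7;     /* ~1us in 32.32 fixed point */
        unsigned long lpj = 1500000;       /* loops_per_jiffy, example */
        unsigned long hz = 1000;

        unsigned long before = ((xloops * lpj) >> 32) * hz;   /* 1000 */
        unsigned long after = (xloops * hz * lpj) >> 32;      /* 1500 */

        printf("shift-first: %lu loops, multiply-first: %lu loops\n",
               before, after);
        return 0;
}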
diff --git a/arch/x86_64/lib/usercopy.c b/arch/x86_64/lib/usercopy.c
index db8abba1ad8..9bc2c295818 100644
--- a/arch/x86_64/lib/usercopy.c
+++ b/arch/x86_64/lib/usercopy.c
@@ -109,14 +109,11 @@ unsigned long clear_user(void __user *to, unsigned long n)
* Return 0 on exception, a value greater than N if too long
*/
-long strnlen_user(const char __user *s, long n)
+long __strnlen_user(const char __user *s, long n)
{
long res = 0;
char c;
- if (!access_ok(VERIFY_READ, s, n))
- return 0;
-
while (1) {
if (res>n)
return n+1;
@@ -129,6 +126,13 @@ long strnlen_user(const char __user *s, long n)
}
}
+long strnlen_user(const char __user *s, long n)
+{
+ if (!access_ok(VERIFY_READ, s, n))
+ return 0;
+ return __strnlen_user(s, n);
+}
+
long strlen_user(const char __user *s)
{
long res = 0;
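The hunk above splits the scan from the access check: __strnlen_user() assumes the caller validated the range (handle_BUG() now uses it directly), while strnlen_user() keeps the access_ok() guard. The pattern in miniature, with range_ok() as a stand-in for access_ok():

#include <stdio.h>

/* Stand-in for access_ok(); the real check validates a user
 * pointer range. */
static int range_ok(const char *s, long n)
{
        return s != NULL && n >= 0;
}

/* Unchecked variant: caller guarantees the range is safe.
 * Mirrors the kernel's return convention: length including the
 * NUL, or n+1 if the string is too long. */
static long scan_len_unchecked(const char *s, long n)
{
        long res = 0;

        for (;;) {
                if (res > n)
                        return n + 1;
                if (s[res] == '\0')
                        return res + 1;
                res++;
        }
}

/* Checked entry point: validate, then delegate. */
static long scan_len(const char *s, long n)
{
        if (!range_ok(s, n))
                return 0;
        return scan_len_unchecked(s, n);
}

int main(void)
{
        printf("%ld %ld\n", scan_len("abc", 16), scan_len(NULL, 16));
        return 0;
}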
diff --git a/arch/x86_64/mm/Makefile b/arch/x86_64/mm/Makefile
index 1d232a87f11..d25ac86fe27 100644
--- a/arch/x86_64/mm/Makefile
+++ b/arch/x86_64/mm/Makefile
@@ -2,7 +2,7 @@
# Makefile for the linux x86_64-specific parts of the memory manager.
#
-obj-y := init.o fault.o ioremap.o extable.o pageattr.o
+obj-y := init.o fault.o ioremap.o extable.o pageattr.o mmap.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
obj-$(CONFIG_NUMA) += numa.o
obj-$(CONFIG_K8_NUMA) += k8topology.o
diff --git a/arch/x86_64/mm/fault.c b/arch/x86_64/mm/fault.c
index 3a63707a698..26eac194064 100644
--- a/arch/x86_64/mm/fault.c
+++ b/arch/x86_64/mm/fault.c
@@ -35,6 +35,13 @@
#include <asm-generic/sections.h>
#include <asm/kdebug.h>
+/* Page fault error code bits */
+#define PF_PROT (1<<0) /* or no page found */
+#define PF_WRITE (1<<1)
+#define PF_USER (1<<2)
+#define PF_RSVD (1<<3)
+#define PF_INSTR (1<<4)
+
void bust_spinlocks(int yes)
{
int loglevel_save = console_loglevel;
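With the bits named, the fault paths below read like prose: kernel-mode, not-present faults in the vmalloc window get the lazy PML4 sync, user faults raise SIGSEGV, and a set PF_RSVD bit means a corrupted page table. A small decoder over the same constants:

#include <stdio.h>

/* The PF_* bits from the hunk above. */
#define PF_PROT  (1 << 0)   /* set: protection fault, clear: no page */
#define PF_WRITE (1 << 1)
#define PF_USER  (1 << 2)
#define PF_RSVD  (1 << 3)
#define PF_INSTR (1 << 4)

static void decode_pf(unsigned long ec)
{
        printf("%s %s from %s mode%s%s\n",
               ec & PF_PROT ? "protection fault" : "not-present page",
               ec & PF_WRITE ? "on write" : "on read",
               ec & PF_USER ? "user" : "kernel",
               ec & PF_RSVD ? ", reserved bit set" : "",
               ec & PF_INSTR ? ", instruction fetch" : "");
}

int main(void)
{
        decode_pf(PF_USER | PF_WRITE);  /* user write to unmapped page */
        return 0;
}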
@@ -68,7 +75,7 @@ static noinline int is_prefetch(struct pt_regs *regs, unsigned long addr,
unsigned char *max_instr;
/* If it was a exec fault ignore */
- if (error_code & (1<<4))
+ if (error_code & PF_INSTR)
return 0;
instr = (unsigned char *)convert_rip_to_linear(current, regs);
@@ -222,17 +229,22 @@ static noinline void pgtable_bad(unsigned long address, struct pt_regs *regs,
unsigned long error_code)
{
unsigned long flags = oops_begin();
+ struct task_struct *tsk;
printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
current->comm, address);
dump_pagetable(address);
+ tsk = current;
+ tsk->thread.cr2 = address;
+ tsk->thread.trap_no = 14;
+ tsk->thread.error_code = error_code;
__die("Bad pagetable", regs, error_code);
oops_end(flags);
do_exit(SIGKILL);
}
/*
- * Handle a fault on the vmalloc or module mapping area
+ * Handle a fault on the vmalloc area
*
* This assumes no large pages in there.
*/
@@ -278,7 +290,6 @@ static int vmalloc_fault(unsigned long address)
that. */
if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref))
BUG();
- __flush_tlb_all();
return 0;
}
@@ -289,12 +300,6 @@ int exception_trace = 1;
* This routine handles page faults. It determines the address,
* and the problem, and then passes it off to one of the appropriate
* routines.
- *
- * error_code:
- * bit 0 == 0 means no page found, 1 means protection fault
- * bit 1 == 0 means read, 1 means write
- * bit 2 == 0 means kernel, 1 means user-mode
- * bit 3 == 1 means fault was an instruction fetch
*/
asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
unsigned long error_code)
@@ -337,12 +342,16 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
*
* This verifies that the fault happens in kernel space
* (error_code & 4) == 0, and that the fault was not a
- * protection error (error_code & 1) == 0.
+ * protection error (error_code & 9) == 0.
*/
if (unlikely(address >= TASK_SIZE64)) {
- if (!(error_code & 5) &&
- ((address >= VMALLOC_START && address < VMALLOC_END) ||
- (address >= MODULES_VADDR && address < MODULES_END))) {
+ /*
+ * Don't check for the module range here: its PML4
+ * is always initialized because it's shared with the main
+ * kernel text. Only vmalloc may need PML4 syncups.
+ */
+ if (!(error_code & (PF_RSVD|PF_USER|PF_PROT)) &&
+ ((address >= VMALLOC_START && address < VMALLOC_END))) {
if (vmalloc_fault(address) < 0)
goto bad_area_nosemaphore;
return;
@@ -354,7 +363,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
goto bad_area_nosemaphore;
}
- if (unlikely(error_code & (1 << 3)))
+ if (unlikely(error_code & PF_RSVD))
pgtable_bad(address, regs, error_code);
/*
@@ -381,7 +390,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
* thus avoiding the deadlock.
*/
if (!down_read_trylock(&mm->mmap_sem)) {
- if ((error_code & 4) == 0 &&
+ if ((error_code & PF_USER) == 0 &&
!search_exception_tables(regs->rip))
goto bad_area_nosemaphore;
down_read(&mm->mmap_sem);
@@ -408,17 +417,17 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
good_area:
info.si_code = SEGV_ACCERR;
write = 0;
- switch (error_code & 3) {
+ switch (error_code & (PF_PROT|PF_WRITE)) {
default: /* 3: write, present */
/* fall through */
- case 2: /* write, not present */
+ case PF_WRITE: /* write, not present */
if (!(vma->vm_flags & VM_WRITE))
goto bad_area;
write++;
break;
- case 1: /* read, present */
+ case PF_PROT: /* read, present */
goto bad_area;
- case 0: /* read, not present */
+ case 0: /* read, not present */
if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
goto bad_area;
}
@@ -453,7 +462,7 @@ bad_area:
bad_area_nosemaphore:
/* User mode accesses just cause a SIGSEGV */
- if (error_code & 4) {
+ if (error_code & PF_USER) {
if (is_prefetch(regs, address, error_code))
return;
@@ -521,6 +530,9 @@ no_context:
printk_address(regs->rip);
printk("\n");
dump_pagetable(address);
+ tsk->thread.cr2 = address;
+ tsk->thread.trap_no = 14;
+ tsk->thread.error_code = error_code;
__die("Oops", regs, error_code);
/* Executive summary in case the body of the oops scrolled away */
printk(KERN_EMERG "CR2: %016lx\n", address);
@@ -546,7 +558,7 @@ do_sigbus:
up_read(&mm->mmap_sem);
/* Kernel mode? Handle exceptions or die */
- if (!(error_code & 4))
+ if (!(error_code & PF_USER))
goto no_context;
tsk->thread.cr2 = address;
diff --git a/arch/x86_64/mm/init.c b/arch/x86_64/mm/init.c
index 286f6a624c3..7af1742aa95 100644
--- a/arch/x86_64/mm/init.c
+++ b/arch/x86_64/mm/init.c
@@ -23,6 +23,9 @@
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+#include <linux/memory_hotplug.h>
#include <asm/processor.h>
#include <asm/system.h>
@@ -38,11 +41,16 @@
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/sections.h>
+#include <asm/dma-mapping.h>
+#include <asm/swiotlb.h>
#ifndef Dprintk
#define Dprintk(x...)
#endif
+struct dma_mapping_ops* dma_ops;
+EXPORT_SYMBOL(dma_ops);
+
static unsigned long dma_reserve __initdata;
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
@@ -174,13 +182,19 @@ static struct temp_map {
{}
};
-static __init void *alloc_low_page(int *index, unsigned long *phys)
+static __meminit void *alloc_low_page(int *index, unsigned long *phys)
{
struct temp_map *ti;
int i;
unsigned long pfn = table_end++, paddr;
void *adr;
+ if (after_bootmem) {
+ adr = (void *)get_zeroed_page(GFP_ATOMIC);
+ *phys = __pa(adr);
+ return adr;
+ }
+
if (pfn >= end_pfn)
panic("alloc_low_page: ran out of memory");
for (i = 0; temp_mappings[i].allocated; i++) {
@@ -193,55 +207,86 @@ static __init void *alloc_low_page(int *index, unsigned long *phys)
ti->allocated = 1;
__flush_tlb();
adr = ti->address + ((pfn << PAGE_SHIFT) & ~PMD_MASK);
+ memset(adr, 0, PAGE_SIZE);
*index = i;
*phys = pfn * PAGE_SIZE;
return adr;
}
-static __init void unmap_low_page(int i)
+static __meminit void unmap_low_page(int i)
{
- struct temp_map *ti = &temp_mappings[i];
+ struct temp_map *ti;
+
+ if (after_bootmem)
+ return;
+
+ ti = &temp_mappings[i];
set_pmd(ti->pmd, __pmd(0));
ti->allocated = 0;
}
-static void __init phys_pud_init(pud_t *pud, unsigned long address, unsigned long end)
+static void __meminit
+phys_pmd_init(pmd_t *pmd, unsigned long address, unsigned long end)
+{
+ int i;
+
+ for (i = 0; i < PTRS_PER_PMD; pmd++, i++, address += PMD_SIZE) {
+ unsigned long entry;
+
+ if (address > end) {
+ for (; i < PTRS_PER_PMD; i++, pmd++)
+ set_pmd(pmd, __pmd(0));
+ break;
+ }
+ entry = _PAGE_NX|_PAGE_PSE|_KERNPG_TABLE|_PAGE_GLOBAL|address;
+ entry &= __supported_pte_mask;
+ set_pmd(pmd, __pmd(entry));
+ }
+}
+
+static void __meminit
+phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end)
+{
+ pmd_t *pmd = pmd_offset(pud, (unsigned long)__va(address));
+
+ if (pmd_none(*pmd)) {
+ spin_lock(&init_mm.page_table_lock);
+ phys_pmd_init(pmd, address, end);
+ spin_unlock(&init_mm.page_table_lock);
+ __flush_tlb_all();
+ }
+}
+
+static void __meminit phys_pud_init(pud_t *pud, unsigned long address, unsigned long end)
{
- long i, j;
+ long i = pud_index(address);
- i = pud_index(address);
pud = pud + i;
+
+ if (after_bootmem && pud_val(*pud)) {
+ phys_pmd_update(pud, address, end);
+ return;
+ }
+
for (; i < PTRS_PER_PUD; pud++, i++) {
int map;
unsigned long paddr, pmd_phys;
pmd_t *pmd;
- paddr = address + i*PUD_SIZE;
- if (paddr >= end) {
- for (; i < PTRS_PER_PUD; i++, pud++)
- set_pud(pud, __pud(0));
+ paddr = (address & PGDIR_MASK) + i*PUD_SIZE;
+ if (paddr >= end)
break;
- }
- if (!e820_mapped(paddr, paddr+PUD_SIZE, 0)) {
+ if (!after_bootmem && !e820_mapped(paddr, paddr+PUD_SIZE, 0)) {
set_pud(pud, __pud(0));
continue;
}
pmd = alloc_low_page(&map, &pmd_phys);
+ spin_lock(&init_mm.page_table_lock);
set_pud(pud, __pud(pmd_phys | _KERNPG_TABLE));
- for (j = 0; j < PTRS_PER_PMD; pmd++, j++, paddr += PMD_SIZE) {
- unsigned long pe;
-
- if (paddr >= end) {
- for (; j < PTRS_PER_PMD; j++, pmd++)
- set_pmd(pmd, __pmd(0));
- break;
- }
- pe = _PAGE_NX|_PAGE_PSE | _KERNPG_TABLE | _PAGE_GLOBAL | paddr;
- pe &= __supported_pte_mask;
- set_pmd(pmd, __pmd(pe));
- }
+ phys_pmd_init(pmd, paddr, end);
+ spin_unlock(&init_mm.page_table_lock);
unmap_low_page(map);
}
__flush_tlb();
@@ -249,25 +294,32 @@ static void __init phys_pud_init(pud_t *pud, unsigned long address, unsigned lon
static void __init find_early_table_space(unsigned long end)
{
- unsigned long puds, pmds, tables;
+ unsigned long puds, pmds, tables, start;
puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
tables = round_up(puds * sizeof(pud_t), PAGE_SIZE) +
round_up(pmds * sizeof(pmd_t), PAGE_SIZE);
- table_start = find_e820_area(0x8000, __pa_symbol(&_text), tables);
+ /* RED-PEN putting page tables only on node 0 could
+ cause a hotspot and fill up ZONE_DMA. The page tables
+ need roughly 0.5KB per GB. */
+ start = 0x8000;
+ table_start = find_e820_area(start, end, tables);
if (table_start == -1UL)
panic("Cannot find space for the kernel page tables");
table_start >>= PAGE_SHIFT;
table_end = table_start;
+
+ early_printk("kernel direct mapping tables up to %lx @ %lx-%lx\n",
+ end, table_start << PAGE_SHIFT, table_end << PAGE_SHIFT);
}
/* Setup the direct mapping of the physical memory at PAGE_OFFSET.
This runs before bootmem is initialized and gets pages directly from the
physical memory. To access them they are temporarily mapped. */
-void __init init_memory_mapping(unsigned long start, unsigned long end)
+void __meminit init_memory_mapping(unsigned long start, unsigned long end)
{
unsigned long next;
@@ -279,7 +331,8 @@ void __init init_memory_mapping(unsigned long start, unsigned long end)
* mapped. Unfortunately this is done currently before the nodes are
* discovered.
*/
- find_early_table_space(end);
+ if (!after_bootmem)
+ find_early_table_space(end);
start = (unsigned long)__va(start);
end = (unsigned long)__va(end);
@@ -287,20 +340,26 @@ void __init init_memory_mapping(unsigned long start, unsigned long end)
for (; start < end; start = next) {
int map;
unsigned long pud_phys;
- pud_t *pud = alloc_low_page(&map, &pud_phys);
+ pgd_t *pgd = pgd_offset_k(start);
+ pud_t *pud;
+
+ if (after_bootmem)
+ pud = pud_offset_k(pgd, __PAGE_OFFSET);
+ else
+ pud = alloc_low_page(&map, &pud_phys);
+
next = start + PGDIR_SIZE;
if (next > end)
next = end;
phys_pud_init(pud, __pa(start), __pa(next));
- set_pgd(pgd_offset_k(start), mk_kernel_pgd(pud_phys));
+ if (!after_bootmem)
+ set_pgd(pgd_offset_k(start), mk_kernel_pgd(pud_phys));
unmap_low_page(map);
}
- asm volatile("movq %%cr4,%0" : "=r" (mmu_cr4_features));
+ if (!after_bootmem)
+ asm volatile("movq %%cr4,%0" : "=r" (mmu_cr4_features));
__flush_tlb_all();
- early_printk("kernel direct mapping tables upto %lx @ %lx-%lx\n", end,
- table_start<<PAGE_SHIFT,
- table_end<<PAGE_SHIFT);
}
void __cpuinit zap_low_mappings(int cpu)
@@ -348,7 +407,7 @@ size_zones(unsigned long *z, unsigned long *h,
}
/* Compute holes */
- w = 0;
+ w = start_pfn;
for (i = 0; i < MAX_NR_ZONES; i++) {
unsigned long s = w;
w += z[i];
@@ -375,6 +434,9 @@ size_zones(unsigned long *z, unsigned long *h,
void __init paging_init(void)
{
unsigned long zones[MAX_NR_ZONES], holes[MAX_NR_ZONES];
+
+ memory_present(0, 0, end_pfn);
+ sparse_init();
size_zones(zones, holes, 0, end_pfn);
free_area_init_node(0, NODE_DATA(0), zones,
__pa(PAGE_OFFSET) >> PAGE_SHIFT, holes);
@@ -415,6 +477,50 @@ void __init clear_kernel_mapping(unsigned long address, unsigned long size)
__flush_tlb_all();
}
+/*
+ * Memory hotplug specific functions
+ * These are only for non-NUMA machines right now.
+ */
+#ifdef CONFIG_MEMORY_HOTPLUG
+
+void online_page(struct page *page)
+{
+ ClearPageReserved(page);
+ set_page_count(page, 1);
+ __free_page(page);
+ totalram_pages++;
+ num_physpages++;
+}
+
+int add_memory(u64 start, u64 size)
+{
+ struct pglist_data *pgdat = NODE_DATA(0);
+ struct zone *zone = pgdat->node_zones + MAX_NR_ZONES-2;
+ unsigned long start_pfn = start >> PAGE_SHIFT;
+ unsigned long nr_pages = size >> PAGE_SHIFT;
+ int ret;
+
+ ret = __add_pages(zone, start_pfn, nr_pages);
+ if (ret)
+ goto error;
+
+ init_memory_mapping(start, (start + size -1));
+
+ return ret;
+error:
+ printk("%s: Problem encountered in __add_pages!\n", __func__);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(add_memory);
+
+int remove_memory(u64 start, u64 size)
+{
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(remove_memory);
+
+#endif
+
static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel, kcore_modules,
kcore_vsyscall;
@@ -423,12 +529,9 @@ void __init mem_init(void)
long codesize, reservedpages, datasize, initsize;
#ifdef CONFIG_SWIOTLB
- if (!iommu_aperture &&
- (end_pfn >= 0xffffffff>>PAGE_SHIFT || force_iommu))
- swiotlb = 1;
- if (swiotlb)
- swiotlb_init();
+ pci_swiotlb_init();
#endif
+ no_iommu_init();
/* How many end-of-memory variables you have, grandma! */
max_low_pfn = end_pfn;
@@ -498,10 +601,33 @@ void free_initmem(void)
printk ("Freeing unused kernel memory: %luk freed\n", (__init_end - __init_begin) >> 10);
}
+#ifdef CONFIG_DEBUG_RODATA
+
+extern char __start_rodata, __end_rodata;
+void mark_rodata_ro(void)
+{
+ unsigned long addr = (unsigned long)&__start_rodata;
+
+ for (; addr < (unsigned long)&__end_rodata; addr += PAGE_SIZE)
+ change_page_attr_addr(addr, 1, PAGE_KERNEL_RO);
+
+ printk ("Write protecting the kernel read-only data: %luk\n",
+ (&__end_rodata - &__start_rodata) >> 10);
+
+ /*
+ * change_page_attr_addr() requires a global_flush_tlb() call after it.
+ * We do this after the printk so that if something went wrong in the
+ * change, the printk gets out at least to give a better debug hint
+ * of who is the culprit.
+ */
+ global_flush_tlb();
+}
+#endif
+
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
- if (start < (unsigned long)&_end)
+ if (start >= end)
return;
printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
for (; start < end; start += PAGE_SIZE) {
diff --git a/arch/x86_64/mm/ioremap.c b/arch/x86_64/mm/ioremap.c
index ecf7acb5db9..ae207064201 100644
--- a/arch/x86_64/mm/ioremap.c
+++ b/arch/x86_64/mm/ioremap.c
@@ -247,9 +247,15 @@ void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
return __ioremap(phys_addr, size, _PAGE_PCD);
}
+/**
+ * iounmap - Free a IO remapping
+ * @addr: virtual address from ioremap_*
+ *
+ * Caller must ensure there is only one unmapping for the same pointer.
+ */
void iounmap(volatile void __iomem *addr)
{
- struct vm_struct *p;
+ struct vm_struct *p, *o;
if (addr <= high_memory)
return;
@@ -257,12 +263,31 @@ void iounmap(volatile void __iomem *addr)
addr < phys_to_virt(ISA_END_ADDRESS))
return;
- write_lock(&vmlist_lock);
- p = __remove_vm_area((void *)((unsigned long)addr & PAGE_MASK));
- if (!p)
+ addr = (volatile void __iomem *)(PAGE_MASK & (unsigned long __force)addr);
+ /* Use the vm area unlocked, assuming the caller
+ ensures there isn't another iounmap for the same address
+ in parallel. Reuse of the virtual address is prevented by
+ leaving it in the global lists until we're done with it.
+ cpa takes care of the direct mappings. */
+ read_lock(&vmlist_lock);
+ for (p = vmlist; p; p = p->next) {
+ if (p->addr == addr)
+ break;
+ }
+ read_unlock(&vmlist_lock);
+
+ if (!p) {
printk("iounmap: bad address %p\n", addr);
- else if (p->flags >> 20)
+ dump_stack();
+ return;
+ }
+
+ /* Reset the direct mapping. Can block */
+ if (p->flags >> 20)
ioremap_change_attr(p->phys_addr, p->size, 0);
- write_unlock(&vmlist_lock);
+
+ /* Finally remove it */
+ o = remove_vm_area((void *)addr);
+ BUG_ON(p != o || o == NULL);
kfree(p);
}
diff --git a/arch/x86_64/mm/mmap.c b/arch/x86_64/mm/mmap.c
new file mode 100644
index 00000000000..43e9b99bdf2
--- /dev/null
+++ b/arch/x86_64/mm/mmap.c
@@ -0,0 +1,30 @@
+/* Copyright 2005 Andi Kleen, SuSE Labs.
+ * Licensed under GPL, v.2
+ */
+#include <linux/config.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/random.h>
+#include <asm/ia32.h>
+
+/* Notebook: move the mmap code from sys_x86_64.c over here. */
+
+void arch_pick_mmap_layout(struct mm_struct *mm)
+{
+#ifdef CONFIG_IA32_EMULATION
+ if (current_thread_info()->flags & _TIF_IA32)
+ return ia32_pick_mmap_layout(mm);
+#endif
+ mm->mmap_base = TASK_UNMAPPED_BASE;
+ if (current->flags & PF_RANDOMIZE) {
+ /* Add 28 bits of randomness, which is about 40 bits of address
+    space because the mmap base has to be page aligned;
+    that is ~1/128 of the total user VM
+    (the total user address space is 47 bits). */
+ unsigned rnd = get_random_int() & 0xfffffff;
+ mm->mmap_base += ((unsigned long)rnd) << PAGE_SHIFT;
+ }
+ mm->get_unmapped_area = arch_get_unmapped_area;
+ mm->unmap_area = arch_unmap_area;
+}
+
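The arithmetic in arch_pick_mmap_layout() is worth spelling out: 28 random bits shifted left by PAGE_SHIFT (12) span 40 bits of the 47-bit user address space, i.e. about 1/128 of it. A user-space model (the base value is illustrative):

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT 12
#define UNMAPPED_BASE 0x2aaaaaaaa000UL   /* illustrative base */

/* Model of the randomization: 28 random bits, page aligned,
 * cover 28 + 12 = 40 bits of the 47-bit user address space. */
int main(void)
{
        unsigned rnd = rand() & 0xfffffff;              /* 28 bits */
        unsigned long base = UNMAPPED_BASE +
                             ((unsigned long)rnd << PAGE_SHIFT);

        printf("mmap base %#lx (offset %#lx)\n",
               base, (unsigned long)rnd << PAGE_SHIFT);
        return 0;
}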
diff --git a/arch/x86_64/mm/numa.c b/arch/x86_64/mm/numa.c
index a828a01739c..6ef9f9a7623 100644
--- a/arch/x86_64/mm/numa.c
+++ b/arch/x86_64/mm/numa.c
@@ -46,13 +46,15 @@ int numa_off __initdata;
* 0 if memnodemap[] too small (or shift too small)
* -1 if node overlap or lost ram (shift too big)
*/
-static int __init populate_memnodemap(
- const struct node *nodes, int numnodes, int shift)
+static int __init
+populate_memnodemap(const struct node *nodes, int numnodes, int shift)
{
int i;
int res = -1;
unsigned long addr, end;
+ if (shift >= 64)
+ return -1;
memset(memnodemap, 0xff, sizeof(memnodemap));
for (i = 0; i < numnodes; i++) {
addr = nodes[i].start;
@@ -65,7 +67,7 @@ static int __init populate_memnodemap(
if (memnodemap[addr >> shift] != 0xff)
return -1;
memnodemap[addr >> shift] = i;
- addr += (1 << shift);
+ addr += (1UL << shift);
} while (addr < end);
res = 1;
}
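compute_hash_shift() (next hunk) keeps growing the shift while populate_memnodemap() still succeeds, so each (addr >> shift) bucket resolves to exactly one node at the coarsest possible granularity. A toy model of that search:

#include <stdio.h>
#include <string.h>

struct node { unsigned long start, end; };

static signed char map[64];    /* toy-sized memnodemap */

/* Model of populate_memnodemap(): every (addr >> shift) bucket
 * must belong to exactly one node; a collision returns -1. */
static int populate(const struct node *nodes, int n, int shift)
{
        memset(map, -1, sizeof(map));
        for (int i = 0; i < n; i++) {
                unsigned long a;
                for (a = nodes[i].start; a < nodes[i].end;
                     a += 1UL << shift) {
                        if (map[a >> shift] >= 0 && map[a >> shift] != i)
                                return -1;
                        map[a >> shift] = i;
                }
        }
        return 1;
}

int main(void)
{
        struct node nodes[] = { { 0x000, 0x100 }, { 0x100, 0x200 } };
        int shift = 4;

        /* Same search loop as compute_hash_shift(). */
        while (populate(nodes, 2, shift + 1) >= 0)
                shift++;
        printf("hash shift: %d\n", shift);    /* 8 */
        return 0;
}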
@@ -79,7 +81,7 @@ int __init compute_hash_shift(struct node *nodes, int numnodes)
while (populate_memnodemap(nodes, numnodes, shift + 1) >= 0)
shift++;
- printk(KERN_DEBUG "Using %d for the hash shift.\n",
+ printk(KERN_DEBUG "NUMA: Using %d for the hash shift.\n",
shift);
if (populate_memnodemap(nodes, numnodes, shift) != 1) {
@@ -108,7 +110,7 @@ void __init setup_node_bootmem(int nodeid, unsigned long start, unsigned long en
start = round_up(start, ZONE_ALIGN);
- printk("Bootmem setup node %d %016lx-%016lx\n", nodeid, start, end);
+ printk(KERN_INFO "Bootmem setup node %d %016lx-%016lx\n", nodeid, start, end);
start_pfn = start >> PAGE_SHIFT;
end_pfn = end >> PAGE_SHIFT;
@@ -154,7 +156,7 @@ void __init setup_node_zones(int nodeid)
start_pfn = node_start_pfn(nodeid);
end_pfn = node_end_pfn(nodeid);
- Dprintk(KERN_INFO "setting up node %d %lx-%lx\n",
+ Dprintk(KERN_INFO "Setting up node %d %lx-%lx\n",
nodeid, start_pfn, end_pfn);
size_zones(zones, holes, start_pfn, end_pfn);
@@ -198,7 +200,7 @@ static int numa_emulation(unsigned long start_pfn, unsigned long end_pfn)
while ((x << 1) < sz)
x <<= 1;
if (x < sz/2)
- printk("Numa emulation unbalanced. Complain to maintainer\n");
+ printk(KERN_ERR "Numa emulation unbalanced. Complain to maintainer\n");
sz = x;
}
@@ -270,7 +272,7 @@ __cpuinit void numa_add_cpu(int cpu)
void __cpuinit numa_set_node(int cpu, int node)
{
- cpu_pda[cpu].nodenumber = node;
+ cpu_pda(cpu)->nodenumber = node;
cpu_to_node[cpu] = node;
}
@@ -328,8 +330,69 @@ __init int numa_setup(char *opt)
return 1;
}
+/*
+ * Setup early cpu_to_node.
+ *
+ * Populate cpu_to_node[] only if the x86_cpu_to_apicid[]
+ * and apicid_to_node[] tables have valid entries for a CPU.
+ * This means we skip cpu_to_node[] initialisation for NUMA
+ * emulation and the fake-node case (when running a kernel compiled
+ * for NUMA on a non-NUMA box), which is OK as cpu_to_node[]
+ * is already initialized in a round robin manner at numa_init_array
+ * prior to this call, and that initialization is good enough
+ * for the fake NUMA cases.
+ */
+void __init init_cpu_to_node(void)
+{
+ int i;
+ for (i = 0; i < NR_CPUS; i++) {
+ u8 apicid = x86_cpu_to_apicid[i];
+ if (apicid == BAD_APICID)
+ continue;
+ if (apicid_to_node[apicid] == NUMA_NO_NODE)
+ continue;
+ cpu_to_node[i] = apicid_to_node[apicid];
+ }
+}
+
EXPORT_SYMBOL(cpu_to_node);
EXPORT_SYMBOL(node_to_cpumask);
EXPORT_SYMBOL(memnode_shift);
EXPORT_SYMBOL(memnodemap);
EXPORT_SYMBOL(node_data);
+
+#ifdef CONFIG_DISCONTIGMEM
+/*
+ * Functions to convert PFNs from/to per node page addresses.
+ * These are out of line because they are quite big.
+ * They could be all tuned by pre caching more state.
+ * Should do that.
+ */
+
+/* Requires pfn_valid(pfn) to be true */
+struct page *pfn_to_page(unsigned long pfn)
+{
+ int nid = phys_to_nid(((unsigned long)(pfn)) << PAGE_SHIFT);
+ return (pfn - node_start_pfn(nid)) + NODE_DATA(nid)->node_mem_map;
+}
+EXPORT_SYMBOL(pfn_to_page);
+
+unsigned long page_to_pfn(struct page *page)
+{
+ return (long)(((page) - page_zone(page)->zone_mem_map) +
+ page_zone(page)->zone_start_pfn);
+}
+EXPORT_SYMBOL(page_to_pfn);
+
+int pfn_valid(unsigned long pfn)
+{
+ unsigned nid;
+ if (pfn >= num_physpages)
+ return 0;
+ nid = pfn_to_nid(pfn);
+ if (nid == 0xff)
+ return 0;
+ return pfn >= node_start_pfn(nid) && (pfn) < node_end_pfn(nid);
+}
+EXPORT_SYMBOL(pfn_valid);
+#endif
diff --git a/arch/x86_64/mm/pageattr.c b/arch/x86_64/mm/pageattr.c
index b90e8fe9eeb..35f1f1aab06 100644
--- a/arch/x86_64/mm/pageattr.c
+++ b/arch/x86_64/mm/pageattr.c
@@ -128,6 +128,7 @@ __change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
pte_t *kpte;
struct page *kpte_page;
unsigned kpte_flags;
+ pgprot_t ref_prot2;
kpte = lookup_address(address);
if (!kpte) return 0;
kpte_page = virt_to_page(((unsigned long)kpte) & PAGE_MASK);
@@ -140,10 +141,14 @@ __change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
* split_large_page will take the reference for this change_page_attr
* on the split page.
*/
- struct page *split = split_large_page(address, prot, ref_prot);
+
+ struct page *split;
+ ref_prot2 = __pgprot(pgprot_val(pte_pgprot(*lookup_address(address))) & ~(1<<_PAGE_BIT_PSE));
+
+ split = split_large_page(address, prot, ref_prot2);
if (!split)
return -ENOMEM;
- set_pte(kpte,mk_pte(split, ref_prot));
+ set_pte(kpte,mk_pte(split, ref_prot2));
kpte_page = split;
}
get_page(kpte_page);
diff --git a/arch/x86_64/mm/srat.c b/arch/x86_64/mm/srat.c
index 33340bd1e32..8b7f85608fa 100644
--- a/arch/x86_64/mm/srat.c
+++ b/arch/x86_64/mm/srat.c
@@ -17,21 +17,23 @@
#include <linux/topology.h>
#include <asm/proto.h>
#include <asm/numa.h>
+#include <asm/e820.h>
static struct acpi_table_slit *acpi_slit;
static nodemask_t nodes_parsed __initdata;
static nodemask_t nodes_found __initdata;
static struct node nodes[MAX_NUMNODES] __initdata;
-static __u8 pxm2node[256] = { [0 ... 255] = 0xff };
+static u8 pxm2node[256] = { [0 ... 255] = 0xff };
static int node_to_pxm(int n);
int pxm_to_node(int pxm)
{
if ((unsigned)pxm >= 256)
- return 0;
- return pxm2node[pxm];
+ return -1;
+ /* Extend 0xff to (int)-1 */
+ return (signed char)pxm2node[pxm];
}
static __init int setup_node(int pxm)
@@ -91,9 +93,36 @@ static __init inline int srat_disabled(void)
return numa_off || acpi_numa < 0;
}
+/*
+ * A lot of BIOSes fill in 10 (= no distance) everywhere. This messes
+ * up the NUMA heuristics, which want the local node to have a smaller
+ * distance than the others.
+ * Do some quick checks here and only use the SLIT if it passes.
+ */
+static __init int slit_valid(struct acpi_table_slit *slit)
+{
+ int i, j;
+ int d = slit->localities;
+ for (i = 0; i < d; i++) {
+ for (j = 0; j < d; j++) {
+ u8 val = slit->entry[d*i + j];
+ if (i == j) {
+ if (val != 10)
+ return 0;
+ } else if (val <= 10)
+ return 0;
+ }
+ }
+ return 1;
+}
+
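slit_valid() encodes the ACPI convention that a node's distance to itself is exactly 10 and any other distance must be strictly larger; a table of all 10s, the common BIOS botch the comment mentions, fails both ways. The check in miniature:

#include <stdio.h>

/* Model of slit_valid(): diagonal entries must be 10 (local),
 * off-diagonal entries must be greater than 10. */
static int slit_ok(const unsigned char *entry, int d)
{
        for (int i = 0; i < d; i++)
                for (int j = 0; j < d; j++) {
                        unsigned char val = entry[d * i + j];
                        if (i == j ? val != 10 : val <= 10)
                                return 0;
                }
        return 1;
}

int main(void)
{
        unsigned char good[] = { 10, 20, 20, 10 };
        unsigned char bad[]  = { 10, 10, 10, 10 };  /* all "no distance" */

        printf("%d %d\n", slit_ok(good, 2), slit_ok(bad, 2));  /* 1 0 */
        return 0;
}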
/* Callback for SLIT parsing */
void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
{
+ if (!slit_valid(slit)) {
+ printk(KERN_INFO "ACPI: SLIT table looks invalid. Not used.\n");
+ return;
+ }
acpi_slit = slit;
}
@@ -168,12 +197,39 @@ acpi_numa_memory_affinity_init(struct acpi_table_memory_affinity *ma)
nd->start, nd->end);
}
+/* Sanity check to catch more bad SRATs (they are amazingly common).
+ Make sure the PXMs cover all memory. */
+static int nodes_cover_memory(void)
+{
+ int i;
+ unsigned long pxmram, e820ram;
+
+ pxmram = 0;
+ for_each_node_mask(i, nodes_parsed) {
+ unsigned long s = nodes[i].start >> PAGE_SHIFT;
+ unsigned long e = nodes[i].end >> PAGE_SHIFT;
+ pxmram += e - s;
+ pxmram -= e820_hole_size(s, e);
+ }
+
+ e820ram = end_pfn - e820_hole_size(0, end_pfn);
+ if (pxmram < e820ram) {
+ printk(KERN_ERR
+ "SRAT: PXMs only cover %luMB of your %luMB e820 RAM. Not used.\n",
+ (pxmram << PAGE_SHIFT) >> 20,
+ (e820ram << PAGE_SHIFT) >> 20);
+ return 0;
+ }
+ return 1;
+}
+
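The coverage check is a straight page count: the PXM ranges, with their e820 holes subtracted, must account for at least as much RAM as the e820 map itself, otherwise the SRAT is dropped. A numeric model with invented figures:

#include <stdio.h>

/* Model of nodes_cover_memory(): compare PXM-covered pages
 * (minus holes) against the e820 total. All values invented. */
int main(void)
{
        unsigned long node_pages[] = { 0x40000, 0x40000 };
        unsigned long hole_pages[] = { 0x1000, 0 };
        unsigned long pxmram = 0, e820ram = 0x80000 - 0x800;
        int i;

        for (i = 0; i < 2; i++)
                pxmram += node_pages[i] - hole_pages[i];

        printf("SRAT %s (PXM %lu pages, e820 %lu pages)\n",
               pxmram < e820ram ? "rejected" : "accepted",
               pxmram, e820ram);
        return 0;
}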
void __init acpi_numa_arch_fixup(void) {}
/* Use the information discovered above to actually set up the nodes. */
int __init acpi_scan_nodes(unsigned long start, unsigned long end)
{
int i;
+
if (acpi_numa <= 0)
return -1;
@@ -184,6 +240,11 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end)
node_clear(i, nodes_parsed);
}
+ if (!nodes_cover_memory()) {
+ bad_srat();
+ return -1;
+ }
+
memnode_shift = compute_hash_shift(nodes, nodes_weight(nodes_parsed));
if (memnode_shift < 0) {
printk(KERN_ERR
diff --git a/arch/x86_64/pci/Makefile b/arch/x86_64/pci/Makefile
index bb34e5ef916..a8f75a2a0f6 100644
--- a/arch/x86_64/pci/Makefile
+++ b/arch/x86_64/pci/Makefile
@@ -11,7 +11,7 @@ obj-y += fixup.o
obj-$(CONFIG_ACPI) += acpi.o
obj-y += legacy.o irq.o common.o
# mmconfig has a 64bit special
-obj-$(CONFIG_PCI_MMCONFIG) += mmconfig.o
+obj-$(CONFIG_PCI_MMCONFIG) += mmconfig.o direct.o
obj-$(CONFIG_NUMA) += k8-bus.o
diff --git a/arch/x86_64/pci/Makefile-BUS b/arch/x86_64/pci/Makefile-BUS
deleted file mode 100644
index 4f0c05abd40..00000000000
--- a/arch/x86_64/pci/Makefile-BUS
+++ /dev/null
@@ -1,22 +0,0 @@
-#
-# Makefile for X86_64 specific PCI routines
-#
-# Reuse the i386 PCI subsystem
-#
-CFLAGS += -I arch/i386/pci
-
-obj-y := i386.o
-obj-$(CONFIG_PCI_DIRECT)+= direct.o
-obj-y += fixup.o
-obj-$(CONFIG_ACPI) += acpi.o
-obj-y += legacy.o irq.o common.o
-# mmconfig has a 64bit special
-obj-$(CONFIG_PCI_MMCONFIG) += mmconfig.o
-
-direct-y += ../../i386/pci/direct.o
-acpi-y += ../../i386/pci/acpi.o
-legacy-y += ../../i386/pci/legacy.o
-irq-y += ../../i386/pci/irq.o
-common-y += ../../i386/pci/common.o
-fixup-y += ../../i386/pci/fixup.o
-i386-y += ../../i386/pci/i386.o
diff --git a/arch/x86_64/pci/mmconfig.c b/arch/x86_64/pci/mmconfig.c
index a0838c4a94e..f16c0d57c55 100644
--- a/arch/x86_64/pci/mmconfig.c
+++ b/arch/x86_64/pci/mmconfig.c
@@ -8,18 +8,21 @@
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/acpi.h>
+#include <linux/bitmap.h>
#include "pci.h"
#define MMCONFIG_APER_SIZE (256*1024*1024)
+static DECLARE_BITMAP(fallback_slots, 32);
+
/* Static virtual mapping of the MMCONFIG aperture */
struct mmcfg_virt {
struct acpi_table_mcfg_config *cfg;
- char *virt;
+ char __iomem *virt;
};
static struct mmcfg_virt *pci_mmcfg_virt;
-static char *get_virt(unsigned int seg, int bus)
+static char __iomem *get_virt(unsigned int seg, unsigned bus)
{
int cfg_num = -1;
struct acpi_table_mcfg_config *cfg;
@@ -27,10 +30,9 @@ static char *get_virt(unsigned int seg, int bus)
while (1) {
++cfg_num;
if (cfg_num >= pci_mmcfg_config_num) {
- /* something bad is going on, no cfg table is found. */
- /* so we fall back to the old way we used to do this */
- /* and just rely on the first entry to be correct. */
- return pci_mmcfg_virt[0].virt;
+ /* Not found - fall back to type 1. This happens
+ e.g. on the internal devices of a K8 northbridge. */
+ return NULL;
}
cfg = pci_mmcfg_virt[cfg_num].cfg;
if (cfg->pci_segment_group_number != seg)
@@ -41,20 +43,30 @@ static char *get_virt(unsigned int seg, int bus)
}
}
-static inline char *pci_dev_base(unsigned int seg, unsigned int bus, unsigned int devfn)
+static char __iomem *pci_dev_base(unsigned int seg, unsigned int bus, unsigned int devfn)
{
-
- return get_virt(seg, bus) + ((bus << 20) | (devfn << 12));
+ char __iomem *addr;
+ if (seg == 0 && bus == 0 && test_bit(PCI_SLOT(devfn), &fallback_slots))
+ return NULL;
+ addr = get_virt(seg, bus);
+ if (!addr)
+ return NULL;
+ return addr + ((bus << 20) | (devfn << 12));
}
static int pci_mmcfg_read(unsigned int seg, unsigned int bus,
unsigned int devfn, int reg, int len, u32 *value)
{
- char *addr = pci_dev_base(seg, bus, devfn);
+ char __iomem *addr;
+ /* Why do we have this when nobody checks it. How about a BUG()!? -AK */
if (unlikely(!value || (bus > 255) || (devfn > 255) || (reg > 4095)))
return -EINVAL;
+ addr = pci_dev_base(seg, bus, devfn);
+ if (!addr)
+ return pci_conf1_read(seg,bus,devfn,reg,len,value);
+
switch (len) {
case 1:
*value = readb(addr + reg);
@@ -73,11 +85,16 @@ static int pci_mmcfg_read(unsigned int seg, unsigned int bus,
static int pci_mmcfg_write(unsigned int seg, unsigned int bus,
unsigned int devfn, int reg, int len, u32 value)
{
- char *addr = pci_dev_base(seg, bus, devfn);
+ char __iomem *addr;
+ /* Why do we have this when nobody checks it. How about a BUG()!? -AK */
if (unlikely((bus > 255) || (devfn > 255) || (reg > 4095)))
return -EINVAL;
+ addr = pci_dev_base(seg, bus, devfn);
+ if (!addr)
+ return pci_conf1_write(seg,bus,devfn,reg,len,value);
+
switch (len) {
case 1:
writeb(value, addr + reg);
@@ -98,6 +115,30 @@ static struct pci_raw_ops pci_mmcfg = {
.write = pci_mmcfg_write,
};
+/* K8 systems have some devices (typically in the builtin northbridge)
+   that are only accessible using type 1 accesses.
+   Normally this can be expressed in the MCFG by not listing them
+   and assigning suitable _SEGs, but this isn't implemented in some BIOSes.
+   Instead try to discover all devices on bus 0 that are unreachable
+   using MMCONFIG and fall back to type 1 for them.
+   We only do this for bus 0/seg 0. */
+static __init void unreachable_devices(void)
+{
+ int i;
+ for (i = 0; i < 32; i++) {
+ u32 val1;
+ char __iomem *addr;
+
+ pci_conf1_read(0, 0, PCI_DEVFN(i,0), 0, 4, &val1);
+ if (val1 == 0xffffffff)
+ continue;
+ addr = pci_dev_base(0, 0, PCI_DEVFN(i, 0));
+ if (addr == NULL || readl(addr) != val1) {
+ set_bit(i, &fallback_slots);
+ }
+ }
+}
+
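unreachable_devices() probes each of the 32 slots on bus 0 once at init: if a device answers a type 1 read but the MMCONFIG view disagrees (or has no mapping at all), the slot lands in fallback_slots and pci_dev_base() routes it to type 1 from then on. The idea as a user-space sketch with stand-in read functions:

#include <stdio.h>

static unsigned long fallback_slots;

/* Stand-ins for pci_conf1_read() and the MMCONFIG read; here
 * slot 3 is a device the MMCONFIG aperture cannot see. */
static unsigned conf1_read(int slot)
{
        return slot == 3 ? 0x12348086 : 0xffffffff;
}
static unsigned mmcfg_read(int slot)
{
        return 0xffffffff;
}

int main(void)
{
        int i;

        for (i = 0; i < 32; i++) {
                unsigned v = conf1_read(i);
                if (v == 0xffffffff)
                        continue;       /* empty slot */
                if (mmcfg_read(i) != v)
                        fallback_slots |= 1UL << i;
        }
        printf("fallback slots: %#lx\n", fallback_slots);  /* 0x8 */
        return 0;
}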
static int __init pci_mmcfg_init(void)
{
int i;
@@ -128,6 +169,8 @@ static int __init pci_mmcfg_init(void)
printk(KERN_INFO "PCI: Using MMCONFIG at %x\n", pci_mmcfg_config[i].base_address);
}
+ unreachable_devices();
+
raw_pci_ops = &pci_mmcfg;
pci_probe = (pci_probe & ~PCI_PROBE_MASK) | PCI_PROBE_MMCONF;