Diffstat (limited to 'arch/ia64')
-rw-r--r--  arch/ia64/Kconfig                     |  21
-rw-r--r--  arch/ia64/configs/sn2_defconfig       |  78
-rw-r--r--  arch/ia64/hp/common/sba_iommu.c       |   2
-rw-r--r--  arch/ia64/ia32/binfmt_elf32.c         |   3
-rw-r--r--  arch/ia64/ia32/ia32_entry.S           |   2
-rw-r--r--  arch/ia64/kernel/Makefile             |   3
-rw-r--r--  arch/ia64/kernel/acpi-ext.c           | 143
-rw-r--r--  arch/ia64/kernel/entry.S              |   5
-rw-r--r--  arch/ia64/kernel/iosapic.c            |   4
-rw-r--r--  arch/ia64/kernel/irq.c                |   1
-rw-r--r--  arch/ia64/kernel/kprobes.c            |  10
-rw-r--r--  arch/ia64/kernel/mca.c                |  43
-rw-r--r--  arch/ia64/kernel/mca_asm.S            |  10
-rw-r--r--  arch/ia64/kernel/mca_drv.c            |  54
-rw-r--r--  arch/ia64/kernel/module.c             |   2
-rw-r--r--  arch/ia64/kernel/palinfo.c            |   2
-rw-r--r--  arch/ia64/kernel/ptrace.c             |   4
-rw-r--r--  arch/ia64/kernel/salinfo.c            |   2
-rw-r--r--  arch/ia64/kernel/topology.c           |   9
-rw-r--r--  arch/ia64/lib/memcpy_mck.S            |   9
-rw-r--r--  arch/ia64/mm/discontig.c              |  66
-rw-r--r--  arch/ia64/mm/fault.c                  |   3
-rw-r--r--  arch/ia64/pci/pci.c                   |   3
-rw-r--r--  arch/ia64/sn/kernel/sn2/sn_hwperf.c   |   8
-rw-r--r--  arch/ia64/sn/kernel/xpc_channel.c     |   2
-rw-r--r--  arch/ia64/sn/kernel/xpc_partition.c   |   8
-rw-r--r--  arch/ia64/sn/pci/tioce_provider.c     |   6
27 files changed, 307 insertions, 196 deletions
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 0046020e73b..f0252eda12a 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -261,15 +261,6 @@ config NR_CPUS
than 64 will cause the use of a CPU mask array, causing a small
performance hit.
-config IA64_NR_NODES
- int "Maximum number of NODEs (256-1024)" if (IA64_SGI_SN2 || IA64_GENERIC)
- range 256 1024
- depends on IA64_SGI_SN2 || IA64_GENERIC
- default "256"
- help
- This option specifies the maximum number of nodes in your SSI system.
- If in doubt, use the default.
-
config HOTPLUG_CPU
bool "Support for hot-pluggable CPUs (EXPERIMENTAL)"
depends on SMP && EXPERIMENTAL
@@ -353,6 +344,16 @@ config NUMA
Access). This option is for configuring high-end multiprocessor
server systems. If in doubt, say N.
+config NODES_SHIFT
+ int "Max num nodes shift(3-10)"
+ range 3 10
+ default "8"
+ depends on NEED_MULTIPLE_NODES
+ help
+ This option specifies the maximum number of nodes in your SSI system.
+ MAX_NUMNODES will be 2^(This value).
+ If in doubt, use the default.
+
# VIRTUAL_MEM_MAP and FLAT_NODE_MEM_MAP are functionally equivalent.
# VIRTUAL_MEM_MAP has been retained for historical reasons.
config VIRTUAL_MEM_MAP
@@ -413,6 +414,8 @@ config IA64_PALINFO
config SGI_SN
def_bool y if (IA64_SGI_SN2 || IA64_GENERIC)
+source "drivers/sn/Kconfig"
+
source "drivers/firmware/Kconfig"
source "fs/Kconfig.binfmt"
diff --git a/arch/ia64/configs/sn2_defconfig b/arch/ia64/configs/sn2_defconfig
index a718034d68d..9ea35398e10 100644
--- a/arch/ia64/configs/sn2_defconfig
+++ b/arch/ia64/configs/sn2_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.16-rc5
-# Mon Feb 27 16:06:38 2006
+# Linux kernel version: 2.6.17-rc3
+# Thu Apr 27 11:48:23 2006
#
#
@@ -24,6 +24,7 @@ CONFIG_SYSCTL=y
# CONFIG_AUDIT is not set
# CONFIG_IKCONFIG is not set
CONFIG_CPUSETS=y
+CONFIG_RELAY=y
CONFIG_INITRAMFS_SOURCE=""
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
# CONFIG_EMBEDDED is not set
@@ -38,10 +39,6 @@ CONFIG_BASE_FULL=y
CONFIG_FUTEX=y
CONFIG_EPOLL=y
CONFIG_SHMEM=y
-CONFIG_CC_ALIGN_FUNCTIONS=0
-CONFIG_CC_ALIGN_LABELS=0
-CONFIG_CC_ALIGN_LOOPS=0
-CONFIG_CC_ALIGN_JUMPS=0
CONFIG_SLAB=y
# CONFIG_TINY_SHMEM is not set
CONFIG_BASE_SMALL=0
@@ -53,7 +50,6 @@ CONFIG_BASE_SMALL=0
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_MODULE_FORCE_UNLOAD is not set
-CONFIG_OBSOLETE_MODPARM=y
# CONFIG_MODVERSIONS is not set
# CONFIG_MODULE_SRCVERSION_ALL is not set
CONFIG_KMOD=y
@@ -62,6 +58,7 @@ CONFIG_STOP_MACHINE=y
#
# Block layer
#
+# CONFIG_BLK_DEV_IO_TRACE is not set
#
# IO Schedulers
@@ -84,8 +81,10 @@ CONFIG_64BIT=y
CONFIG_MMU=y
CONFIG_SWIOTLB=y
CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+CONFIG_GENERIC_FIND_NEXT_BIT=y
CONFIG_GENERIC_CALIBRATE_DELAY=y
CONFIG_TIME_INTERPOLATION=y
+CONFIG_DMI=y
CONFIG_EFI=y
CONFIG_GENERIC_IOMAP=y
CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
@@ -116,7 +115,6 @@ CONFIG_IA64_SGI_SN_XP=m
CONFIG_FORCE_MAX_ZONEORDER=17
CONFIG_SMP=y
CONFIG_NR_CPUS=1024
-CONFIG_IA64_NR_NODES=256
# CONFIG_HOTPLUG_CPU is not set
CONFIG_SCHED_SMT=y
CONFIG_PREEMPT=y
@@ -136,6 +134,7 @@ CONFIG_ARCH_FLATMEM_ENABLE=y
CONFIG_ARCH_SPARSEMEM_ENABLE=y
CONFIG_ARCH_DISCONTIGMEM_DEFAULT=y
CONFIG_NUMA=y
+CONFIG_NODES_SHIFT=10
CONFIG_VIRTUAL_MEM_MAP=y
CONFIG_HOLES_IN_ZONE=y
CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID=y
@@ -187,7 +186,6 @@ CONFIG_ACPI_SYSTEM=y
CONFIG_PCI=y
CONFIG_PCI_DOMAINS=y
# CONFIG_PCI_MSI is not set
-CONFIG_PCI_LEGACY_PROC=y
# CONFIG_PCI_DEBUG is not set
#
@@ -231,6 +229,7 @@ CONFIG_SYN_COOKIES=y
# CONFIG_INET_AH is not set
# CONFIG_INET_ESP is not set
# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
# CONFIG_INET_TUNNEL is not set
CONFIG_INET_DIAG=m
CONFIG_INET_TCP_DIAG=m
@@ -238,9 +237,11 @@ CONFIG_INET_TCP_DIAG=m
CONFIG_TCP_CONG_BIC=y
CONFIG_IPV6=m
# CONFIG_IPV6_PRIVACY is not set
+# CONFIG_IPV6_ROUTER_PREF is not set
# CONFIG_INET6_AH is not set
# CONFIG_INET6_ESP is not set
# CONFIG_INET6_IPCOMP is not set
+# CONFIG_INET6_XFRM_TUNNEL is not set
# CONFIG_INET6_TUNNEL is not set
# CONFIG_IPV6_TUNNEL is not set
# CONFIG_NETFILTER is not set
@@ -468,9 +469,14 @@ CONFIG_SCSI_SATA_VITESSE=y
# CONFIG_SCSI_INIA100 is not set
# CONFIG_SCSI_SYM53C8XX_2 is not set
# CONFIG_SCSI_IPR is not set
-# CONFIG_SCSI_QLOGIC_FC is not set
CONFIG_SCSI_QLOGIC_1280=y
-# CONFIG_SCSI_QLA_FC is not set
+CONFIG_SCSI_QLA_FC=y
+CONFIG_SCSI_QLA2XXX_EMBEDDED_FIRMWARE=y
+# CONFIG_SCSI_QLA21XX is not set
+CONFIG_SCSI_QLA22XX=y
+CONFIG_SCSI_QLA2300=y
+CONFIG_SCSI_QLA2322=y
+# CONFIG_SCSI_QLA24XX is not set
# CONFIG_SCSI_LPFC is not set
# CONFIG_SCSI_DC395x is not set
# CONFIG_SCSI_DC390T is not set
@@ -486,6 +492,7 @@ CONFIG_MD_RAID0=y
CONFIG_MD_RAID1=y
# CONFIG_MD_RAID10 is not set
CONFIG_MD_RAID5=y
+# CONFIG_MD_RAID5_RESHAPE is not set
# CONFIG_MD_RAID6 is not set
CONFIG_MD_MULTIPATH=y
# CONFIG_MD_FAULTY is not set
@@ -694,6 +701,7 @@ CONFIG_EFI_RTC=y
# Ftape, the floppy tape device driver
#
CONFIG_AGP=y
+# CONFIG_AGP_VIA is not set
CONFIG_AGP_SGI_TIOCA=y
# CONFIG_DRM is not set
CONFIG_RAW_DRIVER=m
@@ -735,10 +743,6 @@ CONFIG_MMTIMER=y
#
#
-# Multimedia Capabilities Port drivers
-#
-
-#
# Multimedia devices
#
# CONFIG_VIDEO_DEV is not set
@@ -747,6 +751,7 @@ CONFIG_MMTIMER=y
# Digital Video Broadcasting Devices
#
# CONFIG_DVB is not set
+# CONFIG_USB_DABUSB is not set
#
# Graphics support
@@ -757,6 +762,7 @@ CONFIG_MMTIMER=y
# Console display driver support
#
CONFIG_VGA_CONSOLE=y
+# CONFIG_VGACON_SOFT_SCROLLBACK is not set
CONFIG_DUMMY_CONSOLE=y
#
@@ -769,6 +775,7 @@ CONFIG_DUMMY_CONSOLE=y
#
CONFIG_USB_ARCH_HAS_HCD=y
CONFIG_USB_ARCH_HAS_OHCI=y
+CONFIG_USB_ARCH_HAS_EHCI=y
CONFIG_USB=m
# CONFIG_USB_DEBUG is not set
@@ -829,9 +836,7 @@ CONFIG_USB_HIDINPUT=y
# CONFIG_USB_ACECAD is not set
# CONFIG_USB_KBTAB is not set
# CONFIG_USB_POWERMATE is not set
-# CONFIG_USB_MTOUCH is not set
-# CONFIG_USB_ITMTOUCH is not set
-# CONFIG_USB_EGALAX is not set
+# CONFIG_USB_TOUCHSCREEN is not set
# CONFIG_USB_YEALINK is not set
# CONFIG_USB_XPAD is not set
# CONFIG_USB_ATI_REMOTE is not set
@@ -846,15 +851,6 @@ CONFIG_USB_HIDINPUT=y
# CONFIG_USB_MICROTEK is not set
#
-# USB Multimedia devices
-#
-# CONFIG_USB_DABUSB is not set
-
-#
-# Video4Linux support is needed for USB Multimedia device support
-#
-
-#
# USB Network Adapters
#
# CONFIG_USB_CATC is not set
@@ -905,15 +901,29 @@ CONFIG_USB_MON=y
# CONFIG_MMC is not set
#
+# LED devices
+#
+# CONFIG_NEW_LEDS is not set
+
+#
+# LED drivers
+#
+
+#
+# LED Triggers
+#
+
+#
# InfiniBand support
#
CONFIG_INFINIBAND=m
# CONFIG_INFINIBAND_USER_MAD is not set
CONFIG_INFINIBAND_USER_ACCESS=m
CONFIG_INFINIBAND_MTHCA=m
-# CONFIG_INFINIBAND_MTHCA_DEBUG is not set
+CONFIG_INFINIBAND_MTHCA_DEBUG=y
CONFIG_INFINIBAND_IPOIB=m
-# CONFIG_INFINIBAND_IPOIB_DEBUG is not set
+CONFIG_INFINIBAND_IPOIB_DEBUG=y
+# CONFIG_INFINIBAND_IPOIB_DEBUG_DATA is not set
CONFIG_INFINIBAND_SRP=m
#
@@ -923,8 +933,13 @@ CONFIG_SGI_IOC4=y
CONFIG_SGI_IOC3=y
#
-# EDAC - error detection and reporting (RAS)
+# EDAC - error detection and reporting (RAS) (EXPERIMENTAL)
+#
+
+#
+# Real Time Clock
#
+# CONFIG_RTC_CLASS is not set
#
# File systems
@@ -997,7 +1012,6 @@ CONFIG_TMPFS=y
CONFIG_HUGETLBFS=y
CONFIG_HUGETLB_PAGE=y
CONFIG_RAMFS=y
-CONFIG_RELAYFS_FS=m
# CONFIG_CONFIGFS_FS is not set
#
@@ -1145,7 +1159,7 @@ CONFIG_DETECT_SOFTLOCKUP=y
# CONFIG_SCHEDSTATS is not set
# CONFIG_DEBUG_SLAB is not set
CONFIG_DEBUG_PREEMPT=y
-CONFIG_DEBUG_MUTEXES=y
+# CONFIG_DEBUG_MUTEXES is not set
# CONFIG_DEBUG_SPINLOCK is not set
# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
# CONFIG_DEBUG_KOBJECT is not set
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index bdccd0b1eb6..dd4a2f79263 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -1999,7 +1999,7 @@ acpi_sba_ioc_add(struct acpi_device *device)
if (!iovp_shift)
iovp_shift = min(PAGE_SHIFT, 16);
}
- ACPI_MEM_FREE(dev_info);
+ kfree(dev_info);
/*
* default anything not caught above or specified on cmdline to 4k
diff --git a/arch/ia64/ia32/binfmt_elf32.c b/arch/ia64/ia32/binfmt_elf32.c
index 4e7a6a1ec6c..da03c06744f 100644
--- a/arch/ia64/ia32/binfmt_elf32.c
+++ b/arch/ia64/ia32/binfmt_elf32.c
@@ -35,6 +35,9 @@ extern void ia64_elf32_init (struct pt_regs *regs);
static void elf32_set_personality (void);
+static unsigned long __attribute ((unused))
+randomize_stack_top(unsigned long stack_top);
+
#define setup_arg_pages(bprm,tos,exec) ia32_setup_arg_pages(bprm,exec)
#define elf_map elf32_map
diff --git a/arch/ia64/ia32/ia32_entry.S b/arch/ia64/ia32/ia32_entry.S
index 95fe04400f6..a32cd59b81e 100644
--- a/arch/ia64/ia32/ia32_entry.S
+++ b/arch/ia64/ia32/ia32_entry.S
@@ -334,7 +334,7 @@ ia32_syscall_table:
data8 sys_setdomainname
data8 sys32_newuname
data8 sys32_modify_ldt
- data8 sys_ni_syscall /* adjtimex */
+ data8 compat_sys_adjtimex
data8 sys32_mprotect /* 125 */
data8 compat_sys_sigprocmask
data8 sys_ni_syscall /* create_module */
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index 59e871dae74..09a0dbc17fb 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -7,7 +7,7 @@ extra-y := head.o init_task.o vmlinux.lds
obj-y := acpi.o entry.o efi.o efi_stub.o gate-data.o fsys.o ia64_ksyms.o irq.o irq_ia64.o \
irq_lsapic.o ivt.o machvec.o pal.o patch.o process.o perfmon.o ptrace.o sal.o \
salinfo.o semaphore.o setup.o signal.o sys_ia64.o time.o traps.o unaligned.o \
- unwind.o mca.o mca_asm.o topology.o dmi_scan.o
+ unwind.o mca.o mca_asm.o topology.o
obj-$(CONFIG_IA64_BRL_EMU) += brl_emu.o
obj-$(CONFIG_IA64_GENERIC) += acpi-ext.o
@@ -30,7 +30,6 @@ obj-$(CONFIG_IA64_MCA_RECOVERY) += mca_recovery.o
obj-$(CONFIG_KPROBES) += kprobes.o jprobes.o
obj-$(CONFIG_IA64_UNCACHED_ALLOCATOR) += uncached.o
mca_recovery-y += mca_drv.o mca_drv_asm.o
-dmi_scan-y += ../../i386/kernel/dmi_scan.o
# The gate DSO image is built using a special linker script.
targets += gate.so gate-syms.o
diff --git a/arch/ia64/kernel/acpi-ext.c b/arch/ia64/kernel/acpi-ext.c
index 4a5574ff007..fff82929d22 100644
--- a/arch/ia64/kernel/acpi-ext.c
+++ b/arch/ia64/kernel/acpi-ext.c
@@ -1,105 +1,104 @@
/*
- * arch/ia64/kernel/acpi-ext.c
+ * (c) Copyright 2003, 2006 Hewlett-Packard Development Company, L.P.
+ * Alex Williamson <alex.williamson@hp.com>
+ * Bjorn Helgaas <bjorn.helgaas@hp.com>
*
- * Copyright (C) 2003 Hewlett-Packard
- * Copyright (C) Alex Williamson
- * Copyright (C) Bjorn Helgaas
- *
- * Vendor specific extensions to ACPI.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
*/
#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/acpi.h>
-#include <linux/efi.h>
#include <asm/acpi-ext.h>
-struct acpi_vendor_descriptor {
- u8 guid_id;
- efi_guid_t guid;
-};
+/*
+ * Device CSRs that do not appear in PCI config space should be described
+ * via ACPI. This would normally be done with Address Space Descriptors
+ * marked as "consumer-only," but old versions of Windows and Linux ignore
+ * the producer/consumer flag, so HP invented a vendor-defined resource to
+ * describe the location and size of CSR space.
+ */
-struct acpi_vendor_info {
- struct acpi_vendor_descriptor *descriptor;
- u8 *data;
- u32 length;
+struct acpi_vendor_uuid hp_ccsr_uuid = {
+ .subtype = 2,
+ .data = { 0xf9, 0xad, 0xe9, 0x69, 0x4f, 0x92, 0x5f, 0xab, 0xf6, 0x4a,
+ 0x24, 0xd2, 0x01, 0x37, 0x0e, 0xad },
};
-acpi_status
-acpi_vendor_resource_match(struct acpi_resource *resource, void *context)
+static acpi_status hp_ccsr_locate(acpi_handle obj, u64 *base, u64 *length)
{
- struct acpi_vendor_info *info = (struct acpi_vendor_info *)context;
- struct acpi_resource_vendor *vendor;
- struct acpi_vendor_descriptor *descriptor;
- u32 byte_length;
-
- if (resource->type != ACPI_RESOURCE_TYPE_VENDOR)
- return AE_OK;
-
- vendor = (struct acpi_resource_vendor *)&resource->data;
- descriptor = (struct acpi_vendor_descriptor *)vendor->byte_data;
- if (vendor->byte_length <= sizeof(*info->descriptor) ||
- descriptor->guid_id != info->descriptor->guid_id ||
- efi_guidcmp(descriptor->guid, info->descriptor->guid))
- return AE_OK;
-
- byte_length = vendor->byte_length - sizeof(struct acpi_vendor_descriptor);
- info->data = acpi_os_allocate(byte_length);
- if (!info->data)
- return AE_NO_MEMORY;
-
- memcpy(info->data,
- vendor->byte_data + sizeof(struct acpi_vendor_descriptor),
- byte_length);
- info->length = byte_length;
- return AE_CTRL_TERMINATE;
-}
+ acpi_status status;
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct acpi_resource *resource;
+ struct acpi_resource_vendor_typed *vendor;
-acpi_status
-acpi_find_vendor_resource(acpi_handle obj, struct acpi_vendor_descriptor * id,
- u8 ** data, u32 * byte_length)
-{
- struct acpi_vendor_info info;
+ status = acpi_get_vendor_resource(obj, METHOD_NAME__CRS, &hp_ccsr_uuid,
+ &buffer);
- info.descriptor = id;
- info.data = NULL;
+ resource = buffer.pointer;
+ vendor = &resource->data.vendor_typed;
- acpi_walk_resources(obj, METHOD_NAME__CRS, acpi_vendor_resource_match,
- &info);
- if (!info.data)
- return AE_NOT_FOUND;
+ if (ACPI_FAILURE(status) || vendor->byte_length < 16) {
+ status = AE_NOT_FOUND;
+ goto exit;
+ }
- *data = info.data;
- *byte_length = info.length;
- return AE_OK;
+ memcpy(base, vendor->byte_data, sizeof(*base));
+ memcpy(length, vendor->byte_data + 8, sizeof(*length));
+
+ exit:
+ acpi_os_free(buffer.pointer);
+ return status;
}
-struct acpi_vendor_descriptor hp_ccsr_descriptor = {
- .guid_id = 2,
- .guid =
- EFI_GUID(0x69e9adf9, 0x924f, 0xab5f, 0xf6, 0x4a, 0x24, 0xd2, 0x01,
- 0x37, 0x0e, 0xad)
+struct csr_space {
+ u64 base;
+ u64 length;
};
-acpi_status hp_acpi_csr_space(acpi_handle obj, u64 * csr_base, u64 * csr_length)
+static acpi_status find_csr_space(struct acpi_resource *resource, void *data)
{
+ struct csr_space *space = data;
+ struct acpi_resource_address64 addr;
acpi_status status;
- u8 *data;
- u32 length;
- status =
- acpi_find_vendor_resource(obj, &hp_ccsr_descriptor, &data, &length);
+ status = acpi_resource_to_address64(resource, &addr);
+ if (ACPI_SUCCESS(status) &&
+ addr.resource_type == ACPI_MEMORY_RANGE &&
+ addr.address_length &&
+ addr.producer_consumer == ACPI_CONSUMER) {
+ space->base = addr.minimum;
+ space->length = addr.address_length;
+ return AE_CTRL_TERMINATE;
+ }
+ return AE_OK; /* keep looking */
+}
- if (ACPI_FAILURE(status) || length != 16)
- return AE_NOT_FOUND;
+static acpi_status hp_crs_locate(acpi_handle obj, u64 *base, u64 *length)
+{
+ struct csr_space space = { 0, 0 };
- memcpy(csr_base, data, sizeof(*csr_base));
- memcpy(csr_length, data + 8, sizeof(*csr_length));
- acpi_os_free(data);
+ acpi_walk_resources(obj, METHOD_NAME__CRS, find_csr_space, &space);
+ if (!space.length)
+ return AE_NOT_FOUND;
+ *base = space.base;
+ *length = space.length;
return AE_OK;
}
+acpi_status hp_acpi_csr_space(acpi_handle obj, u64 *csr_base, u64 *csr_length)
+{
+ acpi_status status;
+
+ status = hp_ccsr_locate(obj, csr_base, csr_length);
+ if (ACPI_SUCCESS(status))
+ return status;
+
+ return hp_crs_locate(obj, csr_base, csr_length);
+}
EXPORT_SYMBOL(hp_acpi_csr_space);
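
The rewritten hp_ccsr_locate() treats the HP CCSR vendor resource as a fixed 16-byte payload: bytes 0-7 hold the CSR base and bytes 8-15 the length, copied out with memcpy() so no alignment is assumed for the raw byte_data. A small stand-alone sketch of that decoding, with hypothetical names, assuming only the layout shown in the hunk above:

    #include <stdint.h>
    #include <string.h>

    struct csr_window {
            uint64_t base;
            uint64_t length;
    };

    /* Decode the 16-byte vendor payload the way hp_ccsr_locate() does:
     * bytes 0..7 = base, bytes 8..15 = length. */
    static void decode_ccsr(const uint8_t byte_data[16], struct csr_window *w)
    {
            memcpy(&w->base,   byte_data,     sizeof(w->base));
            memcpy(&w->length, byte_data + 8, sizeof(w->length));
    }
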
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index 750e8e7fbdc..bcb80ca5cf4 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -1606,5 +1606,10 @@ sys_call_table:
data8 sys_ni_syscall // 1295 reserved for ppoll
data8 sys_unshare
data8 sys_splice
+ data8 sys_set_robust_list
+ data8 sys_get_robust_list
+ data8 sys_sync_file_range // 1300
+ data8 sys_tee
+ data8 sys_vmsplice
.org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index 7956eb9058f..d58c1c5c903 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -416,7 +416,7 @@ iosapic_end_level_irq (unsigned int irq)
ia64_vector vec = irq_to_vector(irq);
struct iosapic_rte_info *rte;
- move_irq(irq);
+ move_native_irq(irq);
list_for_each_entry(rte, &iosapic_intr_info[vec].rtes, rte_list)
iosapic_eoi(rte->addr, vec);
}
@@ -458,7 +458,7 @@ iosapic_ack_edge_irq (unsigned int irq)
{
irq_desc_t *idesc = irq_descp(irq);
- move_irq(irq);
+ move_native_irq(irq);
/*
* Once we have recorded IRQ_PENDING already, we can mask the
* interrupt for real. This prevents IRQ storms from unhandled
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c
index 5ce908ef9c9..9c72ea3f643 100644
--- a/arch/ia64/kernel/irq.c
+++ b/arch/ia64/kernel/irq.c
@@ -101,7 +101,6 @@ void set_irq_affinity_info (unsigned int irq, int hwid, int redir)
if (irq < NR_IRQS) {
irq_affinity[irq] = mask;
- set_irq_info(irq, mask);
irq_redir[irq] = (char) (redir & 0xff);
}
}
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index 789881ca83d..f9039f88d01 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -251,7 +251,7 @@ static void __kprobes prepare_break_inst(uint template, uint slot,
update_kprobe_inst_flag(template, slot, major_opcode, kprobe_inst, p);
}
-static inline void get_kprobe_inst(bundle_t *bundle, uint slot,
+static void __kprobes get_kprobe_inst(bundle_t *bundle, uint slot,
unsigned long *kprobe_inst, uint *major_opcode)
{
unsigned long kprobe_inst_p0, kprobe_inst_p1;
@@ -278,7 +278,7 @@ static inline void get_kprobe_inst(bundle_t *bundle, uint slot,
}
/* Returns non-zero if the addr is in the Interrupt Vector Table */
-static inline int in_ivt_functions(unsigned long addr)
+static int __kprobes in_ivt_functions(unsigned long addr)
{
return (addr >= (unsigned long)__start_ivt_text
&& addr < (unsigned long)__end_ivt_text);
@@ -308,19 +308,19 @@ static int __kprobes valid_kprobe_addr(int template, int slot,
return 0;
}
-static inline void save_previous_kprobe(struct kprobe_ctlblk *kcb)
+static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
kcb->prev_kprobe.kp = kprobe_running();
kcb->prev_kprobe.status = kcb->kprobe_status;
}
-static inline void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
+static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
kcb->kprobe_status = kcb->prev_kprobe.status;
}
-static inline void set_current_kprobe(struct kprobe *p,
+static void __kprobes set_current_kprobe(struct kprobe *p,
struct kprobe_ctlblk *kcb)
{
__get_cpu_var(current_kprobe) = p;
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 8963171788d..6a0880639bc 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -581,10 +581,12 @@ ia64_mca_rendez_int_handler(int rendez_irq, void *arg, struct pt_regs *regs)
{
unsigned long flags;
int cpu = smp_processor_id();
+ struct ia64_mca_notify_die nd =
+ { .sos = NULL, .monarch_cpu = &monarch_cpu };
/* Mask all interrupts */
local_irq_save(flags);
- if (notify_die(DIE_MCA_RENDZVOUS_ENTER, "MCA", regs, 0, 0, 0)
+ if (notify_die(DIE_MCA_RENDZVOUS_ENTER, "MCA", regs, (long)&nd, 0, 0)
== NOTIFY_STOP)
ia64_mca_spin(__FUNCTION__);
@@ -594,7 +596,7 @@ ia64_mca_rendez_int_handler(int rendez_irq, void *arg, struct pt_regs *regs)
*/
ia64_sal_mc_rendez();
- if (notify_die(DIE_MCA_RENDZVOUS_PROCESS, "MCA", regs, 0, 0, 0)
+ if (notify_die(DIE_MCA_RENDZVOUS_PROCESS, "MCA", regs, (long)&nd, 0, 0)
== NOTIFY_STOP)
ia64_mca_spin(__FUNCTION__);
@@ -602,7 +604,7 @@ ia64_mca_rendez_int_handler(int rendez_irq, void *arg, struct pt_regs *regs)
while (monarch_cpu != -1)
cpu_relax(); /* spin until monarch leaves */
- if (notify_die(DIE_MCA_RENDZVOUS_LEAVE, "MCA", regs, 0, 0, 0)
+ if (notify_die(DIE_MCA_RENDZVOUS_LEAVE, "MCA", regs, (long)&nd, 0, 0)
== NOTIFY_STOP)
ia64_mca_spin(__FUNCTION__);
@@ -961,7 +963,7 @@ no_mod:
*/
static void
-ia64_wait_for_slaves(int monarch)
+ia64_wait_for_slaves(int monarch, const char *type)
{
int c, wait = 0, missing = 0;
for_each_online_cpu(c) {
@@ -987,7 +989,7 @@ ia64_wait_for_slaves(int monarch)
}
if (!missing)
goto all_in;
- printk(KERN_INFO "OS MCA slave did not rendezvous on cpu");
+ printk(KERN_INFO "OS %s slave did not rendezvous on cpu", type);
for_each_online_cpu(c) {
if (c == monarch)
continue;
@@ -998,7 +1000,7 @@ ia64_wait_for_slaves(int monarch)
return;
all_in:
- printk(KERN_INFO "All OS MCA slaves have reached rendezvous\n");
+ printk(KERN_INFO "All OS %s slaves have reached rendezvous\n", type);
return;
}
@@ -1023,6 +1025,8 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
&sos->proc_state_param;
int recover, cpu = smp_processor_id();
task_t *previous_current;
+ struct ia64_mca_notify_die nd =
+ { .sos = sos, .monarch_cpu = &monarch_cpu };
oops_in_progress = 1; /* FIXME: make printk NMI/MCA/INIT safe */
console_loglevel = 15; /* make sure printks make it to console */
@@ -1031,10 +1035,10 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
previous_current = ia64_mca_modify_original_stack(regs, sw, sos, "MCA");
monarch_cpu = cpu;
- if (notify_die(DIE_MCA_MONARCH_ENTER, "MCA", regs, 0, 0, 0)
+ if (notify_die(DIE_MCA_MONARCH_ENTER, "MCA", regs, (long)&nd, 0, 0)
== NOTIFY_STOP)
ia64_mca_spin(__FUNCTION__);
- ia64_wait_for_slaves(cpu);
+ ia64_wait_for_slaves(cpu, "MCA");
/* Wakeup all the processors which are spinning in the rendezvous loop.
* They will leave SAL, then spin in the OS with interrupts disabled
@@ -1043,7 +1047,7 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
* spinning in SAL does not work.
*/
ia64_mca_wakeup_all();
- if (notify_die(DIE_MCA_MONARCH_PROCESS, "MCA", regs, 0, 0, 0)
+ if (notify_die(DIE_MCA_MONARCH_PROCESS, "MCA", regs, (long)&nd, 0, 0)
== NOTIFY_STOP)
ia64_mca_spin(__FUNCTION__);
@@ -1064,7 +1068,7 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
ia64_sal_clear_state_info(SAL_INFO_TYPE_MCA);
sos->os_status = IA64_MCA_CORRECTED;
}
- if (notify_die(DIE_MCA_MONARCH_LEAVE, "MCA", regs, 0, 0, recover)
+ if (notify_die(DIE_MCA_MONARCH_LEAVE, "MCA", regs, (long)&nd, 0, recover)
== NOTIFY_STOP)
ia64_mca_spin(__FUNCTION__);
@@ -1351,10 +1355,14 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
static atomic_t monarchs;
task_t *previous_current;
int cpu = smp_processor_id();
+ struct ia64_mca_notify_die nd =
+ { .sos = sos, .monarch_cpu = &monarch_cpu };
oops_in_progress = 1; /* FIXME: make printk NMI/MCA/INIT safe */
console_loglevel = 15; /* make sure printks make it to console */
+ (void) notify_die(DIE_INIT_ENTER, "INIT", regs, (long)&nd, 0, 0);
+
printk(KERN_INFO "Entered OS INIT handler. PSP=%lx cpu=%d monarch=%ld\n",
sos->proc_state_param, cpu, sos->monarch);
salinfo_log_wakeup(SAL_INFO_TYPE_INIT, NULL, 0, 0);
@@ -1390,15 +1398,15 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_INIT;
while (monarch_cpu == -1)
cpu_relax(); /* spin until monarch enters */
- if (notify_die(DIE_INIT_SLAVE_ENTER, "INIT", regs, 0, 0, 0)
+ if (notify_die(DIE_INIT_SLAVE_ENTER, "INIT", regs, (long)&nd, 0, 0)
== NOTIFY_STOP)
ia64_mca_spin(__FUNCTION__);
- if (notify_die(DIE_INIT_SLAVE_PROCESS, "INIT", regs, 0, 0, 0)
+ if (notify_die(DIE_INIT_SLAVE_PROCESS, "INIT", regs, (long)&nd, 0, 0)
== NOTIFY_STOP)
ia64_mca_spin(__FUNCTION__);
while (monarch_cpu != -1)
cpu_relax(); /* spin until monarch leaves */
- if (notify_die(DIE_INIT_SLAVE_LEAVE, "INIT", regs, 0, 0, 0)
+ if (notify_die(DIE_INIT_SLAVE_LEAVE, "INIT", regs, (long)&nd, 0, 0)
== NOTIFY_STOP)
ia64_mca_spin(__FUNCTION__);
printk("Slave on cpu %d returning to normal service.\n", cpu);
@@ -1409,7 +1417,7 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
}
monarch_cpu = cpu;
- if (notify_die(DIE_INIT_MONARCH_ENTER, "INIT", regs, 0, 0, 0)
+ if (notify_die(DIE_INIT_MONARCH_ENTER, "INIT", regs, (long)&nd, 0, 0)
== NOTIFY_STOP)
ia64_mca_spin(__FUNCTION__);
@@ -1421,15 +1429,15 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
*/
printk("Delaying for 5 seconds...\n");
udelay(5*1000000);
- ia64_wait_for_slaves(cpu);
+ ia64_wait_for_slaves(cpu, "INIT");
/* If nobody intercepts DIE_INIT_MONARCH_PROCESS then we drop through
* to default_monarch_init_process() above and just print all the
* tasks.
*/
- if (notify_die(DIE_INIT_MONARCH_PROCESS, "INIT", regs, 0, 0, 0)
+ if (notify_die(DIE_INIT_MONARCH_PROCESS, "INIT", regs, (long)&nd, 0, 0)
== NOTIFY_STOP)
ia64_mca_spin(__FUNCTION__);
- if (notify_die(DIE_INIT_MONARCH_LEAVE, "INIT", regs, 0, 0, 0)
+ if (notify_die(DIE_INIT_MONARCH_LEAVE, "INIT", regs, (long)&nd, 0, 0)
== NOTIFY_STOP)
ia64_mca_spin(__FUNCTION__);
printk("\nINIT dump complete. Monarch on cpu %d returning to normal service.\n", cpu);
@@ -1631,6 +1639,7 @@ ia64_mca_init(void)
printk(KERN_INFO "Increasing MCA rendezvous timeout from "
"%ld to %ld milliseconds\n", timeout, isrv.v0);
timeout = isrv.v0;
+ (void) notify_die(DIE_MCA_NEW_TIMEOUT, "MCA", NULL, timeout, 0, 0);
continue;
}
printk(KERN_ERR "Failed to register rendezvous interrupt "
diff --git a/arch/ia64/kernel/mca_asm.S b/arch/ia64/kernel/mca_asm.S
index 60a464bfd9e..6dff024cd62 100644
--- a/arch/ia64/kernel/mca_asm.S
+++ b/arch/ia64/kernel/mca_asm.S
@@ -827,7 +827,7 @@ ia64_state_restore:
ld8 r9=[temp2],16 // sal_gp
;;
ld8 r22=[temp1],16 // pal_min_state, virtual
- ld8 r21=[temp2],16 // prev_IA64_KR_CURRENT
+ ld8 r13=[temp2],16 // prev_IA64_KR_CURRENT
;;
ld8 r16=[temp1],16 // prev_IA64_KR_CURRENT_STACK
ld8 r20=[temp2],16 // prev_task
@@ -848,7 +848,7 @@ ia64_state_restore:
mov cr.iim=temp3
mov cr.iha=temp4
dep r22=0,r22,62,1 // pal_min_state, physical, uncached
- mov IA64_KR(CURRENT)=r21
+ mov IA64_KR(CURRENT)=r13
ld8 r8=[temp1] // os_status
ld8 r10=[temp2] // context
@@ -856,7 +856,7 @@ ia64_state_restore:
* avoid any dependencies on the algorithm in ia64_switch_to(), just
* purge any existing CURRENT_STACK mapping and insert the new one.
*
- * r16 contains prev_IA64_KR_CURRENT_STACK, r21 contains
+ * r16 contains prev_IA64_KR_CURRENT_STACK, r13 contains
* prev_IA64_KR_CURRENT, these values may have been changed by the C
* code. Do not use r8, r9, r10, r22, they contain values ready for
* the return to SAL.
@@ -873,7 +873,7 @@ ia64_state_restore:
;;
srlz.d
- extr.u r19=r21,61,3 // r21 = prev_IA64_KR_CURRENT
+ extr.u r19=r13,61,3 // r13 = prev_IA64_KR_CURRENT
shl r20=r16,IA64_GRANULE_SHIFT // r16 = prev_IA64_KR_CURRENT_STACK
movl r21=PAGE_KERNEL // page properties
;;
@@ -883,7 +883,7 @@ ia64_state_restore:
(p6) br.spnt 1f // the dreaded cpu 0 idle task in region 5:(
;;
mov cr.itir=r18
- mov cr.ifa=r21
+ mov cr.ifa=r13
mov r20=IA64_TR_CURRENT_STACK
;;
itr.d dtr[r20]=r21
diff --git a/arch/ia64/kernel/mca_drv.c b/arch/ia64/kernel/mca_drv.c
index 37c88eb5587..ca6666b51cc 100644
--- a/arch/ia64/kernel/mca_drv.c
+++ b/arch/ia64/kernel/mca_drv.c
@@ -62,6 +62,11 @@ typedef enum {
ISOLATE_NONE
} isolate_status_t;
+typedef enum {
+ MCA_NOT_RECOVERED = 0,
+ MCA_RECOVERED = 1
+} recovery_status_t;
+
/*
* This pool keeps pointers to the section part of SAL error record
*/
@@ -71,6 +76,18 @@ static struct {
int max_idx; /* Maximum index of section pointer list pool */
} slidx_pool;
+static int
+fatal_mca(const char *fmt, ...)
+{
+ va_list args;
+
+ va_start(args, fmt);
+ vprintk(fmt, args);
+ va_end(args);
+
+ return MCA_NOT_RECOVERED;
+}
+
/**
* mca_page_isolate - isolate a poisoned page in order not to use it later
* @paddr: poisoned memory location
@@ -424,7 +441,7 @@ recover_from_read_error(slidx_table_t *slidx,
/* Is target address valid? */
if (!pbci->tv)
- return 0;
+ return fatal_mca(KERN_ALERT "MCA: target address not valid\n");
/*
* cpu read or memory-mapped io read
@@ -442,7 +459,7 @@ recover_from_read_error(slidx_table_t *slidx,
/* Is minstate valid? */
if (!peidx_bottom(peidx) || !(peidx_bottom(peidx)->valid.minstate))
- return 0;
+ return fatal_mca(KERN_ALERT "MCA: minstate not valid\n");
psr1 =(struct ia64_psr *)&(peidx_minstate_area(peidx)->pmsa_ipsr);
psr2 =(struct ia64_psr *)&(peidx_minstate_area(peidx)->pmsa_xpsr);
@@ -476,12 +493,13 @@ recover_from_read_error(slidx_table_t *slidx,
psr2->bn = 1;
psr2->i = 0;
- return 1;
+ return MCA_RECOVERED;
}
}
- return 0;
+ return fatal_mca(KERN_ALERT "MCA: kernel context not recovered,"
+ " iip 0x%lx\n", pmsa->pmsa_iip);
}
/**
@@ -567,13 +585,13 @@ recover_from_processor_error(int platform, slidx_table_t *slidx,
* The machine check is corrected.
*/
if (psp->cm == 1)
- return 1;
+ return MCA_RECOVERED;
/*
* The error was not contained. Software must be reset.
*/
if (psp->us || psp->ci == 0)
- return 0;
+ return fatal_mca(KERN_ALERT "MCA: error not contained\n");
/*
* The cache check and bus check bits have four possible states
@@ -584,20 +602,22 @@ recover_from_processor_error(int platform, slidx_table_t *slidx,
* 1 1 Memory error, attempt recovery
*/
if (psp->bc == 0 || pbci == NULL)
- return 0;
+ return fatal_mca(KERN_ALERT "MCA: No bus check\n");
/*
* Sorry, we cannot handle so many.
*/
if (peidx_bus_check_num(peidx) > 1)
- return 0;
+ return fatal_mca(KERN_ALERT "MCA: Too many bus checks\n");
/*
* Well, here is only one bus error.
*/
- if (pbci->ib || pbci->cc)
- return 0;
+ if (pbci->ib)
+ return fatal_mca(KERN_ALERT "MCA: Internal Bus error\n");
+ if (pbci->cc)
+ return fatal_mca(KERN_ALERT "MCA: Cache-cache error\n");
if (pbci->eb && pbci->bsi > 0)
- return 0;
+ return fatal_mca(KERN_ALERT "MCA: External bus check fatal status\n");
/*
* This is a local MCA and estimated as recoverble external bus error.
@@ -609,7 +629,7 @@ recover_from_processor_error(int platform, slidx_table_t *slidx,
/*
* On account of strange SAL error record, we cannot recover.
*/
- return 0;
+ return fatal_mca(KERN_ALERT "MCA: Strange SAL record\n");
}
/**
@@ -638,12 +658,10 @@ mca_try_to_recover(void *rec, struct ia64_sal_os_state *sos)
/* Now, OS can recover when there is one processor error section */
if (n_proc_err > 1)
- return 0;
- else if (n_proc_err == 0) {
+ return fatal_mca(KERN_ALERT "MCA: Too Many Errors\n");
+ else if (n_proc_err == 0)
/* Weird SAL record ... We need not to recover */
-
- return 1;
- }
+ return fatal_mca(KERN_ALERT "MCA: Weird SAL record\n");
/* Make index of processor error section */
mca_make_peidx((sal_log_processor_info_t*)
@@ -654,7 +672,7 @@ mca_try_to_recover(void *rec, struct ia64_sal_os_state *sos)
/* Check whether MCA is global or not */
if (is_mca_global(&peidx, &pbci, sos))
- return 0;
+ return fatal_mca(KERN_ALERT "MCA: global MCA\n");
/* Try to recover a processor error */
return recover_from_processor_error(platform_err, &slidx, &peidx,
diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
index 7a2f0a798d1..3a30cfc9574 100644
--- a/arch/ia64/kernel/module.c
+++ b/arch/ia64/kernel/module.c
@@ -947,7 +947,7 @@ void
percpu_modcopy (void *pcpudst, const void *src, unsigned long size)
{
unsigned int i;
- for_each_cpu(i) {
+ for_each_possible_cpu(i) {
memcpy(pcpudst + __per_cpu_offset[i], src, size);
}
}
diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
index 6386f63c413..859fb37ff49 100644
--- a/arch/ia64/kernel/palinfo.c
+++ b/arch/ia64/kernel/palinfo.c
@@ -959,7 +959,7 @@ remove_palinfo_proc_entries(unsigned int hcpu)
}
}
-static int __devinit palinfo_cpu_callback(struct notifier_block *nfb,
+static int palinfo_cpu_callback(struct notifier_block *nfb,
unsigned long action,
void *hcpu)
{
diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
index 9887c8787e7..e61e15e28d8 100644
--- a/arch/ia64/kernel/ptrace.c
+++ b/arch/ia64/kernel/ptrace.c
@@ -1644,7 +1644,7 @@ syscall_trace_enter (long arg0, long arg1, long arg2, long arg3,
arch = AUDIT_ARCH_IA64;
}
- audit_syscall_entry(current, arch, syscall, arg0, arg1, arg2, arg3);
+ audit_syscall_entry(arch, syscall, arg0, arg1, arg2, arg3);
}
}
@@ -1662,7 +1662,7 @@ syscall_trace_leave (long arg0, long arg1, long arg2, long arg3,
if (success != AUDITSC_SUCCESS)
result = -result;
- audit_syscall_exit(current, success, result);
+ audit_syscall_exit(success, result);
}
if (test_thread_flag(TIF_SYSCALL_TRACE)
diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c
index 9d5a823479a..663a186ad19 100644
--- a/arch/ia64/kernel/salinfo.c
+++ b/arch/ia64/kernel/salinfo.c
@@ -572,7 +572,7 @@ static struct file_operations salinfo_data_fops = {
};
#ifdef CONFIG_HOTPLUG_CPU
-static int __devinit
+static int
salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
{
unsigned int i, cpu = (unsigned long)hcpu;
diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
index b47476d655f..4f3a16b37f8 100644
--- a/arch/ia64/kernel/topology.c
+++ b/arch/ia64/kernel/topology.c
@@ -305,13 +305,10 @@ static struct kobj_type cache_ktype_percpu_entry = {
static void __cpuinit cpu_cache_sysfs_exit(unsigned int cpu)
{
- if (all_cpu_cache_info[cpu].cache_leaves) {
- kfree(all_cpu_cache_info[cpu].cache_leaves);
- all_cpu_cache_info[cpu].cache_leaves = NULL;
- }
+ kfree(all_cpu_cache_info[cpu].cache_leaves);
+ all_cpu_cache_info[cpu].cache_leaves = NULL;
all_cpu_cache_info[cpu].num_cache_leaves = 0;
memset(&all_cpu_cache_info[cpu].kobj, 0, sizeof(struct kobject));
-
return;
}
@@ -429,7 +426,7 @@ static int __cpuinit cache_remove_dev(struct sys_device * sys_dev)
* When a cpu is hot-plugged, do a check and initiate
* cache kobject if necessary
*/
-static int __cpuinit cache_cpu_callback(struct notifier_block *nfb,
+static int cache_cpu_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
diff --git a/arch/ia64/lib/memcpy_mck.S b/arch/ia64/lib/memcpy_mck.S
index 46c9331e7ab..9e534d52b1d 100644
--- a/arch/ia64/lib/memcpy_mck.S
+++ b/arch/ia64/lib/memcpy_mck.S
@@ -6,7 +6,9 @@
* in1: source address
* in2: number of bytes to copy
* Output:
- * 0 if success, or number of byte NOT copied if error occurred.
+ * for memcpy: return dest
+ * for copy_user: return 0 if success,
+ * or number of byte NOT copied if error occurred.
*
* Copyright (C) 2002 Intel Corp.
* Copyright (C) 2002 Ken Chen <kenneth.w.chen@intel.com>
@@ -73,6 +75,7 @@ GLOBAL_ENTRY(memcpy)
and r28=0x7,in0
and r29=0x7,in1
mov f6=f0
+ mov retval=in0
br.cond.sptk .common_code
;;
END(memcpy)
@@ -84,7 +87,7 @@ GLOBAL_ENTRY(__copy_user)
mov f6=f1
mov saved_in0=in0 // save dest pointer
mov saved_in1=in1 // save src pointer
- mov saved_in2=in2 // save len
+ mov retval=r0 // initialize return value
;;
.common_code:
cmp.gt p15,p0=8,in2 // check for small size
@@ -92,7 +95,7 @@ GLOBAL_ENTRY(__copy_user)
cmp.ne p14,p0=0,r29 // check src alignment
add src0=0,in1
sub r30=8,r28 // for .align_dest
- mov retval=r0 // initialize return value
+ mov saved_in2=in2 // save len
;;
add dst0=0,in0
add dst1=1,in0 // dest odd index
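
The updated header comment documents two different return conventions for the shared copy loop: memcpy() now returns the destination pointer (loaded via the new "mov retval=in0" before the branch to .common_code), while __copy_user() keeps returning the number of bytes NOT copied. A toy user-space illustration of the two contracts; toy_copy_user() is a stand-in, not the kernel routine:

    #include <stdio.h>
    #include <string.h>

    /* Stand-in with __copy_user()'s contract: return bytes NOT copied. */
    static unsigned long toy_copy_user(void *to, const void *from, unsigned long n)
    {
            memcpy(to, from, n);    /* pretend no fault occurred */
            return 0;               /* 0 => everything was copied */
    }

    int main(void)
    {
            char src[8] = "abcdefg", dst[8];

            /* memcpy contract: the destination pointer comes back. */
            printf("memcpy returned dest? %d\n",
                   memcpy(dst, src, sizeof(dst)) == (void *)dst);
            /* copy_user contract: 0 on success, else bytes left over. */
            printf("bytes not copied: %lu\n",
                   toy_copy_user(dst, src, sizeof(dst)));
            return 0;
    }
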
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index ec9eeb89975..b6bcc9fa360 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -519,6 +519,68 @@ void __cpuinit *per_cpu_init(void)
}
#endif /* CONFIG_SMP */
+#ifdef CONFIG_VIRTUAL_MEM_MAP
+static inline int find_next_valid_pfn_for_pgdat(pg_data_t *pgdat, int i)
+{
+ unsigned long end_address, hole_next_pfn;
+ unsigned long stop_address;
+
+ end_address = (unsigned long) &vmem_map[pgdat->node_start_pfn + i];
+ end_address = PAGE_ALIGN(end_address);
+
+ stop_address = (unsigned long) &vmem_map[
+ pgdat->node_start_pfn + pgdat->node_spanned_pages];
+
+ do {
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+
+ pgd = pgd_offset_k(end_address);
+ if (pgd_none(*pgd)) {
+ end_address += PGDIR_SIZE;
+ continue;
+ }
+
+ pud = pud_offset(pgd, end_address);
+ if (pud_none(*pud)) {
+ end_address += PUD_SIZE;
+ continue;
+ }
+
+ pmd = pmd_offset(pud, end_address);
+ if (pmd_none(*pmd)) {
+ end_address += PMD_SIZE;
+ continue;
+ }
+
+ pte = pte_offset_kernel(pmd, end_address);
+retry_pte:
+ if (pte_none(*pte)) {
+ end_address += PAGE_SIZE;
+ pte++;
+ if ((end_address < stop_address) &&
+ (end_address != ALIGN(end_address, 1UL << PMD_SHIFT)))
+ goto retry_pte;
+ continue;
+ }
+ /* Found next valid vmem_map page */
+ break;
+ } while (end_address < stop_address);
+
+ end_address = min(end_address, stop_address);
+ end_address = end_address - (unsigned long) vmem_map + sizeof(struct page) - 1;
+ hole_next_pfn = end_address / sizeof(struct page);
+ return hole_next_pfn - pgdat->node_start_pfn;
+}
+#else
+static inline int find_next_valid_pfn_for_pgdat(pg_data_t *pgdat, int i)
+{
+ return i + 1;
+}
+#endif
+
/**
* show_mem - give short summary of memory stats
*
@@ -547,8 +609,10 @@ void show_mem(void)
struct page *page;
if (pfn_valid(pgdat->node_start_pfn + i))
page = pfn_to_page(pgdat->node_start_pfn + i);
- else
+ else {
+ i = find_next_valid_pfn_for_pgdat(pgdat, i) - 1;
continue;
+ }
if (PageReserved(page))
reserved++;
else if (PageSwapCache(page))
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index af7eb087dca..d98ec49570b 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -60,6 +60,9 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
struct siginfo si;
unsigned long mask;
+ /* mmap_sem is performance critical.... */
+ prefetchw(&mm->mmap_sem);
+
/*
* If we're in an interrupt or have no user context, we must not take the fault..
*/
diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c
index 9ba32b2d96d..ab829a22f8a 100644
--- a/arch/ia64/pci/pci.c
+++ b/arch/ia64/pci/pci.c
@@ -31,7 +31,6 @@
#include <asm/irq.h>
#include <asm/hw_irq.h>
-
/*
* Low-level SAL-based PCI configuration access functions. Note that SAL
* calls are already serialized (via sal_lock), so we don't need another
@@ -707,7 +706,7 @@ int ia64_pci_legacy_read(struct pci_bus *bus, u16 port, u32 *val, u8 size)
*
* Simply writes @size bytes of @val to @port.
*/
-int ia64_pci_legacy_write(struct pci_dev *bus, u16 port, u32 val, u8 size)
+int ia64_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
{
int ret = size;
diff --git a/arch/ia64/sn/kernel/sn2/sn_hwperf.c b/arch/ia64/sn/kernel/sn2/sn_hwperf.c
index d917afa30b2..739c948dc50 100644
--- a/arch/ia64/sn/kernel/sn2/sn_hwperf.c
+++ b/arch/ia64/sn/kernel/sn2/sn_hwperf.c
@@ -284,6 +284,8 @@ static int sn_hwperf_get_nearest_node_objdata(struct sn_hwperf_object_info *objb
/* find nearest node with cpus and nearest memory */
for (router=NULL, j=0; j < op->ports; j++) {
dest = sn_hwperf_findobj_id(objbuf, nobj, ptdata[j].conn_id);
+ if (dest && SN_HWPERF_IS_ROUTER(dest))
+ router = dest;
if (!dest || SN_HWPERF_FOREIGN(dest) ||
!SN_HWPERF_IS_NODE(dest) || SN_HWPERF_IS_IONODE(dest)) {
continue;
@@ -299,8 +301,6 @@ static int sn_hwperf_get_nearest_node_objdata(struct sn_hwperf_object_info *objb
*near_mem_node = c;
found_mem++;
}
- if (SN_HWPERF_IS_ROUTER(dest))
- router = dest;
}
if (router && (!found_cpu || !found_mem)) {
@@ -493,7 +493,7 @@ static int sn_topology_show(struct seq_file *s, void *d)
* numalink ports
*/
sz = obj->ports * sizeof(struct sn_hwperf_port_info);
- if ((ptdata = vmalloc(sz)) == NULL)
+ if ((ptdata = kmalloc(sz, GFP_KERNEL)) == NULL)
return -ENOMEM;
e = ia64_sn_hwperf_op(sn_hwperf_master_nasid,
SN_HWPERF_ENUM_PORTS, obj->id, sz,
@@ -541,7 +541,7 @@ static int sn_topology_show(struct seq_file *s, void *d)
(SN_HWPERF_IS_NL3ROUTER(obj) ||
SN_HWPERF_IS_NL3ROUTER(p)) ? "LLP3" : "LLP4");
}
- vfree(ptdata);
+ kfree(ptdata);
}
return 0;
diff --git a/arch/ia64/sn/kernel/xpc_channel.c b/arch/ia64/sn/kernel/xpc_channel.c
index d0abddd9ffe..8255a9be463 100644
--- a/arch/ia64/sn/kernel/xpc_channel.c
+++ b/arch/ia64/sn/kernel/xpc_channel.c
@@ -1831,7 +1831,7 @@ xpc_initiate_allocate(partid_t partid, int ch_number, u32 flags, void **payload)
{
struct xpc_partition *part = &xpc_partitions[partid];
enum xpc_retval ret = xpcUnknownReason;
- struct xpc_msg *msg;
+ struct xpc_msg *msg = NULL;
DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
diff --git a/arch/ia64/sn/kernel/xpc_partition.c b/arch/ia64/sn/kernel/xpc_partition.c
index 94211429fd0..2a89cfce495 100644
--- a/arch/ia64/sn/kernel/xpc_partition.c
+++ b/arch/ia64/sn/kernel/xpc_partition.c
@@ -136,9 +136,7 @@ xpc_get_rsvd_page_pa(int nasid)
}
if (L1_CACHE_ALIGN(len) > buf_len) {
- if (buf_base != NULL) {
- kfree(buf_base);
- }
+ kfree(buf_base);
buf_len = L1_CACHE_ALIGN(len);
buf = (u64) xpc_kmalloc_cacheline_aligned(buf_len,
GFP_KERNEL, &buf_base);
@@ -159,9 +157,7 @@ xpc_get_rsvd_page_pa(int nasid)
}
}
- if (buf_base != NULL) {
- kfree(buf_base);
- }
+ kfree(buf_base);
if (status != SALRET_OK) {
rp_pa = 0;
diff --git a/arch/ia64/sn/pci/tioce_provider.c b/arch/ia64/sn/pci/tioce_provider.c
index fa073cc4b56..833295624e5 100644
--- a/arch/ia64/sn/pci/tioce_provider.c
+++ b/arch/ia64/sn/pci/tioce_provider.c
@@ -682,9 +682,6 @@ tioce_reserve_m32(struct tioce_kernel *ce_kern, u64 base, u64 limit)
int ate_index, last_ate, ps;
struct tioce *ce_mmr;
- if (!TIOCE_M32_ADDR(base))
- return;
-
ce_mmr = (struct tioce *)ce_kern->ce_common->ce_pcibus.bs_base;
ps = ce_kern->ce_ate3240_pagesize;
ate_index = ATE_PAGE(base, ps);
@@ -693,6 +690,9 @@ tioce_reserve_m32(struct tioce_kernel *ce_kern, u64 base, u64 limit)
if (ate_index < 64)
ate_index = 64;
+ if (last_ate >= TIOCE_NUM_M3240_ATES)
+ last_ate = TIOCE_NUM_M3240_ATES - 1;
+
while (ate_index <= last_ate) {
u64 ate;