author     Trond Myklebust <Trond.Myklebust@netapp.com>  2006-12-07 16:35:17 -0500
committer  Trond Myklebust <Trond.Myklebust@netapp.com>  2006-12-07 16:35:17 -0500
commit     21b4e736922f546e0f1aa7b9d6c442f309a2444a (patch)
tree       e1be8645297f8ebe87445251743ebcc52081a20d /include
parent     34161db6b14d984fb9b06c735b7b42f8803f6851 (diff)
parent     68380b581383c028830f79ec2670f4a193854aa6 (diff)

Merge branch 'master' of /home/trondmy/kernel/linux-2.6/ into merge_linus
Diffstat (limited to 'include')
-rw-r--r--  include/acpi/platform/aclinux.h | 2
-rw-r--r--  include/asm-alpha/dma-mapping.h | 4
-rw-r--r--  include/asm-alpha/unistd.h | 182
-rw-r--r--  include/asm-arm/dma-mapping.h | 2
-rw-r--r--  include/asm-arm/setup.h | 104
-rw-r--r--  include/asm-arm/unistd.h | 150
-rw-r--r--  include/asm-arm26/pgalloc.h | 2
-rw-r--r--  include/asm-arm26/setup.h | 4
-rw-r--r--  include/asm-arm26/unistd.h | 133
-rw-r--r--  include/asm-avr32/dma-mapping.h | 5
-rw-r--r--  include/asm-avr32/setup.h | 4
-rw-r--r--  include/asm-cris/arch-v10/bitops.h | 10
-rw-r--r--  include/asm-cris/dma-mapping.h | 4
-rw-r--r--  include/asm-cris/semaphore-helper.h | 8
-rw-r--r--  include/asm-frv/dma-mapping.h | 4
-rw-r--r--  include/asm-frv/highmem.h | 5
-rw-r--r--  include/asm-frv/param.h | 1
-rw-r--r--  include/asm-frv/setup.h | 6
-rw-r--r--  include/asm-frv/unistd.h | 119
-rw-r--r--  include/asm-generic/Kbuild | 1
-rw-r--r--  include/asm-generic/Kbuild.asm | 1
-rw-r--r--  include/asm-generic/atomic.h | 7
-rw-r--r--  include/asm-generic/dma-mapping.h | 4
-rw-r--r--  include/asm-generic/futex.h | 4
-rw-r--r--  include/asm-generic/vmlinux.lds.h | 23
-rw-r--r--  include/asm-h8300/delay.h | 4
-rw-r--r--  include/asm-h8300/mmu_context.h | 4
-rw-r--r--  include/asm-h8300/pci.h | 4
-rw-r--r--  include/asm-h8300/tlbflush.h | 4
-rw-r--r--  include/asm-h8300/unistd.h | 166
-rw-r--r--  include/asm-i386/Kbuild | 1
-rw-r--r--  include/asm-i386/alternative.h | 13
-rw-r--r--  include/asm-i386/apic.h | 15
-rw-r--r--  include/asm-i386/atomic.h | 6
-rw-r--r--  include/asm-i386/boot.h | 6
-rw-r--r--  include/asm-i386/bugs.h | 4
-rw-r--r--  include/asm-i386/cpu.h | 3
-rw-r--r--  include/asm-i386/cpufeature.h | 8
-rw-r--r--  include/asm-i386/current.h | 7
-rw-r--r--  include/asm-i386/delay.h | 13
-rw-r--r--  include/asm-i386/desc.h | 95
-rw-r--r--  include/asm-i386/dma-mapping.h | 4
-rw-r--r--  include/asm-i386/e820.h | 5
-rw-r--r--  include/asm-i386/elf.h | 2
-rw-r--r--  include/asm-i386/futex.h | 4
-rw-r--r--  include/asm-i386/genapic.h | 2
-rw-r--r--  include/asm-i386/i387.h | 5
-rw-r--r--  include/asm-i386/io.h | 8
-rw-r--r--  include/asm-i386/irq.h | 5
-rw-r--r--  include/asm-i386/irq_regs.h | 28
-rw-r--r--  include/asm-i386/irqflags.h | 42
-rw-r--r--  include/asm-i386/mach-default/setup_arch.h | 2
-rw-r--r--  include/asm-i386/math_emu.h | 1
-rw-r--r--  include/asm-i386/mmu_context.h | 8
-rw-r--r--  include/asm-i386/mmzone.h | 27
-rw-r--r--  include/asm-i386/module.h | 10
-rw-r--r--  include/asm-i386/mpspec_def.h | 2
-rw-r--r--  include/asm-i386/msr.h | 18
-rw-r--r--  include/asm-i386/nmi.h | 8
-rw-r--r--  include/asm-i386/page.h | 8
-rw-r--r--  include/asm-i386/param.h | 1
-rw-r--r--  include/asm-i386/paravirt.h | 505
-rw-r--r--  include/asm-i386/pda.h | 100
-rw-r--r--  include/asm-i386/percpu.h | 25
-rw-r--r--  include/asm-i386/pgtable-2level.h | 10
-rw-r--r--  include/asm-i386/pgtable-3level.h | 45
-rw-r--r--  include/asm-i386/pgtable.h | 28
-rw-r--r--  include/asm-i386/processor.h | 204
-rw-r--r--  include/asm-i386/ptrace.h | 2
-rw-r--r--  include/asm-i386/rwsem.h | 4
-rw-r--r--  include/asm-i386/segment.h | 7
-rw-r--r--  include/asm-i386/setup.h | 7
-rw-r--r--  include/asm-i386/smp.h | 3
-rw-r--r--  include/asm-i386/spinlock.h | 19
-rw-r--r--  include/asm-i386/suspend.h | 21
-rw-r--r--  include/asm-i386/system.h | 16
-rw-r--r--  include/asm-i386/thread_info.h | 10
-rw-r--r--  include/asm-i386/time.h | 41
-rw-r--r--  include/asm-i386/tlbflush.h | 18
-rw-r--r--  include/asm-i386/unistd.h | 98
-rw-r--r--  include/asm-i386/unwind.h | 13
-rw-r--r--  include/asm-i386/vm86.h | 17
-rw-r--r--  include/asm-ia64/Kbuild | 1
-rw-r--r--  include/asm-ia64/dma-mapping.h | 5
-rw-r--r--  include/asm-ia64/futex.h | 4
-rw-r--r--  include/asm-ia64/pgalloc.h | 2
-rw-r--r--  include/asm-m32r/setup.h | 9
-rw-r--r--  include/asm-m32r/unistd.h | 111
-rw-r--r--  include/asm-m68k/dma-mapping.h | 4
-rw-r--r--  include/asm-m68k/setup.h | 6
-rw-r--r--  include/asm-m68k/unistd.h | 97
-rw-r--r--  include/asm-m68knommu/setup.h | 5
-rw-r--r--  include/asm-m68knommu/unistd.h | 150
-rw-r--r--  include/asm-mips/dma-mapping.h | 4
-rw-r--r--  include/asm-mips/futex.h | 4
-rw-r--r--  include/asm-mips/highmem.h | 10
-rw-r--r--  include/asm-mips/setup.h | 2
-rw-r--r--  include/asm-mips/unistd.h | 262
-rw-r--r--  include/asm-parisc/dma-mapping.h | 4
-rw-r--r--  include/asm-parisc/futex.h | 4
-rw-r--r--  include/asm-powerpc/dma-mapping.h | 6
-rw-r--r--  include/asm-powerpc/elf.h | 2
-rw-r--r--  include/asm-powerpc/futex.h | 4
-rw-r--r--  include/asm-powerpc/pgalloc.h | 2
-rw-r--r--  include/asm-powerpc/setup.h | 3
-rw-r--r--  include/asm-powerpc/unistd.h | 109
-rw-r--r--  include/asm-ppc/highmem.h | 8
-rw-r--r--  include/asm-s390/setup.h | 3
-rw-r--r--  include/asm-s390/unistd.h | 154
-rw-r--r--  include/asm-sh/dma-mapping.h | 2
-rw-r--r--  include/asm-sh/setup.h | 6
-rw-r--r--  include/asm-sh/unistd.h | 137
-rw-r--r--  include/asm-sh64/dma-mapping.h | 2
-rw-r--r--  include/asm-sh64/setup.h | 6
-rw-r--r--  include/asm-sh64/unistd.h | 142
-rw-r--r--  include/asm-sparc/unistd.h | 130
-rw-r--r--  include/asm-sparc64/dma-mapping.h | 4
-rw-r--r--  include/asm-sparc64/futex.h | 4
-rw-r--r--  include/asm-sparc64/pgalloc.h | 2
-rw-r--r--  include/asm-sparc64/unistd.h | 118
-rw-r--r--  include/asm-um/dma-mapping.h | 4
-rw-r--r--  include/asm-v850/irq.h | 2
-rw-r--r--  include/asm-v850/unistd.h | 160
-rw-r--r--  include/asm-x86_64/Kbuild | 2
-rw-r--r--  include/asm-x86_64/alternative.h | 12
-rw-r--r--  include/asm-x86_64/atomic.h | 6
-rw-r--r--  include/asm-x86_64/calgary.h | 2
-rw-r--r--  include/asm-x86_64/cpufeature.h | 7
-rw-r--r--  include/asm-x86_64/delay.h | 7
-rw-r--r--  include/asm-x86_64/desc.h | 53
-rw-r--r--  include/asm-x86_64/desc_defs.h | 69
-rw-r--r--  include/asm-x86_64/dma-mapping.h | 5
-rw-r--r--  include/asm-x86_64/futex.h | 4
-rw-r--r--  include/asm-x86_64/genapic.h | 2
-rw-r--r--  include/asm-x86_64/msr.h | 17
-rw-r--r--  include/asm-x86_64/nmi.h | 3
-rw-r--r--  include/asm-x86_64/pci-direct.h | 1
-rw-r--r--  include/asm-x86_64/pgtable.h | 22
-rw-r--r--  include/asm-x86_64/processor.h | 8
-rw-r--r--  include/asm-x86_64/proto.h | 2
-rw-r--r--  include/asm-x86_64/rio.h | 74
-rw-r--r--  include/asm-x86_64/smp.h | 12
-rw-r--r--  include/asm-x86_64/spinlock.h | 29
-rw-r--r--  include/asm-x86_64/stacktrace.h | 2
-rw-r--r--  include/asm-x86_64/unistd.h | 99
-rw-r--r--  include/asm-x86_64/unwind.h | 8
-rw-r--r--  include/asm-x86_64/vsyscall.h | 1
-rw-r--r--  include/asm-xtensa/dma-mapping.h | 4
-rw-r--r--  include/asm-xtensa/unistd.h | 184
-rw-r--r--  include/crypto/b128ops.h | 80
-rw-r--r--  include/crypto/gf128mul.h | 198
-rw-r--r--  include/linux/Kbuild | 3
-rw-r--r--  include/linux/aio.h | 2
-rw-r--r--  include/linux/audit.h | 6
-rw-r--r--  include/linux/bootmem.h | 3
-rw-r--r--  include/linux/bottom_half.h | 10
-rw-r--r--  include/linux/carta_random32.h | 29
-rw-r--r--  include/linux/cciss_ioctl.h | 2
-rw-r--r--  include/linux/cdev.h | 4
-rw-r--r--  include/linux/cpu.h | 31
-rw-r--r--  include/linux/cpuset.h | 4
-rw-r--r--  include/linux/crypto.h | 22
-rw-r--r--  include/linux/debug_locks.h | 2
-rw-r--r--  include/linux/delayacct.h | 2
-rw-r--r--  include/linux/device.h | 22
-rw-r--r--  include/linux/efi.h | 3
-rw-r--r--  include/linux/elf.h | 4
-rw-r--r--  include/linux/ext3_jbd.h | 76
-rw-r--r--  include/linux/ext4_jbd2.h | 76
-rw-r--r--  include/linux/file.h | 4
-rw-r--r--  include/linux/freezer.h | 87
-rw-r--r--  include/linux/fs.h | 20
-rw-r--r--  include/linux/fs_struct.h | 2
-rw-r--r--  include/linux/fuse.h | 24
-rw-r--r--  include/linux/genetlink.h | 6
-rw-r--r--  include/linux/gfp.h | 3
-rw-r--r--  include/linux/gfs2_ondisk.h | 138
-rw-r--r--  include/linux/highmem.h | 8
-rw-r--r--  include/linux/hugetlb.h | 1
-rw-r--r--  include/linux/i2o.h | 20
-rw-r--r--  include/linux/init_task.h | 2
-rw-r--r--  include/linux/interrupt.h | 7
-rw-r--r--  include/linux/ipmi.h | 45
-rw-r--r--  include/linux/ipmi_msgdefs.h | 13
-rw-r--r--  include/linux/ipmi_smi.h | 8
-rw-r--r--  include/linux/jbd.h | 3
-rw-r--r--  include/linux/jbd2.h | 3
-rw-r--r--  include/linux/kexec.h | 1
-rw-r--r--  include/linux/kprobes.h | 2
-rw-r--r--  include/linux/ktime.h | 4
-rw-r--r--  include/linux/lockd/lockd.h | 2
-rw-r--r--  include/linux/lockdep.h | 11
-rw-r--r--  include/linux/mm.h | 81
-rw-r--r--  include/linux/mmzone.h | 87
-rw-r--r--  include/linux/moduleparam.h | 3
-rw-r--r--  include/linux/msg.h | 6
-rw-r--r--  include/linux/mutex.h | 2
-rw-r--r--  include/linux/nbd.h | 1
-rw-r--r--  include/linux/netfilter/nf_conntrack_pptp.h | 3
-rw-r--r--  include/linux/nmi.h | 5
-rw-r--r--  include/linux/pci_ids.h | 9
-rw-r--r--  include/linux/pfkeyv2.h | 1
-rw-r--r--  include/linux/profile.h | 24
-rw-r--r--  include/linux/quotaops.h | 3
-rw-r--r--  include/linux/radix-tree.h | 101
-rw-r--r--  include/linux/raid/raid5.h | 2
-rw-r--r--  include/linux/reiserfs_fs.h | 2
-rw-r--r--  include/linux/relay.h | 2
-rw-r--r--  include/linux/rmap.h | 4
-rw-r--r--  include/linux/rtmutex.h | 2
-rw-r--r--  include/linux/rwsem-spinlock.h | 3
-rw-r--r--  include/linux/sched.h | 111
-rw-r--r--  include/linux/screen_info.h | 3
-rw-r--r--  include/linux/seq_file.h | 4
-rw-r--r--  include/linux/serial_8250.h | 1
-rw-r--r--  include/linux/serial_core.h | 2
-rw-r--r--  include/linux/signal.h | 2
-rw-r--r--  include/linux/skbuff.h | 8
-rw-r--r--  include/linux/slab.h | 92
-rw-r--r--  include/linux/smp.h | 7
-rw-r--r--  include/linux/spinlock.h | 1
-rw-r--r--  include/linux/start_kernel.h | 12
-rw-r--r--  include/linux/sunrpc/sched.h | 4
-rw-r--r--  include/linux/suspend.h | 9
-rw-r--r--  include/linux/swap.h | 6
-rw-r--r--  include/linux/taskstats_kern.h | 43
-rw-r--r--  include/linux/uaccess.h | 49
-rw-r--r--  include/linux/workqueue.h | 9
-rw-r--r--  include/net/dst.h | 2
-rw-r--r--  include/net/inet_hashtables.h | 6
-rw-r--r--  include/net/irda/irlan_filter.h | 2
-rw-r--r--  include/net/neighbour.h | 2
-rw-r--r--  include/net/netfilter/nf_conntrack_expect.h | 2
-rw-r--r--  include/net/request_sock.h | 4
-rw-r--r--  include/net/sock.h | 21
-rw-r--r--  include/net/timewait_sock.h | 2
-rw-r--r--  include/net/xfrm.h | 24
-rw-r--r--  include/scsi/libsas.h | 4
238 files changed, 2854 insertions(+), 3766 deletions(-)
diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h
index 47faf27913a..7f1e92930b6 100644
--- a/include/acpi/platform/aclinux.h
+++ b/include/acpi/platform/aclinux.h
@@ -64,7 +64,7 @@
/* Host-dependent types and defines */
#define ACPI_MACHINE_WIDTH BITS_PER_LONG
-#define acpi_cache_t kmem_cache_t
+#define acpi_cache_t struct kmem_cache
#define acpi_spinlock spinlock_t *
#define ACPI_EXPORT_SYMBOL(symbol) EXPORT_SYMBOL(symbol);
#define strtoul simple_strtoul
diff --git a/include/asm-alpha/dma-mapping.h b/include/asm-alpha/dma-mapping.h
index b9ff4d8cb33..57e09f5e342 100644
--- a/include/asm-alpha/dma-mapping.h
+++ b/include/asm-alpha/dma-mapping.h
@@ -51,7 +51,7 @@ int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-#define dma_is_consistent(dev) (1)
+#define dma_is_consistent(d, h) (1)
int dma_set_mask(struct device *dev, u64 mask);
@@ -60,7 +60,7 @@ int dma_set_mask(struct device *dev, u64 mask);
#define dma_sync_single_range(dev, addr, off, size, dir) do { } while (0)
#define dma_sync_sg_for_cpu(dev, sg, nents, dir) do { } while (0)
#define dma_sync_sg_for_device(dev, sg, nents, dir) do { } while (0)
-#define dma_cache_sync(va, size, dir) do { } while (0)
+#define dma_cache_sync(dev, va, size, dir) do { } while (0)
#define dma_get_cache_alignment() L1_CACHE_BYTES
diff --git a/include/asm-alpha/unistd.h b/include/asm-alpha/unistd.h
index 2cabbd465c0..84313d14e78 100644
--- a/include/asm-alpha/unistd.h
+++ b/include/asm-alpha/unistd.h
@@ -387,188 +387,6 @@
#define NR_SYSCALLS 447
-#if defined(__GNUC__)
-
-#define _syscall_return(type) \
- return (_sc_err ? errno = _sc_ret, _sc_ret = -1L : 0), (type) _sc_ret
-
-#define _syscall_clobbers \
- "$1", "$2", "$3", "$4", "$5", "$6", "$7", "$8", \
- "$22", "$23", "$24", "$25", "$27", "$28" \
-
-#define _syscall0(type, name) \
-type name(void) \
-{ \
- long _sc_ret, _sc_err; \
- { \
- register long _sc_0 __asm__("$0"); \
- register long _sc_19 __asm__("$19"); \
- \
- _sc_0 = __NR_##name; \
- __asm__("callsys # %0 %1 %2" \
- : "=r"(_sc_0), "=r"(_sc_19) \
- : "0"(_sc_0) \
- : _syscall_clobbers); \
- _sc_ret = _sc_0, _sc_err = _sc_19; \
- } \
- _syscall_return(type); \
-}
-
-#define _syscall1(type,name,type1,arg1) \
-type name(type1 arg1) \
-{ \
- long _sc_ret, _sc_err; \
- { \
- register long _sc_0 __asm__("$0"); \
- register long _sc_16 __asm__("$16"); \
- register long _sc_19 __asm__("$19"); \
- \
- _sc_0 = __NR_##name; \
- _sc_16 = (long) (arg1); \
- __asm__("callsys # %0 %1 %2 %3" \
- : "=r"(_sc_0), "=r"(_sc_19) \
- : "0"(_sc_0), "r"(_sc_16) \
- : _syscall_clobbers); \
- _sc_ret = _sc_0, _sc_err = _sc_19; \
- } \
- _syscall_return(type); \
-}
-
-#define _syscall2(type,name,type1,arg1,type2,arg2) \
-type name(type1 arg1,type2 arg2) \
-{ \
- long _sc_ret, _sc_err; \
- { \
- register long _sc_0 __asm__("$0"); \
- register long _sc_16 __asm__("$16"); \
- register long _sc_17 __asm__("$17"); \
- register long _sc_19 __asm__("$19"); \
- \
- _sc_0 = __NR_##name; \
- _sc_16 = (long) (arg1); \
- _sc_17 = (long) (arg2); \
- __asm__("callsys # %0 %1 %2 %3 %4" \
- : "=r"(_sc_0), "=r"(_sc_19) \
- : "0"(_sc_0), "r"(_sc_16), "r"(_sc_17) \
- : _syscall_clobbers); \
- _sc_ret = _sc_0, _sc_err = _sc_19; \
- } \
- _syscall_return(type); \
-}
-
-#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
-type name(type1 arg1,type2 arg2,type3 arg3) \
-{ \
- long _sc_ret, _sc_err; \
- { \
- register long _sc_0 __asm__("$0"); \
- register long _sc_16 __asm__("$16"); \
- register long _sc_17 __asm__("$17"); \
- register long _sc_18 __asm__("$18"); \
- register long _sc_19 __asm__("$19"); \
- \
- _sc_0 = __NR_##name; \
- _sc_16 = (long) (arg1); \
- _sc_17 = (long) (arg2); \
- _sc_18 = (long) (arg3); \
- __asm__("callsys # %0 %1 %2 %3 %4 %5" \
- : "=r"(_sc_0), "=r"(_sc_19) \
- : "0"(_sc_0), "r"(_sc_16), "r"(_sc_17), \
- "r"(_sc_18) \
- : _syscall_clobbers); \
- _sc_ret = _sc_0, _sc_err = _sc_19; \
- } \
- _syscall_return(type); \
-}
-
-#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
-type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
-{ \
- long _sc_ret, _sc_err; \
- { \
- register long _sc_0 __asm__("$0"); \
- register long _sc_16 __asm__("$16"); \
- register long _sc_17 __asm__("$17"); \
- register long _sc_18 __asm__("$18"); \
- register long _sc_19 __asm__("$19"); \
- \
- _sc_0 = __NR_##name; \
- _sc_16 = (long) (arg1); \
- _sc_17 = (long) (arg2); \
- _sc_18 = (long) (arg3); \
- _sc_19 = (long) (arg4); \
- __asm__("callsys # %0 %1 %2 %3 %4 %5 %6" \
- : "=r"(_sc_0), "=r"(_sc_19) \
- : "0"(_sc_0), "r"(_sc_16), "r"(_sc_17), \
- "r"(_sc_18), "1"(_sc_19) \
- : _syscall_clobbers); \
- _sc_ret = _sc_0, _sc_err = _sc_19; \
- } \
- _syscall_return(type); \
-}
-
-#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
- type5,arg5) \
-type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
-{ \
- long _sc_ret, _sc_err; \
- { \
- register long _sc_0 __asm__("$0"); \
- register long _sc_16 __asm__("$16"); \
- register long _sc_17 __asm__("$17"); \
- register long _sc_18 __asm__("$18"); \
- register long _sc_19 __asm__("$19"); \
- register long _sc_20 __asm__("$20"); \
- \
- _sc_0 = __NR_##name; \
- _sc_16 = (long) (arg1); \
- _sc_17 = (long) (arg2); \
- _sc_18 = (long) (arg3); \
- _sc_19 = (long) (arg4); \
- _sc_20 = (long) (arg5); \
- __asm__("callsys # %0 %1 %2 %3 %4 %5 %6 %7" \
- : "=r"(_sc_0), "=r"(_sc_19) \
- : "0"(_sc_0), "r"(_sc_16), "r"(_sc_17), \
- "r"(_sc_18), "1"(_sc_19), "r"(_sc_20) \
- : _syscall_clobbers); \
- _sc_ret = _sc_0, _sc_err = _sc_19; \
- } \
- _syscall_return(type); \
-}
-
-#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
- type5,arg5,type6,arg6) \
-type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, type6 arg6)\
-{ \
- long _sc_ret, _sc_err; \
- { \
- register long _sc_0 __asm__("$0"); \
- register long _sc_16 __asm__("$16"); \
- register long _sc_17 __asm__("$17"); \
- register long _sc_18 __asm__("$18"); \
- register long _sc_19 __asm__("$19"); \
- register long _sc_20 __asm__("$20"); \
- register long _sc_21 __asm__("$21"); \
- \
- _sc_0 = __NR_##name; \
- _sc_16 = (long) (arg1); \
- _sc_17 = (long) (arg2); \
- _sc_18 = (long) (arg3); \
- _sc_19 = (long) (arg4); \
- _sc_20 = (long) (arg5); \
- _sc_21 = (long) (arg6); \
- __asm__("callsys # %0 %1 %2 %3 %4 %5 %6 %7 %8" \
- : "=r"(_sc_0), "=r"(_sc_19) \
- : "0"(_sc_0), "r"(_sc_16), "r"(_sc_17), \
- "r"(_sc_18), "1"(_sc_19), "r"(_sc_20), "r"(_sc_21) \
- : _syscall_clobbers); \
- _sc_ret = _sc_0, _sc_err = _sc_19; \
- } \
- _syscall_return(type); \
-}
-
-#endif /* __GNUC__ */
-
#define __ARCH_WANT_IPC_PARSE_VERSION
#define __ARCH_WANT_OLD_READDIR
#define __ARCH_WANT_STAT64
diff --git a/include/asm-arm/dma-mapping.h b/include/asm-arm/dma-mapping.h
index 666617711c8..9bc46b486af 100644
--- a/include/asm-arm/dma-mapping.h
+++ b/include/asm-arm/dma-mapping.h
@@ -48,7 +48,7 @@ static inline int dma_get_cache_alignment(void)
return 32;
}
-static inline int dma_is_consistent(dma_addr_t handle)
+static inline int dma_is_consistent(struct device *dev, dma_addr_t handle)
{
return !!arch_is_coherent();
}
diff --git a/include/asm-arm/setup.h b/include/asm-arm/setup.h
index aa4b5782f0c..e5407392afc 100644
--- a/include/asm-arm/setup.h
+++ b/include/asm-arm/setup.h
@@ -14,55 +14,57 @@
#ifndef __ASMARM_SETUP_H
#define __ASMARM_SETUP_H
+#include <asm/types.h>
+
#define COMMAND_LINE_SIZE 1024
/* The list ends with an ATAG_NONE node. */
#define ATAG_NONE 0x00000000
struct tag_header {
- u32 size;
- u32 tag;
+ __u32 size;
+ __u32 tag;
};
/* The list must start with an ATAG_CORE node */
#define ATAG_CORE 0x54410001
struct tag_core {
- u32 flags; /* bit 0 = read-only */
- u32 pagesize;
- u32 rootdev;
+ __u32 flags; /* bit 0 = read-only */
+ __u32 pagesize;
+ __u32 rootdev;
};
/* it is allowed to have multiple ATAG_MEM nodes */
#define ATAG_MEM 0x54410002
struct tag_mem32 {
- u32 size;
- u32 start; /* physical start address */
+ __u32 size;
+ __u32 start; /* physical start address */
};
/* VGA text type displays */
#define ATAG_VIDEOTEXT 0x54410003
struct tag_videotext {
- u8 x;
- u8 y;
- u16 video_page;
- u8 video_mode;
- u8 video_cols;
- u16 video_ega_bx;
- u8 video_lines;
- u8 video_isvga;
- u16 video_points;
+ __u8 x;
+ __u8 y;
+ __u16 video_page;
+ __u8 video_mode;
+ __u8 video_cols;
+ __u16 video_ega_bx;
+ __u8 video_lines;
+ __u8 video_isvga;
+ __u16 video_points;
};
/* describes how the ramdisk will be used in kernel */
#define ATAG_RAMDISK 0x54410004
struct tag_ramdisk {
- u32 flags; /* bit 0 = load, bit 1 = prompt */
- u32 size; /* decompressed ramdisk size in _kilo_ bytes */
- u32 start; /* starting block of floppy-based RAM disk image */
+ __u32 flags; /* bit 0 = load, bit 1 = prompt */
+ __u32 size; /* decompressed ramdisk size in _kilo_ bytes */
+ __u32 start; /* starting block of floppy-based RAM disk image */
};
/* describes where the compressed ramdisk image lives (virtual address) */
@@ -76,23 +78,23 @@ struct tag_ramdisk {
#define ATAG_INITRD2 0x54420005
struct tag_initrd {
- u32 start; /* physical start address */
- u32 size; /* size of compressed ramdisk image in bytes */
+ __u32 start; /* physical start address */
+ __u32 size; /* size of compressed ramdisk image in bytes */
};
/* board serial number. "64 bits should be enough for everybody" */
#define ATAG_SERIAL 0x54410006
struct tag_serialnr {
- u32 low;
- u32 high;
+ __u32 low;
+ __u32 high;
};
/* board revision */
#define ATAG_REVISION 0x54410007
struct tag_revision {
- u32 rev;
+ __u32 rev;
};
/* initial values for vesafb-type framebuffers. see struct screen_info
@@ -101,20 +103,20 @@ struct tag_revision {
#define ATAG_VIDEOLFB 0x54410008
struct tag_videolfb {
- u16 lfb_width;
- u16 lfb_height;
- u16 lfb_depth;
- u16 lfb_linelength;
- u32 lfb_base;
- u32 lfb_size;
- u8 red_size;
- u8 red_pos;
- u8 green_size;
- u8 green_pos;
- u8 blue_size;
- u8 blue_pos;
- u8 rsvd_size;
- u8 rsvd_pos;
+ __u16 lfb_width;
+ __u16 lfb_height;
+ __u16 lfb_depth;
+ __u16 lfb_linelength;
+ __u32 lfb_base;
+ __u32 lfb_size;
+ __u8 red_size;
+ __u8 red_pos;
+ __u8 green_size;
+ __u8 green_pos;
+ __u8 blue_size;
+ __u8 blue_pos;
+ __u8 rsvd_size;
+ __u8 rsvd_pos;
};
/* command line: \0 terminated string */
@@ -128,17 +130,17 @@ struct tag_cmdline {
#define ATAG_ACORN 0x41000101
struct tag_acorn {
- u32 memc_control_reg;
- u32 vram_pages;
- u8 sounddefault;
- u8 adfsdrives;
+ __u32 memc_control_reg;
+ __u32 vram_pages;
+ __u8 sounddefault;
+ __u8 adfsdrives;
};
/* footbridge memory clock, see arch/arm/mach-footbridge/arch.c */
#define ATAG_MEMCLK 0x41000402
struct tag_memclk {
- u32 fmemclk;
+ __u32 fmemclk;
};
struct tag {
@@ -167,24 +169,26 @@ struct tag {
};
struct tagtable {
- u32 tag;
+ __u32 tag;
int (*parse)(const struct tag *);
};
-#define __tag __attribute_used__ __attribute__((__section__(".taglist.init")))
-#define __tagtable(tag, fn) \
-static struct tagtable __tagtable_##fn __tag = { tag, fn }
-
#define tag_member_present(tag,member) \
((unsigned long)(&((struct tag *)0L)->member + 1) \
<= (tag)->hdr.size * 4)
-#define tag_next(t) ((struct tag *)((u32 *)(t) + (t)->hdr.size))
+#define tag_next(t) ((struct tag *)((__u32 *)(t) + (t)->hdr.size))
#define tag_size(type) ((sizeof(struct tag_header) + sizeof(struct type)) >> 2)
#define for_each_tag(t,base) \
for (t = base; t->hdr.size; t = tag_next(t))
+#ifdef __KERNEL__
+
+#define __tag __attribute_used__ __attribute__((__section__(".taglist.init")))
+#define __tagtable(tag, fn) \
+static struct tagtable __tagtable_##fn __tag = { tag, fn }
+
/*
* Memory map description
*/
@@ -217,4 +221,6 @@ struct early_params {
static struct early_params __early_##fn __attribute_used__ \
__attribute__((__section__(".early_param.init"))) = { name, fn }
+#endif /* __KERNEL__ */
+
#endif
diff --git a/include/asm-arm/unistd.h b/include/asm-arm/unistd.h
index 14a87eec5a2..d44c629d842 100644
--- a/include/asm-arm/unistd.h
+++ b/include/asm-arm/unistd.h
@@ -377,156 +377,6 @@
#endif
#ifdef __KERNEL__
-#include <linux/err.h>
-#include <linux/linkage.h>
-
-#define __sys2(x) #x
-#define __sys1(x) __sys2(x)
-
-#ifndef __syscall
-#if defined(__thumb__) || defined(__ARM_EABI__)
-#define __SYS_REG(name) register long __sysreg __asm__("r7") = __NR_##name;
-#define __SYS_REG_LIST(regs...) "r" (__sysreg) , ##regs
-#define __syscall(name) "swi\t0"
-#else
-#define __SYS_REG(name)
-#define __SYS_REG_LIST(regs...) regs
-#define __syscall(name) "swi\t" __sys1(__NR_##name) ""
-#endif
-#endif
-
-#define __syscall_return(type, res) \
-do { \
- if ((unsigned long)(res) >= (unsigned long)(-MAX_ERRNO)) { \
- errno = -(res); \
- res = -1; \
- } \
- return (type) (res); \
-} while (0)
-
-#define _syscall0(type,name) \
-type name(void) { \
- __SYS_REG(name) \
- register long __res_r0 __asm__("r0"); \
- long __res; \
- __asm__ __volatile__ ( \
- __syscall(name) \
- : "=r" (__res_r0) \
- : __SYS_REG_LIST() \
- : "memory" ); \
- __res = __res_r0; \
- __syscall_return(type,__res); \
-}
-
-#define _syscall1(type,name,type1,arg1) \
-type name(type1 arg1) { \
- __SYS_REG(name) \
- register long __r0 __asm__("r0") = (long)arg1; \
- register long __res_r0 __asm__("r0"); \
- long __res; \
- __asm__ __volatile__ ( \
- __syscall(name) \
- : "=r" (__res_r0) \
- : __SYS_REG_LIST( "0" (__r0) ) \
- : "memory" ); \
- __res = __res_r0; \
- __syscall_return(type,__res); \
-}
-
-#define _syscall2(type,name,type1,arg1,type2,arg2) \
-type name(type1 arg1,type2 arg2) { \
- __SYS_REG(name) \
- register long __r0 __asm__("r0") = (long)arg1; \
- register long __r1 __asm__("r1") = (long)arg2; \
- register long __res_r0 __asm__("r0"); \
- long __res; \
- __asm__ __volatile__ ( \
- __syscall(name) \
- : "=r" (__res_r0) \
- : __SYS_REG_LIST( "0" (__r0), "r" (__r1) ) \
- : "memory" ); \
- __res = __res_r0; \
- __syscall_return(type,__res); \
-}
-
-
-#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
-type name(type1 arg1,type2 arg2,type3 arg3) { \
- __SYS_REG(name) \
- register long __r0 __asm__("r0") = (long)arg1; \
- register long __r1 __asm__("r1") = (long)arg2; \
- register long __r2 __asm__("r2") = (long)arg3; \
- register long __res_r0 __asm__("r0"); \
- long __res; \
- __asm__ __volatile__ ( \
- __syscall(name) \
- : "=r" (__res_r0) \
- : __SYS_REG_LIST( "0" (__r0), "r" (__r1), "r" (__r2) ) \
- : "memory" ); \
- __res = __res_r0; \
- __syscall_return(type,__res); \
-}
-
-
-#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)\
-type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) { \
- __SYS_REG(name) \
- register long __r0 __asm__("r0") = (long)arg1; \
- register long __r1 __asm__("r1") = (long)arg2; \
- register long __r2 __asm__("r2") = (long)arg3; \
- register long __r3 __asm__("r3") = (long)arg4; \
- register long __res_r0 __asm__("r0"); \
- long __res; \
- __asm__ __volatile__ ( \
- __syscall(name) \
- : "=r" (__res_r0) \
- : __SYS_REG_LIST( "0" (__r0), "r" (__r1), "r" (__r2), "r" (__r3) ) \
- : "memory" ); \
- __res = __res_r0; \
- __syscall_return(type,__res); \
-}
-
-
-#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5) \
-type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) { \
- __SYS_REG(name) \
- register long __r0 __asm__("r0") = (long)arg1; \
- register long __r1 __asm__("r1") = (long)arg2; \
- register long __r2 __asm__("r2") = (long)arg3; \
- register long __r3 __asm__("r3") = (long)arg4; \
- register long __r4 __asm__("r4") = (long)arg5; \
- register long __res_r0 __asm__("r0"); \
- long __res; \
- __asm__ __volatile__ ( \
- __syscall(name) \
- : "=r" (__res_r0) \
- : __SYS_REG_LIST( "0" (__r0), "r" (__r1), "r" (__r2), \
- "r" (__r3), "r" (__r4) ) \
- : "memory" ); \
- __res = __res_r0; \
- __syscall_return(type,__res); \
-}
-
-#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5,type6,arg6) \
-type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5, type6 arg6) { \
- __SYS_REG(name) \
- register long __r0 __asm__("r0") = (long)arg1; \
- register long __r1 __asm__("r1") = (long)arg2; \
- register long __r2 __asm__("r2") = (long)arg3; \
- register long __r3 __asm__("r3") = (long)arg4; \
- register long __r4 __asm__("r4") = (long)arg5; \
- register long __r5 __asm__("r5") = (long)arg6; \
- register long __res_r0 __asm__("r0"); \
- long __res; \
- __asm__ __volatile__ ( \
- __syscall(name) \
- : "=r" (__res_r0) \
- : __SYS_REG_LIST( "0" (__r0), "r" (__r1), "r" (__r2), \
- "r" (__r3), "r" (__r4), "r" (__r5) ) \
- : "memory" ); \
- __res = __res_r0; \
- __syscall_return(type,__res); \
-}
#define __ARCH_WANT_IPC_PARSE_VERSION
#define __ARCH_WANT_STAT64
diff --git a/include/asm-arm26/pgalloc.h b/include/asm-arm26/pgalloc.h
index 6437167b1ff..7725af3ddb4 100644
--- a/include/asm-arm26/pgalloc.h
+++ b/include/asm-arm26/pgalloc.h
@@ -15,7 +15,7 @@
#include <asm/tlbflush.h>
#include <linux/slab.h>
-extern kmem_cache_t *pte_cache;
+extern struct kmem_cache *pte_cache;
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr){
return kmem_cache_alloc(pte_cache, GFP_KERNEL);
diff --git a/include/asm-arm26/setup.h b/include/asm-arm26/setup.h
index 6348931be65..1a867b4e8d5 100644
--- a/include/asm-arm26/setup.h
+++ b/include/asm-arm26/setup.h
@@ -16,6 +16,8 @@
#define COMMAND_LINE_SIZE 1024
+#ifdef __KERNEL__
+
/* The list ends with an ATAG_NONE node. */
#define ATAG_NONE 0x00000000
@@ -202,4 +204,6 @@ struct meminfo {
extern struct meminfo meminfo;
+#endif /* __KERNEL__ */
+
#endif
diff --git a/include/asm-arm26/unistd.h b/include/asm-arm26/unistd.h
index 25a5eead85b..4c3b919177e 100644
--- a/include/asm-arm26/unistd.h
+++ b/include/asm-arm26/unistd.h
@@ -311,139 +311,6 @@
#define __ARM_NR_usr26 (__ARM_NR_BASE+3)
#ifdef __KERNEL__
-#include <linux/err.h>
-#include <linux/linkage.h>
-
-#define __sys2(x) #x
-#define __sys1(x) __sys2(x)
-
-#ifndef __syscall
-#define __syscall(name) "swi\t" __sys1(__NR_##name) ""
-#endif
-
-#define __syscall_return(type, res) \
-do { \
- if ((unsigned long)(res) >= (unsigned long)-MAX_ERRNO) { \
- errno = -(res); \
- res = -1; \
- } \
- return (type) (res); \
-} while (0)
-
-#define _syscall0(type,name) \
-type name(void) { \
- register long __res_r0 __asm__("r0"); \
- long __res; \
- __asm__ __volatile__ ( \
- __syscall(name) \
- : "=r" (__res_r0) \
- : \
- : "lr"); \
- __res = __res_r0; \
- __syscall_return(type,__res); \
-}
-
-#define _syscall1(type,name,type1,arg1) \
-type name(type1 arg1) { \
- register long __r0 __asm__("r0") = (long)arg1; \
- register long __res_r0 __asm__("r0"); \
- long __res; \
- __asm__ __volatile__ ( \
- __syscall(name) \
- : "=r" (__res_r0) \
- : "r" (__r0) \
- : "lr"); \
- __res = __res_r0; \
- __syscall_return(type,__res); \
-}
-
-#define _syscall2(type,name,type1,arg1,type2,arg2) \
-type name(type1 arg1,type2 arg2) { \
- register long __r0 __asm__("r0") = (long)arg1; \
- register long __r1 __asm__("r1") = (long)arg2; \
- register long __res_r0 __asm__("r0"); \
- long __res; \
- __asm__ __volatile__ ( \
- __syscall(name) \
- : "=r" (__res_r0) \
- : "r" (__r0),"r" (__r1) \
- : "lr"); \
- __res = __res_r0; \
- __syscall_return(type,__res); \
-}
-
-
-#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
-type name(type1 arg1,type2 arg2,type3 arg3) { \
- register long __r0 __asm__("r0") = (long)arg1; \
- register long __r1 __asm__("r1") = (long)arg2; \
- register long __r2 __asm__("r2") = (long)arg3; \
- register long __res_r0 __asm__("r0"); \
- long __res; \
- __asm__ __volatile__ ( \
- __syscall(name) \
- : "=r" (__res_r0) \
- : "r" (__r0),"r" (__r1),"r" (__r2) \
- : "lr"); \
- __res = __res_r0; \
- __syscall_return(type,__res); \
-}
-
-
-#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)\
-type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) { \
- register long __r0 __asm__("r0") = (long)arg1; \
- register long __r1 __asm__("r1") = (long)arg2; \
- register long __r2 __asm__("r2") = (long)arg3; \
- register long __r3 __asm__("r3") = (long)arg4; \
- register long __res_r0 __asm__("r0"); \
- long __res; \
- __asm__ __volatile__ ( \
- __syscall(name) \
- : "=r" (__res_r0) \
- : "r" (__r0),"r" (__r1),"r" (__r2),"r" (__r3) \
- : "lr"); \
- __res = __res_r0; \
- __syscall_return(type,__res); \
-}
-
-
-#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5) \
-type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) { \
- register long __r0 __asm__("r0") = (long)arg1; \
- register long __r1 __asm__("r1") = (long)arg2; \
- register long __r2 __asm__("r2") = (long)arg3; \
- register long __r3 __asm__("r3") = (long)arg4; \
- register long __r4 __asm__("r4") = (long)arg5; \
- register long __res_r0 __asm__("r0"); \
- long __res; \
- __asm__ __volatile__ ( \
- __syscall(name) \
- : "=r" (__res_r0) \
- : "r" (__r0),"r" (__r1),"r" (__r2),"r" (__r3),"r" (__r4) \
- : "lr"); \
- __res = __res_r0; \
- __syscall_return(type,__res); \
-}
-
-#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5,type6,arg6) \
-type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5, type6 arg6) { \
- register long __r0 __asm__("r0") = (long)arg1; \
- register long __r1 __asm__("r1") = (long)arg2; \
- register long __r2 __asm__("r2") = (long)arg3; \
- register long __r3 __asm__("r3") = (long)arg4; \
- register long __r4 __asm__("r4") = (long)arg5; \
- register long __r5 __asm__("r5") = (long)arg6; \
- register long __res_r0 __asm__("r0"); \
- long __res; \
- __asm__ __volatile__ ( \
- __syscall(name) \
- : "=r" (__res_r0) \
- : "r" (__r0),"r" (__r1),"r" (__r2),"r" (__r3), "r" (__r4),"r" (__r5) \
- : "lr"); \
- __res = __res_r0; \
- __syscall_return(type,__res); \
-}
#define __ARCH_WANT_IPC_PARSE_VERSION
#define __ARCH_WANT_OLD_READDIR
diff --git a/include/asm-avr32/dma-mapping.h b/include/asm-avr32/dma-mapping.h
index 4c40cb41cdf..0580b5d62bb 100644
--- a/include/asm-avr32/dma-mapping.h
+++ b/include/asm-avr32/dma-mapping.h
@@ -8,7 +8,8 @@
#include <asm/cacheflush.h>
#include <asm/io.h>
-extern void dma_cache_sync(void *vaddr, size_t size, int direction);
+extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+ int direction);
/*
* Return whether the given device DMA address mask can be supported
@@ -307,7 +308,7 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-static inline int dma_is_consistent(dma_addr_t dma_addr)
+static inline int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
{
return 1;
}
diff --git a/include/asm-avr32/setup.h b/include/asm-avr32/setup.h
index 10193da4113..0a5224245e4 100644
--- a/include/asm-avr32/setup.h
+++ b/include/asm-avr32/setup.h
@@ -13,6 +13,8 @@
#define COMMAND_LINE_SIZE 256
+#ifdef __KERNEL__
+
/* Magic number indicating that a tag table is present */
#define ATAG_MAGIC 0xa2a25441
@@ -138,4 +140,6 @@ void chip_enable_sdram(void);
#endif /* !__ASSEMBLY__ */
+#endif /* __KERNEL__ */
+
#endif /* __ASM_AVR32_SETUP_H__ */
diff --git a/include/asm-cris/arch-v10/bitops.h b/include/asm-cris/arch-v10/bitops.h
index b73f5396e5a..be85f6de25d 100644
--- a/include/asm-cris/arch-v10/bitops.h
+++ b/include/asm-cris/arch-v10/bitops.h
@@ -10,7 +10,7 @@
* number. They differ in that the first function also inverts all bits
* in the input.
*/
-extern inline unsigned long cris_swapnwbrlz(unsigned long w)
+static inline unsigned long cris_swapnwbrlz(unsigned long w)
{
/* Let's just say we return the result in the same register as the
input. Saying we clobber the input but can return the result
@@ -26,7 +26,7 @@ extern inline unsigned long cris_swapnwbrlz(unsigned long w)
return res;
}
-extern inline unsigned long cris_swapwbrlz(unsigned long w)
+static inline unsigned long cris_swapwbrlz(unsigned long w)
{
unsigned res;
__asm__ ("swapwbr %0 \n\t"
@@ -40,7 +40,7 @@ extern inline unsigned long cris_swapwbrlz(unsigned long w)
* ffz = Find First Zero in word. Undefined if no zero exists,
* so code should check against ~0UL first..
*/
-extern inline unsigned long ffz(unsigned long w)
+static inline unsigned long ffz(unsigned long w)
{
return cris_swapnwbrlz(w);
}
@@ -51,7 +51,7 @@ extern inline unsigned long ffz(unsigned long w)
*
* Undefined if no bit exists, so code should check against 0 first.
*/
-extern inline unsigned long __ffs(unsigned long word)
+static inline unsigned long __ffs(unsigned long word)
{
return cris_swapnwbrlz(~word);
}
@@ -65,7 +65,7 @@ extern inline unsigned long __ffs(unsigned long word)
* differs in spirit from the above ffz (man ffs).
*/
-extern inline unsigned long kernel_ffs(unsigned long w)
+static inline unsigned long kernel_ffs(unsigned long w)
{
return w ? cris_swapwbrlz (w) + 1 : 0;
}
diff --git a/include/asm-cris/dma-mapping.h b/include/asm-cris/dma-mapping.h
index cbf1a98f012..662cea70152 100644
--- a/include/asm-cris/dma-mapping.h
+++ b/include/asm-cris/dma-mapping.h
@@ -156,10 +156,10 @@ dma_get_cache_alignment(void)
return (1 << INTERNODE_CACHE_SHIFT);
}
-#define dma_is_consistent(d) (1)
+#define dma_is_consistent(d, h) (1)
static inline void
-dma_cache_sync(void *vaddr, size_t size,
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction direction)
{
}
diff --git a/include/asm-cris/semaphore-helper.h b/include/asm-cris/semaphore-helper.h
index dbd0f30b85b..a8e1e6cb7cd 100644
--- a/include/asm-cris/semaphore-helper.h
+++ b/include/asm-cris/semaphore-helper.h
@@ -20,12 +20,12 @@
/*
* These two _must_ execute atomically wrt each other.
*/
-extern inline void wake_one_more(struct semaphore * sem)
+static inline void wake_one_more(struct semaphore * sem)
{
atomic_inc(&sem->waking);
}
-extern inline int waking_non_zero(struct semaphore *sem)
+static inline int waking_non_zero(struct semaphore *sem)
{
unsigned long flags;
int ret = 0;
@@ -40,7 +40,7 @@ extern inline int waking_non_zero(struct semaphore *sem)
return ret;
}
-extern inline int waking_non_zero_interruptible(struct semaphore *sem,
+static inline int waking_non_zero_interruptible(struct semaphore *sem,
struct task_struct *tsk)
{
int ret = 0;
@@ -59,7 +59,7 @@ extern inline int waking_non_zero_interruptible(struct semaphore *sem,
return ret;
}
-extern inline int waking_non_zero_trylock(struct semaphore *sem)
+static inline int waking_non_zero_trylock(struct semaphore *sem)
{
int ret = 1;
unsigned long flags;
diff --git a/include/asm-frv/dma-mapping.h b/include/asm-frv/dma-mapping.h
index e9fc1d47797..bcb2df68496 100644
--- a/include/asm-frv/dma-mapping.h
+++ b/include/asm-frv/dma-mapping.h
@@ -172,10 +172,10 @@ int dma_get_cache_alignment(void)
return 1 << L1_CACHE_SHIFT;
}
-#define dma_is_consistent(d) (1)
+#define dma_is_consistent(d, h) (1)
static inline
-void dma_cache_sync(void *vaddr, size_t size,
+void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction direction)
{
flush_write_buffers();
diff --git a/include/asm-frv/highmem.h b/include/asm-frv/highmem.h
index 0f390f41f81..ff4d6cdeb15 100644
--- a/include/asm-frv/highmem.h
+++ b/include/asm-frv/highmem.h
@@ -115,7 +115,7 @@ static inline void *kmap_atomic(struct page *page, enum km_type type)
{
unsigned long paddr;
- inc_preempt_count();
+ pagefault_disable();
paddr = page_to_phys(page);
switch (type) {
@@ -170,8 +170,7 @@ static inline void kunmap_atomic(void *kvaddr, enum km_type type)
default:
BUG();
}
- dec_preempt_count();
- preempt_check_resched();
+ pagefault_enable();
}
#endif /* !__ASSEMBLY__ */
diff --git a/include/asm-frv/param.h b/include/asm-frv/param.h
index 168381ebb41..365653b1726 100644
--- a/include/asm-frv/param.h
+++ b/include/asm-frv/param.h
@@ -18,6 +18,5 @@
#endif
#define MAXHOSTNAMELEN 64 /* max length of hostname */
-#define COMMAND_LINE_SIZE 512
#endif /* _ASM_PARAM_H */
diff --git a/include/asm-frv/setup.h b/include/asm-frv/setup.h
index 0d293b9a585..afd787ceede 100644
--- a/include/asm-frv/setup.h
+++ b/include/asm-frv/setup.h
@@ -12,6 +12,10 @@
#ifndef _ASM_SETUP_H
#define _ASM_SETUP_H
+#define COMMAND_LINE_SIZE 512
+
+#ifdef __KERNEL__
+
#include <linux/init.h>
#ifndef __ASSEMBLY__
@@ -22,4 +26,6 @@ extern unsigned long __initdata num_mappedpages;
#endif /* !__ASSEMBLY__ */
+#endif /* __KERNEL__ */
+
#endif /* _ASM_SETUP_H */
diff --git a/include/asm-frv/unistd.h b/include/asm-frv/unistd.h
index 725e854928c..584c0417ae4 100644
--- a/include/asm-frv/unistd.h
+++ b/include/asm-frv/unistd.h
@@ -320,125 +320,6 @@
#ifdef __KERNEL__
#define NR_syscalls 310
-#include <linux/err.h>
-
-/*
- * process the return value of a syscall, consigning it to one of two possible fates
- * - user-visible error numbers are in the range -1 - -4095: see <asm-frv/errno.h>
- */
-#undef __syscall_return
-#define __syscall_return(type, res) \
-do { \
- unsigned long __sr2 = (res); \
- if (__builtin_expect(__sr2 >= (unsigned long)(-MAX_ERRNO), 0)) { \
- errno = (-__sr2); \
- __sr2 = ~0UL; \
- } \
- return (type) __sr2; \
-} while (0)
-
-/* XXX - _foo needs to be __foo, while __NR_bar could be _NR_bar. */
-
-#undef _syscall0
-#define _syscall0(type,name) \
-type name(void) \
-{ \
- register unsigned long __scnum __asm__ ("gr7") = (__NR_##name); \
- register unsigned long __sc0 __asm__ ("gr8"); \
- __asm__ __volatile__ ("tira gr0,#0" \
- : "=r" (__sc0) \
- : "r" (__scnum)); \
- __syscall_return(type, __sc0); \
-}
-
-#undef _syscall1
-#define _syscall1(type,name,type1,arg1) \
-type name(type1 arg1) \
-{ \
- register unsigned long __scnum __asm__ ("gr7") = (__NR_##name); \
- register unsigned long __sc0 __asm__ ("gr8") = (unsigned long) arg1; \
- __asm__ __volatile__ ("tira gr0,#0" \
- : "+r" (__sc0) \
- : "r" (__scnum)); \
- __syscall_return(type, __sc0); \
-}
-
-#undef _syscall2
-#define _syscall2(type,name,type1,arg1,type2,arg2) \
-type name(type1 arg1,type2 arg2) \
-{ \
- register unsigned long __scnum __asm__ ("gr7") = (__NR_##name); \
- register unsigned long __sc0 __asm__ ("gr8") = (unsigned long) arg1; \
- register unsigned long __sc1 __asm__ ("gr9") = (unsigned long) arg2; \
- __asm__ __volatile__ ("tira gr0,#0" \
- : "+r" (__sc0) \
- : "r" (__scnum), "r" (__sc1)); \
- __syscall_return(type, __sc0); \
-}
-
-#undef _syscall3
-#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
-type name(type1 arg1,type2 arg2,type3 arg3) \
-{ \
- register unsigned long __scnum __asm__ ("gr7") = (__NR_##name); \
- register unsigned long __sc0 __asm__ ("gr8") = (unsigned long) arg1; \
- register unsigned long __sc1 __asm__ ("gr9") = (unsigned long) arg2; \
- register unsigned long __sc2 __asm__ ("gr10") = (unsigned long) arg3; \
- __asm__ __volatile__ ("tira gr0,#0" \
- : "+r" (__sc0) \
- : "r" (__scnum), "r" (__sc1), "r" (__sc2)); \
- __syscall_return(type, __sc0); \
-}
-
-#undef _syscall4
-#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
-type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
-{ \
- register unsigned long __scnum __asm__ ("gr7") = (__NR_##name); \
- register unsigned long __sc0 __asm__ ("gr8") = (unsigned long) arg1; \
- register unsigned long __sc1 __asm__ ("gr9") = (unsigned long) arg2; \
- register unsigned long __sc2 __asm__ ("gr10") = (unsigned long) arg3; \
- register unsigned long __sc3 __asm__ ("gr11") = (unsigned long) arg4; \
- __asm__ __volatile__ ("tira gr0,#0" \
- : "+r" (__sc0) \
- : "r" (__scnum), "r" (__sc1), "r" (__sc2), "r" (__sc3)); \
- __syscall_return(type, __sc0); \
-}
-
-#undef _syscall5
-#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5) \
-type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) \
-{ \
- register unsigned long __scnum __asm__ ("gr7") = (__NR_##name); \
- register unsigned long __sc0 __asm__ ("gr8") = (unsigned long) arg1; \
- register unsigned long __sc1 __asm__ ("gr9") = (unsigned long) arg2; \
- register unsigned long __sc2 __asm__ ("gr10") = (unsigned long) arg3; \
- register unsigned long __sc3 __asm__ ("gr11") = (unsigned long) arg4; \
- register unsigned long __sc4 __asm__ ("gr12") = (unsigned long) arg5; \
- __asm__ __volatile__ ("tira gr0,#0" \
- : "+r" (__sc0) \
- : "r" (__scnum), "r" (__sc1), "r" (__sc2), \
- "r" (__sc3), "r" (__sc4)); \
- __syscall_return(type, __sc0); \
-}
-
-#undef _syscall6
-#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5, type6, arg6) \
-type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5, type6 arg6) \
-{ \
- register unsigned long __scnum __asm__ ("gr7") = (__NR_##name); \
- register unsigned long __sc0 __asm__ ("gr8") = (unsigned long) arg1; \
- register unsigned long __sc1 __asm__ ("gr9") = (unsigned long) arg2; \
- register unsigned long __sc2 __asm__ ("gr10") = (unsigned long) arg3; \
- register unsigned long __sc3 __asm__ ("gr11") = (unsigned long) arg4; \
- register unsigned long __sc4 __asm__ ("gr12") = (unsigned long) arg5; \
- register unsigned long __sc5 __asm__ ("gr13") = (unsigned long) arg6; \
- __asm__ __volatile__ ("tira gr0,#0" \
- : "+r" (__sc0) \
- : "r" (__scnum), "r" (__sc1), "r" (__sc2), \
- "r" (__sc3), "r" (__sc4), "r" (__sc5)); \
- __syscall_return(type, __sc0); \
-}
#define __ARCH_WANT_IPC_PARSE_VERSION
/* #define __ARCH_WANT_OLD_READDIR */
diff --git a/include/asm-generic/Kbuild b/include/asm-generic/Kbuild
index 3c06be38170..fa14f8cd30c 100644
--- a/include/asm-generic/Kbuild
+++ b/include/asm-generic/Kbuild
@@ -1,4 +1,3 @@
-header-y += atomic.h
header-y += errno-base.h
header-y += errno.h
header-y += fcntl.h
diff --git a/include/asm-generic/Kbuild.asm b/include/asm-generic/Kbuild.asm
index a84c3d88a18..a37e95fe58d 100644
--- a/include/asm-generic/Kbuild.asm
+++ b/include/asm-generic/Kbuild.asm
@@ -14,6 +14,7 @@ unifdef-y += posix_types.h
unifdef-y += ptrace.h
unifdef-y += resource.h
unifdef-y += sembuf.h
+unifdef-y += setup.h
unifdef-y += shmbuf.h
unifdef-y += sigcontext.h
unifdef-y += siginfo.h
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
index 42a95d9a064..b7e4a0467cb 100644
--- a/include/asm-generic/atomic.h
+++ b/include/asm-generic/atomic.h
@@ -66,7 +66,7 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
atomic64_sub(i, v);
}
-#else
+#else /* BITS_PER_LONG == 64 */
typedef atomic_t atomic_long_t;
@@ -113,5 +113,6 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
atomic_sub(i, v);
}
-#endif
-#endif
+#endif /* BITS_PER_LONG == 64 */
+
+#endif /* _ASM_GENERIC_ATOMIC_H */
diff --git a/include/asm-generic/dma-mapping.h b/include/asm-generic/dma-mapping.h
index b541e48cc54..783ab9944d7 100644
--- a/include/asm-generic/dma-mapping.h
+++ b/include/asm-generic/dma-mapping.h
@@ -266,7 +266,7 @@ dma_error(dma_addr_t dma_addr)
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-#define dma_is_consistent(d) (1)
+#define dma_is_consistent(d, h) (1)
static inline int
dma_get_cache_alignment(void)
@@ -295,7 +295,7 @@ dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
}
static inline void
-dma_cache_sync(void *vaddr, size_t size,
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction direction)
{
/* could define this in terms of the dma_cache ... operations,
diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h
index df893c16031..f422df0956a 100644
--- a/include/asm-generic/futex.h
+++ b/include/asm-generic/futex.h
@@ -21,7 +21,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
return -EFAULT;
- inc_preempt_count();
+ pagefault_disable();
switch (op) {
case FUTEX_OP_SET:
@@ -33,7 +33,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
ret = -ENOSYS;
}
- dec_preempt_count();
+ pagefault_enable();
if (!ret) {
switch (cmp) {
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index e60d6f21fa6..4d4c62d1105 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -11,8 +11,8 @@
#define RODATA \
. = ALIGN(4096); \
- __start_rodata = .; \
.rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
+ VMLINUX_SYMBOL(__start_rodata) = .; \
*(.rodata) *(.rodata.*) \
*(__vermagic) /* Kernel version magic */ \
} \
@@ -119,17 +119,16 @@
*(__ksymtab_strings) \
} \
\
+ EH_FRAME \
+ \
/* Built-in module parameters. */ \
__param : AT(ADDR(__param) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___param) = .; \
*(__param) \
VMLINUX_SYMBOL(__stop___param) = .; \
+ VMLINUX_SYMBOL(__end_rodata) = .; \
} \
\
- /* Unwind data binary search table */ \
- EH_FRAME_HDR \
- \
- __end_rodata = .; \
. = ALIGN(4096);
#define SECURITY_INIT \
@@ -162,15 +161,23 @@
VMLINUX_SYMBOL(__kprobes_text_end) = .;
#ifdef CONFIG_STACK_UNWIND
- /* Unwind data binary search table */
-#define EH_FRAME_HDR \
+#define EH_FRAME \
+ /* Unwind data binary search table */ \
+ . = ALIGN(8); \
.eh_frame_hdr : AT(ADDR(.eh_frame_hdr) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start_unwind_hdr) = .; \
*(.eh_frame_hdr) \
VMLINUX_SYMBOL(__end_unwind_hdr) = .; \
+ } \
+ /* Unwind data */ \
+ . = ALIGN(8); \
+ .eh_frame : AT(ADDR(.eh_frame) - LOAD_OFFSET) { \
+ VMLINUX_SYMBOL(__start_unwind) = .; \
+ *(.eh_frame) \
+ VMLINUX_SYMBOL(__end_unwind) = .; \
}
#else
-#define EH_FRAME_HDR
+#define EH_FRAME
#endif
/* DWARF debug sections.
diff --git a/include/asm-h8300/delay.h b/include/asm-h8300/delay.h
index cbccbbdd640..743beba70f8 100644
--- a/include/asm-h8300/delay.h
+++ b/include/asm-h8300/delay.h
@@ -9,7 +9,7 @@
* Delay routines, using a pre-computed "loops_per_second" value.
*/
-extern __inline__ void __delay(unsigned long loops)
+static inline void __delay(unsigned long loops)
{
__asm__ __volatile__ ("1:\n\t"
"dec.l #1,%0\n\t"
@@ -27,7 +27,7 @@ extern __inline__ void __delay(unsigned long loops)
extern unsigned long loops_per_jiffy;
-extern __inline__ void udelay(unsigned long usecs)
+static inline void udelay(unsigned long usecs)
{
usecs *= 4295; /* 2**32 / 1000000 */
usecs /= (loops_per_jiffy*HZ);
diff --git a/include/asm-h8300/mmu_context.h b/include/asm-h8300/mmu_context.h
index 855721a5dcc..5c165f7bee0 100644
--- a/include/asm-h8300/mmu_context.h
+++ b/include/asm-h8300/mmu_context.h
@@ -9,7 +9,7 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}
-extern inline int
+static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
// mm->context = virt_to_phys(mm->pgd);
@@ -23,7 +23,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, str
{
}
-extern inline void activate_mm(struct mm_struct *prev_mm,
+static inline void activate_mm(struct mm_struct *prev_mm,
struct mm_struct *next_mm)
{
}
diff --git a/include/asm-h8300/pci.h b/include/asm-h8300/pci.h
index 5edad5b70fd..0c771b05fdd 100644
--- a/include/asm-h8300/pci.h
+++ b/include/asm-h8300/pci.h
@@ -10,12 +10,12 @@
#define pcibios_assign_all_busses() 0
#define pcibios_scan_all_fns(a, b) 0
-extern inline void pcibios_set_master(struct pci_dev *dev)
+static inline void pcibios_set_master(struct pci_dev *dev)
{
/* No special bus mastering setup handling */
}
-extern inline void pcibios_penalize_isa_irq(int irq, int active)
+static inline void pcibios_penalize_isa_irq(int irq, int active)
{
/* We don't do dynamic PCI IRQ allocation */
}
diff --git a/include/asm-h8300/tlbflush.h b/include/asm-h8300/tlbflush.h
index bbdffbeeede..9a2c5c9fd70 100644
--- a/include/asm-h8300/tlbflush.h
+++ b/include/asm-h8300/tlbflush.h
@@ -47,12 +47,12 @@ static inline void flush_tlb_range(struct mm_struct *mm,
BUG();
}
-extern inline void flush_tlb_kernel_page(unsigned long addr)
+static inline void flush_tlb_kernel_page(unsigned long addr)
{
BUG();
}
-extern inline void flush_tlb_pgtables(struct mm_struct *mm,
+static inline void flush_tlb_pgtables(struct mm_struct *mm,
unsigned long start, unsigned long end)
{
BUG();
diff --git a/include/asm-h8300/unistd.h b/include/asm-h8300/unistd.h
index 747788d629a..7ddd414f8d1 100644
--- a/include/asm-h8300/unistd.h
+++ b/include/asm-h8300/unistd.h
@@ -295,172 +295,6 @@
#ifdef __KERNEL__
#define NR_syscalls 289
-#include <linux/err.h>
-
-/* user-visible error numbers are in the range -1 - -MAX_ERRNO: see
- <asm-m68k/errno.h> */
-
-#define __syscall_return(type, res) \
-do { \
- if ((unsigned long)(res) >= (unsigned long)(-MAX_ERRNO)) { \
- /* avoid using res which is declared to be in register d0; \
- errno might expand to a function call and clobber it. */ \
- int __err = -(res); \
- errno = __err; \
- res = -1; \
- } \
- return (type) (res); \
-} while (0)
-
-#define _syscall0(type, name) \
-type name(void) \
-{ \
- register long __res __asm__("er0"); \
- __asm__ __volatile__ ("mov.l %1,er0\n\t" \
- "trapa #0\n\t" \
- : "=r" (__res) \
- : "g" (__NR_##name) \
- : "cc", "memory"); \
- __syscall_return(type, __res); \
-}
-
-#define _syscall1(type, name, atype, a) \
-type name(atype a) \
-{ \
- register long __res __asm__("er0"); \
- register long _a __asm__("er1"); \
- _a = (long)a; \
- __asm__ __volatile__ ("mov.l %1,er0\n\t" \
- "trapa #0\n\t" \
- : "=r" (__res) \
- : "g" (__NR_##name), \
- "g" (_a) \
- : "cc", "memory"); \
- __syscall_return(type, __res); \
-}
-
-#define _syscall2(type, name, atype, a, btype, b) \
-type name(atype a, btype b) \
-{ \
- register long __res __asm__("er0"); \
- register long _a __asm__("er1"); \
- register long _b __asm__("er2"); \
- _a = (long)a; \
- _b = (long)b; \
- __asm__ __volatile__ ("mov.l %1,er0\n\t" \
- "trapa #0\n\t" \
- : "=r" (__res) \
- : "g" (__NR_##name), \
- "g" (_a), \
- "g" (_b) \
- : "cc", "memory"); \
- __syscall_return(type, __res); \
-}
-
-#define _syscall3(type, name, atype, a, btype, b, ctype, c) \
-type name(atype a, btype b, ctype c) \
-{ \
- register long __res __asm__("er0"); \
- register long _a __asm__("er1"); \
- register long _b __asm__("er2"); \
- register long _c __asm__("er3"); \
- _a = (long)a; \
- _b = (long)b; \
- _c = (long)c; \
- __asm__ __volatile__ ("mov.l %1,er0\n\t" \
- "trapa #0\n\t" \
- : "=r" (__res) \
- : "g" (__NR_##name), \
- "g" (_a), \
- "g" (_b), \
- "g" (_c) \
- : "cc", "memory"); \
- __syscall_return(type, __res); \
-}
-
-#define _syscall4(type, name, atype, a, btype, b, \
- ctype, c, dtype, d) \
-type name(atype a, btype b, ctype c, dtype d) \
-{ \
- register long __res __asm__("er0"); \
- register long _a __asm__("er1"); \
- register long _b __asm__("er2"); \
- register long _c __asm__("er3"); \
- register long _d __asm__("er4"); \
- _a = (long)a; \
- _b = (long)b; \
- _c = (long)c; \
- _d = (long)d; \
- __asm__ __volatile__ ("mov.l %1,er0\n\t" \
- "trapa #0\n\t" \
- : "=r" (__res) \
- : "g" (__NR_##name), \
- "g" (_a), \
- "g" (_b), \
- "g" (_c), \
- "g" (_d) \
- : "cc", "memory"); \
- __syscall_return(type, __res); \
-}
-
-#define _syscall5(type, name, atype, a, btype, b, \
- ctype, c, dtype, d, etype, e) \
-type name(atype a, btype b, ctype c, dtype d, etype e) \
-{ \
- register long __res __asm__("er0"); \
- register long _a __asm__("er1"); \
- register long _b __asm__("er2"); \
- register long _c __asm__("er3"); \
- register long _d __asm__("er4"); \
- register long _e __asm__("er5"); \
- _a = (long)a; \
- _b = (long)b; \
- _c = (long)c; \
- _d = (long)d; \
- _e = (long)e; \
- __asm__ __volatile__ ("mov.l %1,er0\n\t" \
- "trapa #0\n\t" \
- : "=r" (__res) \
- : "g" (__NR_##name), \
- "g" (_a), \
- "g" (_b), \
- "g" (_c), \
- "g" (_d), \
- "g" (_e) \
- : "cc", "memory"); \
- __syscall_return(type, __res); \
-}
-
-#define _syscall6(type, name, atype, a, btype, b, \
- ctype, c, dtype, d, etype, e, ftype, f) \
-type name(atype a, btype b, ctype c, dtype d, etype e, ftype f) \
-{ \
- register long __res __asm__("er0"); \
- register long _a __asm__("er1"); \
- register long _b __asm__("er2"); \
- register long _c __asm__("er3"); \
- register long _d __asm__("er4"); \
- register long _e __asm__("er5"); \
- register long _f __asm__("er6"); \
- _a = (long)a; \
- _b = (long)b; \
- _c = (long)c; \
- _d = (long)d; \
- _e = (long)e; \
- _f = (long)f; \
- __asm__ __volatile__ ("mov.l %1,er0\n\t" \
- "trapa #0\n\t" \
- : "=r" (__res) \
- : "g" (__NR_##name), \
- "g" (_a), \
- "g" (_b), \
- "g" (_c), \
- "g" (_d), \
- "g" (_e) \
- "g" (_f) \
- : "cc", "memory"); \
- __syscall_return(type, __res); \
-}
#define __ARCH_WANT_IPC_PARSE_VERSION
#define __ARCH_WANT_OLD_READDIR
diff --git a/include/asm-i386/Kbuild b/include/asm-i386/Kbuild
index 147e4ac1ebf..5ae93afc67e 100644
--- a/include/asm-i386/Kbuild
+++ b/include/asm-i386/Kbuild
@@ -7,5 +7,4 @@ header-y += ptrace-abi.h
header-y += ucontext.h
unifdef-y += mtrr.h
-unifdef-y += setup.h
unifdef-y += vm86.h
diff --git a/include/asm-i386/alternative.h b/include/asm-i386/alternative.h
index b01a7ec409c..b8fa9557c53 100644
--- a/include/asm-i386/alternative.h
+++ b/include/asm-i386/alternative.h
@@ -4,7 +4,7 @@
#ifdef __KERNEL__
#include <asm/types.h>
-
+#include <linux/stddef.h>
#include <linux/types.h>
struct alt_instr {
@@ -118,4 +118,15 @@ static inline void alternatives_smp_switch(int smp) {}
#define LOCK_PREFIX ""
#endif
+struct paravirt_patch;
+#ifdef CONFIG_PARAVIRT
+void apply_paravirt(struct paravirt_patch *start, struct paravirt_patch *end);
+#else
+static inline void
+apply_paravirt(struct paravirt_patch *start, struct paravirt_patch *end)
+{}
+#define __start_parainstructions NULL
+#define __stop_parainstructions NULL
+#endif
+
#endif /* _I386_ALTERNATIVE_H */
diff --git a/include/asm-i386/apic.h b/include/asm-i386/apic.h
index b9529578fc3..41a44319905 100644
--- a/include/asm-i386/apic.h
+++ b/include/asm-i386/apic.h
@@ -37,18 +37,27 @@ extern void generic_apic_probe(void);
/*
* Basic functions accessing APICs.
*/
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else
+#define apic_write native_apic_write
+#define apic_write_atomic native_apic_write_atomic
+#define apic_read native_apic_read
+#endif
-static __inline void apic_write(unsigned long reg, unsigned long v)
+static __inline fastcall void native_apic_write(unsigned long reg,
+ unsigned long v)
{
*((volatile unsigned long *)(APIC_BASE+reg)) = v;
}
-static __inline void apic_write_atomic(unsigned long reg, unsigned long v)
+static __inline fastcall void native_apic_write_atomic(unsigned long reg,
+ unsigned long v)
{
xchg((volatile unsigned long *)(APIC_BASE+reg), v);
}
-static __inline unsigned long apic_read(unsigned long reg)
+static __inline fastcall unsigned long native_apic_read(unsigned long reg)
{
return *((volatile unsigned long *)(APIC_BASE+reg));
}
diff --git a/include/asm-i386/atomic.h b/include/asm-i386/atomic.h
index a6c024e2506..c57441bb290 100644
--- a/include/asm-i386/atomic.h
+++ b/include/asm-i386/atomic.h
@@ -187,9 +187,9 @@ static __inline__ int atomic_add_return(int i, atomic_t *v)
/* Modern 486+ processor */
__i = i;
__asm__ __volatile__(
- LOCK_PREFIX "xaddl %0, %1;"
- :"=r"(i)
- :"m"(v->counter), "0"(i));
+ LOCK_PREFIX "xaddl %0, %1"
+ :"+r" (i), "+m" (v->counter)
+ : : "memory");
return i + __i;
#ifdef CONFIG_M386
diff --git a/include/asm-i386/boot.h b/include/asm-i386/boot.h
index 96b228e6e79..8ce79a6fa89 100644
--- a/include/asm-i386/boot.h
+++ b/include/asm-i386/boot.h
@@ -12,4 +12,8 @@
#define EXTENDED_VGA 0xfffe /* 80x50 mode */
#define ASK_VGA 0xfffd /* ask for it at bootup */
-#endif
+/* Physical address where kernel should be loaded. */
+#define LOAD_PHYSICAL_ADDR ((0x100000 + CONFIG_PHYSICAL_ALIGN - 1) \
+ & ~(CONFIG_PHYSICAL_ALIGN - 1))
+
+#endif /* _LINUX_BOOT_H */
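The LOAD_PHYSICAL_ADDR expression above is the usual power-of-two round-up: it takes the traditional 1 MB (0x100000) load address and rounds it up to the next multiple of CONFIG_PHYSICAL_ALIGN. A minimal standalone sketch of the same arithmetic, assuming a hypothetical alignment of 4 MB (the real value is a Kconfig option):

    #include <stdio.h>

    /* Round base up to the next multiple of align (align must be a power of two). */
    static unsigned long round_up_to(unsigned long base, unsigned long align)
    {
            return (base + align - 1) & ~(align - 1);
    }

    int main(void)
    {
            /* With a hypothetical CONFIG_PHYSICAL_ALIGN of 0x400000 the 1 MB
               base rounds up to 4 MB; with 0x100000 it stays at 1 MB. */
            printf("%#lx\n", round_up_to(0x100000UL, 0x400000UL));  /* 0x400000 */
            printf("%#lx\n", round_up_to(0x100000UL, 0x100000UL));  /* 0x100000 */
            return 0;
    }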
diff --git a/include/asm-i386/bugs.h b/include/asm-i386/bugs.h
index 592ffeeda45..38f1aebbbdb 100644
--- a/include/asm-i386/bugs.h
+++ b/include/asm-i386/bugs.h
@@ -21,6 +21,7 @@
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/msr.h>
+#include <asm/paravirt.h>
static int __init no_halt(char *s)
{
@@ -91,6 +92,9 @@ static void __init check_fpu(void)
static void __init check_hlt(void)
{
+ if (paravirt_enabled())
+ return;
+
printk(KERN_INFO "Checking 'hlt' instruction... ");
if (!boot_cpu_data.hlt_works_ok) {
printk("disabled\n");
diff --git a/include/asm-i386/cpu.h b/include/asm-i386/cpu.h
index b1bc7b1b64b..9d914e1e4aa 100644
--- a/include/asm-i386/cpu.h
+++ b/include/asm-i386/cpu.h
@@ -13,6 +13,9 @@ struct i386_cpu {
extern int arch_register_cpu(int num);
#ifdef CONFIG_HOTPLUG_CPU
extern void arch_unregister_cpu(int);
+extern int enable_cpu_hotplug;
+#else
+#define enable_cpu_hotplug 0
#endif
DECLARE_PER_CPU(int, cpu_state);
diff --git a/include/asm-i386/cpufeature.h b/include/asm-i386/cpufeature.h
index d314ebb3d59..3f92b94e0d7 100644
--- a/include/asm-i386/cpufeature.h
+++ b/include/asm-i386/cpufeature.h
@@ -31,7 +31,7 @@
#define X86_FEATURE_PSE36 (0*32+17) /* 36-bit PSEs */
#define X86_FEATURE_PN (0*32+18) /* Processor serial number */
#define X86_FEATURE_CLFLSH (0*32+19) /* Supports the CLFLUSH instruction */
-#define X86_FEATURE_DTES (0*32+21) /* Debug Trace Store */
+#define X86_FEATURE_DS (0*32+21) /* Debug Store */
#define X86_FEATURE_ACPI (0*32+22) /* ACPI via MSR */
#define X86_FEATURE_MMX (0*32+23) /* Multimedia Extensions */
#define X86_FEATURE_FXSR (0*32+24) /* FXSAVE and FXRSTOR instructions (fast save and restore */
@@ -73,6 +73,8 @@
#define X86_FEATURE_UP (3*32+ 9) /* smp kernel running on up */
#define X86_FEATURE_FXSAVE_LEAK (3*32+10) /* FXSAVE leaks FOP/FIP/FOP */
#define X86_FEATURE_ARCH_PERFMON (3*32+11) /* Intel Architectural PerfMon */
+#define X86_FEATURE_PEBS (3*32+12) /* Precise-Event Based Sampling */
+#define X86_FEATURE_BTS (3*32+13) /* Branch Trace Store */
/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
#define X86_FEATURE_XMM3 (4*32+ 0) /* Streaming SIMD Extensions-3 */
@@ -134,6 +136,10 @@
#define cpu_has_phe_enabled boot_cpu_has(X86_FEATURE_PHE_EN)
#define cpu_has_pmm boot_cpu_has(X86_FEATURE_PMM)
#define cpu_has_pmm_enabled boot_cpu_has(X86_FEATURE_PMM_EN)
+#define cpu_has_ds boot_cpu_has(X86_FEATURE_DS)
+#define cpu_has_pebs boot_cpu_has(X86_FEATURE_PEBS)
+#define cpu_has_clflush boot_cpu_has(X86_FEATURE_CLFLSH)
+#define cpu_has_bts boot_cpu_has(X86_FEATURE_BTS)
#endif /* __ASM_I386_CPUFEATURE_H */
diff --git a/include/asm-i386/current.h b/include/asm-i386/current.h
index 3cbbecd7901..5252ee0f6d7 100644
--- a/include/asm-i386/current.h
+++ b/include/asm-i386/current.h
@@ -1,13 +1,14 @@
#ifndef _I386_CURRENT_H
#define _I386_CURRENT_H
-#include <linux/thread_info.h>
+#include <asm/pda.h>
+#include <linux/compiler.h>
struct task_struct;
-static __always_inline struct task_struct * get_current(void)
+static __always_inline struct task_struct *get_current(void)
{
- return current_thread_info()->task;
+ return read_pda(pcurrent);
}
#define current get_current()
diff --git a/include/asm-i386/delay.h b/include/asm-i386/delay.h
index b1c7650dc7b..32d6678d0bb 100644
--- a/include/asm-i386/delay.h
+++ b/include/asm-i386/delay.h
@@ -7,6 +7,7 @@
* Delay routines calling functions in arch/i386/lib/delay.c
*/
+/* Undefined functions to get compile-time errors */
extern void __bad_udelay(void);
extern void __bad_ndelay(void);
@@ -15,13 +16,23 @@ extern void __ndelay(unsigned long nsecs);
extern void __const_udelay(unsigned long usecs);
extern void __delay(unsigned long loops);
+#if defined(CONFIG_PARAVIRT) && !defined(USE_REAL_TIME_DELAY)
+#define udelay(n) paravirt_ops.const_udelay((n) * 0x10c7ul)
+
+#define ndelay(n) paravirt_ops.const_udelay((n) * 5ul)
+
+#else /* !PARAVIRT || USE_REAL_TIME_DELAY */
+
+/* 0x10c7 is 2**32 / 1000000 (rounded up) */
#define udelay(n) (__builtin_constant_p(n) ? \
((n) > 20000 ? __bad_udelay() : __const_udelay((n) * 0x10c7ul)) : \
__udelay(n))
-
+
+/* 0x5 is 2**32 / 1000000000 (rounded up) */
#define ndelay(n) (__builtin_constant_p(n) ? \
((n) > 20000 ? __bad_ndelay() : __const_udelay((n) * 5ul)) : \
__ndelay(n))
+#endif
void use_tsc_delay(void);
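The two magic constants in the non-paravirt branch encode the microsecond and nanosecond conversion factors as 2^32-scaled fractions of a second, so that a later 32x32->64 multiply-high can turn them into delay loops. A small standalone check of the arithmetic claimed in the comments (rounded up so delays never come out short):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t two32 = 1ULL << 32;

            /* ceil(2^32 / 10^6) == 0x10c7, the udelay scale factor */
            printf("%#llx\n", (unsigned long long)((two32 + 999999) / 1000000));
            /* ceil(2^32 / 10^9) == 5, the ndelay scale factor */
            printf("%llu\n", (unsigned long long)((two32 + 999999999) / 1000000000));
            return 0;
    }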
diff --git a/include/asm-i386/desc.h b/include/asm-i386/desc.h
index 5874ef119ff..f398cc45644 100644
--- a/include/asm-i386/desc.h
+++ b/include/asm-i386/desc.h
@@ -4,8 +4,6 @@
#include <asm/ldt.h>
#include <asm/segment.h>
-#define CPU_16BIT_STACK_SIZE 1024
-
#ifndef __ASSEMBLY__
#include <linux/preempt.h>
@@ -16,8 +14,6 @@
extern struct desc_struct cpu_gdt_table[GDT_ENTRIES];
-DECLARE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]);
-
struct Xgt_desc_struct {
unsigned short size;
unsigned long address __attribute__((packed));
@@ -33,11 +29,6 @@ static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
return (struct desc_struct *)per_cpu(cpu_gdt_descr, cpu).address;
}
-/*
- * This is the ldt that every process will get unless we need
- * something other than this.
- */
-extern struct desc_struct default_ldt[];
extern struct desc_struct idt_table[];
extern void set_intr_gate(unsigned int irq, void * addr);
@@ -64,8 +55,10 @@ static inline void pack_gate(__u32 *a, __u32 *b,
#define DESCTYPE_DPL3 0x60 /* DPL-3 */
#define DESCTYPE_S 0x10 /* !system */
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else
#define load_TR_desc() __asm__ __volatile__("ltr %w0"::"q" (GDT_ENTRY_TSS*8))
-#define load_LDT_desc() __asm__ __volatile__("lldt %w0"::"q" (GDT_ENTRY_LDT*8))
#define load_gdt(dtr) __asm__ __volatile("lgdt %0"::"m" (*dtr))
#define load_idt(dtr) __asm__ __volatile("lidt %0"::"m" (*dtr))
@@ -88,6 +81,10 @@ static inline void load_TLS(struct thread_struct *t, unsigned int cpu)
#undef C
}
+#define write_ldt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b)
+#define write_gdt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b)
+#define write_idt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b)
+
static inline void write_dt_entry(void *dt, int entry, __u32 entry_a, __u32 entry_b)
{
__u32 *lp = (__u32 *)((char *)dt + entry*8);
@@ -95,9 +92,25 @@ static inline void write_dt_entry(void *dt, int entry, __u32 entry_a, __u32 entr
*(lp+1) = entry_b;
}
-#define write_ldt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b)
-#define write_gdt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b)
-#define write_idt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b)
+#define set_ldt native_set_ldt
+#endif /* CONFIG_PARAVIRT */
+
+static inline fastcall void native_set_ldt(const void *addr,
+ unsigned int entries)
+{
+ if (likely(entries == 0))
+ __asm__ __volatile__("lldt %w0"::"q" (0));
+ else {
+ unsigned cpu = smp_processor_id();
+ __u32 a, b;
+
+ pack_descriptor(&a, &b, (unsigned long)addr,
+ entries * sizeof(struct desc_struct) - 1,
+ DESCTYPE_LDT, 0);
+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_LDT, a, b);
+ __asm__ __volatile__("lldt %w0"::"q" (GDT_ENTRY_LDT*8));
+ }
+}
static inline void _set_gate(int gate, unsigned int type, void *addr, unsigned short seg)
{
@@ -115,14 +128,6 @@ static inline void __set_tss_desc(unsigned int cpu, unsigned int entry, const vo
write_gdt_entry(get_cpu_gdt_table(cpu), entry, a, b);
}
-static inline void set_ldt_desc(unsigned int cpu, void *addr, unsigned int entries)
-{
- __u32 a, b;
- pack_descriptor(&a, &b, (unsigned long)addr,
- entries * sizeof(struct desc_struct) - 1,
- DESCTYPE_LDT, 0);
- write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_LDT, a, b);
-}
#define set_tss_desc(cpu,addr) __set_tss_desc(cpu, GDT_ENTRY_TSS, addr)
@@ -153,35 +158,22 @@ static inline void set_ldt_desc(unsigned int cpu, void *addr, unsigned int entri
static inline void clear_LDT(void)
{
- int cpu = get_cpu();
-
- set_ldt_desc(cpu, &default_ldt[0], 5);
- load_LDT_desc();
- put_cpu();
+ set_ldt(NULL, 0);
}
/*
* load one particular LDT into the current CPU
*/
-static inline void load_LDT_nolock(mm_context_t *pc, int cpu)
+static inline void load_LDT_nolock(mm_context_t *pc)
{
- void *segments = pc->ldt;
- int count = pc->size;
-
- if (likely(!count)) {
- segments = &default_ldt[0];
- count = 5;
- }
-
- set_ldt_desc(cpu, segments, count);
- load_LDT_desc();
+ set_ldt(pc->ldt, pc->size);
}
static inline void load_LDT(mm_context_t *pc)
{
- int cpu = get_cpu();
- load_LDT_nolock(pc, cpu);
- put_cpu();
+ preempt_disable();
+ load_LDT_nolock(pc);
+ preempt_enable();
}
static inline unsigned long get_desc_base(unsigned long *desc)
@@ -193,6 +185,29 @@ static inline unsigned long get_desc_base(unsigned long *desc)
return base;
}
+#else /* __ASSEMBLY__ */
+
+/*
+ * GET_DESC_BASE reads the descriptor base of the specified segment.
+ *
+ * Args:
+ * idx - descriptor index
+ * gdt - GDT pointer
+ * base - 32bit register to which the base will be written
+ * lo_w - lo word of the "base" register
+ * lo_b - lo byte of the "base" register
+ * hi_b - hi byte of the low word of the "base" register
+ *
+ * Example:
+ * GET_DESC_BASE(GDT_ENTRY_ESPFIX_SS, %ebx, %eax, %ax, %al, %ah)
+ * Will read the base address of GDT_ENTRY_ESPFIX_SS and put it into %eax.
+ */
+#define GET_DESC_BASE(idx, gdt, base, lo_w, lo_b, hi_b) \
+ movb idx*8+4(gdt), lo_b; \
+ movb idx*8+7(gdt), hi_b; \
+ shll $16, base; \
+ movw idx*8+2(gdt), lo_w;
+
#endif /* !__ASSEMBLY__ */
#endif
diff --git a/include/asm-i386/dma-mapping.h b/include/asm-i386/dma-mapping.h
index 81999a3ebe7..183eebeebbd 100644
--- a/include/asm-i386/dma-mapping.h
+++ b/include/asm-i386/dma-mapping.h
@@ -156,10 +156,10 @@ dma_get_cache_alignment(void)
return (1 << INTERNODE_CACHE_SHIFT);
}
-#define dma_is_consistent(d) (1)
+#define dma_is_consistent(d, h) (1)
static inline void
-dma_cache_sync(void *vaddr, size_t size,
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction direction)
{
flush_write_buffers();
diff --git a/include/asm-i386/e820.h b/include/asm-i386/e820.h
index f7514fb6e8e..395077aba58 100644
--- a/include/asm-i386/e820.h
+++ b/include/asm-i386/e820.h
@@ -38,6 +38,11 @@ extern struct e820map e820;
extern int e820_all_mapped(unsigned long start, unsigned long end,
unsigned type);
+extern void find_max_pfn(void);
+extern void register_bootmem_low_pages(unsigned long max_low_pfn);
+extern void register_memory(void);
+extern void limit_regions(unsigned long long size);
+extern void print_memory_map(char *who);
#endif/*!__ASSEMBLY__*/
diff --git a/include/asm-i386/elf.h b/include/asm-i386/elf.h
index 3a05436f31c..45d21a0c95b 100644
--- a/include/asm-i386/elf.h
+++ b/include/asm-i386/elf.h
@@ -91,7 +91,7 @@ typedef struct user_fxsr_struct elf_fpxregset_t;
pr_reg[7] = regs->xds; \
pr_reg[8] = regs->xes; \
savesegment(fs,pr_reg[9]); \
- savesegment(gs,pr_reg[10]); \
+ pr_reg[10] = regs->xgs; \
pr_reg[11] = regs->orig_eax; \
pr_reg[12] = regs->eip; \
pr_reg[13] = regs->xcs; \
diff --git a/include/asm-i386/futex.h b/include/asm-i386/futex.h
index 946d97cfea2..438ef0ec710 100644
--- a/include/asm-i386/futex.h
+++ b/include/asm-i386/futex.h
@@ -56,7 +56,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
return -EFAULT;
- inc_preempt_count();
+ pagefault_disable();
if (op == FUTEX_OP_SET)
__futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
@@ -88,7 +88,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
}
}
- dec_preempt_count();
+ pagefault_enable();
if (!ret) {
switch (cmp) {
diff --git a/include/asm-i386/genapic.h b/include/asm-i386/genapic.h
index 8ffbb0f0745..fd2be593b06 100644
--- a/include/asm-i386/genapic.h
+++ b/include/asm-i386/genapic.h
@@ -122,6 +122,6 @@ struct genapic {
APICFUNC(phys_pkg_id) \
}
-extern struct genapic *genapic;
+extern struct genapic *genapic, apic_default;
#endif
diff --git a/include/asm-i386/i387.h b/include/asm-i386/i387.h
index bc1d6edae1e..434936c732d 100644
--- a/include/asm-i386/i387.h
+++ b/include/asm-i386/i387.h
@@ -76,7 +76,9 @@ static inline void __save_init_fpu( struct task_struct *tsk )
#define __unlazy_fpu( tsk ) do { \
if (task_thread_info(tsk)->status & TS_USEDFPU) \
- save_init_fpu( tsk ); \
+ save_init_fpu( tsk ); \
+ else \
+ tsk->fpu_counter = 0; \
} while (0)
#define __clear_fpu( tsk ) \
@@ -118,6 +120,7 @@ static inline void save_init_fpu( struct task_struct *tsk )
extern unsigned short get_fpu_cwd( struct task_struct *tsk );
extern unsigned short get_fpu_swd( struct task_struct *tsk );
extern unsigned short get_fpu_mxcsr( struct task_struct *tsk );
+extern asmlinkage void math_state_restore(void);
/*
* Signal frame handlers...
diff --git a/include/asm-i386/io.h b/include/asm-i386/io.h
index 68df0dc3ab8..86ff5e83be2 100644
--- a/include/asm-i386/io.h
+++ b/include/asm-i386/io.h
@@ -256,11 +256,11 @@ static inline void flush_write_buffers(void)
#endif /* __KERNEL__ */
-#ifdef SLOW_IO_BY_JUMPING
-#define __SLOW_DOWN_IO "jmp 1f; 1: jmp 1f; 1:"
+#if defined(CONFIG_PARAVIRT)
+#include <asm/paravirt.h>
#else
+
#define __SLOW_DOWN_IO "outb %%al,$0x80;"
-#endif
static inline void slow_down_io(void) {
__asm__ __volatile__(
@@ -271,6 +271,8 @@ static inline void slow_down_io(void) {
: : );
}
+#endif
+
#ifdef CONFIG_X86_NUMAQ
extern void *xquad_portio; /* Where the IO area was mapped */
#define XQUAD_PORT_ADDR(port, quad) (xquad_portio + (XQUAD_PORTIO_QUAD*quad) + port)
diff --git a/include/asm-i386/irq.h b/include/asm-i386/irq.h
index 331726b4112..11761cdaae1 100644
--- a/include/asm-i386/irq.h
+++ b/include/asm-i386/irq.h
@@ -37,8 +37,13 @@ static __inline__ int irq_canonicalize(int irq)
extern int irqbalance_disable(char *str);
#endif
+extern void quirk_intel_irqbalance(void);
+
#ifdef CONFIG_HOTPLUG_CPU
extern void fixup_irqs(cpumask_t map);
#endif
+void init_IRQ(void);
+void __init native_init_IRQ(void);
+
#endif /* _ASM_IRQ_H */
diff --git a/include/asm-i386/irq_regs.h b/include/asm-i386/irq_regs.h
index 3dd9c0b7027..a1b3f7f594a 100644
--- a/include/asm-i386/irq_regs.h
+++ b/include/asm-i386/irq_regs.h
@@ -1 +1,27 @@
-#include <asm-generic/irq_regs.h>
+/*
+ * Per-cpu current frame pointer - the location of the last exception frame on
+ * the stack, stored in the PDA.
+ *
+ * Jeremy Fitzhardinge <jeremy@goop.org>
+ */
+#ifndef _ASM_I386_IRQ_REGS_H
+#define _ASM_I386_IRQ_REGS_H
+
+#include <asm/pda.h>
+
+static inline struct pt_regs *get_irq_regs(void)
+{
+ return read_pda(irq_regs);
+}
+
+static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs)
+{
+ struct pt_regs *old_regs;
+
+ old_regs = read_pda(irq_regs);
+ write_pda(irq_regs, new_regs);
+
+ return old_regs;
+}
+
+#endif /* _ASM_I386_IRQ_REGS_H */
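These PDA-backed helpers follow the usual save/restore discipline so that nested interrupts still see the right frame. A hedged sketch of the expected caller pattern (the function name is illustrative, not part of this patch):

    /* Illustrative interrupt entry: publish the new frame, restore the old
       one on the way out so an interrupted outer handler still sees its own
       pt_regs via get_irq_regs(). */
    static void example_irq_entry(struct pt_regs *regs)
    {
            struct pt_regs *old_regs = set_irq_regs(regs);

            /* ... dispatch the interrupt; handlers may call get_irq_regs() ... */

            set_irq_regs(old_regs);
    }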
diff --git a/include/asm-i386/irqflags.h b/include/asm-i386/irqflags.h
index e1bdb97c07f..17b18cf4fe9 100644
--- a/include/asm-i386/irqflags.h
+++ b/include/asm-i386/irqflags.h
@@ -10,6 +10,9 @@
#ifndef _ASM_IRQFLAGS_H
#define _ASM_IRQFLAGS_H
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else
#ifndef __ASSEMBLY__
static inline unsigned long __raw_local_save_flags(void)
@@ -25,9 +28,6 @@ static inline unsigned long __raw_local_save_flags(void)
return flags;
}
-#define raw_local_save_flags(flags) \
- do { (flags) = __raw_local_save_flags(); } while (0)
-
static inline void raw_local_irq_restore(unsigned long flags)
{
__asm__ __volatile__(
@@ -66,18 +66,6 @@ static inline void halt(void)
__asm__ __volatile__("hlt": : :"memory");
}
-static inline int raw_irqs_disabled_flags(unsigned long flags)
-{
- return !(flags & (1 << 9));
-}
-
-static inline int raw_irqs_disabled(void)
-{
- unsigned long flags = __raw_local_save_flags();
-
- return raw_irqs_disabled_flags(flags);
-}
-
/*
* For spinlocks, etc:
*/
@@ -90,9 +78,33 @@ static inline unsigned long __raw_local_irq_save(void)
return flags;
}
+#else
+#define DISABLE_INTERRUPTS(clobbers) cli
+#define ENABLE_INTERRUPTS(clobbers) sti
+#define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
+#define INTERRUPT_RETURN iret
+#define GET_CR0_INTO_EAX movl %cr0, %eax
+#endif /* __ASSEMBLY__ */
+#endif /* CONFIG_PARAVIRT */
+
+#ifndef __ASSEMBLY__
+#define raw_local_save_flags(flags) \
+ do { (flags) = __raw_local_save_flags(); } while (0)
+
#define raw_local_irq_save(flags) \
do { (flags) = __raw_local_irq_save(); } while (0)
+static inline int raw_irqs_disabled_flags(unsigned long flags)
+{
+ return !(flags & (1 << 9));
+}
+
+static inline int raw_irqs_disabled(void)
+{
+ unsigned long flags = __raw_local_save_flags();
+
+ return raw_irqs_disabled_flags(flags);
+}
#endif /* __ASSEMBLY__ */
/*
diff --git a/include/asm-i386/mach-default/setup_arch.h b/include/asm-i386/mach-default/setup_arch.h
index fb42099e7bd..605e3ccb991 100644
--- a/include/asm-i386/mach-default/setup_arch.h
+++ b/include/asm-i386/mach-default/setup_arch.h
@@ -2,4 +2,6 @@
/* no action for generic */
+#ifndef ARCH_SETUP
#define ARCH_SETUP
+#endif
diff --git a/include/asm-i386/math_emu.h b/include/asm-i386/math_emu.h
index 697673b555c..a4b0aa3320e 100644
--- a/include/asm-i386/math_emu.h
+++ b/include/asm-i386/math_emu.h
@@ -21,6 +21,7 @@ struct info {
long ___eax;
long ___ds;
long ___es;
+ long ___fs;
long ___orig_eax;
long ___eip;
long ___cs;
diff --git a/include/asm-i386/mmu_context.h b/include/asm-i386/mmu_context.h
index 62b7bf18409..68ff102d6f5 100644
--- a/include/asm-i386/mmu_context.h
+++ b/include/asm-i386/mmu_context.h
@@ -44,7 +44,7 @@ static inline void switch_mm(struct mm_struct *prev,
* load the LDT, if the LDT is different:
*/
if (unlikely(prev->context.ldt != next->context.ldt))
- load_LDT_nolock(&next->context, cpu);
+ load_LDT_nolock(&next->context);
}
#ifdef CONFIG_SMP
else {
@@ -56,14 +56,14 @@ static inline void switch_mm(struct mm_struct *prev,
* tlb flush IPI delivery. We must reload %cr3.
*/
load_cr3(next->pgd);
- load_LDT_nolock(&next->context, cpu);
+ load_LDT_nolock(&next->context);
}
}
#endif
}
-#define deactivate_mm(tsk, mm) \
- asm("movl %0,%%fs ; movl %0,%%gs": :"r" (0))
+#define deactivate_mm(tsk, mm) \
+ asm("movl %0,%%fs": :"r" (0));
#define activate_mm(prev, next) \
switch_mm((prev),(next),NULL)
diff --git a/include/asm-i386/mmzone.h b/include/asm-i386/mmzone.h
index 61b07332200..3503ad66945 100644
--- a/include/asm-i386/mmzone.h
+++ b/include/asm-i386/mmzone.h
@@ -120,13 +120,26 @@ static inline int pfn_valid(int pfn)
__alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
#define alloc_bootmem_low_pages(x) \
__alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, 0)
-#define alloc_bootmem_node(ignore, x) \
- __alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
-#define alloc_bootmem_pages_node(ignore, x) \
- __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
-#define alloc_bootmem_low_pages_node(ignore, x) \
- __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, 0)
-
+#define alloc_bootmem_node(pgdat, x) \
+({ \
+ struct pglist_data __attribute__ ((unused)) \
+ *__alloc_bootmem_node__pgdat = (pgdat); \
+ __alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, \
+ __pa(MAX_DMA_ADDRESS)); \
+})
+#define alloc_bootmem_pages_node(pgdat, x) \
+({ \
+ struct pglist_data __attribute__ ((unused)) \
+ *__alloc_bootmem_node__pgdat = (pgdat); \
+ __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, \
+ __pa(MAX_DMA_ADDRESS)); \
+})
+#define alloc_bootmem_low_pages_node(pgdat, x) \
+({ \
+ struct pglist_data __attribute__ ((unused)) \
+ *__alloc_bootmem_node__pgdat = (pgdat); \
+ __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, 0); \
+})
#endif /* CONFIG_NEED_MULTIPLE_NODES */
#endif /* _ASM_MMZONE_H_ */
diff --git a/include/asm-i386/module.h b/include/asm-i386/module.h
index 424661d25bd..02f8f541cbe 100644
--- a/include/asm-i386/module.h
+++ b/include/asm-i386/module.h
@@ -20,6 +20,8 @@ struct mod_arch_specific
#define MODULE_PROC_FAMILY "586TSC "
#elif defined CONFIG_M586MMX
#define MODULE_PROC_FAMILY "586MMX "
+#elif defined CONFIG_MCORE2
+#define MODULE_PROC_FAMILY "CORE2 "
#elif defined CONFIG_M686
#define MODULE_PROC_FAMILY "686 "
#elif defined CONFIG_MPENTIUMII
@@ -60,18 +62,12 @@ struct mod_arch_specific
#error unknown processor family
#endif
-#ifdef CONFIG_REGPARM
-#define MODULE_REGPARM "REGPARM "
-#else
-#define MODULE_REGPARM ""
-#endif
-
#ifdef CONFIG_4KSTACKS
#define MODULE_STACKSIZE "4KSTACKS "
#else
#define MODULE_STACKSIZE ""
#endif
-#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_REGPARM MODULE_STACKSIZE
+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE
#endif /* _ASM_I386_MODULE_H */
diff --git a/include/asm-i386/mpspec_def.h b/include/asm-i386/mpspec_def.h
index 76feedf85a8..13bafb16e7a 100644
--- a/include/asm-i386/mpspec_def.h
+++ b/include/asm-i386/mpspec_def.h
@@ -97,7 +97,6 @@ struct mpc_config_bus
#define BUSTYPE_TC "TC"
#define BUSTYPE_VME "VME"
#define BUSTYPE_XPRESS "XPRESS"
-#define BUSTYPE_NEC98 "NEC98"
struct mpc_config_ioapic
{
@@ -182,7 +181,6 @@ enum mp_bustype {
MP_BUS_EISA,
MP_BUS_PCI,
MP_BUS_MCA,
- MP_BUS_NEC98
};
#endif
diff --git a/include/asm-i386/msr.h b/include/asm-i386/msr.h
index 62b76cd9695..5679d499307 100644
--- a/include/asm-i386/msr.h
+++ b/include/asm-i386/msr.h
@@ -1,6 +1,10 @@
#ifndef __ASM_MSR_H
#define __ASM_MSR_H
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else
+
/*
* Access to machine-specific registers (available on 586 and better only)
* Note: the rd* operations modify the parameters directly (without using
@@ -77,6 +81,7 @@ static inline void wrmsrl (unsigned long msr, unsigned long long val)
__asm__ __volatile__("rdpmc" \
: "=a" (low), "=d" (high) \
: "c" (counter))
+#endif /* !CONFIG_PARAVIRT */
/* symbolic names for some interesting MSRs */
/* Intel defined MSRs. */
@@ -141,6 +146,10 @@ static inline void wrmsrl (unsigned long msr, unsigned long long val)
#define MSR_IA32_MC0_ADDR 0x402
#define MSR_IA32_MC0_MISC 0x403
+#define MSR_IA32_PEBS_ENABLE 0x3f1
+#define MSR_IA32_DS_AREA 0x600
+#define MSR_IA32_PERF_CAPABILITIES 0x345
+
/* Pentium IV performance counter MSRs */
#define MSR_P4_BPU_PERFCTR0 0x300
#define MSR_P4_BPU_PERFCTR1 0x301
@@ -284,4 +293,13 @@ static inline void wrmsrl (unsigned long msr, unsigned long long val)
#define MSR_TMTA_LRTI_READOUT 0x80868018
#define MSR_TMTA_LRTI_VOLT_MHZ 0x8086801a
+/* Intel Core-based CPU performance counters */
+#define MSR_CORE_PERF_FIXED_CTR0 0x309
+#define MSR_CORE_PERF_FIXED_CTR1 0x30a
+#define MSR_CORE_PERF_FIXED_CTR2 0x30b
+#define MSR_CORE_PERF_FIXED_CTR_CTRL 0x38d
+#define MSR_CORE_PERF_GLOBAL_STATUS 0x38e
+#define MSR_CORE_PERF_GLOBAL_CTRL 0x38f
+#define MSR_CORE_PERF_GLOBAL_OVF_CTRL 0x390
+
#endif /* __ASM_MSR_H */
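As the comment at the top of the file notes, rdmsr()/wrmsr() write through their arguments rather than taking pointers, and that holds for both the native and the paravirt definitions. A hedged sketch using two of the Core performance-counter MSRs added above (the function itself is illustrative):

    /* Illustrative only: read the global overflow status and write the set
       bits back to the overflow-control MSR to clear them. lo/hi are plain
       lvalues here, not pointers. */
    static void example_clear_perf_overflow(void)
    {
            u32 lo, hi;

            rdmsr(MSR_CORE_PERF_GLOBAL_STATUS, lo, hi);
            wrmsr(MSR_CORE_PERF_GLOBAL_OVF_CTRL, lo, hi);
    }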
diff --git a/include/asm-i386/nmi.h b/include/asm-i386/nmi.h
index 269d315719c..b04333ea6f3 100644
--- a/include/asm-i386/nmi.h
+++ b/include/asm-i386/nmi.h
@@ -5,6 +5,9 @@
#define ASM_NMI_H
#include <linux/pm.h>
+#include <asm/irq.h>
+
+#ifdef ARCH_HAS_NMI_WATCHDOG
/**
* do_nmi_callback
@@ -42,4 +45,9 @@ extern int proc_nmi_enabled(struct ctl_table *, int , struct file *,
void __user *, size_t *, loff_t *);
extern int unknown_nmi_panic;
+void __trigger_all_cpu_backtrace(void);
+#define trigger_all_cpu_backtrace() __trigger_all_cpu_backtrace()
+
+#endif
+
#endif /* ASM_NMI_H */
diff --git a/include/asm-i386/page.h b/include/asm-i386/page.h
index f5bf544c729..fd3f64ace24 100644
--- a/include/asm-i386/page.h
+++ b/include/asm-i386/page.h
@@ -52,6 +52,7 @@ typedef struct { unsigned long long pgprot; } pgprot_t;
#define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
#define __pmd(x) ((pmd_t) { (x) } )
#define HPAGE_SHIFT 21
+#include <asm-generic/pgtable-nopud.h>
#else
typedef struct { unsigned long pte_low; } pte_t;
typedef struct { unsigned long pgd; } pgd_t;
@@ -59,6 +60,7 @@ typedef struct { unsigned long pgprot; } pgprot_t;
#define boot_pte_t pte_t /* or would you rather have a typedef */
#define pte_val(x) ((x).pte_low)
#define HPAGE_SHIFT 22
+#include <asm-generic/pgtable-nopmd.h>
#endif
#define PTE_MASK PAGE_MASK
@@ -112,18 +114,18 @@ extern int page_is_ram(unsigned long pagenr);
#ifdef __ASSEMBLY__
#define __PAGE_OFFSET CONFIG_PAGE_OFFSET
-#define __PHYSICAL_START CONFIG_PHYSICAL_START
#else
#define __PAGE_OFFSET ((unsigned long)CONFIG_PAGE_OFFSET)
-#define __PHYSICAL_START ((unsigned long)CONFIG_PHYSICAL_START)
#endif
-#define __KERNEL_START (__PAGE_OFFSET + __PHYSICAL_START)
#define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)
#define VMALLOC_RESERVE ((unsigned long)__VMALLOC_RESERVE)
#define MAXMEM (-__PAGE_OFFSET-__VMALLOC_RESERVE)
#define __pa(x) ((unsigned long)(x)-PAGE_OFFSET)
+/* __pa_symbol should be used for C visible symbols.
+ This seems to be the official gcc blessed way to do such arithmetic. */
+#define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x),0))
#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
#ifdef CONFIG_FLATMEM
diff --git a/include/asm-i386/param.h b/include/asm-i386/param.h
index 745dc5bd0fb..21b32466fcd 100644
--- a/include/asm-i386/param.h
+++ b/include/asm-i386/param.h
@@ -18,6 +18,5 @@
#endif
#define MAXHOSTNAMELEN 64 /* max length of hostname */
-#define COMMAND_LINE_SIZE 256
#endif
diff --git a/include/asm-i386/paravirt.h b/include/asm-i386/paravirt.h
new file mode 100644
index 00000000000..9f06265065f
--- /dev/null
+++ b/include/asm-i386/paravirt.h
@@ -0,0 +1,505 @@
+#ifndef __ASM_PARAVIRT_H
+#define __ASM_PARAVIRT_H
+/* Various instructions on x86 need to be replaced for
+ * para-virtualization: those hooks are defined here. */
+#include <linux/linkage.h>
+#include <linux/stringify.h>
+#include <asm/page.h>
+
+#ifdef CONFIG_PARAVIRT
+/* These are the most performance critical ops, so we want to be able to patch
+ * callers */
+#define PARAVIRT_IRQ_DISABLE 0
+#define PARAVIRT_IRQ_ENABLE 1
+#define PARAVIRT_RESTORE_FLAGS 2
+#define PARAVIRT_SAVE_FLAGS 3
+#define PARAVIRT_SAVE_FLAGS_IRQ_DISABLE 4
+#define PARAVIRT_INTERRUPT_RETURN 5
+#define PARAVIRT_STI_SYSEXIT 6
+
+/* Bitmask of what can be clobbered: usually at least eax. */
+#define CLBR_NONE 0x0
+#define CLBR_EAX 0x1
+#define CLBR_ECX 0x2
+#define CLBR_EDX 0x4
+#define CLBR_ANY 0x7
+
+#ifndef __ASSEMBLY__
+struct thread_struct;
+struct Xgt_desc_struct;
+struct tss_struct;
+struct mm_struct;
+struct paravirt_ops
+{
+ unsigned int kernel_rpl;
+ int paravirt_enabled;
+ const char *name;
+
+ /*
+ * Patch may replace one of the defined code sequences with arbitrary
+ * code, subject to the same register constraints. This generally
+ * means the code is not free to clobber any registers other than EAX.
+ * The patch function should return the number of bytes of code
+ * generated, as we nop pad the rest in generic code.
+ */
+ unsigned (*patch)(u8 type, u16 clobber, void *firstinsn, unsigned len);
+
+ void (*arch_setup)(void);
+ char *(*memory_setup)(void);
+ void (*init_IRQ)(void);
+
+ void (*banner)(void);
+
+ unsigned long (*get_wallclock)(void);
+ int (*set_wallclock)(unsigned long);
+ void (*time_init)(void);
+
+ /* All the function pointers here are declared as "fastcall"
+ so that we get a specific register-based calling
+ convention. This makes it easier to implement inline
+ assembler replacements. */
+
+ void (fastcall *cpuid)(unsigned int *eax, unsigned int *ebx,
+ unsigned int *ecx, unsigned int *edx);
+
+ unsigned long (fastcall *get_debugreg)(int regno);
+ void (fastcall *set_debugreg)(int regno, unsigned long value);
+
+ void (fastcall *clts)(void);
+
+ unsigned long (fastcall *read_cr0)(void);
+ void (fastcall *write_cr0)(unsigned long);
+
+ unsigned long (fastcall *read_cr2)(void);
+ void (fastcall *write_cr2)(unsigned long);
+
+ unsigned long (fastcall *read_cr3)(void);
+ void (fastcall *write_cr3)(unsigned long);
+
+ unsigned long (fastcall *read_cr4_safe)(void);
+ unsigned long (fastcall *read_cr4)(void);
+ void (fastcall *write_cr4)(unsigned long);
+
+ unsigned long (fastcall *save_fl)(void);
+ void (fastcall *restore_fl)(unsigned long);
+ void (fastcall *irq_disable)(void);
+ void (fastcall *irq_enable)(void);
+ void (fastcall *safe_halt)(void);
+ void (fastcall *halt)(void);
+ void (fastcall *wbinvd)(void);
+
+ /* err = 0/-EFAULT. wrmsr returns 0/-EFAULT. */
+ u64 (fastcall *read_msr)(unsigned int msr, int *err);
+ int (fastcall *write_msr)(unsigned int msr, u64 val);
+
+ u64 (fastcall *read_tsc)(void);
+ u64 (fastcall *read_pmc)(void);
+
+ void (fastcall *load_tr_desc)(void);
+ void (fastcall *load_gdt)(const struct Xgt_desc_struct *);
+ void (fastcall *load_idt)(const struct Xgt_desc_struct *);
+ void (fastcall *store_gdt)(struct Xgt_desc_struct *);
+ void (fastcall *store_idt)(struct Xgt_desc_struct *);
+ void (fastcall *set_ldt)(const void *desc, unsigned entries);
+ unsigned long (fastcall *store_tr)(void);
+ void (fastcall *load_tls)(struct thread_struct *t, unsigned int cpu);
+ void (fastcall *write_ldt_entry)(void *dt, int entrynum,
+ u32 low, u32 high);
+ void (fastcall *write_gdt_entry)(void *dt, int entrynum,
+ u32 low, u32 high);
+ void (fastcall *write_idt_entry)(void *dt, int entrynum,
+ u32 low, u32 high);
+ void (fastcall *load_esp0)(struct tss_struct *tss,
+ struct thread_struct *thread);
+
+ void (fastcall *set_iopl_mask)(unsigned mask);
+
+ void (fastcall *io_delay)(void);
+ void (*const_udelay)(unsigned long loops);
+
+#ifdef CONFIG_X86_LOCAL_APIC
+ void (fastcall *apic_write)(unsigned long reg, unsigned long v);
+ void (fastcall *apic_write_atomic)(unsigned long reg, unsigned long v);
+ unsigned long (fastcall *apic_read)(unsigned long reg);
+#endif
+
+ void (fastcall *flush_tlb_user)(void);
+ void (fastcall *flush_tlb_kernel)(void);
+ void (fastcall *flush_tlb_single)(u32 addr);
+
+ void (fastcall *set_pte)(pte_t *ptep, pte_t pteval);
+ void (fastcall *set_pte_at)(struct mm_struct *mm, u32 addr, pte_t *ptep, pte_t pteval);
+ void (fastcall *set_pmd)(pmd_t *pmdp, pmd_t pmdval);
+ void (fastcall *pte_update)(struct mm_struct *mm, u32 addr, pte_t *ptep);
+ void (fastcall *pte_update_defer)(struct mm_struct *mm, u32 addr, pte_t *ptep);
+#ifdef CONFIG_X86_PAE
+ void (fastcall *set_pte_atomic)(pte_t *ptep, pte_t pteval);
+ void (fastcall *set_pte_present)(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte);
+ void (fastcall *set_pud)(pud_t *pudp, pud_t pudval);
+ void (fastcall *pte_clear)(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
+ void (fastcall *pmd_clear)(pmd_t *pmdp);
+#endif
+
+ /* These two are jmp to, not actually called. */
+ void (fastcall *irq_enable_sysexit)(void);
+ void (fastcall *iret)(void);
+};
+
+/* Mark a paravirt probe function. */
+#define paravirt_probe(fn) \
+ static asmlinkage void (*__paravirtprobe_##fn)(void) __attribute_used__ \
+ __attribute__((__section__(".paravirtprobe"))) = fn
+
+extern struct paravirt_ops paravirt_ops;
+
+#define paravirt_enabled() (paravirt_ops.paravirt_enabled)
+
+static inline void load_esp0(struct tss_struct *tss,
+ struct thread_struct *thread)
+{
+ paravirt_ops.load_esp0(tss, thread);
+}
+
+#define ARCH_SETUP paravirt_ops.arch_setup();
+static inline unsigned long get_wallclock(void)
+{
+ return paravirt_ops.get_wallclock();
+}
+
+static inline int set_wallclock(unsigned long nowtime)
+{
+ return paravirt_ops.set_wallclock(nowtime);
+}
+
+static inline void do_time_init(void)
+{
+ return paravirt_ops.time_init();
+}
+
+/* The paravirtualized CPUID instruction. */
+static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
+ unsigned int *ecx, unsigned int *edx)
+{
+ paravirt_ops.cpuid(eax, ebx, ecx, edx);
+}
+
+/*
+ * These special macros can be used to get or set a debugging register
+ */
+#define get_debugreg(var, reg) var = paravirt_ops.get_debugreg(reg)
+#define set_debugreg(val, reg) paravirt_ops.set_debugreg(reg, val)
+
+#define clts() paravirt_ops.clts()
+
+#define read_cr0() paravirt_ops.read_cr0()
+#define write_cr0(x) paravirt_ops.write_cr0(x)
+
+#define read_cr2() paravirt_ops.read_cr2()
+#define write_cr2(x) paravirt_ops.write_cr2(x)
+
+#define read_cr3() paravirt_ops.read_cr3()
+#define write_cr3(x) paravirt_ops.write_cr3(x)
+
+#define read_cr4() paravirt_ops.read_cr4()
+#define read_cr4_safe(x) paravirt_ops.read_cr4_safe()
+#define write_cr4(x) paravirt_ops.write_cr4(x)
+
+static inline void raw_safe_halt(void)
+{
+ paravirt_ops.safe_halt();
+}
+
+static inline void halt(void)
+{
+ paravirt_ops.safe_halt();
+}
+#define wbinvd() paravirt_ops.wbinvd()
+
+#define get_kernel_rpl() (paravirt_ops.kernel_rpl)
+
+#define rdmsr(msr,val1,val2) do { \
+ int _err; \
+ u64 _l = paravirt_ops.read_msr(msr,&_err); \
+ val1 = (u32)_l; \
+ val2 = _l >> 32; \
+} while(0)
+
+#define wrmsr(msr,val1,val2) do { \
+ u64 _l = ((u64)(val2) << 32) | (val1); \
+ paravirt_ops.write_msr((msr), _l); \
+} while(0)
+
+#define rdmsrl(msr,val) do { \
+ int _err; \
+ val = paravirt_ops.read_msr((msr),&_err); \
+} while(0)
+
+#define wrmsrl(msr,val) (paravirt_ops.write_msr((msr),(val)))
+#define wrmsr_safe(msr,a,b) ({ \
+ u64 _l = ((u64)(b) << 32) | (a); \
+ paravirt_ops.write_msr((msr),_l); \
+})
+
+/* rdmsr with exception handling */
+#define rdmsr_safe(msr,a,b) ({ \
+ int _err; \
+ u64 _l = paravirt_ops.read_msr(msr,&_err); \
+ (*a) = (u32)_l; \
+ (*b) = _l >> 32; \
+ _err; })
+
+#define rdtsc(low,high) do { \
+ u64 _l = paravirt_ops.read_tsc(); \
+ low = (u32)_l; \
+ high = _l >> 32; \
+} while(0)
+
+#define rdtscl(low) do { \
+ u64 _l = paravirt_ops.read_tsc(); \
+ low = (int)_l; \
+} while(0)
+
+#define rdtscll(val) (val = paravirt_ops.read_tsc())
+
+#define write_tsc(val1,val2) wrmsr(0x10, val1, val2)
+
+#define rdpmc(counter,low,high) do { \
+ u64 _l = paravirt_ops.read_pmc(); \
+ low = (u32)_l; \
+ high = _l >> 32; \
+} while(0)
+
+#define load_TR_desc() (paravirt_ops.load_tr_desc())
+#define load_gdt(dtr) (paravirt_ops.load_gdt(dtr))
+#define load_idt(dtr) (paravirt_ops.load_idt(dtr))
+#define set_ldt(addr, entries) (paravirt_ops.set_ldt((addr), (entries)))
+#define store_gdt(dtr) (paravirt_ops.store_gdt(dtr))
+#define store_idt(dtr) (paravirt_ops.store_idt(dtr))
+#define store_tr(tr) ((tr) = paravirt_ops.store_tr())
+#define load_TLS(t,cpu) (paravirt_ops.load_tls((t),(cpu)))
+#define write_ldt_entry(dt, entry, low, high) \
+ (paravirt_ops.write_ldt_entry((dt), (entry), (low), (high)))
+#define write_gdt_entry(dt, entry, low, high) \
+ (paravirt_ops.write_gdt_entry((dt), (entry), (low), (high)))
+#define write_idt_entry(dt, entry, low, high) \
+ (paravirt_ops.write_idt_entry((dt), (entry), (low), (high)))
+#define set_iopl_mask(mask) (paravirt_ops.set_iopl_mask(mask))
+
+/* The paravirtualized I/O functions */
+static inline void slow_down_io(void) {
+ paravirt_ops.io_delay();
+#ifdef REALLY_SLOW_IO
+ paravirt_ops.io_delay();
+ paravirt_ops.io_delay();
+ paravirt_ops.io_delay();
+#endif
+}
+
+#ifdef CONFIG_X86_LOCAL_APIC
+/*
+ * Basic functions accessing APICs.
+ */
+static inline void apic_write(unsigned long reg, unsigned long v)
+{
+ paravirt_ops.apic_write(reg,v);
+}
+
+static inline void apic_write_atomic(unsigned long reg, unsigned long v)
+{
+ paravirt_ops.apic_write_atomic(reg,v);
+}
+
+static inline unsigned long apic_read(unsigned long reg)
+{
+ return paravirt_ops.apic_read(reg);
+}
+#endif
+
+
+#define __flush_tlb() paravirt_ops.flush_tlb_user()
+#define __flush_tlb_global() paravirt_ops.flush_tlb_kernel()
+#define __flush_tlb_single(addr) paravirt_ops.flush_tlb_single(addr)
+
+static inline void set_pte(pte_t *ptep, pte_t pteval)
+{
+ paravirt_ops.set_pte(ptep, pteval);
+}
+
+static inline void set_pte_at(struct mm_struct *mm, u32 addr, pte_t *ptep, pte_t pteval)
+{
+ paravirt_ops.set_pte_at(mm, addr, ptep, pteval);
+}
+
+static inline void set_pmd(pmd_t *pmdp, pmd_t pmdval)
+{
+ paravirt_ops.set_pmd(pmdp, pmdval);
+}
+
+static inline void pte_update(struct mm_struct *mm, u32 addr, pte_t *ptep)
+{
+ paravirt_ops.pte_update(mm, addr, ptep);
+}
+
+static inline void pte_update_defer(struct mm_struct *mm, u32 addr, pte_t *ptep)
+{
+ paravirt_ops.pte_update_defer(mm, addr, ptep);
+}
+
+#ifdef CONFIG_X86_PAE
+static inline void set_pte_atomic(pte_t *ptep, pte_t pteval)
+{
+ paravirt_ops.set_pte_atomic(ptep, pteval);
+}
+
+static inline void set_pte_present(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte)
+{
+ paravirt_ops.set_pte_present(mm, addr, ptep, pte);
+}
+
+static inline void set_pud(pud_t *pudp, pud_t pudval)
+{
+ paravirt_ops.set_pud(pudp, pudval);
+}
+
+static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+{
+ paravirt_ops.pte_clear(mm, addr, ptep);
+}
+
+static inline void pmd_clear(pmd_t *pmdp)
+{
+ paravirt_ops.pmd_clear(pmdp);
+}
+#endif
+
+/* These all sit in the .parainstructions section to tell us what to patch. */
+struct paravirt_patch {
+ u8 *instr; /* original instructions */
+ u8 instrtype; /* type of this instruction */
+ u8 len; /* length of original instruction */
+ u16 clobbers; /* what registers you may clobber */
+};
+
+#define paravirt_alt(insn_string, typenum, clobber) \
+ "771:\n\t" insn_string "\n" "772:\n" \
+ ".pushsection .parainstructions,\"a\"\n" \
+ " .long 771b\n" \
+ " .byte " __stringify(typenum) "\n" \
+ " .byte 772b-771b\n" \
+ " .short " __stringify(clobber) "\n" \
+ ".popsection"
+
+static inline unsigned long __raw_local_save_flags(void)
+{
+ unsigned long f;
+
+ __asm__ __volatile__(paravirt_alt( "pushl %%ecx; pushl %%edx;"
+ "call *%1;"
+ "popl %%edx; popl %%ecx",
+ PARAVIRT_SAVE_FLAGS, CLBR_NONE)
+ : "=a"(f): "m"(paravirt_ops.save_fl)
+ : "memory", "cc");
+ return f;
+}
+
+static inline void raw_local_irq_restore(unsigned long f)
+{
+ __asm__ __volatile__(paravirt_alt( "pushl %%ecx; pushl %%edx;"
+ "call *%1;"
+ "popl %%edx; popl %%ecx",
+ PARAVIRT_RESTORE_FLAGS, CLBR_EAX)
+ : "=a"(f) : "m" (paravirt_ops.restore_fl), "0"(f)
+ : "memory", "cc");
+}
+
+static inline void raw_local_irq_disable(void)
+{
+ __asm__ __volatile__(paravirt_alt( "pushl %%ecx; pushl %%edx;"
+ "call *%0;"
+ "popl %%edx; popl %%ecx",
+ PARAVIRT_IRQ_DISABLE, CLBR_EAX)
+ : : "m" (paravirt_ops.irq_disable)
+ : "memory", "eax", "cc");
+}
+
+static inline void raw_local_irq_enable(void)
+{
+ __asm__ __volatile__(paravirt_alt( "pushl %%ecx; pushl %%edx;"
+ "call *%0;"
+ "popl %%edx; popl %%ecx",
+ PARAVIRT_IRQ_ENABLE, CLBR_EAX)
+ : : "m" (paravirt_ops.irq_enable)
+ : "memory", "eax", "cc");
+}
+
+static inline unsigned long __raw_local_irq_save(void)
+{
+ unsigned long f;
+
+ __asm__ __volatile__(paravirt_alt( "pushl %%ecx; pushl %%edx;"
+ "call *%1; pushl %%eax;"
+ "call *%2; popl %%eax;"
+ "popl %%edx; popl %%ecx",
+ PARAVIRT_SAVE_FLAGS_IRQ_DISABLE,
+ CLBR_NONE)
+ : "=a"(f)
+ : "m" (paravirt_ops.save_fl),
+ "m" (paravirt_ops.irq_disable)
+ : "memory", "cc");
+ return f;
+}
+
+#define CLI_STRING paravirt_alt("pushl %%ecx; pushl %%edx;" \
+ "call *paravirt_ops+%c[irq_disable];" \
+ "popl %%edx; popl %%ecx", \
+ PARAVIRT_IRQ_DISABLE, CLBR_EAX)
+
+#define STI_STRING paravirt_alt("pushl %%ecx; pushl %%edx;" \
+ "call *paravirt_ops+%c[irq_enable];" \
+ "popl %%edx; popl %%ecx", \
+ PARAVIRT_IRQ_ENABLE, CLBR_EAX)
+#define CLI_STI_CLOBBERS , "%eax"
+#define CLI_STI_INPUT_ARGS \
+ , \
+ [irq_disable] "i" (offsetof(struct paravirt_ops, irq_disable)), \
+ [irq_enable] "i" (offsetof(struct paravirt_ops, irq_enable))
+
+#else /* __ASSEMBLY__ */
+
+#define PARA_PATCH(ptype, clobbers, ops) \
+771:; \
+ ops; \
+772:; \
+ .pushsection .parainstructions,"a"; \
+ .long 771b; \
+ .byte ptype; \
+ .byte 772b-771b; \
+ .short clobbers; \
+ .popsection
+
+#define INTERRUPT_RETURN \
+ PARA_PATCH(PARAVIRT_INTERRUPT_RETURN, CLBR_ANY, \
+ jmp *%cs:paravirt_ops+PARAVIRT_iret)
+
+#define DISABLE_INTERRUPTS(clobbers) \
+ PARA_PATCH(PARAVIRT_IRQ_DISABLE, clobbers, \
+ pushl %ecx; pushl %edx; \
+ call *paravirt_ops+PARAVIRT_irq_disable; \
+ popl %edx; popl %ecx) \
+
+#define ENABLE_INTERRUPTS(clobbers) \
+ PARA_PATCH(PARAVIRT_IRQ_ENABLE, clobbers, \
+ pushl %ecx; pushl %edx; \
+ call *%cs:paravirt_ops+PARAVIRT_irq_enable; \
+ popl %edx; popl %ecx)
+
+#define ENABLE_INTERRUPTS_SYSEXIT \
+ PARA_PATCH(PARAVIRT_STI_SYSEXIT, CLBR_ANY, \
+ jmp *%cs:paravirt_ops+PARAVIRT_irq_enable_sysexit)
+
+#define GET_CR0_INTO_EAX \
+ call *paravirt_ops+PARAVIRT_read_cr0
+
+#endif /* __ASSEMBLY__ */
+#endif /* CONFIG_PARAVIRT */
+#endif /* __ASM_PARAVIRT_H */
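Everything above is the call-site half of the interface; a backend has to supply the actual struct paravirt_ops instance. The sketch below shows, under the declarations above, roughly how a bare-metal backend could populate a couple of the hooks; the native_* names and the file they would live in are illustrative, not part of this header:

    /* A minimal sketch of a bare-hardware backend. Only two hooks are shown;
       a real instance must fill in every pointer used by the wrappers above. */
    static fastcall unsigned long native_save_fl(void)
    {
            unsigned long f;
            __asm__ __volatile__("pushfl ; popl %0" : "=g" (f) : /* no input */);
            return f;
    }

    static fastcall void native_irq_disable(void)
    {
            __asm__ __volatile__("cli" : : : "memory");
    }

    struct paravirt_ops paravirt_ops = {
            .name = "bare hardware",
            .paravirt_enabled = 0,
            .kernel_rpl = 0,
            .save_fl = native_save_fl,
            .irq_disable = native_irq_disable,
            /* ... remaining hooks filled in the same fashion ... */
    };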
diff --git a/include/asm-i386/pda.h b/include/asm-i386/pda.h
new file mode 100644
index 00000000000..2ba2736aa10
--- /dev/null
+++ b/include/asm-i386/pda.h
@@ -0,0 +1,100 @@
+/*
+ Per-processor Data Areas
+ Jeremy Fitzhardinge <jeremy@goop.org> 2006
+ Based on asm-x86_64/pda.h by Andi Kleen.
+ */
+#ifndef _I386_PDA_H
+#define _I386_PDA_H
+
+#include <linux/stddef.h>
+#include <linux/types.h>
+
+struct i386_pda
+{
+ struct i386_pda *_pda; /* pointer to self */
+
+ int cpu_number;
+ struct task_struct *pcurrent; /* current process */
+ struct pt_regs *irq_regs;
+};
+
+extern struct i386_pda *_cpu_pda[];
+
+#define cpu_pda(i) (_cpu_pda[i])
+
+#define pda_offset(field) offsetof(struct i386_pda, field)
+
+extern void __bad_pda_field(void);
+
+/* This variable is never instantiated. It is only used as a stand-in
+ for the real per-cpu PDA memory, so that gcc can understand what
+ memory operations the inline asms() below are performing. This
+ eliminates the need to make the asms volatile or have memory
+ clobbers, so gcc can readily analyse them. */
+extern struct i386_pda _proxy_pda;
+
+#define pda_to_op(op,field,val) \
+ do { \
+ typedef typeof(_proxy_pda.field) T__; \
+ if (0) { T__ tmp__; tmp__ = (val); } \
+ switch (sizeof(_proxy_pda.field)) { \
+ case 1: \
+ asm(op "b %1,%%gs:%c2" \
+ : "+m" (_proxy_pda.field) \
+ :"ri" ((T__)val), \
+ "i"(pda_offset(field))); \
+ break; \
+ case 2: \
+ asm(op "w %1,%%gs:%c2" \
+ : "+m" (_proxy_pda.field) \
+ :"ri" ((T__)val), \
+ "i"(pda_offset(field))); \
+ break; \
+ case 4: \
+ asm(op "l %1,%%gs:%c2" \
+ : "+m" (_proxy_pda.field) \
+ :"ri" ((T__)val), \
+ "i"(pda_offset(field))); \
+ break; \
+ default: __bad_pda_field(); \
+ } \
+ } while (0)
+
+#define pda_from_op(op,field) \
+ ({ \
+ typeof(_proxy_pda.field) ret__; \
+ switch (sizeof(_proxy_pda.field)) { \
+ case 1: \
+ asm(op "b %%gs:%c1,%0" \
+ : "=r" (ret__) \
+ : "i" (pda_offset(field)), \
+ "m" (_proxy_pda.field)); \
+ break; \
+ case 2: \
+ asm(op "w %%gs:%c1,%0" \
+ : "=r" (ret__) \
+ : "i" (pda_offset(field)), \
+ "m" (_proxy_pda.field)); \
+ break; \
+ case 4: \
+ asm(op "l %%gs:%c1,%0" \
+ : "=r" (ret__) \
+ : "i" (pda_offset(field)), \
+ "m" (_proxy_pda.field)); \
+ break; \
+ default: __bad_pda_field(); \
+ } \
+ ret__; })
+
+/* Return a pointer to a pda field */
+#define pda_addr(field) \
+ ((typeof(_proxy_pda.field) *)((unsigned char *)read_pda(_pda) + \
+ pda_offset(field)))
+
+#define read_pda(field) pda_from_op("mov",field)
+#define write_pda(field,val) pda_to_op("mov",field,val)
+#define add_pda(field,val) pda_to_op("add",field,val)
+#define sub_pda(field,val) pda_to_op("sub",field,val)
+#define or_pda(field,val) pda_to_op("or",field,val)
+
+#endif /* _I386_PDA_H */
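Each accessor compiles down to a single %gs-relative instruction against the field offsets of struct i386_pda. A hedged usage sketch, restricted to the fields actually declared above (the wrapper names are illustrative):

    /* Illustrative callers: each read/write is one %gs-relative mov. */
    static inline int example_cpu_number(void)
    {
            return read_pda(cpu_number);
    }

    static inline void example_set_current(struct task_struct *task)
    {
            write_pda(pcurrent, task);
    }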
diff --git a/include/asm-i386/percpu.h b/include/asm-i386/percpu.h
index 5764afa4b6a..510ae1d3486 100644
--- a/include/asm-i386/percpu.h
+++ b/include/asm-i386/percpu.h
@@ -1,6 +1,31 @@
#ifndef __ARCH_I386_PERCPU__
#define __ARCH_I386_PERCPU__
+#ifndef __ASSEMBLY__
#include <asm-generic/percpu.h>
+#else
+
+/*
+ * PER_CPU finds an address of a per-cpu variable.
+ *
+ * Args:
+ * var - variable name
+ * cpu - 32bit register containing the current CPU number
+ *
+ * The resulting address is stored in the "cpu" argument.
+ *
+ * Example:
+ * PER_CPU(cpu_gdt_descr, %ebx)
+ */
+#ifdef CONFIG_SMP
+#define PER_CPU(var, cpu) \
+ movl __per_cpu_offset(,cpu,4), cpu; \
+ addl $per_cpu__/**/var, cpu;
+#else /* ! SMP */
+#define PER_CPU(var, cpu) \
+ movl $per_cpu__/**/var, cpu;
+#endif /* SMP */
+
+#endif /* !__ASSEMBLY__ */
#endif /* __ARCH_I386_PERCPU__ */
diff --git a/include/asm-i386/pgtable-2level.h b/include/asm-i386/pgtable-2level.h
index 8d8d3b9ecdb..38c3fcc0676 100644
--- a/include/asm-i386/pgtable-2level.h
+++ b/include/asm-i386/pgtable-2level.h
@@ -1,8 +1,6 @@
#ifndef _I386_PGTABLE_2LEVEL_H
#define _I386_PGTABLE_2LEVEL_H
-#include <asm-generic/pgtable-nopmd.h>
-
#define pte_ERROR(e) \
printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, (e).pte_low)
#define pgd_ERROR(e) \
@@ -13,17 +11,19 @@
* within a page table are directly modified. Thus, the following
* hook is made available.
*/
+#ifndef CONFIG_PARAVIRT
#define set_pte(pteptr, pteval) (*(pteptr) = pteval)
#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
+#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
+#endif
+
#define set_pte_atomic(pteptr, pteval) set_pte(pteptr,pteval)
#define set_pte_present(mm,addr,ptep,pteval) set_pte_at(mm,addr,ptep,pteval)
-#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
#define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
#define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0)
-#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
-#define ptep_get_and_clear(mm,addr,xp) __pte(xchg(&(xp)->pte_low, 0))
+#define raw_ptep_get_and_clear(xp) __pte(xchg(&(xp)->pte_low, 0))
#define pte_page(x) pfn_to_page(pte_pfn(x))
#define pte_none(x) (!(x).pte_low)
diff --git a/include/asm-i386/pgtable-3level.h b/include/asm-i386/pgtable-3level.h
index c2d701ea35b..7a2318f3830 100644
--- a/include/asm-i386/pgtable-3level.h
+++ b/include/asm-i386/pgtable-3level.h
@@ -1,8 +1,6 @@
#ifndef _I386_PGTABLE_3LEVEL_H
#define _I386_PGTABLE_3LEVEL_H
-#include <asm-generic/pgtable-nopud.h>
-
/*
* Intel Physical Address Extension (PAE) Mode - three-level page
* tables on PPro+ CPUs.
@@ -44,6 +42,7 @@ static inline int pte_exec_kernel(pte_t pte)
return pte_x(pte);
}
+#ifndef CONFIG_PARAVIRT
/* Rules for using set_pte: the pte being assigned *must* be
* either not present or in a state where the hardware will
* not attempt to update the pte. In places where this is
@@ -81,25 +80,6 @@ static inline void set_pte_present(struct mm_struct *mm, unsigned long addr, pte
(*(pudptr) = (pudval))
/*
- * Pentium-II erratum A13: in PAE mode we explicitly have to flush
- * the TLB via cr3 if the top-level pgd is changed...
- * We do not let the generic code free and clear pgd entries due to
- * this erratum.
- */
-static inline void pud_clear (pud_t * pud) { }
-
-#define pud_page(pud) \
-((struct page *) __va(pud_val(pud) & PAGE_MASK))
-
-#define pud_page_vaddr(pud) \
-((unsigned long) __va(pud_val(pud) & PAGE_MASK))
-
-
-/* Find an entry in the second-level page table.. */
-#define pmd_offset(pud, address) ((pmd_t *) pud_page(*(pud)) + \
- pmd_index(address))
-
-/*
* For PTEs and PDEs, we must clear the P-bit first when clearing a page table
* entry, so clear the bottom half first and enforce ordering with a compiler
* barrier.
@@ -118,9 +98,28 @@ static inline void pmd_clear(pmd_t *pmd)
smp_wmb();
*(tmp + 1) = 0;
}
+#endif
+
+/*
+ * Pentium-II erratum A13: in PAE mode we explicitly have to flush
+ * the TLB via cr3 if the top-level pgd is changed...
+ * We do not let the generic code free and clear pgd entries due to
+ * this erratum.
+ */
+static inline void pud_clear (pud_t * pud) { }
+
+#define pud_page(pud) \
+((struct page *) __va(pud_val(pud) & PAGE_MASK))
+
+#define pud_page_vaddr(pud) \
+((unsigned long) __va(pud_val(pud) & PAGE_MASK))
+
+
+/* Find an entry in the second-level page table.. */
+#define pmd_offset(pud, address) ((pmd_t *) pud_page(*(pud)) + \
+ pmd_index(address))
-#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
-static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+static inline pte_t raw_ptep_get_and_clear(pte_t *ptep)
{
pte_t res;
diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h
index 7d398f493dd..e6a4723f0eb 100644
--- a/include/asm-i386/pgtable.h
+++ b/include/asm-i386/pgtable.h
@@ -15,6 +15,7 @@
#include <asm/processor.h>
#include <asm/fixmap.h>
#include <linux/threads.h>
+#include <asm/paravirt.h>
#ifndef _I386_BITOPS_H
#include <asm/bitops.h>
@@ -34,14 +35,14 @@ struct vm_area_struct;
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
extern unsigned long empty_zero_page[1024];
extern pgd_t swapper_pg_dir[1024];
-extern kmem_cache_t *pgd_cache;
-extern kmem_cache_t *pmd_cache;
+extern struct kmem_cache *pgd_cache;
+extern struct kmem_cache *pmd_cache;
extern spinlock_t pgd_lock;
extern struct page *pgd_list;
-void pmd_ctor(void *, kmem_cache_t *, unsigned long);
-void pgd_ctor(void *, kmem_cache_t *, unsigned long);
-void pgd_dtor(void *, kmem_cache_t *, unsigned long);
+void pmd_ctor(void *, struct kmem_cache *, unsigned long);
+void pgd_ctor(void *, struct kmem_cache *, unsigned long);
+void pgd_dtor(void *, struct kmem_cache *, unsigned long);
void pgtable_cache_init(void);
void paging_init(void);
@@ -246,6 +247,7 @@ static inline pte_t pte_mkhuge(pte_t pte) { (pte).pte_low |= _PAGE_PSE; return p
# include <asm/pgtable-2level.h>
#endif
+#ifndef CONFIG_PARAVIRT
/*
* Rules for using pte_update - it must be called after any PTE update which
* has not been done using the set_pte / clear_pte interfaces. It is used by
@@ -261,7 +263,7 @@ static inline pte_t pte_mkhuge(pte_t pte) { (pte).pte_low |= _PAGE_PSE; return p
*/
#define pte_update(mm, addr, ptep) do { } while (0)
#define pte_update_defer(mm, addr, ptep) do { } while (0)
-
+#endif
/*
* We only update the dirty/accessed state if we set
@@ -275,7 +277,7 @@ static inline pte_t pte_mkhuge(pte_t pte) { (pte).pte_low |= _PAGE_PSE; return p
do { \
if (dirty) { \
(ptep)->pte_low = (entry).pte_low; \
- pte_update_defer((vma)->vm_mm, (addr), (ptep)); \
+ pte_update_defer((vma)->vm_mm, (address), (ptep)); \
flush_tlb_page(vma, address); \
} \
} while (0)
@@ -305,7 +307,7 @@ do { \
__dirty = pte_dirty(*(ptep)); \
if (__dirty) { \
clear_bit(_PAGE_BIT_DIRTY, &(ptep)->pte_low); \
- pte_update_defer((vma)->vm_mm, (addr), (ptep)); \
+ pte_update_defer((vma)->vm_mm, (address), (ptep)); \
flush_tlb_page(vma, address); \
} \
__dirty; \
@@ -318,12 +320,20 @@ do { \
__young = pte_young(*(ptep)); \
if (__young) { \
clear_bit(_PAGE_BIT_ACCESSED, &(ptep)->pte_low); \
- pte_update_defer((vma)->vm_mm, (addr), (ptep)); \
+ pte_update_defer((vma)->vm_mm, (address), (ptep)); \
flush_tlb_page(vma, address); \
} \
__young; \
})
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
+static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+{
+ pte_t pte = raw_ptep_get_and_clear(ptep);
+ pte_update(mm, addr, ptep);
+ return pte;
+}
+
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int full)
{
diff --git a/include/asm-i386/processor.h b/include/asm-i386/processor.h
index e0ddca94d50..a52d6544042 100644
--- a/include/asm-i386/processor.h
+++ b/include/asm-i386/processor.h
@@ -20,6 +20,7 @@
#include <linux/threads.h>
#include <asm/percpu.h>
#include <linux/cpumask.h>
+#include <linux/init.h>
/* flag for disabling the tsc */
extern int tsc_disable;
@@ -72,6 +73,7 @@ struct cpuinfo_x86 {
#endif
unsigned char x86_max_cores; /* cpuid returned max cores value */
unsigned char apicid;
+ unsigned short x86_clflush_size;
#ifdef CONFIG_SMP
unsigned char booted_cores; /* number of cores as seen by OS */
__u8 phys_proc_id; /* Physical processor id. */
@@ -111,6 +113,8 @@ extern struct cpuinfo_x86 cpu_data[];
extern int cpu_llc_id[NR_CPUS];
extern char ignore_fpu_irq;
+void __init cpu_detect(struct cpuinfo_x86 *c);
+
extern void identify_cpu(struct cpuinfo_x86 *);
extern void print_cpu_info(struct cpuinfo_x86 *);
extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
@@ -143,8 +147,8 @@ static inline void detect_ht(struct cpuinfo_x86 *c) {}
#define X86_EFLAGS_VIP 0x00100000 /* Virtual Interrupt Pending */
#define X86_EFLAGS_ID 0x00200000 /* CPUID detection flag */
-static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
- unsigned int *ecx, unsigned int *edx)
+static inline fastcall void native_cpuid(unsigned int *eax, unsigned int *ebx,
+ unsigned int *ecx, unsigned int *edx)
{
/* ecx is often an input as well as an output. */
__asm__("cpuid"
@@ -155,59 +159,6 @@ static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
: "0" (*eax), "2" (*ecx));
}
-/*
- * Generic CPUID function
- * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx
- * resulting in stale register contents being returned.
- */
-static inline void cpuid(unsigned int op, unsigned int *eax, unsigned int *ebx, unsigned int *ecx, unsigned int *edx)
-{
- *eax = op;
- *ecx = 0;
- __cpuid(eax, ebx, ecx, edx);
-}
-
-/* Some CPUID calls want 'count' to be placed in ecx */
-static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx,
- int *edx)
-{
- *eax = op;
- *ecx = count;
- __cpuid(eax, ebx, ecx, edx);
-}
-
-/*
- * CPUID functions returning a single datum
- */
-static inline unsigned int cpuid_eax(unsigned int op)
-{
- unsigned int eax, ebx, ecx, edx;
-
- cpuid(op, &eax, &ebx, &ecx, &edx);
- return eax;
-}
-static inline unsigned int cpuid_ebx(unsigned int op)
-{
- unsigned int eax, ebx, ecx, edx;
-
- cpuid(op, &eax, &ebx, &ecx, &edx);
- return ebx;
-}
-static inline unsigned int cpuid_ecx(unsigned int op)
-{
- unsigned int eax, ebx, ecx, edx;
-
- cpuid(op, &eax, &ebx, &ecx, &edx);
- return ecx;
-}
-static inline unsigned int cpuid_edx(unsigned int op)
-{
- unsigned int eax, ebx, ecx, edx;
-
- cpuid(op, &eax, &ebx, &ecx, &edx);
- return edx;
-}
-
#define load_cr3(pgdir) write_cr3(__pa(pgdir))
/*
@@ -473,6 +424,7 @@ struct thread_struct {
.vm86_info = NULL, \
.sysenter_cs = __KERNEL_CS, \
.io_bitmap_ptr = NULL, \
+ .gs = __KERNEL_PDA, \
}
/*
@@ -489,18 +441,9 @@ struct thread_struct {
.io_bitmap = { [ 0 ... IO_BITMAP_LONGS] = ~0 }, \
}
-static inline void load_esp0(struct tss_struct *tss, struct thread_struct *thread)
-{
- tss->esp0 = thread->esp0;
- /* This can only happen when SEP is enabled, no need to test "SEP"arately */
- if (unlikely(tss->ss1 != thread->sysenter_cs)) {
- tss->ss1 = thread->sysenter_cs;
- wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
- }
-}
-
#define start_thread(regs, new_eip, new_esp) do { \
- __asm__("movl %0,%%fs ; movl %0,%%gs": :"r" (0)); \
+ __asm__("movl %0,%%fs": :"r" (0)); \
+ regs->xgs = 0; \
set_fs(USER_DS); \
regs->xds = __USER_DS; \
regs->xes = __USER_DS; \
@@ -510,33 +453,6 @@ static inline void load_esp0(struct tss_struct *tss, struct thread_struct *threa
regs->esp = new_esp; \
} while (0)
-/*
- * These special macros can be used to get or set a debugging register
- */
-#define get_debugreg(var, register) \
- __asm__("movl %%db" #register ", %0" \
- :"=r" (var))
-#define set_debugreg(value, register) \
- __asm__("movl %0,%%db" #register \
- : /* no output */ \
- :"r" (value))
-
-/*
- * Set IOPL bits in EFLAGS from given mask
- */
-static inline void set_iopl_mask(unsigned mask)
-{
- unsigned int reg;
- __asm__ __volatile__ ("pushfl;"
- "popl %0;"
- "andl %1, %0;"
- "orl %2, %0;"
- "pushl %0;"
- "popfl"
- : "=&r" (reg)
- : "i" (~X86_EFLAGS_IOPL), "r" (mask));
-}
-
/* Forward declaration, a strange C thing */
struct task_struct;
struct mm_struct;
@@ -628,6 +544,105 @@ static inline void rep_nop(void)
#define cpu_relax() rep_nop()
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else
+#define paravirt_enabled() 0
+#define __cpuid native_cpuid
+
+static inline void load_esp0(struct tss_struct *tss, struct thread_struct *thread)
+{
+ tss->esp0 = thread->esp0;
+ /* This can only happen when SEP is enabled, no need to test "SEP"arately */
+ if (unlikely(tss->ss1 != thread->sysenter_cs)) {
+ tss->ss1 = thread->sysenter_cs;
+ wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
+ }
+}
+
+/*
+ * These special macros can be used to get or set a debugging register
+ */
+#define get_debugreg(var, register) \
+ __asm__("movl %%db" #register ", %0" \
+ :"=r" (var))
+#define set_debugreg(value, register) \
+ __asm__("movl %0,%%db" #register \
+ : /* no output */ \
+ :"r" (value))
+
+#define set_iopl_mask native_set_iopl_mask
+#endif /* CONFIG_PARAVIRT */
+
+/*
+ * Set IOPL bits in EFLAGS from given mask
+ */
+static fastcall inline void native_set_iopl_mask(unsigned mask)
+{
+ unsigned int reg;
+ __asm__ __volatile__ ("pushfl;"
+ "popl %0;"
+ "andl %1, %0;"
+ "orl %2, %0;"
+ "pushl %0;"
+ "popfl"
+ : "=&r" (reg)
+ : "i" (~X86_EFLAGS_IOPL), "r" (mask));
+}
+
+/*
+ * Generic CPUID function
+ * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx
+ * resulting in stale register contents being returned.
+ */
+static inline void cpuid(unsigned int op, unsigned int *eax, unsigned int *ebx, unsigned int *ecx, unsigned int *edx)
+{
+ *eax = op;
+ *ecx = 0;
+ __cpuid(eax, ebx, ecx, edx);
+}
+
+/* Some CPUID calls want 'count' to be placed in ecx */
+static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx,
+ int *edx)
+{
+ *eax = op;
+ *ecx = count;
+ __cpuid(eax, ebx, ecx, edx);
+}
+
+/*
+ * CPUID functions returning a single datum
+ */
+static inline unsigned int cpuid_eax(unsigned int op)
+{
+ unsigned int eax, ebx, ecx, edx;
+
+ cpuid(op, &eax, &ebx, &ecx, &edx);
+ return eax;
+}
+static inline unsigned int cpuid_ebx(unsigned int op)
+{
+ unsigned int eax, ebx, ecx, edx;
+
+ cpuid(op, &eax, &ebx, &ecx, &edx);
+ return ebx;
+}
+static inline unsigned int cpuid_ecx(unsigned int op)
+{
+ unsigned int eax, ebx, ecx, edx;
+
+ cpuid(op, &eax, &ebx, &ecx, &edx);
+ return ecx;
+}
+static inline unsigned int cpuid_edx(unsigned int op)
+{
+ unsigned int eax, ebx, ecx, edx;
+
+ cpuid(op, &eax, &ebx, &ecx, &edx);
+ return edx;
+}
+
/* generic versions from gas */
#define GENERIC_NOP1 ".byte 0x90\n"
#define GENERIC_NOP2 ".byte 0x89,0xf6\n"
@@ -727,4 +742,7 @@ extern unsigned long boot_option_idle_override;
extern void enable_sep_cpu(void);
extern int sysenter_setup(void);
+extern int init_gdt(int cpu, struct task_struct *idle);
+extern void secondary_cpu_init(void);
+
#endif /* __ASM_I386_PROCESSOR_H */
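
With this hunk the cpuid helpers, load_esp0(), the debug-register macros and set_iopl_mask() sit behind the CONFIG_PARAVIRT switch: a paravirtualized kernel pulls them from <asm/paravirt.h>, a native kernel keeps the inline versions above. A minimal usage sketch of the cpuid accessors either way — not part of the patch, the function name is hypothetical and a kernel context with <linux/kernel.h> is assumed:

    static void example_report_cpuid(void)
    {
            unsigned int max_leaf = cpuid_eax(0);   /* highest standard leaf */
            unsigned int feat_edx = cpuid_edx(1);   /* CPUID.1 feature bits in EDX */
            unsigned int feat_ecx = cpuid_ecx(1);   /* CPUID.1 feature bits in ECX */

            printk(KERN_INFO "cpuid: max leaf %u, features %08x/%08x\n",
                   max_leaf, feat_edx, feat_ecx);
    }

Whether __cpuid ends up as native_cpuid or a paravirt hook, the callers above stay identical.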
diff --git a/include/asm-i386/ptrace.h b/include/asm-i386/ptrace.h
index d505f501077..bdbc894339b 100644
--- a/include/asm-i386/ptrace.h
+++ b/include/asm-i386/ptrace.h
@@ -16,6 +16,8 @@ struct pt_regs {
long eax;
int xds;
int xes;
+ /* int xfs; */
+ int xgs;
long orig_eax;
long eip;
int xcs;
diff --git a/include/asm-i386/rwsem.h b/include/asm-i386/rwsem.h
index bc598d6388e..041906f3c6d 100644
--- a/include/asm-i386/rwsem.h
+++ b/include/asm-i386/rwsem.h
@@ -75,8 +75,8 @@ struct rw_semaphore {
#define __RWSEM_INITIALIZER(name) \
-{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) \
- __RWSEM_DEP_MAP_INIT(name) }
+{ RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \
+ LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) }
#define DECLARE_RWSEM(name) \
struct rw_semaphore name = __RWSEM_INITIALIZER(name)
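
The initializer now spells out the wait_lock via __SPIN_LOCK_UNLOCKED((name).wait_lock), which gives lockdep a per-lock name; declaring and using a semaphore is unchanged. A small sketch (identifier names are hypothetical):

    static DECLARE_RWSEM(example_sem);

    static void example_reader(void)
    {
            down_read(&example_sem);
            /* read-side critical section */
            up_read(&example_sem);
    }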
diff --git a/include/asm-i386/segment.h b/include/asm-i386/segment.h
index b7ab59685ba..3c796af3377 100644
--- a/include/asm-i386/segment.h
+++ b/include/asm-i386/segment.h
@@ -39,7 +39,7 @@
* 25 - APM BIOS support
*
* 26 - ESPFIX small SS
- * 27 - unused
+ * 27 - PDA [ per-cpu private data area ]
* 28 - unused
* 29 - unused
* 30 - unused
@@ -74,6 +74,9 @@
#define GDT_ENTRY_ESPFIX_SS (GDT_ENTRY_KERNEL_BASE + 14)
#define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)
+#define GDT_ENTRY_PDA (GDT_ENTRY_KERNEL_BASE + 15)
+#define __KERNEL_PDA (GDT_ENTRY_PDA * 8)
+
#define GDT_ENTRY_DOUBLEFAULT_TSS 31
/*
@@ -128,5 +131,7 @@
#define SEGMENT_LDT 0x4
#define SEGMENT_GDT 0x0
+#ifndef CONFIG_PARAVIRT
#define get_kernel_rpl() 0
#endif
+#endif
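
Per the GDT table in the comment above, the PDA descriptor occupies entry 27, so the selector later loaded into %gs evaluates to (illustrative arithmetic; RPL and TI bits are zero for a kernel GDT selector):

    /* __KERNEL_PDA = GDT_ENTRY_PDA * 8 = (GDT_ENTRY_KERNEL_BASE + 15) * 8 = 27 * 8 = 0xd8 */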
diff --git a/include/asm-i386/setup.h b/include/asm-i386/setup.h
index 2734909eff8..67659dbaf12 100644
--- a/include/asm-i386/setup.h
+++ b/include/asm-i386/setup.h
@@ -6,6 +6,8 @@
#ifndef _i386_SETUP_H
#define _i386_SETUP_H
+#define COMMAND_LINE_SIZE 256
+
#ifdef __KERNEL__
#include <linux/pfn.h>
@@ -14,10 +16,8 @@
*/
#define MAXMEM_PFN PFN_DOWN(MAXMEM)
#define MAX_NONPAE_PFN (1 << 20)
-#endif
#define PARAM_SIZE 4096
-#define COMMAND_LINE_SIZE 256
#define OLD_CL_MAGIC_ADDR 0x90020
#define OLD_CL_MAGIC 0xA33F
@@ -70,6 +70,7 @@ extern unsigned char boot_params[PARAM_SIZE];
struct e820entry;
char * __init machine_specific_memory_setup(void);
+char *memory_setup(void);
int __init copy_e820_map(struct e820entry * biosmap, int nr_map);
int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map);
@@ -78,4 +79,6 @@ void __init add_memory_region(unsigned long long start,
#endif /* __ASSEMBLY__ */
+#endif /* __KERNEL__ */
+
#endif /* _i386_SETUP_H */
diff --git a/include/asm-i386/smp.h b/include/asm-i386/smp.h
index bd59c1508e7..64fe624c02c 100644
--- a/include/asm-i386/smp.h
+++ b/include/asm-i386/smp.h
@@ -8,6 +8,7 @@
#include <linux/kernel.h>
#include <linux/threads.h>
#include <linux/cpumask.h>
+#include <asm/pda.h>
#endif
#ifdef CONFIG_X86_LOCAL_APIC
@@ -56,7 +57,7 @@ extern void cpu_uninit(void);
* from the initial startup. We map APIC_BASE very early in page_setup(),
* so this is correct in the x86 case.
*/
-#define raw_smp_processor_id() (current_thread_info()->cpu)
+#define raw_smp_processor_id() (read_pda(cpu_number))
extern cpumask_t cpu_callout_map;
extern cpumask_t cpu_callin_map;
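
raw_smp_processor_id() now resolves to a read of the per-CPU PDA (a %gs-relative load via read_pda) instead of chasing current_thread_info()->cpu off the stack; existing call sites are untouched. Sketch (function name hypothetical):

    static void example_log_cpu(void)
    {
            /* expands to read_pda(cpu_number): one %gs-relative load */
            printk(KERN_DEBUG "running on CPU %d\n", raw_smp_processor_id());
    }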
diff --git a/include/asm-i386/spinlock.h b/include/asm-i386/spinlock.h
index c18b71fae6b..d3bcebed60c 100644
--- a/include/asm-i386/spinlock.h
+++ b/include/asm-i386/spinlock.h
@@ -7,8 +7,14 @@
#include <asm/processor.h>
#include <linux/compiler.h>
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else
#define CLI_STRING "cli"
#define STI_STRING "sti"
+#define CLI_STI_CLOBBERS
+#define CLI_STI_INPUT_ARGS
+#endif /* CONFIG_PARAVIRT */
/*
* Your basic SMP spinlocks, allowing only a single CPU anywhere
@@ -53,25 +59,28 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long fla
{
asm volatile(
"\n1:\t"
- LOCK_PREFIX " ; decb %0\n\t"
+ LOCK_PREFIX " ; decb %[slock]\n\t"
"jns 5f\n"
"2:\t"
- "testl $0x200, %1\n\t"
+ "testl $0x200, %[flags]\n\t"
"jz 4f\n\t"
STI_STRING "\n"
"3:\t"
"rep;nop\n\t"
- "cmpb $0, %0\n\t"
+ "cmpb $0, %[slock]\n\t"
"jle 3b\n\t"
CLI_STRING "\n\t"
"jmp 1b\n"
"4:\t"
"rep;nop\n\t"
- "cmpb $0, %0\n\t"
+ "cmpb $0, %[slock]\n\t"
"jg 1b\n\t"
"jmp 4b\n"
"5:\n\t"
- : "+m" (lock->slock) : "r" (flags) : "memory");
+ : [slock] "+m" (lock->slock)
+ : [flags] "r" (flags)
+ CLI_STI_INPUT_ARGS
+ : "memory" CLI_STI_CLOBBERS);
}
#endif
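
The slock asm switches to named operands so a paravirt build can splice extra inputs and clobbers in through CLI_STI_INPUT_ARGS/CLI_STI_CLOBBERS without renumbering %0/%1. A stand-alone illustration of the [name] operand syntax used here (generic GCC inline asm for x86, not taken from the patch):

    static inline void example_dec_byte_locked(unsigned char *p)
    {
            asm volatile("lock; decb %[val]"
                         : [val] "+m" (*p)
                         : /* no other inputs */
                         : "memory");
    }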
diff --git a/include/asm-i386/suspend.h b/include/asm-i386/suspend.h
index 08be1e5009d..8dbaafe611f 100644
--- a/include/asm-i386/suspend.h
+++ b/include/asm-i386/suspend.h
@@ -6,29 +6,14 @@
#include <asm/desc.h>
#include <asm/i387.h>
-static inline int
-arch_prepare_suspend(void)
-{
- /* If you want to make non-PSE machine work, turn off paging
- in swsusp_arch_suspend. swsusp_pg_dir should have identity mapping, so
- it could work... */
- if (!cpu_has_pse) {
- printk(KERN_ERR "PSE is required for swsusp.\n");
- return -EPERM;
- }
- return 0;
-}
+static inline int arch_prepare_suspend(void) { return 0; }
/* image of the saved processor state */
struct saved_context {
u16 es, fs, gs, ss;
unsigned long cr0, cr2, cr3, cr4;
- u16 gdt_pad;
- u16 gdt_limit;
- unsigned long gdt_base;
- u16 idt_pad;
- u16 idt_limit;
- unsigned long idt_base;
+ struct Xgt_desc_struct gdt;
+ struct Xgt_desc_struct idt;
u16 ldt;
u16 tss;
unsigned long tr;
diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h
index a6dabbcd6e6..a6d20d9a1a3 100644
--- a/include/asm-i386/system.h
+++ b/include/asm-i386/system.h
@@ -88,6 +88,9 @@ __asm__ __volatile__ ("movw %%dx,%1\n\t" \
#define savesegment(seg, value) \
asm volatile("mov %%" #seg ",%0":"=rm" (value))
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else
#define read_cr0() ({ \
unsigned int __dummy; \
__asm__ __volatile__( \
@@ -139,17 +142,18 @@ __asm__ __volatile__ ("movw %%dx,%1\n\t" \
#define write_cr4(x) \
__asm__ __volatile__("movl %0,%%cr4": :"r" (x))
-/*
- * Clear and set 'TS' bit respectively
- */
+#define wbinvd() \
+ __asm__ __volatile__ ("wbinvd": : :"memory")
+
+/* Clear the 'TS' bit */
#define clts() __asm__ __volatile__ ("clts")
+#endif /* CONFIG_PARAVIRT */
+
+/* Set the 'TS' bit */
#define stts() write_cr0(8 | read_cr0())
#endif /* __KERNEL__ */
-#define wbinvd() \
- __asm__ __volatile__ ("wbinvd": : :"memory")
-
static inline unsigned long get_limit(unsigned long segment)
{
unsigned long __limit;
diff --git a/include/asm-i386/thread_info.h b/include/asm-i386/thread_info.h
index 54d6d7aea93..46d32ad9208 100644
--- a/include/asm-i386/thread_info.h
+++ b/include/asm-i386/thread_info.h
@@ -95,15 +95,7 @@ static inline struct thread_info *current_thread_info(void)
/* thread information allocation */
#ifdef CONFIG_DEBUG_STACK_USAGE
-#define alloc_thread_info(tsk) \
- ({ \
- struct thread_info *ret; \
- \
- ret = kmalloc(THREAD_SIZE, GFP_KERNEL); \
- if (ret) \
- memset(ret, 0, THREAD_SIZE); \
- ret; \
- })
+#define alloc_thread_info(tsk) kzalloc(THREAD_SIZE, GFP_KERNEL)
#else
#define alloc_thread_info(tsk) kmalloc(THREAD_SIZE, GFP_KERNEL)
#endif
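
The CONFIG_DEBUG_STACK_USAGE branch relies on kzalloc() being equivalent to kmalloc() followed by a memset() to zero. Sketch of the two forms (hypothetical wrapper; <linux/slab.h> assumed):

    static struct thread_info *example_alloc_ti(void)
    {
            /* new form */
            struct thread_info *ti = kzalloc(THREAD_SIZE, GFP_KERNEL);

            /* old form, for comparison:
             *   ti = kmalloc(THREAD_SIZE, GFP_KERNEL);
             *   if (ti)
             *           memset(ti, 0, THREAD_SIZE);
             */
            return ti;
    }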
diff --git a/include/asm-i386/time.h b/include/asm-i386/time.h
new file mode 100644
index 00000000000..ea8065af825
--- /dev/null
+++ b/include/asm-i386/time.h
@@ -0,0 +1,41 @@
+#ifndef _ASMi386_TIME_H
+#define _ASMi386_TIME_H
+
+#include <linux/efi.h>
+#include "mach_time.h"
+
+static inline unsigned long native_get_wallclock(void)
+{
+ unsigned long retval;
+
+ if (efi_enabled)
+ retval = efi_get_time();
+ else
+ retval = mach_get_cmos_time();
+
+ return retval;
+}
+
+static inline int native_set_wallclock(unsigned long nowtime)
+{
+ int retval;
+
+ if (efi_enabled)
+ retval = efi_set_rtc_mmss(nowtime);
+ else
+ retval = mach_set_rtc_mmss(nowtime);
+
+ return retval;
+}
+
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else /* !CONFIG_PARAVIRT */
+
+#define get_wallclock() native_get_wallclock()
+#define set_wallclock(x) native_set_wallclock(x)
+#define do_time_init() time_init_hook()
+
+#endif /* CONFIG_PARAVIRT */
+
+#endif
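
The new header supplies the native RTC accessors; without CONFIG_PARAVIRT, get_wallclock()/set_wallclock() map straight onto them (EFI or CMOS underneath), otherwise <asm/paravirt.h> provides the hooks. Call-site sketch (hypothetical function; the error handling is illustrative only):

    static void example_resync_rtc(void)
    {
            unsigned long now = get_wallclock();

            if (set_wallclock(now))
                    printk(KERN_WARNING "RTC write-back failed\n");
    }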
diff --git a/include/asm-i386/tlbflush.h b/include/asm-i386/tlbflush.h
index 360648b0f2b..4dd82840d53 100644
--- a/include/asm-i386/tlbflush.h
+++ b/include/asm-i386/tlbflush.h
@@ -4,7 +4,15 @@
#include <linux/mm.h>
#include <asm/processor.h>
-#define __flush_tlb() \
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else
+#define __flush_tlb() __native_flush_tlb()
+#define __flush_tlb_global() __native_flush_tlb_global()
+#define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
+#endif
+
+#define __native_flush_tlb() \
do { \
unsigned int tmpreg; \
\
@@ -19,7 +27,7 @@
* Global pages have to be flushed a bit differently. Not a real
* performance problem because this does not happen often.
*/
-#define __flush_tlb_global() \
+#define __native_flush_tlb_global() \
do { \
unsigned int tmpreg, cr4, cr4_orig; \
\
@@ -36,6 +44,9 @@
: "memory"); \
} while (0)
+#define __native_flush_tlb_single(addr) \
+ __asm__ __volatile__("invlpg (%0)" ::"r" (addr) : "memory")
+
# define __flush_tlb_all() \
do { \
if (cpu_has_pge) \
@@ -46,9 +57,6 @@
#define cpu_has_invlpg (boot_cpu_data.x86 > 3)
-#define __flush_tlb_single(addr) \
- __asm__ __volatile__("invlpg (%0)" ::"r" (addr) : "memory")
-
#ifdef CONFIG_X86_INVLPG
# define __flush_tlb_one(addr) __flush_tlb_single(addr)
#else
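
The TLB flush macros now dispatch through a paravirt-aware layer: without CONFIG_PARAVIRT, __flush_tlb_single() is just __native_flush_tlb_single(), i.e. a single invlpg. Sketch of the call shape (hypothetical wrapper):

    static inline void example_flush_one_page(unsigned long addr)
    {
            /* native case emits: invlpg (addr) */
            __flush_tlb_single(addr);
    }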
diff --git a/include/asm-i386/unistd.h b/include/asm-i386/unistd.h
index beeeaf6b054..833fa1704ff 100644
--- a/include/asm-i386/unistd.h
+++ b/include/asm-i386/unistd.h
@@ -329,104 +329,6 @@
#ifdef __KERNEL__
#define NR_syscalls 320
-#include <linux/err.h>
-
-/*
- * user-visible error numbers are in the range -1 - -MAX_ERRNO: see
- * <asm-i386/errno.h>
- */
-#define __syscall_return(type, res) \
-do { \
- if ((unsigned long)(res) >= (unsigned long)(-MAX_ERRNO)) { \
- errno = -(res); \
- res = -1; \
- } \
- return (type) (res); \
-} while (0)
-
-/* XXX - _foo needs to be __foo, while __NR_bar could be _NR_bar. */
-#define _syscall0(type,name) \
-type name(void) \
-{ \
-long __res; \
-__asm__ volatile ("int $0x80" \
- : "=a" (__res) \
- : "0" (__NR_##name)); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall1(type,name,type1,arg1) \
-type name(type1 arg1) \
-{ \
-long __res; \
-__asm__ volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx" \
- : "=a" (__res) \
- : "0" (__NR_##name),"ri" ((long)(arg1)) : "memory"); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall2(type,name,type1,arg1,type2,arg2) \
-type name(type1 arg1,type2 arg2) \
-{ \
-long __res; \
-__asm__ volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx" \
- : "=a" (__res) \
- : "0" (__NR_##name),"ri" ((long)(arg1)),"c" ((long)(arg2)) \
- : "memory"); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
-type name(type1 arg1,type2 arg2,type3 arg3) \
-{ \
-long __res; \
-__asm__ volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx" \
- : "=a" (__res) \
- : "0" (__NR_##name),"ri" ((long)(arg1)),"c" ((long)(arg2)), \
- "d" ((long)(arg3)) : "memory"); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
-type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
-{ \
-long __res; \
-__asm__ volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx" \
- : "=a" (__res) \
- : "0" (__NR_##name),"ri" ((long)(arg1)),"c" ((long)(arg2)), \
- "d" ((long)(arg3)),"S" ((long)(arg4)) : "memory"); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
- type5,arg5) \
-type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
-{ \
-long __res; \
-__asm__ volatile ("push %%ebx ; movl %2,%%ebx ; movl %1,%%eax ; " \
- "int $0x80 ; pop %%ebx" \
- : "=a" (__res) \
- : "i" (__NR_##name),"ri" ((long)(arg1)),"c" ((long)(arg2)), \
- "d" ((long)(arg3)),"S" ((long)(arg4)),"D" ((long)(arg5)) \
- : "memory"); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
- type5,arg5,type6,arg6) \
-type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,type6 arg6) \
-{ \
-long __res; \
- struct { long __a1; long __a6; } __s = { (long)arg1, (long)arg6 }; \
-__asm__ volatile ("push %%ebp ; push %%ebx ; movl 4(%2),%%ebp ; " \
- "movl 0(%2),%%ebx ; movl %1,%%eax ; int $0x80 ; " \
- "pop %%ebx ; pop %%ebp" \
- : "=a" (__res) \
- : "i" (__NR_##name),"0" ((long)(&__s)),"c" ((long)(arg2)), \
- "d" ((long)(arg3)),"S" ((long)(arg4)),"D" ((long)(arg5)) \
- : "memory"); \
-__syscall_return(type,__res); \
-}
#define __ARCH_WANT_IPC_PARSE_VERSION
#define __ARCH_WANT_OLD_READDIR
diff --git a/include/asm-i386/unwind.h b/include/asm-i386/unwind.h
index 5031d693b89..aa2c931e30d 100644
--- a/include/asm-i386/unwind.h
+++ b/include/asm-i386/unwind.h
@@ -71,6 +71,7 @@ static inline void arch_unw_init_blocked(struct unwind_frame_info *info)
info->regs.xss = __KERNEL_DS;
info->regs.xds = __USER_DS;
info->regs.xes = __USER_DS;
+ info->regs.xgs = __KERNEL_PDA;
}
extern asmlinkage int arch_unwind_init_running(struct unwind_frame_info *,
@@ -78,17 +79,13 @@ extern asmlinkage int arch_unwind_init_running(struct unwind_frame_info *,
void *arg),
void *arg);
-static inline int arch_unw_user_mode(const struct unwind_frame_info *info)
+static inline int arch_unw_user_mode(/*const*/ struct unwind_frame_info *info)
{
-#if 0 /* This can only work when selector register and EFLAGS saves/restores
- are properly annotated (and tracked in UNW_REGISTER_INFO). */
- return user_mode_vm(&info->regs);
-#else
- return info->regs.eip < PAGE_OFFSET
+ return user_mode_vm(&info->regs)
+ || info->regs.eip < PAGE_OFFSET
|| (info->regs.eip >= __fix_to_virt(FIX_VDSO)
- && info->regs.eip < __fix_to_virt(FIX_VDSO) + PAGE_SIZE)
+ && info->regs.eip < __fix_to_virt(FIX_VDSO) + PAGE_SIZE)
|| info->regs.esp < PAGE_OFFSET;
-#endif
}
#else
diff --git a/include/asm-i386/vm86.h b/include/asm-i386/vm86.h
index 952fd695738..a5edf517b99 100644
--- a/include/asm-i386/vm86.h
+++ b/include/asm-i386/vm86.h
@@ -145,26 +145,13 @@ struct vm86plus_struct {
* at the end of the structure. Look at ptrace.h to see the "normal"
* setup. For user space layout see 'struct vm86_regs' above.
*/
+#include <asm/ptrace.h>
struct kernel_vm86_regs {
/*
* normal regs, with special meaning for the segment descriptors..
*/
- long ebx;
- long ecx;
- long edx;
- long esi;
- long edi;
- long ebp;
- long eax;
- long __null_ds;
- long __null_es;
- long orig_eax;
- long eip;
- unsigned short cs, __csh;
- long eflags;
- long esp;
- unsigned short ss, __ssh;
+ struct pt_regs pt;
/*
* these are specific to v86 mode:
*/
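
struct kernel_vm86_regs now embeds a struct pt_regs rather than duplicating its fields, so accesses to the general registers are expected to go through the new pt member. Sketch of the implied access change (function hypothetical; field names taken from struct pt_regs above):

    static inline long example_vm86_eax(struct kernel_vm86_regs *regs)
    {
            return regs->pt.eax;    /* formerly regs->eax */
    }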
diff --git a/include/asm-ia64/Kbuild b/include/asm-ia64/Kbuild
index 15818a18bc5..4a1e48b9f40 100644
--- a/include/asm-ia64/Kbuild
+++ b/include/asm-ia64/Kbuild
@@ -10,7 +10,6 @@ header-y += intrinsics.h
header-y += perfmon_default_smpl.h
header-y += ptrace_offsets.h
header-y += rse.h
-header-y += setup.h
header-y += ucontext.h
unifdef-y += perfmon.h
diff --git a/include/asm-ia64/dma-mapping.h b/include/asm-ia64/dma-mapping.h
index 99a8f8e1218..ebd5887f4b1 100644
--- a/include/asm-ia64/dma-mapping.h
+++ b/include/asm-ia64/dma-mapping.h
@@ -50,7 +50,8 @@ dma_set_mask (struct device *dev, u64 mask)
extern int dma_get_cache_alignment(void);
static inline void
-dma_cache_sync (void *vaddr, size_t size, enum dma_data_direction dir)
+dma_cache_sync (struct device *dev, void *vaddr, size_t size,
+ enum dma_data_direction dir)
{
/*
* IA-64 is cache-coherent, so this is mostly a no-op. However, we do need to
@@ -59,6 +60,6 @@ dma_cache_sync (void *vaddr, size_t size, enum dma_data_direction dir)
mb();
}
-#define dma_is_consistent(dma_handle) (1) /* all we do is coherent memory... */
+#define dma_is_consistent(d, h) (1) /* all we do is coherent memory... */
#endif /* _ASM_IA64_DMA_MAPPING_H */
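
Across the architectures touched here, dma_cache_sync() and dma_is_consistent() gain a struct device * as their first parameter. Call-site sketch after the change (names hypothetical; <linux/dma-mapping.h> assumed):

    static void example_sync_for_device(struct device *dev, void *buf, size_t len)
    {
            /* the owning device is now passed through to the arch hook */
            dma_cache_sync(dev, buf, len, DMA_TO_DEVICE);
    }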
diff --git a/include/asm-ia64/futex.h b/include/asm-ia64/futex.h
index 07d77f3a8cb..8a98a265413 100644
--- a/include/asm-ia64/futex.h
+++ b/include/asm-ia64/futex.h
@@ -59,7 +59,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
return -EFAULT;
- inc_preempt_count();
+ pagefault_disable();
switch (op) {
case FUTEX_OP_SET:
@@ -83,7 +83,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
ret = -ENOSYS;
}
- dec_preempt_count();
+ pagefault_enable();
if (!ret) {
switch (cmp) {
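
This and the matching futex hunks below swap the bare inc_preempt_count()/dec_preempt_count() pair for pagefault_disable()/pagefault_enable() around the in-user atomic operation. Generic shape of the bracket (sketch; do_user_op() is a placeholder, not a function from the patch):

    static int example_atomic_user_op(int __user *uaddr)
    {
            int ret;

            pagefault_disable();            /* faults on uaddr must not sleep here */
            ret = do_user_op(uaddr);        /* placeholder for the FUTEX_OP_* switch */
            pagefault_enable();

            return ret;
    }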
diff --git a/include/asm-ia64/pgalloc.h b/include/asm-ia64/pgalloc.h
index 9cb68e9b377..393e04c42a2 100644
--- a/include/asm-ia64/pgalloc.h
+++ b/include/asm-ia64/pgalloc.h
@@ -60,7 +60,7 @@ static inline void *pgtable_quicklist_alloc(void)
static inline void pgtable_quicklist_free(void *pgtable_entry)
{
#ifdef CONFIG_NUMA
- unsigned long nid = page_to_nid(virt_to_page(pgtable_entry));
+ int nid = page_to_nid(virt_to_page(pgtable_entry));
if (unlikely(nid != numa_node_id())) {
free_page((unsigned long)pgtable_entry);
diff --git a/include/asm-m32r/setup.h b/include/asm-m32r/setup.h
index 52f4fa29abf..6a0b32202d4 100644
--- a/include/asm-m32r/setup.h
+++ b/include/asm-m32r/setup.h
@@ -1,6 +1,11 @@
/*
* This is set up by the setup-routine at boot-time
*/
+
+#define COMMAND_LINE_SIZE 512
+
+#ifdef __KERNEL__
+
#define PARAM ((unsigned char *)empty_zero_page)
#define MOUNT_ROOT_RDONLY (*(unsigned long *) (PARAM+0x000))
@@ -18,8 +23,6 @@
#define SCREEN_INFO (*(struct screen_info *) (PARAM+0x200))
-#define COMMAND_LINE_SIZE (512)
-
#define RAMDISK_IMAGE_START_MASK (0x07FF)
#define RAMDISK_PROMPT_FLAG (0x8000)
#define RAMDISK_LOAD_FLAG (0x4000)
@@ -27,3 +30,5 @@
extern unsigned long memory_start;
extern unsigned long memory_end;
+#endif /* __KERNEL__ */
+
diff --git a/include/asm-m32r/unistd.h b/include/asm-m32r/unistd.h
index 95aa34298d8..5b66bd3c6ed 100644
--- a/include/asm-m32r/unistd.h
+++ b/include/asm-m32r/unistd.h
@@ -296,117 +296,6 @@
#ifdef __KERNEL__
#define NR_syscalls 285
-#include <linux/err.h>
-
-/* user-visible error numbers are in the range -1 - -MAX_ERRNO: see
- * <asm-m32r/errno.h>
- */
-
-#include <asm/syscall.h> /* SYSCALL_* */
-
-#define __syscall_return(type, res) \
-do { \
- if ((unsigned long)(res) >= (unsigned long)(-MAX_ERRNO)) { \
- /* Avoid using "res" which is declared to be in register r0; \
- errno might expand to a function call and clobber it. */ \
- int __err = -(res); \
- errno = __err; \
- res = -1; \
- } \
- return (type) (res); \
-} while (0)
-
-#define _syscall0(type,name) \
-type name(void) \
-{ \
-register long __scno __asm__ ("r7") = __NR_##name; \
-register long __res __asm__("r0"); \
-__asm__ __volatile__ (\
- "trap #" SYSCALL_VECTOR "|| nop"\
- : "=r" (__res) \
- : "r" (__scno) \
- : "memory"); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall1(type,name,type1,arg1) \
-type name(type1 arg1) \
-{ \
-register long __scno __asm__ ("r7") = __NR_##name; \
-register long __res __asm__ ("r0") = (long)(arg1); \
-__asm__ __volatile__ (\
- "trap #" SYSCALL_VECTOR "|| nop"\
- : "=r" (__res) \
- : "r" (__scno), "0" (__res) \
- : "memory"); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall2(type,name,type1,arg1,type2,arg2) \
-type name(type1 arg1,type2 arg2) \
-{ \
-register long __scno __asm__ ("r7") = __NR_##name; \
-register long __arg2 __asm__ ("r1") = (long)(arg2); \
-register long __res __asm__ ("r0") = (long)(arg1); \
-__asm__ __volatile__ (\
- "trap #" SYSCALL_VECTOR "|| nop"\
- : "=r" (__res) \
- : "r" (__scno), "0" (__res), "r" (__arg2) \
- : "memory"); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
-type name(type1 arg1,type2 arg2,type3 arg3) \
-{ \
-register long __scno __asm__ ("r7") = __NR_##name; \
-register long __arg3 __asm__ ("r2") = (long)(arg3); \
-register long __arg2 __asm__ ("r1") = (long)(arg2); \
-register long __res __asm__ ("r0") = (long)(arg1); \
-__asm__ __volatile__ (\
- "trap #" SYSCALL_VECTOR "|| nop"\
- : "=r" (__res) \
- : "r" (__scno), "0" (__res), "r" (__arg2), \
- "r" (__arg3) \
- : "memory"); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
-type name(type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
-{ \
-register long __scno __asm__ ("r7") = __NR_##name; \
-register long __arg4 __asm__ ("r3") = (long)(arg4); \
-register long __arg3 __asm__ ("r2") = (long)(arg3); \
-register long __arg2 __asm__ ("r1") = (long)(arg2); \
-register long __res __asm__ ("r0") = (long)(arg1); \
-__asm__ __volatile__ (\
- "trap #" SYSCALL_VECTOR "|| nop"\
- : "=r" (__res) \
- : "r" (__scno), "0" (__res), "r" (__arg2), \
- "r" (__arg3), "r" (__arg4) \
- : "memory"); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
- type5,arg5) \
-type name(type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
-{ \
-register long __scno __asm__ ("r7") = __NR_##name; \
-register long __arg5 __asm__ ("r4") = (long)(arg5); \
-register long __arg4 __asm__ ("r3") = (long)(arg4); \
-register long __arg3 __asm__ ("r2") = (long)(arg3); \
-register long __arg2 __asm__ ("r1") = (long)(arg2); \
-register long __res __asm__ ("r0") = (long)(arg1); \
-__asm__ __volatile__ (\
- "trap #" SYSCALL_VECTOR "|| nop"\
- : "=r" (__res) \
- : "r" (__scno), "0" (__res), "r" (__arg2), \
- "r" (__arg3), "r" (__arg4), "r" (__arg5) \
- : "memory"); \
-__syscall_return(type,__res); \
-}
#define __ARCH_WANT_IPC_PARSE_VERSION
#define __ARCH_WANT_STAT64
diff --git a/include/asm-m68k/dma-mapping.h b/include/asm-m68k/dma-mapping.h
index d90d841d3df..00259ed6fc9 100644
--- a/include/asm-m68k/dma-mapping.h
+++ b/include/asm-m68k/dma-mapping.h
@@ -21,7 +21,7 @@ static inline int dma_get_cache_alignment(void)
return 1 << L1_CACHE_SHIFT;
}
-static inline int dma_is_consistent(dma_addr_t dma_addr)
+static inline int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
{
return 0;
}
@@ -41,7 +41,7 @@ static inline void dma_free_noncoherent(struct device *dev, size_t size,
{
dma_free_coherent(dev, size, addr, handle);
}
-static inline void dma_cache_sync(void *vaddr, size_t size,
+static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction dir)
{
/* we use coherent allocation, so not much to do here. */
diff --git a/include/asm-m68k/setup.h b/include/asm-m68k/setup.h
index 7facc9a46e7..2a8853cd655 100644
--- a/include/asm-m68k/setup.h
+++ b/include/asm-m68k/setup.h
@@ -41,8 +41,12 @@
#define MACH_Q40 10
#define MACH_SUN3X 11
+#define COMMAND_LINE_SIZE 256
+
#ifdef __KERNEL__
+#define CL_SIZE COMMAND_LINE_SIZE
+
#ifndef __ASSEMBLY__
extern unsigned long m68k_machtype;
#endif /* !__ASSEMBLY__ */
@@ -355,8 +359,6 @@ extern int m68k_is040or060;
*/
#define NUM_MEMINFO 4
-#define CL_SIZE 256
-#define COMMAND_LINE_SIZE CL_SIZE
#ifndef __ASSEMBLY__
struct mem_info {
diff --git a/include/asm-m68k/unistd.h b/include/asm-m68k/unistd.h
index ad4348058c6..fdbb60e6a0d 100644
--- a/include/asm-m68k/unistd.h
+++ b/include/asm-m68k/unistd.h
@@ -317,103 +317,6 @@
#ifdef __KERNEL__
#define NR_syscalls 311
-#include <linux/err.h>
-
-/* user-visible error numbers are in the range -1 - -MAX_ERRNO: see
- <asm-m68k/errno.h> */
-
-#define __syscall_return(type, res) \
-do { \
- if ((unsigned long)(res) >= (unsigned long)(-MAX_ERRNO)) { \
- /* avoid using res which is declared to be in register d0; \
- errno might expand to a function call and clobber it. */ \
- int __err = -(res); \
- errno = __err; \
- res = -1; \
- } \
- return (type) (res); \
-} while (0)
-
-#define _syscall0(type,name) \
-type name(void) \
-{ \
-register long __res __asm__ ("%d0") = __NR_##name; \
-__asm__ __volatile__ ("trap #0" \
- : "+d" (__res) ); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall1(type,name,atype,a) \
-type name(atype a) \
-{ \
-register long __res __asm__ ("%d0") = __NR_##name; \
-register long __a __asm__ ("%d1") = (long)(a); \
-__asm__ __volatile__ ("trap #0" \
- : "+d" (__res) \
- : "d" (__a) ); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall2(type,name,atype,a,btype,b) \
-type name(atype a,btype b) \
-{ \
-register long __res __asm__ ("%d0") = __NR_##name; \
-register long __a __asm__ ("%d1") = (long)(a); \
-register long __b __asm__ ("%d2") = (long)(b); \
-__asm__ __volatile__ ("trap #0" \
- : "+d" (__res) \
- : "d" (__a), "d" (__b) \
- ); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall3(type,name,atype,a,btype,b,ctype,c) \
-type name(atype a,btype b,ctype c) \
-{ \
-register long __res __asm__ ("%d0") = __NR_##name; \
-register long __a __asm__ ("%d1") = (long)(a); \
-register long __b __asm__ ("%d2") = (long)(b); \
-register long __c __asm__ ("%d3") = (long)(c); \
-__asm__ __volatile__ ("trap #0" \
- : "+d" (__res) \
- : "d" (__a), "d" (__b), \
- "d" (__c) \
- ); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall4(type,name,atype,a,btype,b,ctype,c,dtype,d) \
-type name (atype a, btype b, ctype c, dtype d) \
-{ \
-register long __res __asm__ ("%d0") = __NR_##name; \
-register long __a __asm__ ("%d1") = (long)(a); \
-register long __b __asm__ ("%d2") = (long)(b); \
-register long __c __asm__ ("%d3") = (long)(c); \
-register long __d __asm__ ("%d4") = (long)(d); \
-__asm__ __volatile__ ("trap #0" \
- : "+d" (__res) \
- : "d" (__a), "d" (__b), \
- "d" (__c), "d" (__d) \
- ); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall5(type,name,atype,a,btype,b,ctype,c,dtype,d,etype,e) \
-type name (atype a,btype b,ctype c,dtype d,etype e) \
-{ \
-register long __res __asm__ ("%d0") = __NR_##name; \
-register long __a __asm__ ("%d1") = (long)(a); \
-register long __b __asm__ ("%d2") = (long)(b); \
-register long __c __asm__ ("%d3") = (long)(c); \
-register long __d __asm__ ("%d4") = (long)(d); \
-register long __e __asm__ ("%d5") = (long)(e); \
-__asm__ __volatile__ ("trap #0" \
- : "+d" (__res) \
- : "d" (__a), "d" (__b), \
- "d" (__c), "d" (__d), "d" (__e) \
- ); \
-__syscall_return(type,__res); \
-}
#define __ARCH_WANT_IPC_PARSE_VERSION
#define __ARCH_WANT_OLD_READDIR
diff --git a/include/asm-m68knommu/setup.h b/include/asm-m68knommu/setup.h
index d2b0fcce41b..fb86bb2a607 100644
--- a/include/asm-m68knommu/setup.h
+++ b/include/asm-m68knommu/setup.h
@@ -1,5 +1,10 @@
+#ifdef __KERNEL__
+
#include <asm-m68k/setup.h>
/* We have a bigger command line buffer. */
#undef COMMAND_LINE_SIZE
+
+#endif /* __KERNEL__ */
+
#define COMMAND_LINE_SIZE 512
diff --git a/include/asm-m68knommu/unistd.h b/include/asm-m68knommu/unistd.h
index ebaf0319711..82e03195f32 100644
--- a/include/asm-m68knommu/unistd.h
+++ b/include/asm-m68knommu/unistd.h
@@ -318,156 +318,6 @@
#ifdef __KERNEL__
#define NR_syscalls 311
-#include <linux/err.h>
-
-/* user-visible error numbers are in the range -1 - -MAX_ERRNO: see
- <asm-m68k/errno.h> */
-
-#define __syscall_return(type, res) \
-do { \
- if ((unsigned long)(res) >= (unsigned long)(-MAX_ERRNO)) { \
- /* avoid using res which is declared to be in register d0; \
- errno might expand to a function call and clobber it. */ \
- int __err = -(res); \
- errno = __err; \
- res = -1; \
- } \
- return (type) (res); \
-} while (0)
-
-#define _syscall0(type, name) \
-type name(void) \
-{ \
- long __res; \
- __asm__ __volatile__ ("movel %1, %%d0\n\t" \
- "trap #0\n\t" \
- "movel %%d0, %0" \
- : "=g" (__res) \
- : "i" (__NR_##name) \
- : "cc", "%d0"); \
- if ((unsigned long)(__res) >= (unsigned long)(-125)) { \
- errno = -__res; \
- __res = -1; \
- } \
- return (type)__res; \
-}
-
-#define _syscall1(type, name, atype, a) \
-type name(atype a) \
-{ \
- long __res; \
- __asm__ __volatile__ ("movel %2, %%d1\n\t" \
- "movel %1, %%d0\n\t" \
- "trap #0\n\t" \
- "movel %%d0, %0" \
- : "=g" (__res) \
- : "i" (__NR_##name), \
- "g" ((long)a) \
- : "cc", "%d0", "%d1"); \
- if ((unsigned long)(__res) >= (unsigned long)(-125)) { \
- errno = -__res; \
- __res = -1; \
- } \
- return (type)__res; \
-}
-
-#define _syscall2(type, name, atype, a, btype, b) \
-type name(atype a, btype b) \
-{ \
- long __res; \
- __asm__ __volatile__ ("movel %3, %%d2\n\t" \
- "movel %2, %%d1\n\t" \
- "movel %1, %%d0\n\t" \
- "trap #0\n\t" \
- "movel %%d0, %0" \
- : "=g" (__res) \
- : "i" (__NR_##name), \
- "a" ((long)a), \
- "g" ((long)b) \
- : "cc", "%d0", "%d1", "%d2"); \
- if ((unsigned long)(__res) >= (unsigned long)(-125)) { \
- errno = -__res; \
- __res = -1; \
- } \
- return (type)__res; \
-}
-
-#define _syscall3(type, name, atype, a, btype, b, ctype, c) \
-type name(atype a, btype b, ctype c) \
-{ \
- long __res; \
- __asm__ __volatile__ ("movel %4, %%d3\n\t" \
- "movel %3, %%d2\n\t" \
- "movel %2, %%d1\n\t" \
- "movel %1, %%d0\n\t" \
- "trap #0\n\t" \
- "movel %%d0, %0" \
- : "=g" (__res) \
- : "i" (__NR_##name), \
- "a" ((long)a), \
- "a" ((long)b), \
- "g" ((long)c) \
- : "cc", "%d0", "%d1", "%d2", "%d3"); \
- if ((unsigned long)(__res) >= (unsigned long)(-125)) { \
- errno = -__res; \
- __res = -1; \
- } \
- return (type)__res; \
-}
-
-#define _syscall4(type, name, atype, a, btype, b, ctype, c, dtype, d) \
-type name(atype a, btype b, ctype c, dtype d) \
-{ \
- long __res; \
- __asm__ __volatile__ ("movel %5, %%d4\n\t" \
- "movel %4, %%d3\n\t" \
- "movel %3, %%d2\n\t" \
- "movel %2, %%d1\n\t" \
- "movel %1, %%d0\n\t" \
- "trap #0\n\t" \
- "movel %%d0, %0" \
- : "=g" (__res) \
- : "i" (__NR_##name), \
- "a" ((long)a), \
- "a" ((long)b), \
- "a" ((long)c), \
- "g" ((long)d) \
- : "cc", "%d0", "%d1", "%d2", "%d3", \
- "%d4"); \
- if ((unsigned long)(__res) >= (unsigned long)(-125)) { \
- errno = -__res; \
- __res = -1; \
- } \
- return (type)__res; \
-}
-
-#define _syscall5(type, name, atype, a, btype, b, ctype, c, dtype, d, etype, e) \
-type name(atype a, btype b, ctype c, dtype d, etype e) \
-{ \
- long __res; \
- __asm__ __volatile__ ("movel %6, %%d5\n\t" \
- "movel %5, %%d4\n\t" \
- "movel %4, %%d3\n\t" \
- "movel %3, %%d2\n\t" \
- "movel %2, %%d1\n\t" \
- "movel %1, %%d0\n\t" \
- "trap #0\n\t" \
- "movel %%d0, %0" \
- : "=g" (__res) \
- : "i" (__NR_##name), \
- "a" ((long)a), \
- "a" ((long)b), \
- "a" ((long)c), \
- "a" ((long)d), \
- "g" ((long)e) \
- : "cc", "%d0", "%d1", "%d2", "%d3", \
- "%d4", "%d5"); \
- if ((unsigned long)(__res) >= (unsigned long)(-125)) { \
- errno = -__res; \
- __res = -1; \
- } \
- return (type)__res; \
-}
#define __ARCH_WANT_IPC_PARSE_VERSION
#define __ARCH_WANT_OLD_READDIR
diff --git a/include/asm-mips/dma-mapping.h b/include/asm-mips/dma-mapping.h
index 43288634c38..236d1a467cc 100644
--- a/include/asm-mips/dma-mapping.h
+++ b/include/asm-mips/dma-mapping.h
@@ -63,9 +63,9 @@ dma_get_cache_alignment(void)
return 128;
}
-extern int dma_is_consistent(dma_addr_t dma_addr);
+extern int dma_is_consistent(struct device *dev, dma_addr_t dma_addr);
-extern void dma_cache_sync(void *vaddr, size_t size,
+extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction direction);
#define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
diff --git a/include/asm-mips/futex.h b/include/asm-mips/futex.h
index 927a216bd53..47e5679c235 100644
--- a/include/asm-mips/futex.h
+++ b/include/asm-mips/futex.h
@@ -88,7 +88,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
return -EFAULT;
- inc_preempt_count();
+ pagefault_disable();
switch (op) {
case FUTEX_OP_SET:
@@ -115,7 +115,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
ret = -ENOSYS;
}
- dec_preempt_count();
+ pagefault_enable();
if (!ret) {
switch (cmp) {
diff --git a/include/asm-mips/highmem.h b/include/asm-mips/highmem.h
index c976bfaaba8..f8c8182f7f2 100644
--- a/include/asm-mips/highmem.h
+++ b/include/asm-mips/highmem.h
@@ -21,6 +21,7 @@
#include <linux/init.h>
#include <linux/interrupt.h>
+#include <linux/uaccess.h>
#include <asm/kmap_types.h>
/* undef for production */
@@ -70,11 +71,16 @@ static inline void *kmap(struct page *page)
static inline void *kmap_atomic(struct page *page, enum km_type type)
{
+ pagefault_disable();
return page_address(page);
}
-static inline void kunmap_atomic(void *kvaddr, enum km_type type) { }
-#define kmap_atomic_pfn(pfn, idx) page_address(pfn_to_page(pfn))
+static inline void kunmap_atomic(void *kvaddr, enum km_type type)
+{
+ pagefault_enable();
+}
+
+#define kmap_atomic_pfn(pfn, idx) kmap_atomic(pfn_to_page(pfn), (idx))
#define kmap_atomic_to_page(ptr) virt_to_page(ptr)
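
Even though MIPS never sets up a real highmem mapping here, kmap_atomic()/kunmap_atomic() now disable and re-enable page faults so the atomic-section semantics match other architectures. Pairing expected from callers (sketch; the copy and names are illustrative, <linux/string.h> assumed):

    static void example_copy_from_page(struct page *page, void *dst, size_t len)
    {
            void *src = kmap_atomic(page, KM_USER0);   /* pagefault_disable() inside */

            memcpy(dst, src, len);
            kunmap_atomic(src, KM_USER0);              /* pagefault_enable() inside */
    }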
diff --git a/include/asm-mips/setup.h b/include/asm-mips/setup.h
index 737fa4a6912..70009a90263 100644
--- a/include/asm-mips/setup.h
+++ b/include/asm-mips/setup.h
@@ -1,8 +1,6 @@
-#ifdef __KERNEL__
#ifndef _MIPS_SETUP_H
#define _MIPS_SETUP_H
#define COMMAND_LINE_SIZE 256
#endif /* __SETUP_H */
-#endif /* __KERNEL__ */
diff --git a/include/asm-mips/unistd.h b/include/asm-mips/unistd.h
index ec56aa52f66..696cff39a1d 100644
--- a/include/asm-mips/unistd.h
+++ b/include/asm-mips/unistd.h
@@ -933,268 +933,6 @@
#ifndef __ASSEMBLY__
-/* XXX - _foo needs to be __foo, while __NR_bar could be _NR_bar. */
-#define _syscall0(type,name) \
-type name(void) \
-{ \
- register unsigned long __a3 asm("$7"); \
- unsigned long __v0; \
- \
- __asm__ volatile ( \
- ".set\tnoreorder\n\t" \
- "li\t$2, %2\t\t\t# " #name "\n\t" \
- "syscall\n\t" \
- "move\t%0, $2\n\t" \
- ".set\treorder" \
- : "=&r" (__v0), "=r" (__a3) \
- : "i" (__NR_##name) \
- : "$2", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \
- "memory"); \
- \
- if (__a3 == 0) \
- return (type) __v0; \
- errno = __v0; \
- return (type) -1; \
-}
-
-/*
- * DANGER: This macro isn't usable for the pipe(2) call
- * which has a unusual return convention.
- */
-#define _syscall1(type,name,atype,a) \
-type name(atype a) \
-{ \
- register unsigned long __a0 asm("$4") = (unsigned long) a; \
- register unsigned long __a3 asm("$7"); \
- unsigned long __v0; \
- \
- __asm__ volatile ( \
- ".set\tnoreorder\n\t" \
- "li\t$2, %3\t\t\t# " #name "\n\t" \
- "syscall\n\t" \
- "move\t%0, $2\n\t" \
- ".set\treorder" \
- : "=&r" (__v0), "=r" (__a3) \
- : "r" (__a0), "i" (__NR_##name) \
- : "$2", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \
- "memory"); \
- \
- if (__a3 == 0) \
- return (type) __v0; \
- errno = __v0; \
- return (type) -1; \
-}
-
-#define _syscall2(type,name,atype,a,btype,b) \
-type name(atype a, btype b) \
-{ \
- register unsigned long __a0 asm("$4") = (unsigned long) a; \
- register unsigned long __a1 asm("$5") = (unsigned long) b; \
- register unsigned long __a3 asm("$7"); \
- unsigned long __v0; \
- \
- __asm__ volatile ( \
- ".set\tnoreorder\n\t" \
- "li\t$2, %4\t\t\t# " #name "\n\t" \
- "syscall\n\t" \
- "move\t%0, $2\n\t" \
- ".set\treorder" \
- : "=&r" (__v0), "=r" (__a3) \
- : "r" (__a0), "r" (__a1), "i" (__NR_##name) \
- : "$2", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \
- "memory"); \
- \
- if (__a3 == 0) \
- return (type) __v0; \
- errno = __v0; \
- return (type) -1; \
-}
-
-#define _syscall3(type,name,atype,a,btype,b,ctype,c) \
-type name(atype a, btype b, ctype c) \
-{ \
- register unsigned long __a0 asm("$4") = (unsigned long) a; \
- register unsigned long __a1 asm("$5") = (unsigned long) b; \
- register unsigned long __a2 asm("$6") = (unsigned long) c; \
- register unsigned long __a3 asm("$7"); \
- unsigned long __v0; \
- \
- __asm__ volatile ( \
- ".set\tnoreorder\n\t" \
- "li\t$2, %5\t\t\t# " #name "\n\t" \
- "syscall\n\t" \
- "move\t%0, $2\n\t" \
- ".set\treorder" \
- : "=&r" (__v0), "=r" (__a3) \
- : "r" (__a0), "r" (__a1), "r" (__a2), "i" (__NR_##name) \
- : "$2", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \
- "memory"); \
- \
- if (__a3 == 0) \
- return (type) __v0; \
- errno = __v0; \
- return (type) -1; \
-}
-
-#define _syscall4(type,name,atype,a,btype,b,ctype,c,dtype,d) \
-type name(atype a, btype b, ctype c, dtype d) \
-{ \
- register unsigned long __a0 asm("$4") = (unsigned long) a; \
- register unsigned long __a1 asm("$5") = (unsigned long) b; \
- register unsigned long __a2 asm("$6") = (unsigned long) c; \
- register unsigned long __a3 asm("$7") = (unsigned long) d; \
- unsigned long __v0; \
- \
- __asm__ volatile ( \
- ".set\tnoreorder\n\t" \
- "li\t$2, %5\t\t\t# " #name "\n\t" \
- "syscall\n\t" \
- "move\t%0, $2\n\t" \
- ".set\treorder" \
- : "=&r" (__v0), "+r" (__a3) \
- : "r" (__a0), "r" (__a1), "r" (__a2), "i" (__NR_##name) \
- : "$2", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \
- "memory"); \
- \
- if (__a3 == 0) \
- return (type) __v0; \
- errno = __v0; \
- return (type) -1; \
-}
-
-#if (_MIPS_SIM == _MIPS_SIM_ABI32)
-
-/*
- * Using those means your brain needs more than an oil change ;-)
- */
-
-#define _syscall5(type,name,atype,a,btype,b,ctype,c,dtype,d,etype,e) \
-type name(atype a, btype b, ctype c, dtype d, etype e) \
-{ \
- register unsigned long __a0 asm("$4") = (unsigned long) a; \
- register unsigned long __a1 asm("$5") = (unsigned long) b; \
- register unsigned long __a2 asm("$6") = (unsigned long) c; \
- register unsigned long __a3 asm("$7") = (unsigned long) d; \
- unsigned long __v0; \
- \
- __asm__ volatile ( \
- ".set\tnoreorder\n\t" \
- "lw\t$2, %6\n\t" \
- "subu\t$29, 32\n\t" \
- "sw\t$2, 16($29)\n\t" \
- "li\t$2, %5\t\t\t# " #name "\n\t" \
- "syscall\n\t" \
- "move\t%0, $2\n\t" \
- "addiu\t$29, 32\n\t" \
- ".set\treorder" \
- : "=&r" (__v0), "+r" (__a3) \
- : "r" (__a0), "r" (__a1), "r" (__a2), "i" (__NR_##name), \
- "m" ((unsigned long)e) \
- : "$2", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \
- "memory"); \
- \
- if (__a3 == 0) \
- return (type) __v0; \
- errno = __v0; \
- return (type) -1; \
-}
-
-#define _syscall6(type,name,atype,a,btype,b,ctype,c,dtype,d,etype,e,ftype,f) \
-type name(atype a, btype b, ctype c, dtype d, etype e, ftype f) \
-{ \
- register unsigned long __a0 asm("$4") = (unsigned long) a; \
- register unsigned long __a1 asm("$5") = (unsigned long) b; \
- register unsigned long __a2 asm("$6") = (unsigned long) c; \
- register unsigned long __a3 asm("$7") = (unsigned long) d; \
- unsigned long __v0; \
- \
- __asm__ volatile ( \
- ".set\tnoreorder\n\t" \
- "lw\t$2, %6\n\t" \
- "lw\t$8, %7\n\t" \
- "subu\t$29, 32\n\t" \
- "sw\t$2, 16($29)\n\t" \
- "sw\t$8, 20($29)\n\t" \
- "li\t$2, %5\t\t\t# " #name "\n\t" \
- "syscall\n\t" \
- "move\t%0, $2\n\t" \
- "addiu\t$29, 32\n\t" \
- ".set\treorder" \
- : "=&r" (__v0), "+r" (__a3) \
- : "r" (__a0), "r" (__a1), "r" (__a2), "i" (__NR_##name), \
- "m" ((unsigned long)e), "m" ((unsigned long)f) \
- : "$2", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \
- "memory"); \
- \
- if (__a3 == 0) \
- return (type) __v0; \
- errno = __v0; \
- return (type) -1; \
-}
-
-#endif /* (_MIPS_SIM == _MIPS_SIM_ABI32) */
-
-#if (_MIPS_SIM == _MIPS_SIM_NABI32) || (_MIPS_SIM == _MIPS_SIM_ABI64)
-
-#define _syscall5(type,name,atype,a,btype,b,ctype,c,dtype,d,etype,e) \
-type name (atype a,btype b,ctype c,dtype d,etype e) \
-{ \
- register unsigned long __a0 asm("$4") = (unsigned long) a; \
- register unsigned long __a1 asm("$5") = (unsigned long) b; \
- register unsigned long __a2 asm("$6") = (unsigned long) c; \
- register unsigned long __a3 asm("$7") = (unsigned long) d; \
- register unsigned long __a4 asm("$8") = (unsigned long) e; \
- unsigned long __v0; \
- \
- __asm__ volatile ( \
- ".set\tnoreorder\n\t" \
- "li\t$2, %6\t\t\t# " #name "\n\t" \
- "syscall\n\t" \
- "move\t%0, $2\n\t" \
- ".set\treorder" \
- : "=&r" (__v0), "+r" (__a3) \
- : "r" (__a0), "r" (__a1), "r" (__a2), "r" (__a4), "i" (__NR_##name) \
- : "$2", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \
- "memory"); \
- \
- if (__a3 == 0) \
- return (type) __v0; \
- errno = __v0; \
- return (type) -1; \
-}
-
-#define _syscall6(type,name,atype,a,btype,b,ctype,c,dtype,d,etype,e,ftype,f) \
-type name (atype a,btype b,ctype c,dtype d,etype e,ftype f) \
-{ \
- register unsigned long __a0 asm("$4") = (unsigned long) a; \
- register unsigned long __a1 asm("$5") = (unsigned long) b; \
- register unsigned long __a2 asm("$6") = (unsigned long) c; \
- register unsigned long __a3 asm("$7") = (unsigned long) d; \
- register unsigned long __a4 asm("$8") = (unsigned long) e; \
- register unsigned long __a5 asm("$9") = (unsigned long) f; \
- unsigned long __v0; \
- \
- __asm__ volatile ( \
- ".set\tnoreorder\n\t" \
- "li\t$2, %7\t\t\t# " #name "\n\t" \
- "syscall\n\t" \
- "move\t%0, $2\n\t" \
- ".set\treorder" \
- : "=&r" (__v0), "+r" (__a3) \
- : "r" (__a0), "r" (__a1), "r" (__a2), "r" (__a4), "r" (__a5), \
- "i" (__NR_##name) \
- : "$2", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \
- "memory"); \
- \
- if (__a3 == 0) \
- return (type) __v0; \
- errno = __v0; \
- return (type) -1; \
-}
-
-#endif /* (_MIPS_SIM == _MIPS_SIM_NABI32) || (_MIPS_SIM == _MIPS_SIM_ABI64) */
-
-
#define __ARCH_OMIT_COMPAT_SYS_GETDENTS64
#define __ARCH_WANT_IPC_PARSE_VERSION
#define __ARCH_WANT_OLD_READDIR
diff --git a/include/asm-parisc/dma-mapping.h b/include/asm-parisc/dma-mapping.h
index 1e387e1dad3..66f0b408c66 100644
--- a/include/asm-parisc/dma-mapping.h
+++ b/include/asm-parisc/dma-mapping.h
@@ -191,13 +191,13 @@ dma_get_cache_alignment(void)
}
static inline int
-dma_is_consistent(dma_addr_t dma_addr)
+dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
{
return (hppa_dma_ops->dma_sync_single_for_cpu == NULL);
}
static inline void
-dma_cache_sync(void *vaddr, size_t size,
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction direction)
{
if(hppa_dma_ops->dma_sync_single_for_cpu)
diff --git a/include/asm-parisc/futex.h b/include/asm-parisc/futex.h
index d84bbb283fd..dbee6e60aa8 100644
--- a/include/asm-parisc/futex.h
+++ b/include/asm-parisc/futex.h
@@ -21,7 +21,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
return -EFAULT;
- inc_preempt_count();
+ pagefault_disable();
switch (op) {
case FUTEX_OP_SET:
@@ -33,7 +33,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
ret = -ENOSYS;
}
- dec_preempt_count();
+ pagefault_enable();
if (!ret) {
switch (cmp) {
diff --git a/include/asm-powerpc/dma-mapping.h b/include/asm-powerpc/dma-mapping.h
index 7e38b5fddad..7c7de87bd8a 100644
--- a/include/asm-powerpc/dma-mapping.h
+++ b/include/asm-powerpc/dma-mapping.h
@@ -342,9 +342,9 @@ static inline int dma_mapping_error(dma_addr_t dma_addr)
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#ifdef CONFIG_NOT_COHERENT_CACHE
-#define dma_is_consistent(d) (0)
+#define dma_is_consistent(d, h) (0)
#else
-#define dma_is_consistent(d) (1)
+#define dma_is_consistent(d, h) (1)
#endif
static inline int dma_get_cache_alignment(void)
@@ -378,7 +378,7 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
dma_sync_single_for_device(dev, dma_handle, offset + size, direction);
}
-static inline void dma_cache_sync(void *vaddr, size_t size,
+static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction direction)
{
BUG_ON(direction == DMA_NONE);
diff --git a/include/asm-powerpc/elf.h b/include/asm-powerpc/elf.h
index b5436642a10..d36426c01b6 100644
--- a/include/asm-powerpc/elf.h
+++ b/include/asm-powerpc/elf.h
@@ -124,12 +124,10 @@ typedef elf_greg_t32 elf_gregset_t32[ELF_NGREG];
# define ELF_DATA ELFDATA2MSB
typedef elf_greg_t64 elf_greg_t;
typedef elf_gregset_t64 elf_gregset_t;
-# define elf_addr_t unsigned long
#else
/* Assumption: ELF_ARCH == EM_PPC and ELF_CLASS == ELFCLASS32 */
typedef elf_greg_t32 elf_greg_t;
typedef elf_gregset_t32 elf_gregset_t;
-# define elf_addr_t __u32
#endif /* ELF_ARCH */
/* Floating point registers */
diff --git a/include/asm-powerpc/futex.h b/include/asm-powerpc/futex.h
index 936422e5489..3f3673fd3ff 100644
--- a/include/asm-powerpc/futex.h
+++ b/include/asm-powerpc/futex.h
@@ -43,7 +43,7 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
return -EFAULT;
- inc_preempt_count();
+ pagefault_disable();
switch (op) {
case FUTEX_OP_SET:
@@ -65,7 +65,7 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
ret = -ENOSYS;
}
- dec_preempt_count();
+ pagefault_enable();
if (!ret) {
switch (cmp) {
diff --git a/include/asm-powerpc/pgalloc.h b/include/asm-powerpc/pgalloc.h
index ae63db7b3e7..b0830db68f8 100644
--- a/include/asm-powerpc/pgalloc.h
+++ b/include/asm-powerpc/pgalloc.h
@@ -11,7 +11,7 @@
#include <linux/cpumask.h>
#include <linux/percpu.h>
-extern kmem_cache_t *pgtable_cache[];
+extern struct kmem_cache *pgtable_cache[];
#ifdef CONFIG_PPC_64K_PAGES
#define PTE_CACHE_NUM 0
diff --git a/include/asm-powerpc/setup.h b/include/asm-powerpc/setup.h
index 3d9740aae01..817fac0a071 100644
--- a/include/asm-powerpc/setup.h
+++ b/include/asm-powerpc/setup.h
@@ -1,9 +1,6 @@
#ifndef _ASM_POWERPC_SETUP_H
#define _ASM_POWERPC_SETUP_H
-#ifdef __KERNEL__
-
#define COMMAND_LINE_SIZE 512
-#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_SETUP_H */
diff --git a/include/asm-powerpc/unistd.h b/include/asm-powerpc/unistd.h
index 04b6c17cc59..0ae954e3d25 100644
--- a/include/asm-powerpc/unistd.h
+++ b/include/asm-powerpc/unistd.h
@@ -334,115 +334,6 @@
#ifndef __ASSEMBLY__
-/* On powerpc a system call basically clobbers the same registers like a
- * function call, with the exception of LR (which is needed for the
- * "sc; bnslr" sequence) and CR (where only CR0.SO is clobbered to signal
- * an error return status).
- */
-
-#define __syscall_nr(nr, type, name, args...) \
- unsigned long __sc_ret, __sc_err; \
- { \
- register unsigned long __sc_0 __asm__ ("r0"); \
- register unsigned long __sc_3 __asm__ ("r3"); \
- register unsigned long __sc_4 __asm__ ("r4"); \
- register unsigned long __sc_5 __asm__ ("r5"); \
- register unsigned long __sc_6 __asm__ ("r6"); \
- register unsigned long __sc_7 __asm__ ("r7"); \
- register unsigned long __sc_8 __asm__ ("r8"); \
- \
- __sc_loadargs_##nr(name, args); \
- __asm__ __volatile__ \
- ("sc \n\t" \
- "mfcr %0 " \
- : "=&r" (__sc_0), \
- "=&r" (__sc_3), "=&r" (__sc_4), \
- "=&r" (__sc_5), "=&r" (__sc_6), \
- "=&r" (__sc_7), "=&r" (__sc_8) \
- : __sc_asm_input_##nr \
- : "cr0", "ctr", "memory", \
- "r9", "r10","r11", "r12"); \
- __sc_ret = __sc_3; \
- __sc_err = __sc_0; \
- } \
- if (__sc_err & 0x10000000) \
- { \
- errno = __sc_ret; \
- __sc_ret = -1; \
- } \
- return (type) __sc_ret
-
-#define __sc_loadargs_0(name, dummy...) \
- __sc_0 = __NR_##name
-#define __sc_loadargs_1(name, arg1) \
- __sc_loadargs_0(name); \
- __sc_3 = (unsigned long) (arg1)
-#define __sc_loadargs_2(name, arg1, arg2) \
- __sc_loadargs_1(name, arg1); \
- __sc_4 = (unsigned long) (arg2)
-#define __sc_loadargs_3(name, arg1, arg2, arg3) \
- __sc_loadargs_2(name, arg1, arg2); \
- __sc_5 = (unsigned long) (arg3)
-#define __sc_loadargs_4(name, arg1, arg2, arg3, arg4) \
- __sc_loadargs_3(name, arg1, arg2, arg3); \
- __sc_6 = (unsigned long) (arg4)
-#define __sc_loadargs_5(name, arg1, arg2, arg3, arg4, arg5) \
- __sc_loadargs_4(name, arg1, arg2, arg3, arg4); \
- __sc_7 = (unsigned long) (arg5)
-#define __sc_loadargs_6(name, arg1, arg2, arg3, arg4, arg5, arg6) \
- __sc_loadargs_5(name, arg1, arg2, arg3, arg4, arg5); \
- __sc_8 = (unsigned long) (arg6)
-
-#define __sc_asm_input_0 "0" (__sc_0)
-#define __sc_asm_input_1 __sc_asm_input_0, "1" (__sc_3)
-#define __sc_asm_input_2 __sc_asm_input_1, "2" (__sc_4)
-#define __sc_asm_input_3 __sc_asm_input_2, "3" (__sc_5)
-#define __sc_asm_input_4 __sc_asm_input_3, "4" (__sc_6)
-#define __sc_asm_input_5 __sc_asm_input_4, "5" (__sc_7)
-#define __sc_asm_input_6 __sc_asm_input_5, "6" (__sc_8)
-
-#define _syscall0(type,name) \
-type name(void) \
-{ \
- __syscall_nr(0, type, name); \
-}
-
-#define _syscall1(type,name,type1,arg1) \
-type name(type1 arg1) \
-{ \
- __syscall_nr(1, type, name, arg1); \
-}
-
-#define _syscall2(type,name,type1,arg1,type2,arg2) \
-type name(type1 arg1, type2 arg2) \
-{ \
- __syscall_nr(2, type, name, arg1, arg2); \
-}
-
-#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
-type name(type1 arg1, type2 arg2, type3 arg3) \
-{ \
- __syscall_nr(3, type, name, arg1, arg2, arg3); \
-}
-
-#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
-type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
-{ \
- __syscall_nr(4, type, name, arg1, arg2, arg3, arg4); \
-}
-
-#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5) \
-type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) \
-{ \
- __syscall_nr(5, type, name, arg1, arg2, arg3, arg4, arg5); \
-}
-#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5,type6,arg6) \
-type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5, type6 arg6) \
-{ \
- __syscall_nr(6, type, name, arg1, arg2, arg3, arg4, arg5, arg6); \
-}
-
-
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/linkage.h>
diff --git a/include/asm-ppc/highmem.h b/include/asm-ppc/highmem.h
index 1d2c4ef81c2..f7b21ee302b 100644
--- a/include/asm-ppc/highmem.h
+++ b/include/asm-ppc/highmem.h
@@ -79,7 +79,7 @@ static inline void *kmap_atomic(struct page *page, enum km_type type)
unsigned long vaddr;
/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
- inc_preempt_count();
+ pagefault_disable();
if (!PageHighMem(page))
return page_address(page);
@@ -101,8 +101,7 @@ static inline void kunmap_atomic(void *kvaddr, enum km_type type)
unsigned int idx = type + KM_TYPE_NR*smp_processor_id();
if (vaddr < KMAP_FIX_BEGIN) { // FIXME
- dec_preempt_count();
- preempt_check_resched();
+ pagefault_enable();
return;
}
@@ -115,8 +114,7 @@ static inline void kunmap_atomic(void *kvaddr, enum km_type type)
pte_clear(&init_mm, vaddr, kmap_pte+idx);
flush_tlb_page(NULL, vaddr);
#endif
- dec_preempt_count();
- preempt_check_resched();
+ pagefault_enable();
}
static inline struct page *kmap_atomic_to_page(void *ptr)
diff --git a/include/asm-s390/setup.h b/include/asm-s390/setup.h
index 7664bacdd83..9574fe80a04 100644
--- a/include/asm-s390/setup.h
+++ b/include/asm-s390/setup.h
@@ -8,12 +8,13 @@
#ifndef _ASM_S390_SETUP_H
#define _ASM_S390_SETUP_H
+#define COMMAND_LINE_SIZE 896
+
#ifdef __KERNEL__
#include <asm/types.h>
#define PARMAREA 0x10400
-#define COMMAND_LINE_SIZE 896
#define MEMORY_CHUNKS 16 /* max 0x7fff */
#define IPL_PARMBLOCK_ORIGIN 0x2000
diff --git a/include/asm-s390/unistd.h b/include/asm-s390/unistd.h
index 71d3c21b84f..fb6fef97d73 100644
--- a/include/asm-s390/unistd.h
+++ b/include/asm-s390/unistd.h
@@ -345,160 +345,6 @@
#ifdef __KERNEL__
-#include <linux/err.h>
-
-#define __syscall_return(type, res) \
-do { \
- if ((unsigned long)(res) >= (unsigned long)(-MAX_ERRNO)) { \
- errno = -(res); \
- res = -1; \
- } \
- return (type) (res); \
-} while (0)
-
-#define _svc_clobber "1", "cc", "memory"
-
-#define _syscall0(type,name) \
-type name(void) { \
- register long __svcres asm("2"); \
- long __res; \
- asm volatile( \
- " .if %1 < 256\n" \
- " svc %b1\n" \
- " .else\n" \
- " la %%r1,%1\n" \
- " svc 0\n" \
- " .endif" \
- : "=d" (__svcres) \
- : "i" (__NR_##name) \
- : _svc_clobber); \
- __res = __svcres; \
- __syscall_return(type,__res); \
-}
-
-#define _syscall1(type,name,type1,arg1) \
-type name(type1 arg1) { \
- register type1 __arg1 asm("2") = arg1; \
- register long __svcres asm("2"); \
- long __res; \
- asm volatile( \
- " .if %1 < 256\n" \
- " svc %b1\n" \
- " .else\n" \
- " la %%r1,%1\n" \
- " svc 0\n" \
- " .endif" \
- : "=d" (__svcres) \
- : "i" (__NR_##name), \
- "0" (__arg1) \
- : _svc_clobber); \
- __res = __svcres; \
- __syscall_return(type,__res); \
-}
-
-#define _syscall2(type,name,type1,arg1,type2,arg2) \
-type name(type1 arg1, type2 arg2) { \
- register type1 __arg1 asm("2") = arg1; \
- register type2 __arg2 asm("3") = arg2; \
- register long __svcres asm("2"); \
- long __res; \
- asm volatile( \
- " .if %1 < 256\n" \
- " svc %b1\n" \
- " .else\n" \
- " la %%r1,%1\n" \
- " svc 0\n" \
- " .endif" \
- : "=d" (__svcres) \
- : "i" (__NR_##name), \
- "0" (__arg1), \
- "d" (__arg2) \
- : _svc_clobber ); \
- __res = __svcres; \
- __syscall_return(type,__res); \
-}
-
-#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
-type name(type1 arg1, type2 arg2, type3 arg3) { \
- register type1 __arg1 asm("2") = arg1; \
- register type2 __arg2 asm("3") = arg2; \
- register type3 __arg3 asm("4") = arg3; \
- register long __svcres asm("2"); \
- long __res; \
- asm volatile( \
- " .if %1 < 256\n" \
- " svc %b1\n" \
- " .else\n" \
- " la %%r1,%1\n" \
- " svc 0\n" \
- " .endif" \
- : "=d" (__svcres) \
- : "i" (__NR_##name), \
- "0" (__arg1), \
- "d" (__arg2), \
- "d" (__arg3) \
- : _svc_clobber); \
- __res = __svcres; \
- __syscall_return(type,__res); \
-}
-
-#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3, \
- type4,name4) \
-type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) { \
- register type1 __arg1 asm("2") = arg1; \
- register type2 __arg2 asm("3") = arg2; \
- register type3 __arg3 asm("4") = arg3; \
- register type4 __arg4 asm("5") = arg4; \
- register long __svcres asm("2"); \
- long __res; \
- asm volatile( \
- " .if %1 < 256\n" \
- " svc %b1\n" \
- " .else\n" \
- " la %%r1,%1\n" \
- " svc 0\n" \
- " .endif" \
- : "=d" (__svcres) \
- : "i" (__NR_##name), \
- "0" (__arg1), \
- "d" (__arg2), \
- "d" (__arg3), \
- "d" (__arg4) \
- : _svc_clobber); \
- __res = __svcres; \
- __syscall_return(type,__res); \
-}
-
-#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3, \
- type4,name4,type5,name5) \
-type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
- type5 arg5) { \
- register type1 __arg1 asm("2") = arg1; \
- register type2 __arg2 asm("3") = arg2; \
- register type3 __arg3 asm("4") = arg3; \
- register type4 __arg4 asm("5") = arg4; \
- register type5 __arg5 asm("6") = arg5; \
- register long __svcres asm("2"); \
- long __res; \
- asm volatile( \
- " .if %1 < 256\n" \
- " svc %b1\n" \
- " .else\n" \
- " la %%r1,%1\n" \
- " svc 0\n" \
- " .endif" \
- : "=d" (__svcres) \
- : "i" (__NR_##name), \
- "0" (__arg1), \
- "d" (__arg2), \
- "d" (__arg3), \
- "d" (__arg4), \
- "d" (__arg5) \
- : _svc_clobber); \
- __res = __svcres; \
- __syscall_return(type,__res); \
-}
-
#define __ARCH_WANT_IPC_PARSE_VERSION
#define __ARCH_WANT_OLD_READDIR
#define __ARCH_WANT_SYS_ALARM
diff --git a/include/asm-sh/dma-mapping.h b/include/asm-sh/dma-mapping.h
index 56cd4b97723..37ab0c131a4 100644
--- a/include/asm-sh/dma-mapping.h
+++ b/include/asm-sh/dma-mapping.h
@@ -53,7 +53,7 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
consistent_free(vaddr, size);
}
-static inline void dma_cache_sync(void *vaddr, size_t size,
+static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction dir)
{
consistent_sync(vaddr, size, (int)dir);
diff --git a/include/asm-sh/setup.h b/include/asm-sh/setup.h
index 34ca8a7f06b..1583c6b7bda 100644
--- a/include/asm-sh/setup.h
+++ b/include/asm-sh/setup.h
@@ -1,10 +1,12 @@
-#ifdef __KERNEL__
#ifndef _SH_SETUP_H
#define _SH_SETUP_H
#define COMMAND_LINE_SIZE 256
+#ifdef __KERNEL__
+
int setup_early_printk(char *);
-#endif /* _SH_SETUP_H */
#endif /* __KERNEL__ */
+
+#endif /* _SH_SETUP_H */
diff --git a/include/asm-sh/unistd.h b/include/asm-sh/unistd.h
index 0cae1d24876..f982073dc6c 100644
--- a/include/asm-sh/unistd.h
+++ b/include/asm-sh/unistd.h
@@ -332,143 +332,6 @@
#ifdef __KERNEL__
-#include <linux/err.h>
-
-/* user-visible error numbers are in the range -1 - -MAX_ERRNO:
- * see <asm-sh/errno.h> */
-
-#define __syscall_return(type, res) \
-do { \
- if ((unsigned long)(res) >= (unsigned long)(-MAX_ERRNO)) { \
- /* Avoid using "res" which is declared to be in register r0; \
- errno might expand to a function call and clobber it. */ \
- int __err = -(res); \
- errno = __err; \
- res = -1; \
- } \
- return (type) (res); \
-} while (0)
-
-#if defined(__sh2__) || defined(__SH2E__) || defined(__SH2A__)
-#define SYSCALL_ARG0 "trapa #0x20"
-#define SYSCALL_ARG1 "trapa #0x21"
-#define SYSCALL_ARG2 "trapa #0x22"
-#define SYSCALL_ARG3 "trapa #0x23"
-#define SYSCALL_ARG4 "trapa #0x24"
-#define SYSCALL_ARG5 "trapa #0x25"
-#define SYSCALL_ARG6 "trapa #0x26"
-#else
-#define SYSCALL_ARG0 "trapa #0x10"
-#define SYSCALL_ARG1 "trapa #0x11"
-#define SYSCALL_ARG2 "trapa #0x12"
-#define SYSCALL_ARG3 "trapa #0x13"
-#define SYSCALL_ARG4 "trapa #0x14"
-#define SYSCALL_ARG5 "trapa #0x15"
-#define SYSCALL_ARG6 "trapa #0x16"
-#endif
-
-/* XXX - _foo needs to be __foo, while __NR_bar could be _NR_bar. */
-#define _syscall0(type,name) \
-type name(void) \
-{ \
-register long __sc0 __asm__ ("r3") = __NR_##name; \
-__asm__ __volatile__ (SYSCALL_ARG0 \
- : "=z" (__sc0) \
- : "0" (__sc0) \
- : "memory" ); \
-__syscall_return(type,__sc0); \
-}
-
-#define _syscall1(type,name,type1,arg1) \
-type name(type1 arg1) \
-{ \
-register long __sc0 __asm__ ("r3") = __NR_##name; \
-register long __sc4 __asm__ ("r4") = (long) arg1; \
-__asm__ __volatile__ (SYSCALL_ARG1 \
- : "=z" (__sc0) \
- : "0" (__sc0), "r" (__sc4) \
- : "memory"); \
-__syscall_return(type,__sc0); \
-}
-
-#define _syscall2(type,name,type1,arg1,type2,arg2) \
-type name(type1 arg1,type2 arg2) \
-{ \
-register long __sc0 __asm__ ("r3") = __NR_##name; \
-register long __sc4 __asm__ ("r4") = (long) arg1; \
-register long __sc5 __asm__ ("r5") = (long) arg2; \
-__asm__ __volatile__ (SYSCALL_ARG2 \
- : "=z" (__sc0) \
- : "0" (__sc0), "r" (__sc4), "r" (__sc5) \
- : "memory"); \
-__syscall_return(type,__sc0); \
-}
-
-#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
-type name(type1 arg1,type2 arg2,type3 arg3) \
-{ \
-register long __sc0 __asm__ ("r3") = __NR_##name; \
-register long __sc4 __asm__ ("r4") = (long) arg1; \
-register long __sc5 __asm__ ("r5") = (long) arg2; \
-register long __sc6 __asm__ ("r6") = (long) arg3; \
-__asm__ __volatile__ (SYSCALL_ARG3 \
- : "=z" (__sc0) \
- : "0" (__sc0), "r" (__sc4), "r" (__sc5), "r" (__sc6) \
- : "memory"); \
-__syscall_return(type,__sc0); \
-}
-
-#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
-type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
-{ \
-register long __sc0 __asm__ ("r3") = __NR_##name; \
-register long __sc4 __asm__ ("r4") = (long) arg1; \
-register long __sc5 __asm__ ("r5") = (long) arg2; \
-register long __sc6 __asm__ ("r6") = (long) arg3; \
-register long __sc7 __asm__ ("r7") = (long) arg4; \
-__asm__ __volatile__ (SYSCALL_ARG4 \
- : "=z" (__sc0) \
- : "0" (__sc0), "r" (__sc4), "r" (__sc5), "r" (__sc6), \
- "r" (__sc7) \
- : "memory" ); \
-__syscall_return(type,__sc0); \
-}
-
-#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5) \
-type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) \
-{ \
-register long __sc3 __asm__ ("r3") = __NR_##name; \
-register long __sc4 __asm__ ("r4") = (long) arg1; \
-register long __sc5 __asm__ ("r5") = (long) arg2; \
-register long __sc6 __asm__ ("r6") = (long) arg3; \
-register long __sc7 __asm__ ("r7") = (long) arg4; \
-register long __sc0 __asm__ ("r0") = (long) arg5; \
-__asm__ __volatile__ (SYSCALL_ARG5 \
- : "=z" (__sc0) \
- : "0" (__sc0), "r" (__sc4), "r" (__sc5), "r" (__sc6), "r" (__sc7), \
- "r" (__sc3) \
- : "memory" ); \
-__syscall_return(type,__sc0); \
-}
-
-#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5,type6,arg6) \
-type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5, type6 arg6) \
-{ \
-register long __sc3 __asm__ ("r3") = __NR_##name; \
-register long __sc4 __asm__ ("r4") = (long) arg1; \
-register long __sc5 __asm__ ("r5") = (long) arg2; \
-register long __sc6 __asm__ ("r6") = (long) arg3; \
-register long __sc7 __asm__ ("r7") = (long) arg4; \
-register long __sc0 __asm__ ("r0") = (long) arg5; \
-register long __sc1 __asm__ ("r1") = (long) arg6; \
-__asm__ __volatile__ (SYSCALL_ARG6 \
- : "=z" (__sc0) \
- : "0" (__sc0), "r" (__sc4), "r" (__sc5), "r" (__sc6), "r" (__sc7), \
- "r" (__sc3), "r" (__sc1) \
- : "memory" ); \
-__syscall_return(type,__sc0); \
-}
-
#define __ARCH_WANT_IPC_PARSE_VERSION
#define __ARCH_WANT_OLD_READDIR
#define __ARCH_WANT_OLD_STAT
diff --git a/include/asm-sh64/dma-mapping.h b/include/asm-sh64/dma-mapping.h
index 68e27a8fca3..5efe906c59f 100644
--- a/include/asm-sh64/dma-mapping.h
+++ b/include/asm-sh64/dma-mapping.h
@@ -35,7 +35,7 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
consistent_free(NULL, size, vaddr, dma_handle);
}
-static inline void dma_cache_sync(void *vaddr, size_t size,
+static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction dir)
{
dma_cache_wback_inv((unsigned long)vaddr, size);
diff --git a/include/asm-sh64/setup.h b/include/asm-sh64/setup.h
index ebd42eb1b70..5b07b14c292 100644
--- a/include/asm-sh64/setup.h
+++ b/include/asm-sh64/setup.h
@@ -1,6 +1,10 @@
#ifndef __ASM_SH64_SETUP_H
#define __ASM_SH64_SETUP_H
+#define COMMAND_LINE_SIZE 256
+
+#ifdef __KERNEL__
+
#define PARAM ((unsigned char *)empty_zero_page)
#define MOUNT_ROOT_RDONLY (*(unsigned long *) (PARAM+0x000))
#define RAMDISK_FLAGS (*(unsigned long *) (PARAM+0x004))
@@ -12,5 +16,7 @@
#define COMMAND_LINE ((char *) (PARAM+256))
#define COMMAND_LINE_SIZE 256
+#endif /* __KERNEL__ */
+
#endif /* __ASM_SH64_SETUP_H */
diff --git a/include/asm-sh64/unistd.h b/include/asm-sh64/unistd.h
index ee7828b27ad..1f38a7aacaa 100644
--- a/include/asm-sh64/unistd.h
+++ b/include/asm-sh64/unistd.h
@@ -347,148 +347,6 @@
#ifdef __KERNEL__
#define NR_syscalls 321
-#include <linux/err.h>
-
-/* user-visible error numbers are in the range -1 - -MAX_ERRNO:
- * see <asm-sh64/errno.h> */
-
-#define __syscall_return(type, res) \
-do { \
- /* Note: when returning from kernel the return value is in r9 \
- ** This prevents conflicts between return value and arg1 \
- ** when dispatching signal handler, in other words makes \
- ** life easier in the system call epilogue (see entry.S) \
- */ \
- register unsigned long __sr2 __asm__ ("r2") = res; \
- if ((unsigned long)(res) >= (unsigned long)(-MAX_ERRNO)) { \
- errno = -(res); \
- __sr2 = -1; \
- } \
- return (type) (__sr2); \
-} while (0)
-
-/* XXX - _foo needs to be __foo, while __NR_bar could be _NR_bar. */
-
-#define _syscall0(type,name) \
-type name(void) \
-{ \
-register unsigned long __sc0 __asm__ ("r9") = ((0x10 << 16) | __NR_##name); \
-__asm__ __volatile__ ("trapa %1 !\t\t\t" #name "()" \
- : "=r" (__sc0) \
- : "r" (__sc0) ); \
-__syscall_return(type,__sc0); \
-}
-
- /*
- * The apparent spurious "dummy" assembler comment is *needed*,
- * as without it, the compiler treats the arg<n> variables
- * as no longer live just before the asm. The compiler can
- * then optimize the storage into any registers it wishes.
- * The additional dummy statement forces the compiler to put
- * the arguments into the correct registers before the TRAPA.
- */
-#define _syscall1(type,name,type1,arg1) \
-type name(type1 arg1) \
-{ \
-register unsigned long __sc0 __asm__ ("r9") = ((0x11 << 16) | __NR_##name); \
-register unsigned long __sc2 __asm__ ("r2") = (unsigned long) arg1; \
-__asm__ __volatile__ ("trapa %1 !\t\t\t" #name "(%2)" \
- : "=r" (__sc0) \
- : "r" (__sc0), "r" (__sc2)); \
-__asm__ __volatile__ ("!dummy %0 %1" \
- : \
- : "r" (__sc0), "r" (__sc2)); \
-__syscall_return(type,__sc0); \
-}
-
-#define _syscall2(type,name,type1,arg1,type2,arg2) \
-type name(type1 arg1,type2 arg2) \
-{ \
-register unsigned long __sc0 __asm__ ("r9") = ((0x12 << 16) | __NR_##name); \
-register unsigned long __sc2 __asm__ ("r2") = (unsigned long) arg1; \
-register unsigned long __sc3 __asm__ ("r3") = (unsigned long) arg2; \
-__asm__ __volatile__ ("trapa %1 !\t\t\t" #name "(%2,%3)" \
- : "=r" (__sc0) \
- : "r" (__sc0), "r" (__sc2), "r" (__sc3) ); \
-__asm__ __volatile__ ("!dummy %0 %1 %2" \
- : \
- : "r" (__sc0), "r" (__sc2), "r" (__sc3) ); \
-__syscall_return(type,__sc0); \
-}
-
-#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
-type name(type1 arg1,type2 arg2,type3 arg3) \
-{ \
-register unsigned long __sc0 __asm__ ("r9") = ((0x13 << 16) | __NR_##name); \
-register unsigned long __sc2 __asm__ ("r2") = (unsigned long) arg1; \
-register unsigned long __sc3 __asm__ ("r3") = (unsigned long) arg2; \
-register unsigned long __sc4 __asm__ ("r4") = (unsigned long) arg3; \
-__asm__ __volatile__ ("trapa %1 !\t\t\t" #name "(%2,%3,%4)" \
- : "=r" (__sc0) \
- : "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4) ); \
-__asm__ __volatile__ ("!dummy %0 %1 %2 %3" \
- : \
- : "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4) ); \
-__syscall_return(type,__sc0); \
-}
-
-#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
-type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
-{ \
-register unsigned long __sc0 __asm__ ("r9") = ((0x14 << 16) | __NR_##name); \
-register unsigned long __sc2 __asm__ ("r2") = (unsigned long) arg1; \
-register unsigned long __sc3 __asm__ ("r3") = (unsigned long) arg2; \
-register unsigned long __sc4 __asm__ ("r4") = (unsigned long) arg3; \
-register unsigned long __sc5 __asm__ ("r5") = (unsigned long) arg4; \
-__asm__ __volatile__ ("trapa %1 !\t\t\t" #name "(%2,%3,%4,%5)" \
- : "=r" (__sc0) \
- : "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4), "r" (__sc5) );\
-__asm__ __volatile__ ("!dummy %0 %1 %2 %3 %4" \
- : \
- : "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4), "r" (__sc5) );\
-__syscall_return(type,__sc0); \
-}
-
-#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5) \
-type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) \
-{ \
-register unsigned long __sc0 __asm__ ("r9") = ((0x15 << 16) | __NR_##name); \
-register unsigned long __sc2 __asm__ ("r2") = (unsigned long) arg1; \
-register unsigned long __sc3 __asm__ ("r3") = (unsigned long) arg2; \
-register unsigned long __sc4 __asm__ ("r4") = (unsigned long) arg3; \
-register unsigned long __sc5 __asm__ ("r5") = (unsigned long) arg4; \
-register unsigned long __sc6 __asm__ ("r6") = (unsigned long) arg5; \
-__asm__ __volatile__ ("trapa %1 !\t\t\t" #name "(%2,%3,%4,%5,%6)" \
- : "=r" (__sc0) \
- : "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4), "r" (__sc5), \
- "r" (__sc6)); \
-__asm__ __volatile__ ("!dummy %0 %1 %2 %3 %4 %5" \
- : \
- : "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4), "r" (__sc5), \
- "r" (__sc6)); \
-__syscall_return(type,__sc0); \
-}
-
-#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5, type6, arg6) \
-type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5, type6 arg6) \
-{ \
-register unsigned long __sc0 __asm__ ("r9") = ((0x16 << 16) | __NR_##name); \
-register unsigned long __sc2 __asm__ ("r2") = (unsigned long) arg1; \
-register unsigned long __sc3 __asm__ ("r3") = (unsigned long) arg2; \
-register unsigned long __sc4 __asm__ ("r4") = (unsigned long) arg3; \
-register unsigned long __sc5 __asm__ ("r5") = (unsigned long) arg4; \
-register unsigned long __sc6 __asm__ ("r6") = (unsigned long) arg5; \
-register unsigned long __sc7 __asm__ ("r7") = (unsigned long) arg6; \
-__asm__ __volatile__ ("trapa %1 !\t\t\t" #name "(%2,%3,%4,%5,%6,%7)" \
- : "=r" (__sc0) \
- : "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4), "r" (__sc5), \
- "r" (__sc6), "r" (__sc7)); \
-__asm__ __volatile__ ("!dummy %0 %1 %2 %3 %4 %5 %6" \
- : \
- : "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4), "r" (__sc5), \
- "r" (__sc6), "r" (__sc7)); \
-__syscall_return(type,__sc0); \
-}
#define __ARCH_WANT_IPC_PARSE_VERSION
#define __ARCH_WANT_OLD_READDIR
diff --git a/include/asm-sparc/unistd.h b/include/asm-sparc/unistd.h
index f7827fa4cd5..d5b2f8053b3 100644
--- a/include/asm-sparc/unistd.h
+++ b/include/asm-sparc/unistd.h
@@ -329,136 +329,6 @@
* find a free slot in the 0-302 range.
*/
-#define _syscall0(type,name) \
-type name(void) \
-{ \
-long __res; \
-register long __g1 __asm__ ("g1") = __NR_##name; \
-__asm__ __volatile__ ("t 0x10\n\t" \
- "bcc 1f\n\t" \
- "mov %%o0, %0\n\t" \
- "sub %%g0, %%o0, %0\n\t" \
- "1:\n\t" \
- : "=r" (__res)\
- : "r" (__g1) \
- : "o0", "cc"); \
-if (__res < -255 || __res >= 0) \
- return (type) __res; \
-errno = -__res; \
-return -1; \
-}
-
-#define _syscall1(type,name,type1,arg1) \
-type name(type1 arg1) \
-{ \
-long __res; \
-register long __g1 __asm__ ("g1") = __NR_##name; \
-register long __o0 __asm__ ("o0") = (long)(arg1); \
-__asm__ __volatile__ ("t 0x10\n\t" \
- "bcc 1f\n\t" \
- "mov %%o0, %0\n\t" \
- "sub %%g0, %%o0, %0\n\t" \
- "1:\n\t" \
- : "=r" (__res), "=&r" (__o0) \
- : "1" (__o0), "r" (__g1) \
- : "cc"); \
-if (__res < -255 || __res >= 0) \
- return (type) __res; \
-errno = -__res; \
-return -1; \
-}
-
-#define _syscall2(type,name,type1,arg1,type2,arg2) \
-type name(type1 arg1,type2 arg2) \
-{ \
-long __res; \
-register long __g1 __asm__ ("g1") = __NR_##name; \
-register long __o0 __asm__ ("o0") = (long)(arg1); \
-register long __o1 __asm__ ("o1") = (long)(arg2); \
-__asm__ __volatile__ ("t 0x10\n\t" \
- "bcc 1f\n\t" \
- "mov %%o0, %0\n\t" \
- "sub %%g0, %%o0, %0\n\t" \
- "1:\n\t" \
- : "=r" (__res), "=&r" (__o0) \
- : "1" (__o0), "r" (__o1), "r" (__g1) \
- : "cc"); \
-if (__res < -255 || __res >= 0) \
- return (type) __res; \
-errno = -__res; \
-return -1; \
-}
-
-#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
-type name(type1 arg1,type2 arg2,type3 arg3) \
-{ \
-long __res; \
-register long __g1 __asm__ ("g1") = __NR_##name; \
-register long __o0 __asm__ ("o0") = (long)(arg1); \
-register long __o1 __asm__ ("o1") = (long)(arg2); \
-register long __o2 __asm__ ("o2") = (long)(arg3); \
-__asm__ __volatile__ ("t 0x10\n\t" \
- "bcc 1f\n\t" \
- "mov %%o0, %0\n\t" \
- "sub %%g0, %%o0, %0\n\t" \
- "1:\n\t" \
- : "=r" (__res), "=&r" (__o0) \
- : "1" (__o0), "r" (__o1), "r" (__o2), "r" (__g1) \
- : "cc"); \
-if (__res < -255 || __res>=0) \
- return (type) __res; \
-errno = -__res; \
-return -1; \
-}
-
-#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
-type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
-{ \
-long __res; \
-register long __g1 __asm__ ("g1") = __NR_##name; \
-register long __o0 __asm__ ("o0") = (long)(arg1); \
-register long __o1 __asm__ ("o1") = (long)(arg2); \
-register long __o2 __asm__ ("o2") = (long)(arg3); \
-register long __o3 __asm__ ("o3") = (long)(arg4); \
-__asm__ __volatile__ ("t 0x10\n\t" \
- "bcc 1f\n\t" \
- "mov %%o0, %0\n\t" \
- "sub %%g0, %%o0, %0\n\t" \
- "1:\n\t" \
- : "=r" (__res), "=&r" (__o0) \
- : "1" (__o0), "r" (__o1), "r" (__o2), "r" (__o3), "r" (__g1) \
- : "cc"); \
-if (__res < -255 || __res>=0) \
- return (type) __res; \
-errno = -__res; \
-return -1; \
-}
-
-#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
- type5,arg5) \
-type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
-{ \
-long __res; \
-register long __g1 __asm__ ("g1") = __NR_##name; \
-register long __o0 __asm__ ("o0") = (long)(arg1); \
-register long __o1 __asm__ ("o1") = (long)(arg2); \
-register long __o2 __asm__ ("o2") = (long)(arg3); \
-register long __o3 __asm__ ("o3") = (long)(arg4); \
-register long __o4 __asm__ ("o4") = (long)(arg5); \
-__asm__ __volatile__ ("t 0x10\n\t" \
- "bcc 1f\n\t" \
- "mov %%o0, %0\n\t" \
- "sub %%g0, %%o0, %0\n\t" \
- "1:\n\t" \
- : "=r" (__res), "=&r" (__o0) \
- : "1" (__o0), "r" (__o1), "r" (__o2), "r" (__o3), "r" (__o4), "r" (__g1) \
- : "cc"); \
-if (__res < -255 || __res>=0) \
- return (type) __res; \
-errno = -__res; \
-return -1; \
-}
-
#define __ARCH_WANT_IPC_PARSE_VERSION
#define __ARCH_WANT_OLD_READDIR
#define __ARCH_WANT_STAT64
diff --git a/include/asm-sparc64/dma-mapping.h b/include/asm-sparc64/dma-mapping.h
index 27c46fbeebd..2f858a2df94 100644
--- a/include/asm-sparc64/dma-mapping.h
+++ b/include/asm-sparc64/dma-mapping.h
@@ -181,7 +181,7 @@ dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t siz
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-#define dma_is_consistent(d) (1)
+#define dma_is_consistent(d, h) (1)
static inline int
dma_get_cache_alignment(void)
@@ -210,7 +210,7 @@ dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
}
static inline void
-dma_cache_sync(void *vaddr, size_t size,
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction direction)
{
/* could define this in terms of the dma_cache ... operations,
diff --git a/include/asm-sparc64/futex.h b/include/asm-sparc64/futex.h
index 7392fc4a954..876312fe82c 100644
--- a/include/asm-sparc64/futex.h
+++ b/include/asm-sparc64/futex.h
@@ -45,7 +45,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
oparg = 1 << oparg;
- inc_preempt_count();
+ pagefault_disable();
switch (op) {
case FUTEX_OP_SET:
@@ -67,7 +67,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
ret = -ENOSYS;
}
- dec_preempt_count();
+ pagefault_enable();
if (!ret) {
switch (cmp) {
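The futex changes here (and in asm-x86_64/futex.h further below) replace the bare preempt-count manipulation with pagefault_disable()/pagefault_enable(), which name the real intent: the atomic user-space access in between must not sleep in the page-fault handler. Roughly, the helpers of this era boil down to the old calls plus compiler barriers; a sketch, not the verbatim header:

static inline void pagefault_disable(void)
{
        inc_preempt_count();
        barrier();      /* order the increment before the user access */
}

static inline void pagefault_enable(void)
{
        barrier();      /* order the user access before the decrement */
        dec_preempt_count();
        preempt_check_resched();
}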
diff --git a/include/asm-sparc64/pgalloc.h b/include/asm-sparc64/pgalloc.h
index 010f9cd0a67..5891ff7ba76 100644
--- a/include/asm-sparc64/pgalloc.h
+++ b/include/asm-sparc64/pgalloc.h
@@ -13,7 +13,7 @@
#include <asm/page.h>
/* Page table allocation/freeing. */
-extern kmem_cache_t *pgtable_cache;
+extern struct kmem_cache *pgtable_cache;
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
diff --git a/include/asm-sparc64/unistd.h b/include/asm-sparc64/unistd.h
index 63669dad0d7..47047536f26 100644
--- a/include/asm-sparc64/unistd.h
+++ b/include/asm-sparc64/unistd.h
@@ -332,124 +332,6 @@
* find a free slot in the 0-302 range.
*/
-#define _syscall0(type,name) \
-type name(void) \
-{ \
-long __res; \
-register long __g1 __asm__ ("g1") = __NR_##name; \
-__asm__ __volatile__ ("t 0x6d\n\t" \
- "sub %%g0, %%o0, %0\n\t" \
- "movcc %%xcc, %%o0, %0\n\t" \
- : "=r" (__res)\
- : "r" (__g1) \
- : "o0", "cc"); \
-if (__res >= 0) \
- return (type) __res; \
-errno = -__res; \
-return -1; \
-}
-
-#define _syscall1(type,name,type1,arg1) \
-type name(type1 arg1) \
-{ \
-long __res; \
-register long __g1 __asm__ ("g1") = __NR_##name; \
-register long __o0 __asm__ ("o0") = (long)(arg1); \
-__asm__ __volatile__ ("t 0x6d\n\t" \
- "sub %%g0, %%o0, %0\n\t" \
- "movcc %%xcc, %%o0, %0\n\t" \
- : "=r" (__res), "=&r" (__o0) \
- : "1" (__o0), "r" (__g1) \
- : "cc"); \
-if (__res >= 0) \
- return (type) __res; \
-errno = -__res; \
-return -1; \
-}
-
-#define _syscall2(type,name,type1,arg1,type2,arg2) \
-type name(type1 arg1,type2 arg2) \
-{ \
-long __res; \
-register long __g1 __asm__ ("g1") = __NR_##name; \
-register long __o0 __asm__ ("o0") = (long)(arg1); \
-register long __o1 __asm__ ("o1") = (long)(arg2); \
-__asm__ __volatile__ ("t 0x6d\n\t" \
- "sub %%g0, %%o0, %0\n\t" \
- "movcc %%xcc, %%o0, %0\n\t" \
- : "=r" (__res), "=&r" (__o0) \
- : "1" (__o0), "r" (__o1), "r" (__g1) \
- : "cc"); \
-if (__res >= 0) \
- return (type) __res; \
-errno = -__res; \
-return -1; \
-}
-
-#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
-type name(type1 arg1,type2 arg2,type3 arg3) \
-{ \
-long __res; \
-register long __g1 __asm__ ("g1") = __NR_##name; \
-register long __o0 __asm__ ("o0") = (long)(arg1); \
-register long __o1 __asm__ ("o1") = (long)(arg2); \
-register long __o2 __asm__ ("o2") = (long)(arg3); \
-__asm__ __volatile__ ("t 0x6d\n\t" \
- "sub %%g0, %%o0, %0\n\t" \
- "movcc %%xcc, %%o0, %0\n\t" \
- : "=r" (__res), "=&r" (__o0) \
- : "1" (__o0), "r" (__o1), "r" (__o2), "r" (__g1) \
- : "cc"); \
-if (__res>=0) \
- return (type) __res; \
-errno = -__res; \
-return -1; \
-}
-
-#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
-type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
-{ \
-long __res; \
-register long __g1 __asm__ ("g1") = __NR_##name; \
-register long __o0 __asm__ ("o0") = (long)(arg1); \
-register long __o1 __asm__ ("o1") = (long)(arg2); \
-register long __o2 __asm__ ("o2") = (long)(arg3); \
-register long __o3 __asm__ ("o3") = (long)(arg4); \
-__asm__ __volatile__ ("t 0x6d\n\t" \
- "sub %%g0, %%o0, %0\n\t" \
- "movcc %%xcc, %%o0, %0\n\t" \
- : "=r" (__res), "=&r" (__o0) \
- : "1" (__o0), "r" (__o1), "r" (__o2), "r" (__o3), "r" (__g1) \
- : "cc"); \
-if (__res>=0) \
- return (type) __res; \
-errno = -__res; \
-return -1; \
-}
-
-#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
- type5,arg5) \
-type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
-{ \
-long __res; \
-register long __g1 __asm__ ("g1") = __NR_##name; \
-register long __o0 __asm__ ("o0") = (long)(arg1); \
-register long __o1 __asm__ ("o1") = (long)(arg2); \
-register long __o2 __asm__ ("o2") = (long)(arg3); \
-register long __o3 __asm__ ("o3") = (long)(arg4); \
-register long __o4 __asm__ ("o4") = (long)(arg5); \
-__asm__ __volatile__ ("t 0x6d\n\t" \
- "sub %%g0, %%o0, %0\n\t" \
- "movcc %%xcc, %%o0, %0\n\t" \
- : "=r" (__res), "=&r" (__o0) \
- : "1" (__o0), "r" (__o1), "r" (__o2), "r" (__o3), "r" (__o4), "r" (__g1) \
- : "cc"); \
-if (__res>=0) \
- return (type) __res; \
-errno = -__res; \
-return -1; \
-}
-
/* sysconf options, for SunOS compatibility */
#define _SC_ARG_MAX 1
#define _SC_CHILD_MAX 2
diff --git a/include/asm-um/dma-mapping.h b/include/asm-um/dma-mapping.h
index babd2989511..f0ee4fb5591 100644
--- a/include/asm-um/dma-mapping.h
+++ b/include/asm-um/dma-mapping.h
@@ -94,7 +94,7 @@ dma_sync_sg(struct device *dev, struct scatterlist *sg, int nelems,
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-#define dma_is_consistent(d) (1)
+#define dma_is_consistent(d, h) (1)
static inline int
dma_get_cache_alignment(void)
@@ -112,7 +112,7 @@ dma_sync_single_range(struct device *dev, dma_addr_t dma_handle,
}
static inline void
-dma_cache_sync(void *vaddr, size_t size,
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction direction)
{
BUG();
diff --git a/include/asm-v850/irq.h b/include/asm-v850/irq.h
index 1bf096db8f4..88687c181f0 100644
--- a/include/asm-v850/irq.h
+++ b/include/asm-v850/irq.h
@@ -46,8 +46,6 @@ extern void
init_irq_handlers (int base_irq, int num, int interval,
struct hw_interrupt_type *irq_type);
-typedef void (*irq_handler_t)(int irq, void *data, struct pt_regs *regs);
-
/* Handle interrupt IRQ. REGS are the registers at the time of the
interrupt. */
extern unsigned int handle_irq (int irq, struct pt_regs *regs);
diff --git a/include/asm-v850/unistd.h b/include/asm-v850/unistd.h
index 737401e7d3a..2241ed45ecf 100644
--- a/include/asm-v850/unistd.h
+++ b/include/asm-v850/unistd.h
@@ -204,168 +204,8 @@
#define __NR_gettid 201
#define __NR_tkill 202
-
-/* Syscall protocol:
- Syscall number in r12, args in r6-r9, r13-r14
- Return value in r10
- Trap 0 for `short' syscalls, where all the args can fit in function
- call argument registers, and trap 1 when there are additional args in
- r13-r14. */
-
-#define SYSCALL_NUM "r12"
-#define SYSCALL_ARG0 "r6"
-#define SYSCALL_ARG1 "r7"
-#define SYSCALL_ARG2 "r8"
-#define SYSCALL_ARG3 "r9"
-#define SYSCALL_ARG4 "r13"
-#define SYSCALL_ARG5 "r14"
-#define SYSCALL_RET "r10"
-
-#define SYSCALL_SHORT_TRAP "0"
-#define SYSCALL_LONG_TRAP "1"
-
-/* Registers clobbered by any syscall. This _doesn't_ include the syscall
- number (r12) or the `extended arg' registers (r13, r14), even though
- they are actually clobbered too (this is because gcc's `asm' statement
- doesn't allow a clobber to be used as an input or output). */
-#define SYSCALL_CLOBBERS "r1", "r5", "r11", "r15", "r16", \
- "r17", "r18", "r19"
-
-/* Registers clobbered by a `short' syscall. This includes all clobbers
- except the syscall number (r12). */
-#define SYSCALL_SHORT_CLOBBERS SYSCALL_CLOBBERS, "r13", "r14"
-
#ifdef __KERNEL__
-#include <asm/clinkage.h>
-#include <linux/err.h>
-
-#define __syscall_return(type, res) \
- do { \
- /* user-visible error numbers are in the range -1 - -MAX_ERRNO: \
- see <asm-v850/errno.h> */ \
- if (__builtin_expect ((unsigned long)(res) >= (unsigned long)(-MAX_ERRNO), 0)) { \
- errno = -(res); \
- res = -1; \
- } \
- return (type) (res); \
- } while (0)
-
-
-#define _syscall0(type, name) \
-type name (void) \
-{ \
- register unsigned long __syscall __asm__ (SYSCALL_NUM) = __NR_##name; \
- register unsigned long __ret __asm__ (SYSCALL_RET); \
- __asm__ __volatile__ ("trap " SYSCALL_SHORT_TRAP \
- : "=r" (__ret), "=r" (__syscall) \
- : "1" (__syscall) \
- : SYSCALL_SHORT_CLOBBERS); \
- __syscall_return (type, __ret); \
-}
-
-#define _syscall1(type, name, atype, a) \
-type name (atype a) \
-{ \
- register atype __a __asm__ (SYSCALL_ARG0) = a; \
- register unsigned long __syscall __asm__ (SYSCALL_NUM) = __NR_##name; \
- register unsigned long __ret __asm__ (SYSCALL_RET); \
- __asm__ __volatile__ ("trap " SYSCALL_SHORT_TRAP \
- : "=r" (__ret), "=r" (__syscall) \
- : "1" (__syscall), "r" (__a) \
- : SYSCALL_SHORT_CLOBBERS); \
- __syscall_return (type, __ret); \
-}
-
-#define _syscall2(type, name, atype, a, btype, b) \
-type name (atype a, btype b) \
-{ \
- register atype __a __asm__ (SYSCALL_ARG0) = a; \
- register btype __b __asm__ (SYSCALL_ARG1) = b; \
- register unsigned long __syscall __asm__ (SYSCALL_NUM) = __NR_##name; \
- register unsigned long __ret __asm__ (SYSCALL_RET); \
- __asm__ __volatile__ ("trap " SYSCALL_SHORT_TRAP \
- : "=r" (__ret), "=r" (__syscall) \
- : "1" (__syscall), "r" (__a), "r" (__b) \
- : SYSCALL_SHORT_CLOBBERS); \
- __syscall_return (type, __ret); \
-}
-
-#define _syscall3(type, name, atype, a, btype, b, ctype, c) \
-type name (atype a, btype b, ctype c) \
-{ \
- register atype __a __asm__ (SYSCALL_ARG0) = a; \
- register btype __b __asm__ (SYSCALL_ARG1) = b; \
- register ctype __c __asm__ (SYSCALL_ARG2) = c; \
- register unsigned long __syscall __asm__ (SYSCALL_NUM) = __NR_##name; \
- register unsigned long __ret __asm__ (SYSCALL_RET); \
- __asm__ __volatile__ ("trap " SYSCALL_SHORT_TRAP \
- : "=r" (__ret), "=r" (__syscall) \
- : "1" (__syscall), "r" (__a), "r" (__b), "r" (__c) \
- : SYSCALL_SHORT_CLOBBERS); \
- __syscall_return (type, __ret); \
-}
-
-#define _syscall4(type, name, atype, a, btype, b, ctype, c, dtype, d) \
-type name (atype a, btype b, ctype c, dtype d) \
-{ \
- register atype __a __asm__ (SYSCALL_ARG0) = a; \
- register btype __b __asm__ (SYSCALL_ARG1) = b; \
- register ctype __c __asm__ (SYSCALL_ARG2) = c; \
- register dtype __d __asm__ (SYSCALL_ARG3) = d; \
- register unsigned long __syscall __asm__ (SYSCALL_NUM) = __NR_##name; \
- register unsigned long __ret __asm__ (SYSCALL_RET); \
- __asm__ __volatile__ ("trap " SYSCALL_SHORT_TRAP \
- : "=r" (__ret), "=r" (__syscall) \
- : "1" (__syscall), \
- "r" (__a), "r" (__b), "r" (__c), "r" (__d) \
- : SYSCALL_SHORT_CLOBBERS); \
- __syscall_return (type, __ret); \
-}
-
-#define _syscall5(type, name, atype, a, btype, b, ctype, c, dtype, d, etype,e)\
-type name (atype a, btype b, ctype c, dtype d, etype e) \
-{ \
- register atype __a __asm__ (SYSCALL_ARG0) = a; \
- register btype __b __asm__ (SYSCALL_ARG1) = b; \
- register ctype __c __asm__ (SYSCALL_ARG2) = c; \
- register dtype __d __asm__ (SYSCALL_ARG3) = d; \
- register etype __e __asm__ (SYSCALL_ARG4) = e; \
- register unsigned long __syscall __asm__ (SYSCALL_NUM) = __NR_##name; \
- register unsigned long __ret __asm__ (SYSCALL_RET); \
- __asm__ __volatile__ ("trap " SYSCALL_LONG_TRAP \
- : "=r" (__ret), "=r" (__syscall), "=r" (__e) \
- : "1" (__syscall), \
- "r" (__a), "r" (__b), "r" (__c), "r" (__d), "2" (__e) \
- : SYSCALL_CLOBBERS); \
- __syscall_return (type, __ret); \
-}
-
-#define __SYSCALL6_TRAP(syscall, ret, a, b, c, d, e, f) \
- __asm__ __volatile__ ("trap " SYSCALL_LONG_TRAP \
- : "=r" (ret), "=r" (syscall), \
- "=r" (e), "=r" (f) \
- : "1" (syscall), \
- "r" (a), "r" (b), "r" (c), "r" (d), \
- "2" (e), "3" (f) \
- : SYSCALL_CLOBBERS);
-
-#define _syscall6(type, name, atype, a, btype, b, ctype, c, dtype, d, etype, e, ftype, f) \
-type name (atype a, btype b, ctype c, dtype d, etype e, ftype f) \
-{ \
- register atype __a __asm__ (SYSCALL_ARG0) = a; \
- register btype __b __asm__ (SYSCALL_ARG1) = b; \
- register ctype __c __asm__ (SYSCALL_ARG2) = c; \
- register dtype __d __asm__ (SYSCALL_ARG3) = d; \
- register etype __e __asm__ (SYSCALL_ARG4) = e; \
- register etype __f __asm__ (SYSCALL_ARG5) = f; \
- register unsigned long __syscall __asm__ (SYSCALL_NUM) = __NR_##name; \
- register unsigned long __ret __asm__ (SYSCALL_RET); \
- __SYSCALL6_TRAP(__syscall, __ret, __a, __b, __c, __d, __e, __f); \
- __syscall_return (type, __ret); \
-}
-
-
#define __ARCH_WANT_IPC_PARSE_VERSION
#define __ARCH_WANT_OLD_READDIR
#define __ARCH_WANT_STAT64
diff --git a/include/asm-x86_64/Kbuild b/include/asm-x86_64/Kbuild
index 1ee9b07f3fe..ebd7117782a 100644
--- a/include/asm-x86_64/Kbuild
+++ b/include/asm-x86_64/Kbuild
@@ -6,13 +6,11 @@ ALTARCHDEF := defined __i386__
header-y += boot.h
header-y += bootsetup.h
-header-y += cpufeature.h
header-y += debugreg.h
header-y += ldt.h
header-y += msr.h
header-y += prctl.h
header-y += ptrace-abi.h
-header-y += setup.h
header-y += sigcontext32.h
header-y += ucontext.h
header-y += vsyscall32.h
diff --git a/include/asm-x86_64/alternative.h b/include/asm-x86_64/alternative.h
index a584826cc57..a6657b4f3e0 100644
--- a/include/asm-x86_64/alternative.h
+++ b/include/asm-x86_64/alternative.h
@@ -4,6 +4,7 @@
#ifdef __KERNEL__
#include <linux/types.h>
+#include <linux/stddef.h>
#include <asm/cpufeature.h>
struct alt_instr {
@@ -133,4 +134,15 @@ static inline void alternatives_smp_switch(int smp) {}
#define LOCK_PREFIX ""
#endif
+struct paravirt_patch;
+#ifdef CONFIG_PARAVIRT
+void apply_paravirt(struct paravirt_patch *start, struct paravirt_patch *end);
+#else
+static inline void
+apply_paravirt(struct paravirt_patch *start, struct paravirt_patch *end)
+{}
+#define __start_parainstructions NULL
+#define __stop_parainstructions NULL
+#endif
+
#endif /* _X86_64_ALTERNATIVE_H */
diff --git a/include/asm-x86_64/atomic.h b/include/asm-x86_64/atomic.h
index 93849f7abc2..706ca4b6000 100644
--- a/include/asm-x86_64/atomic.h
+++ b/include/asm-x86_64/atomic.h
@@ -189,9 +189,9 @@ static __inline__ int atomic_add_return(int i, atomic_t *v)
{
int __i = i;
__asm__ __volatile__(
- LOCK_PREFIX "xaddl %0, %1;"
- :"=r"(i)
- :"m"(v->counter), "0"(i));
+ LOCK_PREFIX "xaddl %0, %1"
+ :"+r" (i), "+m" (v->counter)
+ : : "memory");
return i + __i;
}
diff --git a/include/asm-x86_64/calgary.h b/include/asm-x86_64/calgary.h
index 6b93f5a3a5c..7ee90064571 100644
--- a/include/asm-x86_64/calgary.h
+++ b/include/asm-x86_64/calgary.h
@@ -51,6 +51,8 @@ struct iommu_table {
#define TCE_TABLE_SIZE_4M 6
#define TCE_TABLE_SIZE_8M 7
+extern int use_calgary;
+
#ifdef CONFIG_CALGARY_IOMMU
extern int calgary_iommu_init(void);
extern void detect_calgary(void);
diff --git a/include/asm-x86_64/cpufeature.h b/include/asm-x86_64/cpufeature.h
index ee792faaca0..0b3c686139f 100644
--- a/include/asm-x86_64/cpufeature.h
+++ b/include/asm-x86_64/cpufeature.h
@@ -29,7 +29,7 @@
#define X86_FEATURE_PSE36 (0*32+17) /* 36-bit PSEs */
#define X86_FEATURE_PN (0*32+18) /* Processor serial number */
#define X86_FEATURE_CLFLSH (0*32+19) /* Supports the CLFLUSH instruction */
-#define X86_FEATURE_DTES (0*32+21) /* Debug Trace Store */
+#define X86_FEATURE_DS (0*32+21) /* Debug Store */
#define X86_FEATURE_ACPI (0*32+22) /* ACPI via MSR */
#define X86_FEATURE_MMX (0*32+23) /* Multimedia Extensions */
#define X86_FEATURE_FXSR (0*32+24) /* FXSAVE and FXRSTOR instructions (fast save and restore) */
@@ -68,6 +68,8 @@
#define X86_FEATURE_FXSAVE_LEAK (3*32+7) /* FIP/FOP/FDP leaks through FXSAVE */
#define X86_FEATURE_UP (3*32+8) /* SMP kernel running on UP */
#define X86_FEATURE_ARCH_PERFMON (3*32+9) /* Intel Architectural PerfMon */
+#define X86_FEATURE_PEBS (3*32+10) /* Precise-Event Based Sampling */
+#define X86_FEATURE_BTS (3*32+11) /* Branch Trace Store */
/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
#define X86_FEATURE_XMM3 (4*32+ 0) /* Streaming SIMD Extensions-3 */
@@ -112,5 +114,8 @@
#define cpu_has_cyrix_arr 0
#define cpu_has_centaur_mcr 0
#define cpu_has_clflush boot_cpu_has(X86_FEATURE_CLFLSH)
+#define cpu_has_ds boot_cpu_has(X86_FEATURE_DS)
+#define cpu_has_pebs boot_cpu_has(X86_FEATURE_PEBS)
+#define cpu_has_bts boot_cpu_has(X86_FEATURE_BTS)
#endif /* __ASM_X8664_CPUFEATURE_H */
diff --git a/include/asm-x86_64/delay.h b/include/asm-x86_64/delay.h
index 65f64acc531..c2669f1f552 100644
--- a/include/asm-x86_64/delay.h
+++ b/include/asm-x86_64/delay.h
@@ -7,18 +7,21 @@
* Delay routines calling functions in arch/x86_64/lib/delay.c
*/
+/* Undefined functions to get compile-time errors */
extern void __bad_udelay(void);
extern void __bad_ndelay(void);
extern void __udelay(unsigned long usecs);
-extern void __ndelay(unsigned long usecs);
+extern void __ndelay(unsigned long nsecs);
extern void __const_udelay(unsigned long usecs);
extern void __delay(unsigned long loops);
+/* 0x10c7 is 2**32 / 1000000 (rounded up) */
#define udelay(n) (__builtin_constant_p(n) ? \
- ((n) > 20000 ? __bad_udelay() : __const_udelay((n) * 0x10c6ul)) : \
+ ((n) > 20000 ? __bad_udelay() : __const_udelay((n) * 0x10c7ul)) : \
__udelay(n))
+/* 0x5 is 2**32 / 1000000000 (rounded up) */
#define ndelay(n) (__builtin_constant_p(n) ? \
((n) > 20000 ? __bad_ndelay() : __const_udelay((n) * 5ul)) : \
__ndelay(n))
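The multiplier change is pure rounding: it approximates 2^32 / 10^6 = 4294.967..., so the old 0x10c6 (4294) truncated and could leave constant udelay()s marginally short, while 0x10c7 (4295) rounds up and errs on the long side. A throwaway check of the arithmetic:

#include <stdio.h>

int main(void)
{
        double per_usec = 4294967296.0 / 1000000.0;     /* 2^32 / 10^6 */
        printf("%.6f -> floor 0x%lx, ceil 0x%lx\n", per_usec,
               (unsigned long)per_usec,                 /* 0x10c6 */
               (unsigned long)per_usec + 1);            /* 0x10c7 */
        return 0;
}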
diff --git a/include/asm-x86_64/desc.h b/include/asm-x86_64/desc.h
index eb7723a4679..913d6ac0003 100644
--- a/include/asm-x86_64/desc.h
+++ b/include/asm-x86_64/desc.h
@@ -9,64 +9,13 @@
#include <linux/string.h>
#include <linux/smp.h>
+#include <asm/desc_defs.h>
#include <asm/segment.h>
#include <asm/mmu.h>
-// 8 byte segment descriptor
-struct desc_struct {
- u16 limit0;
- u16 base0;
- unsigned base1 : 8, type : 4, s : 1, dpl : 2, p : 1;
- unsigned limit : 4, avl : 1, l : 1, d : 1, g : 1, base2 : 8;
-} __attribute__((packed));
-
-struct n_desc_struct {
- unsigned int a,b;
-};
-
extern struct desc_struct cpu_gdt_table[GDT_ENTRIES];
-enum {
- GATE_INTERRUPT = 0xE,
- GATE_TRAP = 0xF,
- GATE_CALL = 0xC,
-};
-
-// 16byte gate
-struct gate_struct {
- u16 offset_low;
- u16 segment;
- unsigned ist : 3, zero0 : 5, type : 5, dpl : 2, p : 1;
- u16 offset_middle;
- u32 offset_high;
- u32 zero1;
-} __attribute__((packed));
-
-#define PTR_LOW(x) ((unsigned long)(x) & 0xFFFF)
-#define PTR_MIDDLE(x) (((unsigned long)(x) >> 16) & 0xFFFF)
-#define PTR_HIGH(x) ((unsigned long)(x) >> 32)
-
-enum {
- DESC_TSS = 0x9,
- DESC_LDT = 0x2,
-};
-
-// LDT or TSS descriptor in the GDT. 16 bytes.
-struct ldttss_desc {
- u16 limit0;
- u16 base0;
- unsigned base1 : 8, type : 5, dpl : 2, p : 1;
- unsigned limit1 : 4, zero0 : 3, g : 1, base2 : 8;
- u32 base3;
- u32 zero1;
-} __attribute__((packed));
-
-struct desc_ptr {
- unsigned short size;
- unsigned long address;
-} __attribute__((packed)) ;
-
#define load_TR_desc() asm volatile("ltr %w0"::"r" (GDT_ENTRY_TSS*8))
#define load_LDT_desc() asm volatile("lldt %w0"::"r" (GDT_ENTRY_LDT*8))
#define clear_LDT() asm volatile("lldt %w0"::"r" (0))
diff --git a/include/asm-x86_64/desc_defs.h b/include/asm-x86_64/desc_defs.h
new file mode 100644
index 00000000000..08900407009
--- /dev/null
+++ b/include/asm-x86_64/desc_defs.h
@@ -0,0 +1,69 @@
+/* Written 2000 by Andi Kleen */
+#ifndef __ARCH_DESC_DEFS_H
+#define __ARCH_DESC_DEFS_H
+
+/*
+ * Segment descriptor structure definitions, usable from both x86_64 and i386
+ * archs.
+ */
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+
+// 8 byte segment descriptor
+struct desc_struct {
+ u16 limit0;
+ u16 base0;
+ unsigned base1 : 8, type : 4, s : 1, dpl : 2, p : 1;
+ unsigned limit : 4, avl : 1, l : 1, d : 1, g : 1, base2 : 8;
+} __attribute__((packed));
+
+struct n_desc_struct {
+ unsigned int a,b;
+};
+
+enum {
+ GATE_INTERRUPT = 0xE,
+ GATE_TRAP = 0xF,
+ GATE_CALL = 0xC,
+};
+
+// 16byte gate
+struct gate_struct {
+ u16 offset_low;
+ u16 segment;
+ unsigned ist : 3, zero0 : 5, type : 5, dpl : 2, p : 1;
+ u16 offset_middle;
+ u32 offset_high;
+ u32 zero1;
+} __attribute__((packed));
+
+#define PTR_LOW(x) ((unsigned long)(x) & 0xFFFF)
+#define PTR_MIDDLE(x) (((unsigned long)(x) >> 16) & 0xFFFF)
+#define PTR_HIGH(x) ((unsigned long)(x) >> 32)
+
+enum {
+ DESC_TSS = 0x9,
+ DESC_LDT = 0x2,
+};
+
+// LDT or TSS descriptor in the GDT. 16 bytes.
+struct ldttss_desc {
+ u16 limit0;
+ u16 base0;
+ unsigned base1 : 8, type : 5, dpl : 2, p : 1;
+ unsigned limit1 : 4, zero0 : 3, g : 1, base2 : 8;
+ u32 base3;
+ u32 zero1;
+} __attribute__((packed));
+
+struct desc_ptr {
+ unsigned short size;
+ unsigned long address;
+} __attribute__((packed)) ;
+
+
+#endif /* !__ASSEMBLY__ */
+
+#endif
diff --git a/include/asm-x86_64/dma-mapping.h b/include/asm-x86_64/dma-mapping.h
index 10174b110a5..be9ec689072 100644
--- a/include/asm-x86_64/dma-mapping.h
+++ b/include/asm-x86_64/dma-mapping.h
@@ -180,12 +180,13 @@ static inline int dma_get_cache_alignment(void)
return boot_cpu_data.x86_clflush_size;
}
-#define dma_is_consistent(h) 1
+#define dma_is_consistent(d, h) 1
extern int dma_set_mask(struct device *dev, u64 mask);
static inline void
-dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction dir)
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+ enum dma_data_direction dir)
{
flush_write_buffers();
}
diff --git a/include/asm-x86_64/futex.h b/include/asm-x86_64/futex.h
index 9804bf07b09..5cdfb08013c 100644
--- a/include/asm-x86_64/futex.h
+++ b/include/asm-x86_64/futex.h
@@ -55,7 +55,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
return -EFAULT;
- inc_preempt_count();
+ pagefault_disable();
switch (op) {
case FUTEX_OP_SET:
@@ -78,7 +78,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
ret = -ENOSYS;
}
- dec_preempt_count();
+ pagefault_enable();
if (!ret) {
switch (cmp) {
diff --git a/include/asm-x86_64/genapic.h b/include/asm-x86_64/genapic.h
index a0e9a4b9348..b80f4bb5f27 100644
--- a/include/asm-x86_64/genapic.h
+++ b/include/asm-x86_64/genapic.h
@@ -30,6 +30,6 @@ struct genapic {
};
-extern struct genapic *genapic;
+extern struct genapic *genapic, *genapic_force, apic_flat;
#endif
diff --git a/include/asm-x86_64/msr.h b/include/asm-x86_64/msr.h
index 37e194169fa..952783d35c7 100644
--- a/include/asm-x86_64/msr.h
+++ b/include/asm-x86_64/msr.h
@@ -169,8 +169,8 @@ static inline unsigned int cpuid_edx(unsigned int op)
#define MSR_LSTAR 0xc0000082 /* long mode SYSCALL target */
#define MSR_CSTAR 0xc0000083 /* compatibility mode SYSCALL target */
#define MSR_SYSCALL_MASK 0xc0000084 /* EFLAGS mask for syscall */
-#define MSR_FS_BASE 0xc0000100 /* 64bit GS base */
-#define MSR_GS_BASE 0xc0000101 /* 64bit FS base */
+#define MSR_FS_BASE 0xc0000100 /* 64bit FS base */
+#define MSR_GS_BASE 0xc0000101 /* 64bit GS base */
#define MSR_KERNEL_GS_BASE 0xc0000102 /* SwapGS GS shadow (or USER_GS from kernel) */
/* EFER bits: */
#define _EFER_SCE 0 /* SYSCALL/SYSRET */
@@ -210,6 +210,10 @@ static inline unsigned int cpuid_edx(unsigned int op)
#define MSR_IA32_LASTINTFROMIP 0x1dd
#define MSR_IA32_LASTINTTOIP 0x1de
+#define MSR_IA32_PEBS_ENABLE 0x3f1
+#define MSR_IA32_DS_AREA 0x600
+#define MSR_IA32_PERF_CAPABILITIES 0x345
+
#define MSR_MTRRfix64K_00000 0x250
#define MSR_MTRRfix16K_80000 0x258
#define MSR_MTRRfix16K_A0000 0x259
@@ -407,4 +411,13 @@ static inline unsigned int cpuid_edx(unsigned int op)
#define MSR_P4_U2L_ESCR0 0x3b0
#define MSR_P4_U2L_ESCR1 0x3b1
+/* Intel Core-based CPU performance counters */
+#define MSR_CORE_PERF_FIXED_CTR0 0x309
+#define MSR_CORE_PERF_FIXED_CTR1 0x30a
+#define MSR_CORE_PERF_FIXED_CTR2 0x30b
+#define MSR_CORE_PERF_FIXED_CTR_CTRL 0x38d
+#define MSR_CORE_PERF_GLOBAL_STATUS 0x38e
+#define MSR_CORE_PERF_GLOBAL_CTRL 0x38f
+#define MSR_CORE_PERF_GLOBAL_OVF_CTRL 0x390
+
#endif
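The added MSR numbers (PEBS enable, the DS save area and the architectural fixed counters) match Intel's documentation for Core-based CPUs, and kernel code accesses them through the usual rdmsrl()/wrmsrl() helpers. A minimal sketch, assuming the CPU actually implements the fixed counters (bit 32 of the global control MSR enables fixed counter 0):

u64 ctrl;

rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);        /* read current enable bits */
ctrl |= 1ULL << 32;                             /* enable fixed counter 0   */
wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);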
diff --git a/include/asm-x86_64/nmi.h b/include/asm-x86_64/nmi.h
index f367d4014b4..72375e7d32a 100644
--- a/include/asm-x86_64/nmi.h
+++ b/include/asm-x86_64/nmi.h
@@ -77,4 +77,7 @@ extern int proc_nmi_enabled(struct ctl_table *, int , struct file *,
extern int unknown_nmi_panic;
+void __trigger_all_cpu_backtrace(void);
+#define trigger_all_cpu_backtrace() __trigger_all_cpu_backtrace()
+
#endif /* ASM_NMI_H */
diff --git a/include/asm-x86_64/pci-direct.h b/include/asm-x86_64/pci-direct.h
index eba9cb471df..6823fa4f1af 100644
--- a/include/asm-x86_64/pci-direct.h
+++ b/include/asm-x86_64/pci-direct.h
@@ -10,6 +10,7 @@ extern u32 read_pci_config(u8 bus, u8 slot, u8 func, u8 offset);
extern u8 read_pci_config_byte(u8 bus, u8 slot, u8 func, u8 offset);
extern u16 read_pci_config_16(u8 bus, u8 slot, u8 func, u8 offset);
extern void write_pci_config(u8 bus, u8 slot, u8 func, u8 offset, u32 val);
+extern void write_pci_config_byte(u8 bus, u8 slot, u8 func, u8 offset, u8 val);
extern int early_pci_allowed(void);
diff --git a/include/asm-x86_64/pgtable.h b/include/asm-x86_64/pgtable.h
index 0555c1c4d8f..59901c690a0 100644
--- a/include/asm-x86_64/pgtable.h
+++ b/include/asm-x86_64/pgtable.h
@@ -221,20 +221,19 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long
#define __S110 PAGE_SHARED_EXEC
#define __S111 PAGE_SHARED_EXEC
-static inline unsigned long pgd_bad(pgd_t pgd)
-{
- unsigned long val = pgd_val(pgd);
- val &= ~PTE_MASK;
- val &= ~(_PAGE_USER | _PAGE_DIRTY);
- return val & ~(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED);
-}
+static inline unsigned long pgd_bad(pgd_t pgd)
+{
+ return pgd_val(pgd) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER);
+}
static inline unsigned long pud_bad(pud_t pud)
{
- unsigned long val = pud_val(pud);
- val &= ~PTE_MASK;
- val &= ~(_PAGE_USER | _PAGE_DIRTY);
- return val & ~(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED);
+ return pud_val(pud) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER);
+}
+
+static inline unsigned long pmd_bad(pmd_t pmd)
+{
+ return pmd_val(pmd) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER);
}
#define pte_none(x) (!pte_val(x))
@@ -347,7 +346,6 @@ static inline int pmd_large(pmd_t pte) {
#define pmd_none(x) (!pmd_val(x))
#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
#define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0)
-#define pmd_bad(x) ((pmd_val(x) & (~PTE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE )
#define pfn_pmd(nr,prot) (__pmd(((nr) << PAGE_SHIFT) | pgprot_val(prot)))
#define pmd_pfn(x) ((pmd_val(x) & __PHYSICAL_MASK) >> PAGE_SHIFT)
diff --git a/include/asm-x86_64/processor.h b/include/asm-x86_64/processor.h
index cef17e0f828..76552d72804 100644
--- a/include/asm-x86_64/processor.h
+++ b/include/asm-x86_64/processor.h
@@ -475,6 +475,14 @@ static inline void __mwait(unsigned long eax, unsigned long ecx)
: :"a" (eax), "c" (ecx));
}
+static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
+{
+ /* "mwait %eax,%ecx;" */
+ asm volatile(
+ "sti; .byte 0x0f,0x01,0xc9;"
+ : :"a" (eax), "c" (ecx));
+}
+
extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx);
#define stack_current() \
diff --git a/include/asm-x86_64/proto.h b/include/asm-x86_64/proto.h
index e72cfcdf534..6d324b83897 100644
--- a/include/asm-x86_64/proto.h
+++ b/include/asm-x86_64/proto.h
@@ -61,7 +61,6 @@ extern void numa_initmem_init(unsigned long start_pfn, unsigned long end_pfn);
extern unsigned long numa_free_all_bootmem(void);
extern void reserve_bootmem_generic(unsigned long phys, unsigned len);
-extern void free_bootmem_generic(unsigned long phys, unsigned len);
extern void load_gs_index(unsigned gs);
@@ -88,6 +87,7 @@ extern void syscall32_cpu_init(void);
extern void setup_node_bootmem(int nodeid, unsigned long start, unsigned long end);
extern void early_quirks(void);
+extern void quirk_intel_irqbalance(void);
extern void check_efer(void);
extern int unhandled_signal(struct task_struct *tsk, int sig);
diff --git a/include/asm-x86_64/rio.h b/include/asm-x86_64/rio.h
new file mode 100644
index 00000000000..c7350f6d201
--- /dev/null
+++ b/include/asm-x86_64/rio.h
@@ -0,0 +1,74 @@
+/*
+ * Derived from include/asm-i386/mach-summit/mach_mpparse.h
+ * and include/asm-i386/mach-default/bios_ebda.h
+ *
+ * Author: Laurent Vivier <Laurent.Vivier@bull.net>
+ */
+
+#ifndef __ASM_RIO_H
+#define __ASM_RIO_H
+
+#define RIO_TABLE_VERSION 3
+
+struct rio_table_hdr {
+ u8 version; /* Version number of this data structure */
+ u8 num_scal_dev; /* # of Scalability devices */
+ u8 num_rio_dev; /* # of RIO I/O devices */
+} __attribute__((packed));
+
+struct scal_detail {
+ u8 node_id; /* Scalability Node ID */
+ u32 CBAR; /* Address of 1MB register space */
+ u8 port0node; /* Node ID port connected to: 0xFF=None */
+ u8 port0port; /* Port num port connected to: 0,1,2, or */
+ /* 0xFF=None */
+ u8 port1node; /* Node ID port connected to: 0xFF = None */
+ u8 port1port; /* Port num port connected to: 0,1,2, or */
+ /* 0xFF=None */
+ u8 port2node; /* Node ID port connected to: 0xFF = None */
+ u8 port2port; /* Port num port connected to: 0,1,2, or */
+ /* 0xFF=None */
+ u8 chassis_num; /* 1 based Chassis number (1 = boot node) */
+} __attribute__((packed));
+
+struct rio_detail {
+ u8 node_id; /* RIO Node ID */
+ u32 BBAR; /* Address of 1MB register space */
+ u8 type; /* Type of device */
+ u8 owner_id; /* Node ID of Hurricane that owns this */
+ /* node */
+ u8 port0node; /* Node ID port connected to: 0xFF=None */
+ u8 port0port; /* Port num port connected to: 0,1,2, or */
+ /* 0xFF=None */
+ u8 port1node; /* Node ID port connected to: 0xFF=None */
+ u8 port1port; /* Port num port connected to: 0,1,2, or */
+ /* 0xFF=None */
+ u8 first_slot; /* Lowest slot number below this Calgary */
+ u8 status; /* Bit 0 = 1 : the XAPIC is used */
+ /* = 0 : the XAPIC is not used, ie: */
+ /* ints fwded to another XAPIC */
+ /* Bits1:7 Reserved */
+ u8 WP_index; /* instance index - lower ones have */
+ /* lower slot numbers/PCI bus numbers */
+ u8 chassis_num; /* 1 based Chassis number */
+} __attribute__((packed));
+
+enum {
+ HURR_SCALABILTY = 0, /* Hurricane Scalability info */
+ HURR_RIOIB = 2, /* Hurricane RIOIB info */
+ COMPAT_CALGARY = 4, /* Compatibility Calgary */
+ ALT_CALGARY = 5, /* Second Planar Calgary */
+};
+
+/*
+ * there is a real-mode segmented pointer pointing to the
+ * 4K EBDA area at 0x40E.
+ */
+static inline unsigned long get_bios_ebda(void)
+{
+ unsigned long address = *(unsigned short *)phys_to_virt(0x40EUL);
+ address <<= 4;
+ return address;
+}
+
+#endif /* __ASM_RIO_H */
diff --git a/include/asm-x86_64/smp.h b/include/asm-x86_64/smp.h
index d6b7c057edb..e17b9ec42e9 100644
--- a/include/asm-x86_64/smp.h
+++ b/include/asm-x86_64/smp.h
@@ -82,11 +82,6 @@ extern u8 x86_cpu_to_apicid[NR_CPUS]; /* physical ID */
extern u8 x86_cpu_to_log_apicid[NR_CPUS];
extern u8 bios_cpu_apicid[];
-static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
-{
- return cpus_addr(cpumask)[0];
-}
-
static inline int cpu_present_to_apicid(int mps_cpu)
{
if (mps_cpu < NR_CPUS)
@@ -118,13 +113,6 @@ static __inline int logical_smp_processor_id(void)
#define cpu_physical_id(cpu) x86_cpu_to_apicid[cpu]
#else
#define cpu_physical_id(cpu) boot_cpu_id
-static inline int smp_call_function_single(int cpuid, void (*func) (void *info),
- void *info, int retry, int wait)
-{
- /* Disable interrupts here? */
- func(info);
- return 0;
-}
#endif /* !CONFIG_SMP */
#endif
diff --git a/include/asm-x86_64/spinlock.h b/include/asm-x86_64/spinlock.h
index 05ef097ba55..88bf981e73c 100644
--- a/include/asm-x86_64/spinlock.h
+++ b/include/asm-x86_64/spinlock.h
@@ -36,7 +36,34 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
"2:\t" : "=m" (lock->slock) : : "memory");
}
-#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
+/*
+ * Same as __raw_spin_lock, but reenable interrupts during spinning.
+ */
+#ifndef CONFIG_PROVE_LOCKING
+static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
+{
+ asm volatile(
+ "\n1:\t"
+ LOCK_PREFIX " ; decl %0\n\t"
+ "jns 5f\n"
+ "testl $0x200, %1\n\t" /* interrupts were disabled? */
+ "jz 4f\n\t"
+ "sti\n"
+ "3:\t"
+ "rep;nop\n\t"
+ "cmpl $0, %0\n\t"
+ "jle 3b\n\t"
+ "cli\n\t"
+ "jmp 1b\n"
+ "4:\t"
+ "rep;nop\n\t"
+ "cmpl $0, %0\n\t"
+ "jg 1b\n\t"
+ "jmp 4b\n"
+ "5:\n\t"
+ : "+m" (lock->slock) : "r" ((unsigned)flags) : "memory");
+}
+#endif
static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
diff --git a/include/asm-x86_64/stacktrace.h b/include/asm-x86_64/stacktrace.h
index 5eb9799bef7..6f0b5459430 100644
--- a/include/asm-x86_64/stacktrace.h
+++ b/include/asm-x86_64/stacktrace.h
@@ -1,6 +1,8 @@
#ifndef _ASM_STACKTRACE_H
#define _ASM_STACKTRACE_H 1
+extern int kstack_depth_to_print;
+
/* Generic stack tracer with callbacks */
struct stacktrace_ops {
diff --git a/include/asm-x86_64/unistd.h b/include/asm-x86_64/unistd.h
index 777288eb7e7..c5f596e71fa 100644
--- a/include/asm-x86_64/unistd.h
+++ b/include/asm-x86_64/unistd.h
@@ -622,25 +622,7 @@ __SYSCALL(__NR_move_pages, sys_move_pages)
#define __NR_syscall_max __NR_move_pages
-#ifdef __KERNEL__
-#include <linux/err.h>
-#endif
-
#ifndef __NO_STUBS
-
-/* user-visible error numbers are in the range -1 - -MAX_ERRNO */
-
-#define __syscall_clobber "r11","rcx","memory"
-
-#define __syscall_return(type, res) \
-do { \
- if ((unsigned long)(res) >= (unsigned long)(-MAX_ERRNO)) { \
- errno = -(res); \
- res = -1; \
- } \
- return (type) (res); \
-} while (0)
-
#define __ARCH_WANT_OLD_READDIR
#define __ARCH_WANT_OLD_STAT
#define __ARCH_WANT_SYS_ALARM
@@ -664,87 +646,6 @@ do { \
#define __ARCH_WANT_SYS_TIME
#define __ARCH_WANT_COMPAT_SYS_TIME
-#define __syscall "syscall"
-
-#define _syscall0(type,name) \
-type name(void) \
-{ \
-long __res; \
-__asm__ volatile (__syscall \
- : "=a" (__res) \
- : "0" (__NR_##name) : __syscall_clobber ); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall1(type,name,type1,arg1) \
-type name(type1 arg1) \
-{ \
-long __res; \
-__asm__ volatile (__syscall \
- : "=a" (__res) \
- : "0" (__NR_##name),"D" ((long)(arg1)) : __syscall_clobber ); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall2(type,name,type1,arg1,type2,arg2) \
-type name(type1 arg1,type2 arg2) \
-{ \
-long __res; \
-__asm__ volatile (__syscall \
- : "=a" (__res) \
- : "0" (__NR_##name),"D" ((long)(arg1)),"S" ((long)(arg2)) : __syscall_clobber ); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
-type name(type1 arg1,type2 arg2,type3 arg3) \
-{ \
-long __res; \
-__asm__ volatile (__syscall \
- : "=a" (__res) \
- : "0" (__NR_##name),"D" ((long)(arg1)),"S" ((long)(arg2)), \
- "d" ((long)(arg3)) : __syscall_clobber); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
-type name (type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
-{ \
-long __res; \
-__asm__ volatile ("movq %5,%%r10 ;" __syscall \
- : "=a" (__res) \
- : "0" (__NR_##name),"D" ((long)(arg1)),"S" ((long)(arg2)), \
- "d" ((long)(arg3)),"g" ((long)(arg4)) : __syscall_clobber,"r10" ); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
- type5,arg5) \
-type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
-{ \
-long __res; \
-__asm__ volatile ("movq %5,%%r10 ; movq %6,%%r8 ; " __syscall \
- : "=a" (__res) \
- : "0" (__NR_##name),"D" ((long)(arg1)),"S" ((long)(arg2)), \
- "d" ((long)(arg3)),"g" ((long)(arg4)),"g" ((long)(arg5)) : \
- __syscall_clobber,"r8","r10" ); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
- type5,arg5,type6,arg6) \
-type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,type6 arg6) \
-{ \
-long __res; \
-__asm__ volatile ("movq %5,%%r10 ; movq %6,%%r8 ; movq %7,%%r9 ; " __syscall \
- : "=a" (__res) \
- : "0" (__NR_##name),"D" ((long)(arg1)),"S" ((long)(arg2)), \
- "d" ((long)(arg3)), "g" ((long)(arg4)), "g" ((long)(arg5)), \
- "g" ((long)(arg6)) : \
- __syscall_clobber,"r8","r10","r9" ); \
-__syscall_return(type,__res); \
-}
-
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
diff --git a/include/asm-x86_64/unwind.h b/include/asm-x86_64/unwind.h
index 2e7ff10fd77..2f6349e4871 100644
--- a/include/asm-x86_64/unwind.h
+++ b/include/asm-x86_64/unwind.h
@@ -87,14 +87,10 @@ extern int arch_unwind_init_running(struct unwind_frame_info *,
static inline int arch_unw_user_mode(const struct unwind_frame_info *info)
{
-#if 0 /* This can only work when selector register saves/restores
- are properly annotated (and tracked in UNW_REGISTER_INFO). */
- return user_mode(&info->regs);
-#else
- return (long)info->regs.rip >= 0
+ return user_mode(&info->regs)
+ || (long)info->regs.rip >= 0
|| (info->regs.rip >= VSYSCALL_START && info->regs.rip < VSYSCALL_END)
|| (long)info->regs.rsp >= 0;
-#endif
}
#else
diff --git a/include/asm-x86_64/vsyscall.h b/include/asm-x86_64/vsyscall.h
index 01d1c17e284..05cb8dd200d 100644
--- a/include/asm-x86_64/vsyscall.h
+++ b/include/asm-x86_64/vsyscall.h
@@ -10,6 +10,7 @@ enum vsyscall_num {
#define VSYSCALL_START (-10UL << 20)
#define VSYSCALL_SIZE 1024
#define VSYSCALL_END (-2UL << 20)
+#define VSYSCALL_MAPPED_PAGES 1
#define VSYSCALL_ADDR(vsyscall_nr) (VSYSCALL_START+VSYSCALL_SIZE*(vsyscall_nr))
#ifdef __KERNEL__
diff --git a/include/asm-xtensa/dma-mapping.h b/include/asm-xtensa/dma-mapping.h
index c39c91dfcc6..82b03b3a2ee 100644
--- a/include/asm-xtensa/dma-mapping.h
+++ b/include/asm-xtensa/dma-mapping.h
@@ -170,10 +170,10 @@ dma_get_cache_alignment(void)
return L1_CACHE_BYTES;
}
-#define dma_is_consistent(d) (1)
+#define dma_is_consistent(d, h) (1)
static inline void
-dma_cache_sync(void *vaddr, size_t size,
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction direction)
{
consistent_sync(vaddr, size, direction);
diff --git a/include/asm-xtensa/unistd.h b/include/asm-xtensa/unistd.h
index 411f810a55c..2e1a1b997e7 100644
--- a/include/asm-xtensa/unistd.h
+++ b/include/asm-xtensa/unistd.h
@@ -218,190 +218,6 @@
#define SYSXTENSA_COUNT 5 /* count of syscall0 functions*/
-#ifdef __KERNEL__
-#include <linux/linkage.h>
-
-#define __syscall_return(type, res) return ((type)(res))
-
-/* Tensilica's xt-xcc compiler is much more aggressive at code
- * optimization than gcc. Multiple __asm__ statements are
- * insufficient for xt-xcc because subsequent optimization passes
- * (beyond the front-end that knows of __asm__ statements and other
- * such GNU Extensions to C) can modify the register selection for
- * containment of C variables.
- *
- * xt-xcc cannot modify the contents of a single __asm__ statement, so
- * we create single-asm versions of the syscall macros that are
- * suitable and optimal for both xt-xcc and gcc.
- *
- * Linux takes system-call arguments in registers. The following
- * design is optimized for user-land apps (e.g., glibc) which
- * typically have a function wrapper around the "syscall" assembly
- * instruction. It satisfies the Xtensa ABI while minimizing argument
- * shifting.
- *
- * The Xtensa ABI and software conventions require the system-call
- * number in a2. If an argument exists in a2, we move it to the next
- * available register. Note that for improved efficiency, we do NOT
- * shift all parameters down one register to maintain the original
- * order.
- *
- * At best case (zero arguments), we just write the syscall number to
- * a2. At worst case (1 to 6 arguments), we move the argument in a2
- * to the next available register, then write the syscall number to
- * a2.
- *
- * For clarity, the following truth table enumerates all possibilities.
- *
- * arguments syscall number arg0, arg1, arg2, arg3, arg4, arg5
- * --------- -------------- ----------------------------------
- * 0 a2
- * 1 a2 a3
- * 2 a2 a4, a3
- * 3 a2 a5, a3, a4
- * 4 a2 a6, a3, a4, a5
- * 5 a2 a7, a3, a4, a5, a6
- * 6 a2 a8, a3, a4, a5, a6, a7
- */
-
-#define _syscall0(type,name) \
-type name(void) \
-{ \
-long __res; \
-__asm__ __volatile__ ( \
- " movi a2, %1 \n" \
- " syscall \n" \
- " mov %0, a2 \n" \
- : "=a" (__res) \
- : "i" (__NR_##name) \
- : "a2" \
- ); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall1(type,name,type0,arg0) \
-type name(type0 arg0) \
-{ \
-long __res; \
-__asm__ __volatile__ ( \
- " mov a3, %2 \n" \
- " movi a2, %1 \n" \
- " syscall \n" \
- " mov %0, a2 \n" \
- : "=a" (__res) \
- : "i" (__NR_##name), "a" (arg0) \
- : "a2", "a3" \
- ); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall2(type,name,type0,arg0,type1,arg1) \
-type name(type0 arg0,type1 arg1) \
-{ \
-long __res; \
-__asm__ __volatile__ ( \
- " mov a4, %2 \n" \
- " mov a3, %3 \n" \
- " movi a2, %1 \n" \
- " syscall \n" \
- " mov %0, a2 \n" \
- : "=a" (__res) \
- : "i" (__NR_##name), "a" (arg0), "a" (arg1) \
- : "a2", "a3", "a4" \
- ); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall3(type,name,type0,arg0,type1,arg1,type2,arg2) \
-type name(type0 arg0,type1 arg1,type2 arg2) \
-{ \
-long __res; \
-__asm__ __volatile__ ( \
- " mov a5, %2 \n" \
- " mov a4, %4 \n" \
- " mov a3, %3 \n" \
- " movi a2, %1 \n" \
- " syscall \n" \
- " mov %0, a2 \n" \
- : "=a" (__res) \
- : "i" (__NR_##name), "a" (arg0), "a" (arg1), "a" (arg2) \
- : "a2", "a3", "a4", "a5" \
- ); \
-__syscall_return(type,__res); \
-}
-
-#define _syscall4(type,name,type0,arg0,type1,arg1,type2,arg2,type3,arg3) \
-type name(type0 arg0,type1 arg1,type2 arg2,type3 arg3) \
-{ \
-long __res; \
-__asm__ __volatile__ ( \
- " mov a6, %2 \n" \
- " mov a5, %5 \n" \
- " mov a4, %4 \n" \
- " mov a3, %3 \n" \
- " movi a2, %1 \n" \
- " syscall \n" \
- " mov %0, a2 \n" \
- : "=a" (__res) \
- : "i" (__NR_##name), "a" (arg0), "a" (arg1), "a" (arg2), "a" (arg3) \
- : "a2", "a3", "a4", "a5", "a6" \
- ); \
-__syscall_return(type,__res); \
-}
-
-/* Note that we save and restore the a7 frame pointer.
- * Including a7 in the clobber list doesn't do what you'd expect.
- */
-#define _syscall5(type,name,type0,arg0,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
-type name(type0 arg0,type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
-{ \
-long __res; \
-__asm__ __volatile__ ( \
- " mov a9, a7 \n" \
- " mov a7, %2 \n" \
- " mov a6, %6 \n" \
- " mov a5, %5 \n" \
- " mov a4, %4 \n" \
- " mov a3, %3 \n" \
- " movi a2, %1 \n" \
- " syscall \n" \
- " mov a7, a9 \n" \
- " mov %0, a2 \n" \
- : "=a" (__res) \
- : "i" (__NR_##name), "a" (arg0), "a" (arg1), "a" (arg2), \
- "a" (arg3), "a" (arg4) \
- : "a2", "a3", "a4", "a5", "a6", "a9" \
- ); \
-__syscall_return(type,__res); \
-}
-
-/* Note that we save and restore the a7 frame pointer.
- * Including a7 in the clobber list doesn't do what you'd expect.
- */
-#define _syscall6(type,name,type0,arg0,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5) \
-type name(type0 arg0,type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
-{ \
-long __res; \
-__asm__ __volatile__ ( \
- " mov a9, a7 \n" \
- " mov a8, %2 \n" \
- " mov a7, %7 \n" \
- " mov a6, %6 \n" \
- " mov a5, %5 \n" \
- " mov a4, %4 \n" \
- " mov a3, %3 \n" \
- " movi a2, %1 \n" \
- " syscall \n" \
- " mov a7, a9 \n" \
- " mov %0, a2 \n" \
- : "=a" (__res) \
- : "i" (__NR_##name), "a" (arg0), "a" (arg1), "a" (arg2), \
- "a" (arg3), "a" (arg4), "a" (arg5) \
- : "a2", "a3", "a4", "a5", "a6", "a8", "a9" \
- ); \
-__syscall_return(type,__res); \
-}
-
/*
* "Conditional" syscalls
*
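Editor's note: with the _syscallN() macros gone from the exported header, userspace is expected to go through its C library instead. A rough userland equivalent of what _syscall3() used to generate, using the generic syscall(3) wrapper (illustrative only; the C library already provides write()):

#include <unistd.h>
#include <sys/syscall.h>

static long my_write(int fd, const void *buf, unsigned long count)
{
	/* syscall() marshals the arguments per the Xtensa ABI described
	 * in the removed comment block, so no hand-written asm is needed. */
	return syscall(SYS_write, fd, buf, count);
}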
diff --git a/include/crypto/b128ops.h b/include/crypto/b128ops.h
new file mode 100644
index 00000000000..0b8e6bc5530
--- /dev/null
+++ b/include/crypto/b128ops.h
@@ -0,0 +1,80 @@
+/* b128ops.h - common 128-bit block operations
+ *
+ * Copyright (c) 2003, Dr Brian Gladman, Worcester, UK.
+ * Copyright (c) 2006, Rik Snel <rsnel@cube.dyndns.org>
+ *
+ * Based on Dr Brian Gladman's (GPL'd) work published at
+ * http://fp.gladman.plus.com/cryptography_technology/index.htm
+ * See the original copyright notice below.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+/*
+ ---------------------------------------------------------------------------
+ Copyright (c) 2003, Dr Brian Gladman, Worcester, UK. All rights reserved.
+
+ LICENSE TERMS
+
+ The free distribution and use of this software in both source and binary
+ form is allowed (with or without changes) provided that:
+
+ 1. distributions of this source code include the above copyright
+ notice, this list of conditions and the following disclaimer;
+
+ 2. distributions in binary form include the above copyright
+ notice, this list of conditions and the following disclaimer
+ in the documentation and/or other associated materials;
+
+ 3. the copyright holder's name is not used to endorse products
+ built using this software without specific written permission.
+
+ ALTERNATIVELY, provided that this notice is retained in full, this product
+ may be distributed under the terms of the GNU General Public License (GPL),
+ in which case the provisions of the GPL apply INSTEAD OF those given above.
+
+ DISCLAIMER
+
+ This software is provided 'as is' with no explicit or implied warranties
+ in respect of its properties, including, but not limited to, correctness
+ and/or fitness for purpose.
+ ---------------------------------------------------------------------------
+ Issue Date: 13/06/2006
+*/
+
+#ifndef _CRYPTO_B128OPS_H
+#define _CRYPTO_B128OPS_H
+
+#include <linux/types.h>
+
+typedef struct {
+ u64 a, b;
+} u128;
+
+typedef struct {
+ __be64 a, b;
+} be128;
+
+typedef struct {
+ __le64 b, a;
+} le128;
+
+static inline void u128_xor(u128 *r, const u128 *p, const u128 *q)
+{
+ r->a = p->a ^ q->a;
+ r->b = p->b ^ q->b;
+}
+
+static inline void be128_xor(be128 *r, const be128 *p, const be128 *q)
+{
+ u128_xor((u128 *)r, (u128 *)p, (u128 *)q);
+}
+
+static inline void le128_xor(le128 *r, const le128 *p, const le128 *q)
+{
+ u128_xor((u128 *)r, (u128 *)p, (u128 *)q);
+}
+
+#endif /* _CRYPTO_B128OPS_H */
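Editor's note: for context, a minimal sketch of how a block-cipher mode might use these helpers to XOR a keystream block into a data block in place (names are illustrative):

static void example_xor_keystream(be128 *block, const be128 *keystream)
{
	/* Plain 128-bit XOR; aliasing the destination with an input is
	 * fine since each word is read before it is written. */
	be128_xor(block, block, keystream);
}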
diff --git a/include/crypto/gf128mul.h b/include/crypto/gf128mul.h
new file mode 100644
index 00000000000..4fd31520244
--- /dev/null
+++ b/include/crypto/gf128mul.h
@@ -0,0 +1,198 @@
+/* gf128mul.h - GF(2^128) multiplication functions
+ *
+ * Copyright (c) 2003, Dr Brian Gladman, Worcester, UK.
+ * Copyright (c) 2006 Rik Snel <rsnel@cube.dyndns.org>
+ *
+ * Based on Dr Brian Gladman's (GPL'd) work published at
+ * http://fp.gladman.plus.com/cryptography_technology/index.htm
+ * See the original copyright notice below.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+/*
+ ---------------------------------------------------------------------------
+ Copyright (c) 2003, Dr Brian Gladman, Worcester, UK. All rights reserved.
+
+ LICENSE TERMS
+
+ The free distribution and use of this software in both source and binary
+ form is allowed (with or without changes) provided that:
+
+ 1. distributions of this source code include the above copyright
+ notice, this list of conditions and the following disclaimer;
+
+ 2. distributions in binary form include the above copyright
+ notice, this list of conditions and the following disclaimer
+ in the documentation and/or other associated materials;
+
+ 3. the copyright holder's name is not used to endorse products
+ built using this software without specific written permission.
+
+ ALTERNATIVELY, provided that this notice is retained in full, this product
+ may be distributed under the terms of the GNU General Public License (GPL),
+ in which case the provisions of the GPL apply INSTEAD OF those given above.
+
+ DISCLAIMER
+
+ This software is provided 'as is' with no explicit or implied warranties
+ in respect of its properties, including, but not limited to, correctness
+ and/or fitness for purpose.
+ ---------------------------------------------------------------------------
+ Issue Date: 31/01/2006
+
+ An implementation of field multiplication in Galois Field GF(128)
+*/
+
+#ifndef _CRYPTO_GF128MUL_H
+#define _CRYPTO_GF128MUL_H
+
+#include <crypto/b128ops.h>
+#include <linux/slab.h>
+
+/* Comment by Rik:
+ *
+ * For some background on GF(2^128) see for example: http://-
+ * csrc.nist.gov/CryptoToolkit/modes/proposedmodes/gcm/gcm-revised-spec.pdf
+ *
+ * The elements of GF(2^128) := GF(2)[X]/(X^128-X^7-X^2-X^1-1) can
+ * be mapped to computer memory in a variety of ways. Let's examine
+ * three common cases.
+ *
+ * Take a look at the 16 binary octets below in memory order. The msb's
+ * are left and the lsb's are right. char b[16] is an array and b[0] is
+ * the first octet.
+ *
+ * 80000000 00000000 00000000 00000000 .... 00000000 00000000 00000000
+ * b[0] b[1] b[2] b[3] b[13] b[14] b[15]
+ *
+ * Every bit is a coefficient of some power of X. We can store the bits
+ * in every byte in little-endian order and the bytes themselves also in
+ * little endian order. I will call this lle (little-little-endian).
+ * The above buffer represents the polynomial 1, and X^7+X^2+X^1+1 looks
+ * like 11100001 00000000 .... 00000000 = { 0xE1, 0x00, }.
+ * This format was originally implemented in gf128mul and is used
+ * in GCM (Galois/Counter mode) and in ABL (Arbitrary Block Length).
+ *
+ * Another convention says: store the bits in bigendian order and the
+ * bytes also. This is bbe (big-big-endian). Now the buffer above
+ * represents X^127. X^7+X^2+X^1+1 looks like 00000000 .... 10000111,
+ * b[15] = 0x87 and the rest is 0. LRW uses this convention and bbe
+ * is partly implemented.
+ *
+ * Both of the above formats are easy to implement on big-endian
+ * machines.
+ *
+ * EME (which is patent encumbered) uses the ble format (bits are stored
+ * in big endian order and the bytes in little endian). The above buffer
+ * represents X^7 in this case and the primitive polynomial is b[0] = 0x87.
+ *
+ * The common machine word-size is smaller than 128 bits, so to make
+ * an efficient implementation we must split into machine word sizes.
+ * This file uses 32-bit words for the moment. Machine endianness comes into
+ * play. The lle format in relation to machine endianness is discussed
+ * below by the original author of gf128mul Dr Brian Gladman.
+ *
+ * Let's look at the bbe and ble format on a little endian machine.
+ *
+ * bbe on a little endian machine u32 x[4]:
+ *
+ * MS x[0] LS MS x[1] LS
+ * ms ls ms ls ms ls ms ls ms ls ms ls ms ls ms ls
+ * 103..96 111.104 119.112 127.120 71...64 79...72 87...80 95...88
+ *
+ * MS x[2] LS MS x[3] LS
+ * ms ls ms ls ms ls ms ls ms ls ms ls ms ls ms ls
+ * 39...32 47...40 55...48 63...56 07...00 15...08 23...16 31...24
+ *
+ * ble on a little endian machine
+ *
+ * MS x[0] LS MS x[1] LS
+ * ms ls ms ls ms ls ms ls ms ls ms ls ms ls ms ls
+ * 31...24 23...16 15...08 07...00 63...56 55...48 47...40 39...32
+ *
+ * MS x[2] LS MS x[3] LS
+ * ms ls ms ls ms ls ms ls ms ls ms ls ms ls ms ls
+ * 95...88 87...80 79...72 71...64 127.120 119.112 111.104 103..96
+ *
+ * Multiplications in GF(2^128) are mostly bit-shifts, so you see why
+ * ble (and lbe also) are easier to implement on a little-endian
+ * machine than on a big-endian machine. The converse holds for bbe
+ * and lle.
+ *
+ * Note: to have good alignment, it seems to me that it is sufficient
+ * to keep elements of GF(2^128) in type u64[2]. On 32-bit wordsize
+ * machines this will automatically be aligned to the word size, and the
+ * same holds on a 64-bit machine.
+ */
+/* Multiply a GF128 field element by x. Field elements are held in arrays
+ of bytes in which field bits 8n..8n + 7 are held in byte[n], with lower
+ indexed bits placed in the more numerically significant bit positions
+ within bytes.
+
+ On little endian machines the bit indexes translate into the bit
+ positions within four 32-bit words in the following way
+
+ MS x[0] LS MS x[1] LS
+ ms ls ms ls ms ls ms ls ms ls ms ls ms ls ms ls
+ 24...31 16...23 08...15 00...07 56...63 48...55 40...47 32...39
+
+ MS x[2] LS MS x[3] LS
+ ms ls ms ls ms ls ms ls ms ls ms ls ms ls ms ls
+ 88...95 80...87 72...79 64...71 120.127 112.119 104.111 96..103
+
+ On big endian machines the bit indexes translate into the bit
+ positions within four 32-bit words in the following way
+
+ MS x[0] LS MS x[1] LS
+ ms ls ms ls ms ls ms ls ms ls ms ls ms ls ms ls
+ 00...07 08...15 16...23 24...31 32...39 40...47 48...55 56...63
+
+ MS x[2] LS MS x[3] LS
+ ms ls ms ls ms ls ms ls ms ls ms ls ms ls ms ls
+ 64...71 72...79 80...87 88...95 96..103 104.111 112.119 120.127
+*/
+
+/* A slow generic version of gf_mul, implemented for lle and bbe
+ * It multiplies a and b and puts the result in a */
+void gf128mul_lle(be128 *a, const be128 *b);
+
+void gf128mul_bbe(be128 *a, const be128 *b);
+
+
+/* 4k table optimization */
+
+struct gf128mul_4k {
+ be128 t[256];
+};
+
+struct gf128mul_4k *gf128mul_init_4k_lle(const be128 *g);
+struct gf128mul_4k *gf128mul_init_4k_bbe(const be128 *g);
+void gf128mul_4k_lle(be128 *a, struct gf128mul_4k *t);
+void gf128mul_4k_bbe(be128 *a, struct gf128mul_4k *t);
+
+static inline void gf128mul_free_4k(struct gf128mul_4k *t)
+{
+ kfree(t);
+}
+
+
+/* 64k table optimization, implemented for lle and bbe */
+
+struct gf128mul_64k {
+ struct gf128mul_4k *t[16];
+};
+
+/* First initialise the table with the constant factor you want to
+ * multiply by, then call gf128mul_64k_lle() (or _bbe) with the other
+ * factor in the first argument and the table in the second; the
+ * product is written back into *a. */
+struct gf128mul_64k *gf128mul_init_64k_lle(const be128 *g);
+struct gf128mul_64k *gf128mul_init_64k_bbe(const be128 *g);
+void gf128mul_free_64k(struct gf128mul_64k *t);
+void gf128mul_64k_lle(be128 *a, struct gf128mul_64k *t);
+void gf128mul_64k_bbe(be128 *a, struct gf128mul_64k *t);
+
+#endif /* _CRYPTO_GF128MUL_H */
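Editor's note: a hedged sketch of the intended calling sequence for the 4k-table variant, based purely on the declarations above (error handling and any locking a real user would need are omitted):

static struct gf128mul_4k *example_precompute(const be128 *h)
{
	/* Build the 256-entry table for the fixed factor h once. */
	return gf128mul_init_4k_lle(h);	/* NULL on allocation failure */
}

static void example_multiply(be128 *x, struct gf128mul_4k *table)
{
	/* x = x * h in GF(2^128), lle convention, result written in place. */
	gf128mul_4k_lle(x, table);
}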
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index ff433126361..e618b25b5ad 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -221,6 +221,7 @@ unifdef-y += if_bridge.h
unifdef-y += if_ec.h
unifdef-y += if_eql.h
unifdef-y += if_ether.h
+unifdef-y += if_fddi.h
unifdef-y += if_frad.h
unifdef-y += if_ltalk.h
unifdef-y += if_pppox.h
@@ -282,6 +283,7 @@ unifdef-y += nvram.h
unifdef-y += parport.h
unifdef-y += patchkey.h
unifdef-y += pci.h
+unifdef-y += personality.h
unifdef-y += pktcdvd.h
unifdef-y += pmu.h
unifdef-y += poll.h
@@ -337,6 +339,7 @@ unifdef-y += videodev.h
unifdef-y += wait.h
unifdef-y += wanrouter.h
unifdef-y += watchdog.h
+unifdef-y += wireless.h
unifdef-y += xfrm.h
objhdr-y += version.h
diff --git a/include/linux/aio.h b/include/linux/aio.h
index 9e350fd44d7..3372ec6bf53 100644
--- a/include/linux/aio.h
+++ b/include/linux/aio.h
@@ -111,7 +111,6 @@ struct kiocb {
size_t ki_nbytes; /* copy of iocb->aio_nbytes */
char __user *ki_buf; /* remaining iocb->aio_buf */
size_t ki_left; /* remaining bytes */
- long ki_retried; /* just for testing */
struct iovec ki_inline_vec; /* inline vector */
struct iovec *ki_iovec;
unsigned long ki_nr_segs;
@@ -238,7 +237,6 @@ do { \
} while (0)
#define io_wait_to_kiocb(wait) container_of(wait, struct kiocb, ki_wait)
-#define is_retried_kiocb(iocb) ((iocb)->ki_retried > 1)
#include <linux/aio_abi.h>
diff --git a/include/linux/audit.h b/include/linux/audit.h
index b2ca666d999..0e07db6cc0d 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -101,6 +101,10 @@
#define AUDIT_MAC_CIPSOV4_DEL 1408 /* NetLabel: del CIPSOv4 DOI entry */
#define AUDIT_MAC_MAP_ADD 1409 /* NetLabel: add LSM domain mapping */
#define AUDIT_MAC_MAP_DEL 1410 /* NetLabel: del LSM domain mapping */
+#define AUDIT_MAC_IPSEC_ADDSA 1411 /* Add a XFRM state */
+#define AUDIT_MAC_IPSEC_DELSA 1412 /* Delete a XFRM state */
+#define AUDIT_MAC_IPSEC_ADDSPD 1413 /* Add a XFRM policy */
+#define AUDIT_MAC_IPSEC_DELSPD 1414 /* Delete a XFRM policy */
#define AUDIT_FIRST_KERN_ANOM_MSG 1700
#define AUDIT_LAST_KERN_ANOM_MSG 1799
@@ -377,6 +381,7 @@ extern void auditsc_get_stamp(struct audit_context *ctx,
struct timespec *t, unsigned int *serial);
extern int audit_set_loginuid(struct task_struct *task, uid_t loginuid);
extern uid_t audit_get_loginuid(struct audit_context *ctx);
+extern void audit_log_task_context(struct audit_buffer *ab);
extern int __audit_ipc_obj(struct kern_ipc_perm *ipcp);
extern int __audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, mode_t mode);
extern int audit_bprm(struct linux_binprm *bprm);
@@ -449,6 +454,7 @@ extern int audit_n_rules;
#define audit_inode_update(i) do { ; } while (0)
#define auditsc_get_stamp(c,t,s) do { BUG(); } while (0)
#define audit_get_loginuid(c) ({ -1; })
+#define audit_log_task_context(b) do { ; } while (0)
#define audit_ipc_obj(i) ({ 0; })
#define audit_ipc_set_perm(q,u,g,m) ({ 0; })
#define audit_bprm(p) ({ 0; })
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index 31e9abb6d97..2275f274870 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -119,8 +119,7 @@ extern void *alloc_large_system_hash(const char *tablename,
unsigned int *_hash_mask,
unsigned long limit);
-#define HASH_HIGHMEM 0x00000001 /* Consider highmem? */
-#define HASH_EARLY 0x00000002 /* Allocating during early boot? */
+#define HASH_EARLY 0x00000001 /* Allocating during early boot? */
/* Only NUMA needs hash distribution.
* IA64 is known to have sufficient vmalloc space.
diff --git a/include/linux/bottom_half.h b/include/linux/bottom_half.h
new file mode 100644
index 00000000000..777dbf695d4
--- /dev/null
+++ b/include/linux/bottom_half.h
@@ -0,0 +1,10 @@
+#ifndef _LINUX_BH_H
+#define _LINUX_BH_H
+
+extern void local_bh_disable(void);
+extern void __local_bh_enable(void);
+extern void _local_bh_enable(void);
+extern void local_bh_enable(void);
+extern void local_bh_enable_ip(unsigned long ip);
+
+#endif /* _LINUX_BH_H */
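Editor's note: these declarations simply move out of <linux/interrupt.h> (see the hunk further down); usage is unchanged. A minimal sketch of the usual pairing around data shared with softirq context:

#include <linux/bottom_half.h>

static void example_update_shared_state(void)
{
	local_bh_disable();	/* keep softirqs off this CPU */
	/* ... touch state that a tasklet or softirq also uses ... */
	local_bh_enable();
}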
diff --git a/include/linux/carta_random32.h b/include/linux/carta_random32.h
deleted file mode 100644
index f6f3bd9f20b..00000000000
--- a/include/linux/carta_random32.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Fast, simple, yet decent quality random number generator based on
- * a paper by David G. Carta ("Two Fast Implementations of the
- * `Minimal Standard' Random Number Generator," Communications of the
- * ACM, January, 1990).
- *
- * Copyright (c) 2002-2006 Hewlett-Packard Development Company, L.P.
- * Contributed by Stephane Eranian <eranian@hpl.hp.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- * 02111-1307 USA
- */
-#ifndef _LINUX_CARTA_RANDOM32_H_
-#define _LINUX_CARTA_RANDOM32_H_
-
-u64 carta_random32(u64 seed);
-
-#endif /* _LINUX_CARTA_RANDOM32_H_ */
diff --git a/include/linux/cciss_ioctl.h b/include/linux/cciss_ioctl.h
index 6e27f42e3a5..cb57c30081a 100644
--- a/include/linux/cciss_ioctl.h
+++ b/include/linux/cciss_ioctl.h
@@ -80,7 +80,7 @@ typedef __u32 DriverVer_type;
#define HWORD __u16
#define DWORD __u32
-#define CISS_MAX_LUN 16
+#define CISS_MAX_LUN 1024
#define LEVEL2LUN 1 // index into Target(x) structure, due to byte swapping
#define LEVEL3LUN 0
diff --git a/include/linux/cdev.h b/include/linux/cdev.h
index ee5f53f2ca1..f309b00e986 100644
--- a/include/linux/cdev.h
+++ b/include/linux/cdev.h
@@ -2,6 +2,10 @@
#define _LINUX_CDEV_H
#ifdef __KERNEL__
+#include <linux/kobject.h>
+#include <linux/kdev_t.h>
+#include <linux/list.h>
+
struct cdev {
struct kobject kobj;
struct module *owner;
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index f02d71bf689..bfb520212d7 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -24,10 +24,11 @@
#include <linux/compiler.h>
#include <linux/cpumask.h>
#include <asm/semaphore.h>
+#include <linux/mutex.h>
struct cpu {
int node_id; /* The node which contains the CPU */
- int no_control; /* Should the sysfs control file be created? */
+ int hotpluggable; /* creates sysfs control file if hotpluggable */
struct sys_device sysdev;
};
@@ -74,6 +75,17 @@ extern struct sysdev_class cpu_sysdev_class;
#ifdef CONFIG_HOTPLUG_CPU
/* Stop CPUs going up and down. */
+
+static inline void cpuhotplug_mutex_lock(struct mutex *cpu_hp_mutex)
+{
+ mutex_lock(cpu_hp_mutex);
+}
+
+static inline void cpuhotplug_mutex_unlock(struct mutex *cpu_hp_mutex)
+{
+ mutex_unlock(cpu_hp_mutex);
+}
+
extern void lock_cpu_hotplug(void);
extern void unlock_cpu_hotplug(void);
#define hotcpu_notifier(fn, pri) { \
@@ -85,17 +97,24 @@ extern void unlock_cpu_hotplug(void);
#define unregister_hotcpu_notifier(nb) unregister_cpu_notifier(nb)
int cpu_down(unsigned int cpu);
#define cpu_is_offline(cpu) unlikely(!cpu_online(cpu))
-#else
+
+#else /* CONFIG_HOTPLUG_CPU */
+
+static inline void cpuhotplug_mutex_lock(struct mutex *cpu_hp_mutex)
+{ }
+static inline void cpuhotplug_mutex_unlock(struct mutex *cpu_hp_mutex)
+{ }
+
#define lock_cpu_hotplug() do { } while (0)
#define unlock_cpu_hotplug() do { } while (0)
#define lock_cpu_hotplug_interruptible() 0
-#define hotcpu_notifier(fn, pri) do { } while (0)
-#define register_hotcpu_notifier(nb) do { } while (0)
-#define unregister_hotcpu_notifier(nb) do { } while (0)
+#define hotcpu_notifier(fn, pri) do { (void)(fn); } while (0)
+#define register_hotcpu_notifier(nb) do { (void)(nb); } while (0)
+#define unregister_hotcpu_notifier(nb) do { (void)(nb); } while (0)
/* CPUs don't go offline once they're online w/o CONFIG_HOTPLUG_CPU */
static inline int cpu_is_offline(int cpu) { return 0; }
-#endif
+#endif /* CONFIG_HOTPLUG_CPU */
#ifdef CONFIG_SUSPEND_SMP
extern int disable_nonboot_cpus(void);
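Editor's note: the new cpuhotplug_mutex_* helpers are for subsystems that guard their own hotplug-sensitive state with a private mutex also taken from their CPU notifier; with CONFIG_HOTPLUG_CPU disabled they compile away. A hedged sketch (the mutex and function names are illustrative):

static DEFINE_MUTEX(example_cpu_mutex);	/* also taken in the CPU notifier */

static void example_walk_online_cpus(void)
{
	cpuhotplug_mutex_lock(&example_cpu_mutex);
	/* ... iterate cpu_online_map knowing our notifier cannot race ... */
	cpuhotplug_mutex_unlock(&example_cpu_mutex);
}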
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 4d8adf66368..8821e1f75b4 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -23,6 +23,7 @@ extern void cpuset_fork(struct task_struct *p);
extern void cpuset_exit(struct task_struct *p);
extern cpumask_t cpuset_cpus_allowed(struct task_struct *p);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
+#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
void cpuset_update_task_memory_state(void);
#define cpuset_nodes_subset_current_mems_allowed(nodes) \
@@ -45,7 +46,7 @@ extern int cpuset_excl_nodes_overlap(const struct task_struct *p);
extern int cpuset_memory_pressure_enabled;
extern void __cpuset_memory_pressure_bump(void);
-extern struct file_operations proc_cpuset_operations;
+extern const struct file_operations proc_cpuset_operations;
extern char *cpuset_task_status_allowed(struct task_struct *task, char *buffer);
extern void cpuset_lock(void);
@@ -83,6 +84,7 @@ static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
return node_possible_map;
}
+#define cpuset_current_mems_allowed (node_online_map)
static inline void cpuset_init_current_mems_allowed(void) {}
static inline void cpuset_update_task_memory_state(void) {}
#define cpuset_nodes_subset_current_mems_allowed(nodes) (1)
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 6485e9716b3..4aa9046601d 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -241,12 +241,8 @@ int crypto_unregister_alg(struct crypto_alg *alg);
* Algorithm query interface.
*/
#ifdef CONFIG_CRYPTO
-int crypto_alg_available(const char *name, u32 flags)
- __deprecated_for_modules;
int crypto_has_alg(const char *name, u32 type, u32 mask);
#else
-static int crypto_alg_available(const char *name, u32 flags)
- __deprecated_for_modules;
static inline int crypto_alg_available(const char *name, u32 flags)
{
return 0;
@@ -707,16 +703,6 @@ static inline void crypto_cipher_decrypt_one(struct crypto_cipher *tfm,
dst, src);
}
-void crypto_digest_init(struct crypto_tfm *tfm) __deprecated_for_modules;
-void crypto_digest_update(struct crypto_tfm *tfm,
- struct scatterlist *sg, unsigned int nsg)
- __deprecated_for_modules;
-void crypto_digest_final(struct crypto_tfm *tfm, u8 *out)
- __deprecated_for_modules;
-void crypto_digest_digest(struct crypto_tfm *tfm,
- struct scatterlist *sg, unsigned int nsg, u8 *out)
- __deprecated_for_modules;
-
static inline struct crypto_hash *__crypto_hash_cast(struct crypto_tfm *tfm)
{
return (struct crypto_hash *)tfm;
@@ -729,14 +715,6 @@ static inline struct crypto_hash *crypto_hash_cast(struct crypto_tfm *tfm)
return __crypto_hash_cast(tfm);
}
-static int crypto_digest_setkey(struct crypto_tfm *tfm, const u8 *key,
- unsigned int keylen) __deprecated;
-static inline int crypto_digest_setkey(struct crypto_tfm *tfm,
- const u8 *key, unsigned int keylen)
-{
- return tfm->crt_hash.setkey(crypto_hash_cast(tfm), key, keylen);
-}
-
static inline struct crypto_hash *crypto_alloc_hash(const char *alg_name,
u32 type, u32 mask)
{
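Editor's note: with the deprecated crypto_alg_available() declarations dropped, the replacement pattern is crypto_has_alg() plus the normal allocation call. A minimal sketch using only interfaces declared in this header (the helper name is illustrative):

static struct crypto_hash *example_get_sha1(void)
{
	if (!crypto_has_alg("sha1", 0, 0))
		return NULL;

	/* Caller must check with IS_ERR() and eventually crypto_free_hash(). */
	return crypto_alloc_hash("sha1", 0, CRYPTO_ALG_ASYNC);
}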
diff --git a/include/linux/debug_locks.h b/include/linux/debug_locks.h
index 952bee79a8f..a1c10b0c4cf 100644
--- a/include/linux/debug_locks.h
+++ b/include/linux/debug_locks.h
@@ -24,7 +24,7 @@ extern int debug_locks_off(void);
int __ret = 0; \
\
if (unlikely(c)) { \
- if (debug_locks_off()) \
+ if (debug_locks_silent || debug_locks_off()) \
WARN_ON(1); \
__ret = 1; \
} \
diff --git a/include/linux/delayacct.h b/include/linux/delayacct.h
index 561e2a77805..55d1ca5e60f 100644
--- a/include/linux/delayacct.h
+++ b/include/linux/delayacct.h
@@ -30,7 +30,7 @@
#ifdef CONFIG_TASK_DELAY_ACCT
extern int delayacct_on; /* Delay accounting turned on/off */
-extern kmem_cache_t *delayacct_cache;
+extern struct kmem_cache *delayacct_cache;
extern void delayacct_init(void);
extern void __delayacct_tsk_init(struct task_struct *);
extern void __delayacct_tsk_exit(struct task_struct *);
diff --git a/include/linux/device.h b/include/linux/device.h
index 583a341e016..49ab53ce92d 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -371,6 +371,9 @@ struct device {
core doesn't touch it */
struct dev_pm_info power;
+#ifdef CONFIG_NUMA
+ int numa_node; /* NUMA node this device is close to */
+#endif
u64 *dma_mask; /* dma mask (if dma'able device) */
u64 coherent_dma_mask;/* Like dma_mask, but for
alloc_coherent mappings as
@@ -394,6 +397,25 @@ struct device {
void (*release)(struct device * dev);
};
+#ifdef CONFIG_NUMA
+static inline int dev_to_node(struct device *dev)
+{
+ return dev->numa_node;
+}
+static inline void set_dev_node(struct device *dev, int node)
+{
+ dev->numa_node = node;
+}
+#else
+static inline int dev_to_node(struct device *dev)
+{
+ return -1;
+}
+static inline void set_dev_node(struct device *dev, int node)
+{
+}
+#endif
+
static inline void *
dev_get_drvdata (struct device *dev)
{
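Editor's note: a short sketch of what the new accessors enable, allocating per-device data on the node the device sits on; the !CONFIG_NUMA fallback of -1 keeps the call harmless (the helper name is illustrative):

static void *example_alloc_near_device(struct device *dev, size_t size)
{
	/* dev_to_node() returns -1 when NUMA is off or unknown, which
	 * kmalloc_node() treats as "no preference". */
	return kmalloc_node(size, GFP_KERNEL, dev_to_node(dev));
}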
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 66d621dbcb6..df1c91855f0 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -300,8 +300,9 @@ extern int efi_mem_attribute_range (unsigned long phys_addr, unsigned long size,
extern int __init efi_uart_console_only (void);
extern void efi_initialize_iomem_resources(struct resource *code_resource,
struct resource *data_resource);
-extern unsigned long __init efi_get_time(void);
+extern unsigned long efi_get_time(void);
extern int __init efi_set_rtc_mmss(unsigned long nowtime);
+extern int is_available_memory(efi_memory_desc_t * md);
extern struct efi_memory_map memmap;
/**
diff --git a/include/linux/elf.h b/include/linux/elf.h
index 743d5c8e6d3..60713e6ea29 100644
--- a/include/linux/elf.h
+++ b/include/linux/elf.h
@@ -6,6 +6,8 @@
#include <linux/elf-em.h>
#include <asm/elf.h>
+struct file;
+
#ifndef elf_read_implies_exec
/* Executables for which elf_read_implies_exec() returns TRUE will
have the READ_IMPLIES_EXEC personality flag set automatically.
@@ -358,6 +360,7 @@ extern Elf32_Dyn _DYNAMIC [];
#define elfhdr elf32_hdr
#define elf_phdr elf32_phdr
#define elf_note elf32_note
+#define elf_addr_t Elf32_Off
#else
@@ -365,6 +368,7 @@ extern Elf64_Dyn _DYNAMIC [];
#define elfhdr elf64_hdr
#define elf_phdr elf64_phdr
#define elf_note elf64_note
+#define elf_addr_t Elf64_Off
#endif
diff --git a/include/linux/ext3_jbd.h b/include/linux/ext3_jbd.h
index ce0e6109aff..8c43b13a02f 100644
--- a/include/linux/ext3_jbd.h
+++ b/include/linux/ext3_jbd.h
@@ -109,74 +109,32 @@ int ext3_mark_inode_dirty(handle_t *handle, struct inode *inode);
* been done yet.
*/
-void ext3_journal_abort_handle(const char *caller, const char *err_fn,
- struct buffer_head *bh, handle_t *handle, int err);
-
-static inline int
-__ext3_journal_get_undo_access(const char *where, handle_t *handle,
- struct buffer_head *bh)
+static inline void ext3_journal_release_buffer(handle_t *handle,
+ struct buffer_head *bh)
{
- int err = journal_get_undo_access(handle, bh);
- if (err)
- ext3_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
- return err;
+ journal_release_buffer(handle, bh);
}
-static inline int
-__ext3_journal_get_write_access(const char *where, handle_t *handle,
- struct buffer_head *bh)
-{
- int err = journal_get_write_access(handle, bh);
- if (err)
- ext3_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
- return err;
-}
+void ext3_journal_abort_handle(const char *caller, const char *err_fn,
+ struct buffer_head *bh, handle_t *handle, int err);
-static inline void
-ext3_journal_release_buffer(handle_t *handle, struct buffer_head *bh)
-{
- journal_release_buffer(handle, bh);
-}
+int __ext3_journal_get_undo_access(const char *where, handle_t *handle,
+ struct buffer_head *bh);
-static inline int
-__ext3_journal_forget(const char *where, handle_t *handle, struct buffer_head *bh)
-{
- int err = journal_forget(handle, bh);
- if (err)
- ext3_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
- return err;
-}
+int __ext3_journal_get_write_access(const char *where, handle_t *handle,
+ struct buffer_head *bh);
-static inline int
-__ext3_journal_revoke(const char *where, handle_t *handle,
- unsigned long blocknr, struct buffer_head *bh)
-{
- int err = journal_revoke(handle, blocknr, bh);
- if (err)
- ext3_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
- return err;
-}
+int __ext3_journal_forget(const char *where, handle_t *handle,
+ struct buffer_head *bh);
-static inline int
-__ext3_journal_get_create_access(const char *where,
- handle_t *handle, struct buffer_head *bh)
-{
- int err = journal_get_create_access(handle, bh);
- if (err)
- ext3_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
- return err;
-}
+int __ext3_journal_revoke(const char *where, handle_t *handle,
+ unsigned long blocknr, struct buffer_head *bh);
-static inline int
-__ext3_journal_dirty_metadata(const char *where,
- handle_t *handle, struct buffer_head *bh)
-{
- int err = journal_dirty_metadata(handle, bh);
- if (err)
- ext3_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
- return err;
-}
+int __ext3_journal_get_create_access(const char *where,
+ handle_t *handle, struct buffer_head *bh);
+int __ext3_journal_dirty_metadata(const char *where,
+ handle_t *handle, struct buffer_head *bh);
#define ext3_journal_get_undo_access(handle, bh) \
__ext3_journal_get_undo_access(__FUNCTION__, (handle), (bh))
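Editor's note: the wrappers keep their behaviour; only the bodies move out of line. For reference, one of them as it would look once relocated into fs/ext3 (the body below is the inline version removed above, essentially unchanged):

int __ext3_journal_get_write_access(const char *where, handle_t *handle,
				    struct buffer_head *bh)
{
	int err = journal_get_write_access(handle, bh);
	if (err)
		ext3_journal_abort_handle(where, __FUNCTION__, bh, handle, err);
	return err;
}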
diff --git a/include/linux/ext4_jbd2.h b/include/linux/ext4_jbd2.h
index 72dd631912e..d716e6392cf 100644
--- a/include/linux/ext4_jbd2.h
+++ b/include/linux/ext4_jbd2.h
@@ -114,74 +114,32 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode);
* been done yet.
*/
-void ext4_journal_abort_handle(const char *caller, const char *err_fn,
- struct buffer_head *bh, handle_t *handle, int err);
-
-static inline int
-__ext4_journal_get_undo_access(const char *where, handle_t *handle,
- struct buffer_head *bh)
+static inline void ext4_journal_release_buffer(handle_t *handle,
+ struct buffer_head *bh)
{
- int err = jbd2_journal_get_undo_access(handle, bh);
- if (err)
- ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
- return err;
+ jbd2_journal_release_buffer(handle, bh);
}
-static inline int
-__ext4_journal_get_write_access(const char *where, handle_t *handle,
- struct buffer_head *bh)
-{
- int err = jbd2_journal_get_write_access(handle, bh);
- if (err)
- ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
- return err;
-}
+void ext4_journal_abort_handle(const char *caller, const char *err_fn,
+ struct buffer_head *bh, handle_t *handle, int err);
-static inline void
-ext4_journal_release_buffer(handle_t *handle, struct buffer_head *bh)
-{
- jbd2_journal_release_buffer(handle, bh);
-}
+int __ext4_journal_get_undo_access(const char *where, handle_t *handle,
+ struct buffer_head *bh);
-static inline int
-__ext4_journal_forget(const char *where, handle_t *handle, struct buffer_head *bh)
-{
- int err = jbd2_journal_forget(handle, bh);
- if (err)
- ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
- return err;
-}
+int __ext4_journal_get_write_access(const char *where, handle_t *handle,
+ struct buffer_head *bh);
-static inline int
-__ext4_journal_revoke(const char *where, handle_t *handle,
- ext4_fsblk_t blocknr, struct buffer_head *bh)
-{
- int err = jbd2_journal_revoke(handle, blocknr, bh);
- if (err)
- ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
- return err;
-}
+int __ext4_journal_forget(const char *where, handle_t *handle,
+ struct buffer_head *bh);
-static inline int
-__ext4_journal_get_create_access(const char *where,
- handle_t *handle, struct buffer_head *bh)
-{
- int err = jbd2_journal_get_create_access(handle, bh);
- if (err)
- ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
- return err;
-}
+int __ext4_journal_revoke(const char *where, handle_t *handle,
+ ext4_fsblk_t blocknr, struct buffer_head *bh);
-static inline int
-__ext4_journal_dirty_metadata(const char *where,
- handle_t *handle, struct buffer_head *bh)
-{
- int err = jbd2_journal_dirty_metadata(handle, bh);
- if (err)
- ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
- return err;
-}
+int __ext4_journal_get_create_access(const char *where,
+ handle_t *handle, struct buffer_head *bh);
+int __ext4_journal_dirty_metadata(const char *where,
+ handle_t *handle, struct buffer_head *bh);
#define ext4_journal_get_undo_access(handle, bh) \
__ext4_journal_get_undo_access(__FUNCTION__, (handle), (bh))
diff --git a/include/linux/file.h b/include/linux/file.h
index 74183e6f7f4..6e77b9177f9 100644
--- a/include/linux/file.h
+++ b/include/linux/file.h
@@ -64,6 +64,8 @@ struct files_struct {
#define files_fdtable(files) (rcu_dereference((files)->fdt))
+extern struct kmem_cache *filp_cachep;
+
extern void FASTCALL(__fput(struct file *));
extern void FASTCALL(fput(struct file *));
@@ -114,4 +116,6 @@ struct files_struct *get_files_struct(struct task_struct *);
void FASTCALL(put_files_struct(struct files_struct *fs));
void reset_files_struct(struct task_struct *, struct files_struct *);
+extern struct kmem_cache *files_cachep;
+
#endif /* __LINUX_FILE_H */
diff --git a/include/linux/freezer.h b/include/linux/freezer.h
new file mode 100644
index 00000000000..6e05e3e7ce3
--- /dev/null
+++ b/include/linux/freezer.h
@@ -0,0 +1,87 @@
+/* Freezer declarations */
+
+#ifdef CONFIG_PM
+/*
+ * Check if a process has been frozen
+ */
+static inline int frozen(struct task_struct *p)
+{
+ return p->flags & PF_FROZEN;
+}
+
+/*
+ * Check if there is a request to freeze a process
+ */
+static inline int freezing(struct task_struct *p)
+{
+ return p->flags & PF_FREEZE;
+}
+
+/*
+ * Request that a process be frozen
+ * FIXME: SMP problem. We may not modify other process' flags!
+ */
+static inline void freeze(struct task_struct *p)
+{
+ p->flags |= PF_FREEZE;
+}
+
+/*
+ * Sometimes we may need to cancel the previous 'freeze' request
+ */
+static inline void do_not_freeze(struct task_struct *p)
+{
+ p->flags &= ~PF_FREEZE;
+}
+
+/*
+ * Wake up a frozen process
+ */
+static inline int thaw_process(struct task_struct *p)
+{
+ if (frozen(p)) {
+ p->flags &= ~PF_FROZEN;
+ wake_up_process(p);
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * freezing is complete, mark process as frozen
+ */
+static inline void frozen_process(struct task_struct *p)
+{
+ p->flags = (p->flags & ~PF_FREEZE) | PF_FROZEN;
+}
+
+extern void refrigerator(void);
+extern int freeze_processes(void);
+extern void thaw_processes(void);
+
+static inline int try_to_freeze(void)
+{
+ if (freezing(current)) {
+ refrigerator();
+ return 1;
+ } else
+ return 0;
+}
+
+extern void thaw_some_processes(int all);
+
+#else
+static inline int frozen(struct task_struct *p) { return 0; }
+static inline int freezing(struct task_struct *p) { return 0; }
+static inline void freeze(struct task_struct *p) { BUG(); }
+static inline int thaw_process(struct task_struct *p) { return 1; }
+static inline void frozen_process(struct task_struct *p) { BUG(); }
+
+static inline void refrigerator(void) {}
+static inline int freeze_processes(void) { BUG(); return 0; }
+static inline void thaw_processes(void) {}
+
+static inline int try_to_freeze(void) { return 0; }
+
+
+#endif
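Editor's note: the header collects the freezer interface that kernel threads already use; the usual cooperation pattern looks roughly like this (the thread body is illustrative):

static int example_kthread(void *unused)
{
	while (!kthread_should_stop()) {
		try_to_freeze();	/* park in refrigerator() if asked */
		/* ... do one unit of work ... */
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}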
diff --git a/include/linux/fs.h b/include/linux/fs.h
index cac7b1ef954..70b99fbb560 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -543,19 +543,22 @@ struct inode {
struct list_head i_dentry;
unsigned long i_ino;
atomic_t i_count;
- umode_t i_mode;
unsigned int i_nlink;
uid_t i_uid;
gid_t i_gid;
dev_t i_rdev;
+ unsigned long i_version;
loff_t i_size;
+#ifdef __NEED_I_SIZE_ORDERED
+ seqcount_t i_size_seqcount;
+#endif
struct timespec i_atime;
struct timespec i_mtime;
struct timespec i_ctime;
unsigned int i_blkbits;
- unsigned long i_version;
blkcnt_t i_blocks;
unsigned short i_bytes;
+ umode_t i_mode;
spinlock_t i_lock; /* i_blocks, i_bytes, maybe i_size */
struct mutex i_mutex;
struct rw_semaphore i_alloc_sem;
@@ -598,9 +601,6 @@ struct inode {
void *i_security;
#endif
void *i_private; /* fs or device private pointer */
-#ifdef __NEED_I_SIZE_ORDERED
- seqcount_t i_size_seqcount;
-#endif
};
/*
@@ -636,7 +636,7 @@ extern void inode_double_unlock(struct inode *inode1, struct inode *inode2);
* cmpxchg8b without the need of the lock prefix). For SMP compiles
* and 64bit archs it makes no difference if preempt is enabled or not.
*/
-static inline loff_t i_size_read(struct inode *inode)
+static inline loff_t i_size_read(const struct inode *inode)
{
#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
loff_t i_size;
@@ -679,12 +679,12 @@ static inline void i_size_write(struct inode *inode, loff_t i_size)
#endif
}
-static inline unsigned iminor(struct inode *inode)
+static inline unsigned iminor(const struct inode *inode)
{
return MINOR(inode->i_rdev);
}
-static inline unsigned imajor(struct inode *inode)
+static inline unsigned imajor(const struct inode *inode)
{
return MAJOR(inode->i_rdev);
}
@@ -1481,7 +1481,9 @@ extern char * getname(const char __user *);
extern void __init vfs_caches_init_early(void);
extern void __init vfs_caches_init(unsigned long);
-#define __getname() kmem_cache_alloc(names_cachep, SLAB_KERNEL)
+extern struct kmem_cache *names_cachep;
+
+#define __getname() kmem_cache_alloc(names_cachep, GFP_KERNEL)
#define __putname(name) kmem_cache_free(names_cachep, (void *)(name))
#ifndef CONFIG_AUDITSYSCALL
#define putname(name) __putname(name)
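Editor's note: the __getname() change swaps the stale SLAB_KERNEL alias for GFP_KERNEL; callers are unaffected. A minimal sketch of the usual pairing (error handling trimmed, helper name illustrative):

static char *example_copy_path(const char __user *upath)
{
	char *name = __getname();	/* names_cachep, GFP_KERNEL */
	long len;

	if (!name)
		return ERR_PTR(-ENOMEM);
	len = strncpy_from_user(name, upath, PATH_MAX);
	if (len < 0) {
		__putname(name);
		return ERR_PTR(len);
	}
	return name;
}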
diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
index c623d12a486..11a36ceddf7 100644
--- a/include/linux/fs_struct.h
+++ b/include/linux/fs_struct.h
@@ -18,6 +18,8 @@ struct fs_struct {
.umask = 0022, \
}
+extern struct kmem_cache *fs_cachep;
+
extern void exit_fs(struct task_struct *);
extern void set_fs_altroot(void);
extern void set_fs_root(struct fs_struct *, struct vfsmount *, struct dentry *);
diff --git a/include/linux/fuse.h b/include/linux/fuse.h
index 9fc48a674b8..534744efe30 100644
--- a/include/linux/fuse.h
+++ b/include/linux/fuse.h
@@ -15,7 +15,7 @@
#define FUSE_KERNEL_VERSION 7
/** Minor version number of this interface */
-#define FUSE_KERNEL_MINOR_VERSION 7
+#define FUSE_KERNEL_MINOR_VERSION 8
/** The node ID of the root inode */
#define FUSE_ROOT_ID 1
@@ -92,6 +92,11 @@ struct fuse_file_lock {
#define FUSE_ASYNC_READ (1 << 0)
#define FUSE_POSIX_LOCKS (1 << 1)
+/**
+ * Release flags
+ */
+#define FUSE_RELEASE_FLUSH (1 << 0)
+
enum fuse_opcode {
FUSE_LOOKUP = 1,
FUSE_FORGET = 2, /* no reply */
@@ -127,6 +132,8 @@ enum fuse_opcode {
FUSE_ACCESS = 34,
FUSE_CREATE = 35,
FUSE_INTERRUPT = 36,
+ FUSE_BMAP = 37,
+ FUSE_DESTROY = 38,
};
/* The read buffer is required to be at least 8k, but may be much larger */
@@ -205,12 +212,13 @@ struct fuse_open_out {
struct fuse_release_in {
__u64 fh;
__u32 flags;
- __u32 padding;
+ __u32 release_flags;
+ __u64 lock_owner;
};
struct fuse_flush_in {
__u64 fh;
- __u32 flush_flags;
+ __u32 unused;
__u32 padding;
__u64 lock_owner;
};
@@ -296,6 +304,16 @@ struct fuse_interrupt_in {
__u64 unique;
};
+struct fuse_bmap_in {
+ __u64 block;
+ __u32 blocksize;
+ __u32 padding;
+};
+
+struct fuse_bmap_out {
+ __u64 block;
+};
+
struct fuse_in_header {
__u32 len;
__u32 opcode;
diff --git a/include/linux/genetlink.h b/include/linux/genetlink.h
index 9049dc65ae5..f7a93770e1b 100644
--- a/include/linux/genetlink.h
+++ b/include/linux/genetlink.h
@@ -17,6 +17,9 @@ struct genlmsghdr {
#define GENL_HDRLEN NLMSG_ALIGN(sizeof(struct genlmsghdr))
#define GENL_ADMIN_PERM 0x01
+#define GENL_CMD_CAP_DO 0x02
+#define GENL_CMD_CAP_DUMP 0x04
+#define GENL_CMD_CAP_HASPOL 0x08
/*
* List of reserved static generic netlink identifiers:
@@ -58,9 +61,6 @@ enum {
CTRL_ATTR_OP_UNSPEC,
CTRL_ATTR_OP_ID,
CTRL_ATTR_OP_FLAGS,
- CTRL_ATTR_OP_POLICY,
- CTRL_ATTR_OP_DOIT,
- CTRL_ATTR_OP_DUMPIT,
__CTRL_ATTR_OP_MAX,
};
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index bf2b6bc3f6f..00c314aedab 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -116,6 +116,9 @@ static inline enum zone_type gfp_zone(gfp_t flags)
#ifndef HAVE_ARCH_FREE_PAGE
static inline void arch_free_page(struct page *page, int order) { }
#endif
+#ifndef HAVE_ARCH_ALLOC_PAGE
+static inline void arch_alloc_page(struct page *page, int order) { }
+#endif
extern struct page *
FASTCALL(__alloc_pages(gfp_t, unsigned int, struct zonelist *));
diff --git a/include/linux/gfs2_ondisk.h b/include/linux/gfs2_ondisk.h
index a7ae7c177ca..8b7e4c1e32a 100644
--- a/include/linux/gfs2_ondisk.h
+++ b/include/linux/gfs2_ondisk.h
@@ -54,8 +54,13 @@ struct gfs2_inum {
__be64 no_addr;
};
-static inline int gfs2_inum_equal(const struct gfs2_inum *ino1,
- const struct gfs2_inum *ino2)
+struct gfs2_inum_host {
+ __u64 no_formal_ino;
+ __u64 no_addr;
+};
+
+static inline int gfs2_inum_equal(const struct gfs2_inum_host *ino1,
+ const struct gfs2_inum_host *ino2)
{
return ino1->no_formal_ino == ino2->no_formal_ino &&
ino1->no_addr == ino2->no_addr;
@@ -89,6 +94,12 @@ struct gfs2_meta_header {
__be32 __pad1; /* Was incarnation number in gfs1 */
};
+struct gfs2_meta_header_host {
+ __u32 mh_magic;
+ __u32 mh_type;
+ __u32 mh_format;
+};
+
/*
* super-block structure
*
@@ -128,6 +139,23 @@ struct gfs2_sb {
/* In gfs1, quota and license dinodes followed */
};
+struct gfs2_sb_host {
+ struct gfs2_meta_header_host sb_header;
+
+ __u32 sb_fs_format;
+ __u32 sb_multihost_format;
+
+ __u32 sb_bsize;
+ __u32 sb_bsize_shift;
+
+ struct gfs2_inum_host sb_master_dir; /* Was jindex dinode in gfs1 */
+ struct gfs2_inum_host sb_root_dir;
+
+ char sb_lockproto[GFS2_LOCKNAME_LEN];
+ char sb_locktable[GFS2_LOCKNAME_LEN];
+ /* In gfs1, quota and license dinodes followed */
+};
+
/*
* resource index structure
*/
@@ -145,6 +173,14 @@ struct gfs2_rindex {
__u8 ri_reserved[64];
};
+struct gfs2_rindex_host {
+ __u64 ri_addr; /* grp block disk address */
+ __u64 ri_data0; /* first data location */
+ __u32 ri_length; /* length of rgrp header in fs blocks */
+ __u32 ri_data; /* num of data blocks in rgrp */
+ __u32 ri_bitbytes; /* number of bytes in data bitmaps */
+};
+
/*
* resource group header structure
*/
@@ -176,6 +212,13 @@ struct gfs2_rgrp {
__u8 rg_reserved[80]; /* Several fields from gfs1 now reserved */
};
+struct gfs2_rgrp_host {
+ __u32 rg_flags;
+ __u32 rg_free;
+ __u32 rg_dinodes;
+ __u64 rg_igeneration;
+};
+
/*
* quota structure
*/
@@ -187,6 +230,12 @@ struct gfs2_quota {
__u8 qu_reserved[64];
};
+struct gfs2_quota_host {
+ __u64 qu_limit;
+ __u64 qu_warn;
+ __u64 qu_value;
+};
+
/*
* dinode structure
*/
@@ -270,6 +319,27 @@ struct gfs2_dinode {
__u8 di_reserved[56];
};
+struct gfs2_dinode_host {
+ __u64 di_size; /* number of bytes in file */
+ __u64 di_blocks; /* number of blocks in file */
+
+ /* This section varies from gfs1. Padding added to align with
+ * remainder of dinode
+ */
+ __u64 di_goal_meta; /* rgrp to alloc from next */
+ __u64 di_goal_data; /* data block goal */
+ __u64 di_generation; /* generation number for NFS */
+
+ __u32 di_flags; /* GFS2_DIF_... */
+ __u16 di_height; /* height of metadata */
+
+ /* These only apply to directories */
+ __u16 di_depth; /* Number of bits in the table */
+ __u32 di_entries; /* The number of entries in the directory */
+
+ __u64 di_eattr; /* extended attribute block number */
+};
+
/*
* directory structure - many of these per directory file
*/
@@ -344,6 +414,16 @@ struct gfs2_log_header {
__be32 lh_hash;
};
+struct gfs2_log_header_host {
+ struct gfs2_meta_header_host lh_header;
+
+ __u64 lh_sequence; /* Sequence number of this transaction */
+ __u32 lh_flags; /* GFS2_LOG_HEAD_... */
+ __u32 lh_tail; /* Block number of log tail */
+ __u32 lh_blkno;
+ __u32 lh_hash;
+};
+
/*
* Log type descriptor
*/
@@ -384,6 +464,11 @@ struct gfs2_inum_range {
__be64 ir_length;
};
+struct gfs2_inum_range_host {
+ __u64 ir_start;
+ __u64 ir_length;
+};
+
/*
* Statfs change
 * Describes a change to the pool of free and allocated
@@ -396,6 +481,12 @@ struct gfs2_statfs_change {
__be64 sc_dinodes;
};
+struct gfs2_statfs_change_host {
+ __u64 sc_total;
+ __u64 sc_free;
+ __u64 sc_dinodes;
+};
+
/*
* Quota change
* Describes an allocation change for a particular
@@ -410,33 +501,38 @@ struct gfs2_quota_change {
__be32 qc_id;
};
+struct gfs2_quota_change_host {
+ __u64 qc_change;
+ __u32 qc_flags; /* GFS2_QCF_... */
+ __u32 qc_id;
+};
+
#ifdef __KERNEL__
/* Translation functions */
-extern void gfs2_inum_in(struct gfs2_inum *no, const void *buf);
-extern void gfs2_inum_out(const struct gfs2_inum *no, void *buf);
-extern void gfs2_sb_in(struct gfs2_sb *sb, const void *buf);
-extern void gfs2_rindex_in(struct gfs2_rindex *ri, const void *buf);
-extern void gfs2_rindex_out(const struct gfs2_rindex *ri, void *buf);
-extern void gfs2_rgrp_in(struct gfs2_rgrp *rg, const void *buf);
-extern void gfs2_rgrp_out(const struct gfs2_rgrp *rg, void *buf);
-extern void gfs2_quota_in(struct gfs2_quota *qu, const void *buf);
-extern void gfs2_quota_out(const struct gfs2_quota *qu, void *buf);
-extern void gfs2_dinode_in(struct gfs2_dinode *di, const void *buf);
-extern void gfs2_dinode_out(const struct gfs2_dinode *di, void *buf);
+extern void gfs2_inum_in(struct gfs2_inum_host *no, const void *buf);
+extern void gfs2_inum_out(const struct gfs2_inum_host *no, void *buf);
+extern void gfs2_sb_in(struct gfs2_sb_host *sb, const void *buf);
+extern void gfs2_rindex_in(struct gfs2_rindex_host *ri, const void *buf);
+extern void gfs2_rindex_out(const struct gfs2_rindex_host *ri, void *buf);
+extern void gfs2_rgrp_in(struct gfs2_rgrp_host *rg, const void *buf);
+extern void gfs2_rgrp_out(const struct gfs2_rgrp_host *rg, void *buf);
+extern void gfs2_quota_in(struct gfs2_quota_host *qu, const void *buf);
+struct gfs2_inode;
+extern void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf);
extern void gfs2_ea_header_in(struct gfs2_ea_header *ea, const void *buf);
extern void gfs2_ea_header_out(const struct gfs2_ea_header *ea, void *buf);
-extern void gfs2_log_header_in(struct gfs2_log_header *lh, const void *buf);
-extern void gfs2_inum_range_in(struct gfs2_inum_range *ir, const void *buf);
-extern void gfs2_inum_range_out(const struct gfs2_inum_range *ir, void *buf);
-extern void gfs2_statfs_change_in(struct gfs2_statfs_change *sc, const void *buf);
-extern void gfs2_statfs_change_out(const struct gfs2_statfs_change *sc, void *buf);
-extern void gfs2_quota_change_in(struct gfs2_quota_change *qc, const void *buf);
+extern void gfs2_log_header_in(struct gfs2_log_header_host *lh, const void *buf);
+extern void gfs2_inum_range_in(struct gfs2_inum_range_host *ir, const void *buf);
+extern void gfs2_inum_range_out(const struct gfs2_inum_range_host *ir, void *buf);
+extern void gfs2_statfs_change_in(struct gfs2_statfs_change_host *sc, const void *buf);
+extern void gfs2_statfs_change_out(const struct gfs2_statfs_change_host *sc, void *buf);
+extern void gfs2_quota_change_in(struct gfs2_quota_change_host *qc, const void *buf);
/* Printing functions */
-extern void gfs2_rindex_print(const struct gfs2_rindex *ri);
-extern void gfs2_dinode_print(const struct gfs2_dinode *di);
+extern void gfs2_rindex_print(const struct gfs2_rindex_host *ri);
+extern void gfs2_dinode_print(const struct gfs2_inode *ip);
#endif /* __KERNEL__ */
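Editor's note: the split into on-disk (__be64/__be32) and *_host (native __u64/__u32) structures implies a byte-swapping translation layer. A hedged sketch of what gfs2_inum_in() is expected to do, derived from the two struct layouts above (the real body lives in fs/gfs2):

void gfs2_inum_in(struct gfs2_inum_host *no, const void *buf)
{
	const struct gfs2_inum *str = buf;	/* raw on-disk layout */

	no->no_formal_ino = be64_to_cpu(str->no_formal_ino);
	no->no_addr = be64_to_cpu(str->no_addr);
}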
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index fd7d12daa94..3d8768b619e 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -3,6 +3,7 @@
#include <linux/fs.h>
#include <linux/mm.h>
+#include <linux/uaccess.h>
#include <asm/cacheflush.h>
@@ -41,9 +42,10 @@ static inline void *kmap(struct page *page)
#define kunmap(page) do { (void) (page); } while (0)
-#define kmap_atomic(page, idx) page_address(page)
-#define kunmap_atomic(addr, idx) do { } while (0)
-#define kmap_atomic_pfn(pfn, idx) page_address(pfn_to_page(pfn))
+#define kmap_atomic(page, idx) \
+ ({ pagefault_disable(); page_address(page); })
+#define kunmap_atomic(addr, idx) do { pagefault_enable(); } while (0)
+#define kmap_atomic_pfn(pfn, idx) kmap_atomic(pfn_to_page(pfn), (idx))
#define kmap_atomic_to_page(ptr) virt_to_page(ptr)
#endif
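Editor's note: with pagefault_disable()/pagefault_enable() now in the !CONFIG_HIGHMEM stubs, the pairing rules are the same on every configuration. The usual pattern, for reference:

static void example_zero_page(struct page *page)
{
	void *addr = kmap_atomic(page, KM_USER0);

	memset(addr, 0, PAGE_SIZE);
	kunmap_atomic(addr, KM_USER0);	/* re-enables pagefaults */
}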
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index ace64e57e17..a60995afe33 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -35,6 +35,7 @@ extern int sysctl_hugetlb_shm_group;
pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr);
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr);
+int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
int write);
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
diff --git a/include/linux/i2o.h b/include/linux/i2o.h
index 1fb02e17f6f..52f53e2e70c 100644
--- a/include/linux/i2o.h
+++ b/include/linux/i2o.h
@@ -490,7 +490,7 @@ struct i2o_dma {
*/
struct i2o_pool {
char *name;
- kmem_cache_t *slab;
+ struct kmem_cache *slab;
mempool_t *mempool;
};
@@ -986,7 +986,8 @@ extern void i2o_driver_unregister(struct i2o_driver *);
/**
* i2o_driver_notify_controller_add - Send notification of added controller
- * to a single I2O driver
+ * @drv: I2O driver
+ * @c: I2O controller
*
* Send notification of added controller to a single registered driver.
*/
@@ -998,8 +999,9 @@ static inline void i2o_driver_notify_controller_add(struct i2o_driver *drv,
};
/**
- * i2o_driver_notify_controller_remove - Send notification of removed
- * controller to a single I2O driver
+ * i2o_driver_notify_controller_remove - Send notification of removed controller
+ * @drv: I2O driver
+ * @c: I2O controller
*
* Send notification of removed controller to a single registered driver.
*/
@@ -1011,8 +1013,9 @@ static inline void i2o_driver_notify_controller_remove(struct i2o_driver *drv,
};
/**
- * i2o_driver_notify_device_add - Send notification of added device to a
- * single I2O driver
+ * i2o_driver_notify_device_add - Send notification of added device
+ * @drv: I2O driver
+ * @i2o_dev: the added i2o_device
*
* Send notification of added device to a single registered driver.
*/
@@ -1025,7 +1028,8 @@ static inline void i2o_driver_notify_device_add(struct i2o_driver *drv,
/**
* i2o_driver_notify_device_remove - Send notification of removed device
- * to a single I2O driver
+ * @drv: I2O driver
+ * @i2o_dev: the added i2o_device
*
* Send notification of removed device to a single registered driver.
*/
@@ -1148,7 +1152,7 @@ static inline void i2o_msg_post(struct i2o_controller *c,
/**
* i2o_msg_post_wait - Post and wait a message and wait until return
* @c: controller
- * @m: message to post
+ * @msg: message to post
* @timeout: time in seconds to wait
*
* This API allows an OSM to post a message and then be told whether or
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 33c5daacc74..733790d4f7d 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -73,7 +73,7 @@
extern struct nsproxy init_nsproxy;
#define INIT_NSPROXY(nsproxy) { \
.count = ATOMIC_INIT(1), \
- .nslock = SPIN_LOCK_UNLOCKED, \
+ .nslock = __SPIN_LOCK_UNLOCKED(nsproxy.nslock), \
.uts_ns = &init_uts_ns, \
.namespace = NULL, \
INIT_IPC_NS(ipc_ns) \
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 5b83e7b5962..de7593f4e89 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -11,6 +11,7 @@
#include <linux/hardirq.h>
#include <linux/sched.h>
#include <linux/irqflags.h>
+#include <linux/bottom_half.h>
#include <asm/atomic.h>
#include <asm/ptrace.h>
#include <asm/system.h>
@@ -217,12 +218,6 @@ static inline void __deprecated save_and_cli(unsigned long *x)
#define save_and_cli(x) save_and_cli(&x)
#endif /* CONFIG_SMP */
-extern void local_bh_disable(void);
-extern void __local_bh_enable(void);
-extern void _local_bh_enable(void);
-extern void local_bh_enable(void);
-extern void local_bh_enable_ip(unsigned long ip);
-
/* PLEASE, avoid to allocate new softirqs, if you need not _really_ high
frequency threaded job scheduling. For almost all the purposes
tasklets are more than enough. F.e. all serial device BHs et
diff --git a/include/linux/ipmi.h b/include/linux/ipmi.h
index 796ca009fd4..7a9db390c56 100644
--- a/include/linux/ipmi.h
+++ b/include/linux/ipmi.h
@@ -208,6 +208,15 @@ struct kernel_ipmi_msg
code as the first byte of the incoming data, unlike a response. */
+/*
+ * Modes for ipmi_set_maint_mode() and the userland IOCTL. The AUTO
+ * setting is the default and means it will be set on certain
+ * commands. Hard setting it on and off will override automatic
+ * operation.
+ */
+#define IPMI_MAINTENANCE_MODE_AUTO 0
+#define IPMI_MAINTENANCE_MODE_OFF 1
+#define IPMI_MAINTENANCE_MODE_ON 2
#ifdef __KERNEL__
@@ -374,6 +383,35 @@ int ipmi_unregister_for_cmd(ipmi_user_t user,
unsigned int chans);
/*
+ * Go into a mode where the driver will not autonomously attempt to do
+ * things with the interface. It will still respond to attentions and
+ * interrupts, and it will expect that commands will complete. It
+ * will not automatically check for flags, events, or things of that
+ * nature.
+ *
+ * This is primarily used for firmware upgrades. The idea is that
+ * when you go into firmware upgrade mode, you do this operation
+ * and the driver will not attempt to do anything but what you tell
+ * it or what the BMC asks for.
+ *
+ * Note that if you send a command that resets the BMC, the driver
+ * will still expect a response from that command. So the BMC should
+ * reset itself *after* the response is sent. Resetting before the
+ * response is just silly.
+ *
+ * If in auto maintenance mode, the driver will automatically go into
+ * maintenance mode for 30 seconds if it sees a cold reset, a warm
+ * reset, or a firmware NetFN. This means that code that uses only
+ * firmware NetFN commands to do upgrades will work automatically
+ * without change, assuming it sends a message every 30 seconds or
+ * less.
+ *
+ * See the IPMI_MAINTENANCE_MODE_xxx defines for what the mode means.
+ */
+int ipmi_get_maintenance_mode(ipmi_user_t user);
+int ipmi_set_maintenance_mode(ipmi_user_t user, int mode);
+
+/*
* Allow run-to-completion mode to be set for the interface of
* a specific user.
*/
@@ -656,4 +694,11 @@ struct ipmi_timing_parms
#define IPMICTL_GET_TIMING_PARMS_CMD _IOR(IPMI_IOC_MAGIC, 23, \
struct ipmi_timing_parms)
+/*
+ * Set the maintenance mode. See ipmi_set_maintenance_mode() above
+ * for a description of what this does.
+ */
+#define IPMICTL_GET_MAINTENANCE_MODE_CMD _IOR(IPMI_IOC_MAGIC, 30, int)
+#define IPMICTL_SET_MAINTENANCE_MODE_CMD _IOW(IPMI_IOC_MAGIC, 31, int)
+
#endif /* __LINUX_IPMI_H */
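Editor's note: a userspace sketch of driving the new ioctl before a firmware upgrade (fd is an already-open /dev/ipmi* descriptor; error handling trimmed):

#include <sys/ioctl.h>
#include <linux/ipmi.h>

static int example_force_maintenance(int fd)
{
	int mode = IPMI_MAINTENANCE_MODE_ON;

	return ioctl(fd, IPMICTL_SET_MAINTENANCE_MODE_CMD, &mode);
}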
diff --git a/include/linux/ipmi_msgdefs.h b/include/linux/ipmi_msgdefs.h
index 4d04d8b58a0..b56a158d587 100644
--- a/include/linux/ipmi_msgdefs.h
+++ b/include/linux/ipmi_msgdefs.h
@@ -46,6 +46,8 @@
#define IPMI_NETFN_APP_REQUEST 0x06
#define IPMI_NETFN_APP_RESPONSE 0x07
#define IPMI_GET_DEVICE_ID_CMD 0x01
+#define IPMI_COLD_RESET_CMD 0x02
+#define IPMI_WARM_RESET_CMD 0x03
#define IPMI_CLEAR_MSG_FLAGS_CMD 0x30
#define IPMI_GET_DEVICE_GUID_CMD 0x08
#define IPMI_GET_MSG_FLAGS_CMD 0x31
@@ -60,20 +62,27 @@
#define IPMI_NETFN_STORAGE_RESPONSE 0x0b
#define IPMI_ADD_SEL_ENTRY_CMD 0x44
+#define IPMI_NETFN_FIRMWARE_REQUEST 0x08
+#define IPMI_NETFN_FIRMWARE_RESPONSE 0x09
+
/* The default slave address */
#define IPMI_BMC_SLAVE_ADDR 0x20
/* The BT interface on high-end HP systems supports up to 255 bytes in
* one transfer. Its "virtual" BMC supports some commands that are longer
* than 128 bytes. Use the full 256, plus NetFn/LUN, Cmd, cCode, plus
- * some overhead. It would be nice to base this on the "BT Capabilities"
- * but that's too hard to propagate to the rest of the driver. */
+ * some overhead; it's not worth the effort to dynamically size this based
+ * on the results of the "Get BT Capabilities" command. */
#define IPMI_MAX_MSG_LENGTH 272 /* multiple of 16 */
#define IPMI_CC_NO_ERROR 0x00
#define IPMI_NODE_BUSY_ERR 0xc0
#define IPMI_INVALID_COMMAND_ERR 0xc1
+#define IPMI_TIMEOUT_ERR 0xc3
#define IPMI_ERR_MSG_TRUNCATED 0xc6
+#define IPMI_REQ_LEN_INVALID_ERR 0xc7
+#define IPMI_REQ_LEN_EXCEEDED_ERR 0xc8
+#define IPMI_NOT_IN_MY_STATE_ERR 0xd5 /* IPMI 2.0 */
#define IPMI_LOST_ARBITRATION_ERR 0x81
#define IPMI_BUS_ERR 0x82
#define IPMI_NAK_ON_WRITE_ERR 0x83
diff --git a/include/linux/ipmi_smi.h b/include/linux/ipmi_smi.h
index 6d9c7e4da47..c0633108d05 100644
--- a/include/linux/ipmi_smi.h
+++ b/include/linux/ipmi_smi.h
@@ -115,6 +115,13 @@ struct ipmi_smi_handlers
poll for operations during things like crash dumps. */
void (*poll)(void *send_info);
+ /* Enable/disable firmware maintenance mode. Note that this
+ is *not* one of the IPMI_MAINTENANCE_MODE_xxx values defined
+ above, it is simply an on/off setting. The message handler
+ does the mode handling. Note that this is called from
+ interrupt context, so it cannot block. */
+ void (*set_maintenance_mode)(void *send_info, int enable);
+
/* Tell the handler that we are using it/not using it. The
message handler get the modules that this handler belongs
to; this function lets the SMI claim any modules that it
@@ -173,6 +180,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
void *send_info,
struct ipmi_device_id *device_id,
struct device *dev,
+ const char *sysfs_name,
unsigned char slave_addr);
/*
diff --git a/include/linux/jbd.h b/include/linux/jbd.h
index fe89444b1c6..45273755126 100644
--- a/include/linux/jbd.h
+++ b/include/linux/jbd.h
@@ -839,7 +839,6 @@ struct journal_s
*/
/* Filing buffers */
-extern void __journal_temp_unlink_buffer(struct journal_head *jh);
extern void journal_unfile_buffer(journal_t *, struct journal_head *);
extern void __journal_unfile_buffer(struct journal_head *);
extern void __journal_refile_buffer(struct journal_head *);
@@ -949,7 +948,7 @@ void journal_put_journal_head(struct journal_head *jh);
/*
* handle management
*/
-extern kmem_cache_t *jbd_handle_cache;
+extern struct kmem_cache *jbd_handle_cache;
static inline handle_t *jbd_alloc_handle(gfp_t gfp_flags)
{
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index ddb12879578..0e0fedd2039 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -848,7 +848,6 @@ struct journal_s
*/
/* Filing buffers */
-extern void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh);
extern void jbd2_journal_unfile_buffer(journal_t *, struct journal_head *);
extern void __jbd2_journal_unfile_buffer(struct journal_head *);
extern void __jbd2_journal_refile_buffer(struct journal_head *);
@@ -958,7 +957,7 @@ void jbd2_journal_put_journal_head(struct journal_head *jh);
/*
* handle management
*/
-extern kmem_cache_t *jbd2_handle_cache;
+extern struct kmem_cache *jbd2_handle_cache;
static inline handle_t *jbd_alloc_handle(gfp_t gfp_flags)
{
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index a4ede62b339..e3abcec6c51 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -105,6 +105,7 @@ extern struct page *kimage_alloc_control_pages(struct kimage *image,
unsigned int order);
extern void crash_kexec(struct pt_regs *);
int kexec_should_crash(struct task_struct *);
+void crash_save_cpu(struct pt_regs *regs, int cpu);
extern struct kimage *kexec_image;
extern struct kimage *kexec_crash_image;
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index ac4c0559f75..769be39b968 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -165,7 +165,7 @@ extern void arch_disarm_kprobe(struct kprobe *p);
extern int arch_init_kprobes(void);
extern void show_registers(struct pt_regs *regs);
extern kprobe_opcode_t *get_insn_slot(void);
-extern void free_insn_slot(kprobe_opcode_t *slot);
+extern void free_insn_slot(kprobe_opcode_t *slot, int dirty);
extern void kprobes_inc_nmissed_count(struct kprobe *p);
/* Get the kprobe at this addr (if any) - called with preemption disabled */
diff --git a/include/linux/ktime.h b/include/linux/ktime.h
index 84eeecd60a0..611f17f79ee 100644
--- a/include/linux/ktime.h
+++ b/include/linux/ktime.h
@@ -248,9 +248,9 @@ static inline struct timeval ktime_to_timeval(const ktime_t kt)
*
* Returns the scalar nanoseconds representation of kt
*/
-static inline u64 ktime_to_ns(const ktime_t kt)
+static inline s64 ktime_to_ns(const ktime_t kt)
{
- return (u64) kt.tv.sec * NSEC_PER_SEC + kt.tv.nsec;
+ return (s64) kt.tv.sec * NSEC_PER_SEC + kt.tv.nsec;
}
#endif
diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h
index 862d9730a60..8c39654549d 100644
--- a/include/linux/lockd/lockd.h
+++ b/include/linux/lockd/lockd.h
@@ -164,14 +164,12 @@ void nlmclnt_next_cookie(struct nlm_cookie *);
*/
struct nlm_host * nlmclnt_lookup_host(const struct sockaddr_in *, int, int, const char *, int);
struct nlm_host * nlmsvc_lookup_host(struct svc_rqst *, const char *, int);
-struct nlm_host * nlm_lookup_host(int server, const struct sockaddr_in *, int, int, const char *, int);
struct rpc_clnt * nlm_bind_host(struct nlm_host *);
void nlm_rebind_host(struct nlm_host *);
struct nlm_host * nlm_get_host(struct nlm_host *);
void nlm_release_host(struct nlm_host *);
void nlm_shutdown_hosts(void);
extern void nlm_host_rebooted(const struct sockaddr_in *, const char *, int, u32);
-struct nsm_handle *nsm_find(const struct sockaddr_in *, const char *, int);
void nsm_release(struct nsm_handle *);
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 819f08f1310..498bfbd3b4e 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -193,7 +193,6 @@ extern void lockdep_free_key_range(void *start, unsigned long size);
extern void lockdep_off(void);
extern void lockdep_on(void);
-extern int lockdep_internal(void);
/*
* These methods are used by specific locking variants (spinlocks,
@@ -243,6 +242,8 @@ extern void lock_release(struct lockdep_map *lock, int nested,
# define INIT_LOCKDEP .lockdep_recursion = 0,
+#define lockdep_depth(tsk) ((tsk)->lockdep_depth)
+
#else /* !LOCKDEP */
static inline void lockdep_off(void)
@@ -253,11 +254,6 @@ static inline void lockdep_on(void)
{
}
-static inline int lockdep_internal(void)
-{
- return 0;
-}
-
# define lock_acquire(l, s, t, r, c, i) do { } while (0)
# define lock_release(l, n, i) do { } while (0)
# define lockdep_init() do { } while (0)
@@ -277,6 +273,9 @@ static inline int lockdep_internal(void)
* The class key takes no space if lockdep is disabled:
*/
struct lock_class_key { };
+
+#define lockdep_depth(tsk) (0)
+
#endif /* !LOCKDEP */
#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_GENERIC_HARDIRQS)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index d538de90196..a17b147c61e 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -114,6 +114,8 @@ struct vm_area_struct {
#endif
};
+extern struct kmem_cache *vm_area_cachep;
+
/*
* This struct defines the per-mm list of VMAs for uClinux. If CONFIG_MMU is
* disabled, then there's a single shared list of VMAs maintained by the
@@ -294,6 +296,24 @@ void put_pages_list(struct list_head *pages);
void split_page(struct page *page, unsigned int order);
/*
+ * Compound pages have a destructor function. Provide a
+ * prototype for that function and accessor functions.
+ * These are _only_ valid on the head of a PG_compound page.
+ */
+typedef void compound_page_dtor(struct page *);
+
+static inline void set_compound_page_dtor(struct page *page,
+ compound_page_dtor *dtor)
+{
+ page[1].lru.next = (void *)dtor;
+}
+
+static inline compound_page_dtor *get_compound_page_dtor(struct page *page)
+{
+ return (compound_page_dtor *)page[1].lru.next;
+}
+
+/*
* Multiple processes may "see" the same page. E.g. for untouched
* mappings of /dev/null, all processes see the same page full of
* zeroes, and text pages of executables and shared libraries have
@@ -396,7 +416,9 @@ void split_page(struct page *page, unsigned int order);
* We are going to use the flags for the page to node mapping if its in
* there. This includes the case where there is no node, so it is implicit.
*/
-#define FLAGS_HAS_NODE (NODES_WIDTH > 0 || NODES_SHIFT == 0)
+#if !(NODES_WIDTH > 0 || NODES_SHIFT == 0)
+#define NODE_NOT_IN_PAGE_FLAGS
+#endif
#ifndef PFN_SECTION_SHIFT
#define PFN_SECTION_SHIFT 0
@@ -411,13 +433,18 @@ void split_page(struct page *page, unsigned int order);
#define NODES_PGSHIFT (NODES_PGOFF * (NODES_WIDTH != 0))
#define ZONES_PGSHIFT (ZONES_PGOFF * (ZONES_WIDTH != 0))
-/* NODE:ZONE or SECTION:ZONE is used to lookup the zone from a page. */
-#if FLAGS_HAS_NODE
-#define ZONETABLE_SHIFT (NODES_SHIFT + ZONES_SHIFT)
+/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */
+#ifdef NODE_NOT_IN_PAGE_FLAGS
+#define ZONEID_SHIFT (SECTIONS_SHIFT + ZONES_SHIFT)
+#else
+#define ZONEID_SHIFT (NODES_SHIFT + ZONES_SHIFT)
+#endif
+
+#if ZONES_WIDTH > 0
+#define ZONEID_PGSHIFT ZONES_PGSHIFT
#else
-#define ZONETABLE_SHIFT (SECTIONS_SHIFT + ZONES_SHIFT)
+#define ZONEID_PGSHIFT NODES_PGOFF
#endif
-#define ZONETABLE_PGSHIFT ZONES_PGSHIFT
#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > FLAGS_RESERVED
#error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > FLAGS_RESERVED
@@ -426,26 +453,28 @@ void split_page(struct page *page, unsigned int order);
#define ZONES_MASK ((1UL << ZONES_WIDTH) - 1)
#define NODES_MASK ((1UL << NODES_WIDTH) - 1)
#define SECTIONS_MASK ((1UL << SECTIONS_WIDTH) - 1)
-#define ZONETABLE_MASK ((1UL << ZONETABLE_SHIFT) - 1)
+#define ZONEID_MASK ((1UL << ZONEID_SHIFT) - 1)
static inline enum zone_type page_zonenum(struct page *page)
{
return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
}
-struct zone;
-extern struct zone *zone_table[];
-
+/*
+ * The identification function is only used by the buddy allocator for
+ * determining if two pages could be buddies. We are not really
+ * identifying a zone since we could be using the section number
+ * instead of the node id if no node id is available in the page flags.
+ * We guarantee only that it will return the same value for two
+ * combinable pages in a zone.
+ */
static inline int page_zone_id(struct page *page)
{
- return (page->flags >> ZONETABLE_PGSHIFT) & ZONETABLE_MASK;
-}
-static inline struct zone *page_zone(struct page *page)
-{
- return zone_table[page_zone_id(page)];
+ BUILD_BUG_ON(ZONEID_PGSHIFT == 0 && ZONEID_MASK);
+ return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK;
}
-static inline unsigned long zone_to_nid(struct zone *zone)
+static inline int zone_to_nid(struct zone *zone)
{
#ifdef CONFIG_NUMA
return zone->node;
@@ -454,13 +483,20 @@ static inline unsigned long zone_to_nid(struct zone *zone)
#endif
}
-static inline unsigned long page_to_nid(struct page *page)
+#ifdef NODE_NOT_IN_PAGE_FLAGS
+extern int page_to_nid(struct page *page);
+#else
+static inline int page_to_nid(struct page *page)
+{
+ return (page->flags >> NODES_PGSHIFT) & NODES_MASK;
+}
+#endif
+
+static inline struct zone *page_zone(struct page *page)
{
- if (FLAGS_HAS_NODE)
- return (page->flags >> NODES_PGSHIFT) & NODES_MASK;
- else
- return zone_to_nid(page_zone(page));
+ return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
}
+
static inline unsigned long page_to_section(struct page *page)
{
return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
@@ -477,6 +513,7 @@ static inline void set_page_node(struct page *page, unsigned long node)
page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
page->flags |= (node & NODES_MASK) << NODES_PGSHIFT;
}
+
static inline void set_page_section(struct page *page, unsigned long section)
{
page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
@@ -947,8 +984,6 @@ extern void mem_init(void);
extern void show_mem(void);
extern void si_meminfo(struct sysinfo * val);
extern void si_meminfo_node(struct sysinfo *val, int nid);
-extern void zonetable_add(struct zone *zone, int nid, enum zone_type zid,
- unsigned long pfn, unsigned long size);
#ifdef CONFIG_NUMA
extern void setup_per_cpu_pageset(void);
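
The compound-page destructor hooks added above are only valid on the head page of a PG_compound allocation; hugetlbfs is the in-tree user, routing frees back into its pool. A minimal sketch, with the order and the destructor chosen purely for illustration:

#include <linux/mm.h>
#include <linux/gfp.h>

/* Sketch: allocate an order-2 compound page and install a custom
 * destructor; the release path then calls my_dtor() on the head page
 * instead of the default destructor. */
static void my_dtor(struct page *page)		/* hypothetical */
{
	__free_pages(page, 2);			/* order must match the allocation */
}

static struct page *grab_compound_page(void)
{
	struct page *page = alloc_pages(GFP_KERNEL | __GFP_COMP, 2);

	if (page)
		set_compound_page_dtor(page, my_dtor);
	return page;
}
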
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index e06683e2bea..e339a7345f2 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -278,7 +278,7 @@ struct zone {
/*
* rarely used fields:
*/
- char *name;
+ const char *name;
} ____cacheline_internodealigned_in_smp;
/*
@@ -288,19 +288,94 @@ struct zone {
*/
#define DEF_PRIORITY 12
+/* Maximum number of zones on a zonelist */
+#define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES)
+
+#ifdef CONFIG_NUMA
+/*
+ * We cache key information from each zonelist for smaller cache
+ * footprint when scanning for free pages in get_page_from_freelist().
+ *
+ * 1) The BITMAP fullzones tracks which zones in a zonelist have come
+ * up short of free memory since the last time (last_fullzone_zap)
+ * we zero'd fullzones.
+ * 2) The array z_to_n[] maps each zone in the zonelist to its node
+ * id, so that we can efficiently evaluate whether that node is
+ * set in the current task's mems_allowed.
+ *
+ * Both fullzones and z_to_n[] are one-to-one with the zonelist,
+ * indexed by a zone's offset in the zonelist zones[] array.
+ *
+ * The get_page_from_freelist() routine does two scans. During the
+ * first scan, we skip zones whose corresponding bit in 'fullzones'
+ * is set or whose corresponding node in current->mems_allowed (which
+ * comes from cpusets) is not set. During the second scan, we bypass
+ * this zonelist_cache, to ensure we look methodically at each zone.
+ *
+ * Once per second, we zero out (zap) fullzones, forcing us to
+ * reconsider nodes that might have regained more free memory.
+ * The field last_full_zap is the time we last zapped fullzones.
+ *
+ * This mechanism reduces the amount of time we waste repeatedly
+ * re-examining zones for free memory when they came up short of
+ * memory only moments ago.
+ *
+ * The zonelist_cache struct members logically belong in struct
+ * zonelist. However, the mempolicy zonelists constructed for
+ * MPOL_BIND are intentionally variable length (and usually much
+ * shorter). A general purpose mechanism for handling structs with
+ * multiple variable length members is more mechanism than we want
+ * here. We resort to some special case hackery instead.
+ *
+ * The MPOL_BIND zonelists don't need this zonelist_cache (in good
+ * part because they are shorter), so we put the fixed length stuff
+ * at the front of the zonelist struct, ending in a variable length
+ * zones[], as is needed by MPOL_BIND.
+ *
+ * Then we put the optional zonelist cache on the end of the zonelist
+ * struct. This optional stuff is found by a 'zlcache_ptr' pointer in
+ * the fixed length portion at the front of the struct. This pointer
+ * both enables us to find the zonelist cache, and in the case of
+ * MPOL_BIND zonelists, (which will just set the zlcache_ptr to NULL)
+ * to know that the zonelist cache is not there.
+ *
+ * The end result is that struct zonelists come in two flavors:
+ * 1) The full, fixed length version, shown below, and
+ * 2) The custom zonelists for MPOL_BIND.
+ * The custom MPOL_BIND zonelists have a NULL zlcache_ptr and no zlcache.
+ *
+ * Even though there may be multiple CPU cores on a node modifying
+ * fullzones or last_full_zap in the same zonelist_cache at the same
+ * time, we don't lock it. This is just hint data - if it is wrong now
+ * and then, the allocator will still function, perhaps a bit slower.
+ */
+
+
+struct zonelist_cache {
+ unsigned short z_to_n[MAX_ZONES_PER_ZONELIST]; /* zone->nid */
+ DECLARE_BITMAP(fullzones, MAX_ZONES_PER_ZONELIST); /* zone full? */
+ unsigned long last_full_zap; /* when last zap'd (jiffies) */
+};
+#else
+struct zonelist_cache;
+#endif
+
/*
* One allocation request operates on a zonelist. A zonelist
* is a list of zones, the first one is the 'goal' of the
* allocation, the other zones are fallback zones, in decreasing
* priority.
*
- * Right now a zonelist takes up less than a cacheline. We never
- * modify it apart from boot-up, and only a few indices are used,
- * so despite the zonelist table being relatively big, the cache
- * footprint of this construct is very small.
+ * If zlcache_ptr is not NULL, then it is just the address of zlcache,
+ * as explained above. If zlcache_ptr is NULL, there is no zlcache.
*/
+
struct zonelist {
- struct zone *zones[MAX_NUMNODES * MAX_NR_ZONES + 1]; // NULL delimited
+ struct zonelist_cache *zlcache_ptr; // NULL or &zlcache
+ struct zone *zones[MAX_ZONES_PER_ZONELIST + 1]; // NULL delimited
+#ifdef CONFIG_NUMA
+ struct zonelist_cache zlcache; // optional ...
+#endif
};
#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
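
A sketch of the read side described in the zonelist_cache comment above, using only the fields declared here (CONFIG_NUMA assumed, since the cache only exists there). The real logic lives in get_page_from_freelist() in mm/page_alloc.c and also folds in the cpuset check, which is omitted:

#include <linux/mmzone.h>
#include <linux/jiffies.h>
#include <linux/bitmap.h>

/* Illustrative only: skip a zone whose 'full' bit is set, and zap the
 * bitmap roughly once per second so recently-full zones get another look. */
static int zone_worth_trying(struct zonelist *zl, int zone_idx)
{
	struct zonelist_cache *zlc = zl->zlcache_ptr;

	if (!zlc)				/* MPOL_BIND zonelist: no cache */
		return 1;
	if (time_after(jiffies, zlc->last_full_zap + HZ)) {
		bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
		zlc->last_full_zap = jiffies;
	}
	return !test_bit(zone_idx, zlc->fullzones);
}
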
diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
index 7c0c2c198f1..4a189dadb16 100644
--- a/include/linux/moduleparam.h
+++ b/include/linux/moduleparam.h
@@ -63,6 +63,9 @@ struct kparam_array
not there, read bits mean it's readable, write bits mean it's
writable. */
#define __module_param_call(prefix, name, set, get, arg, perm) \
+ /* Default value instead of permissions? */ \
+ static int __param_perm_check_##name __attribute__((unused)) = \
+ BUILD_BUG_ON_ZERO((perm) < 0 || (perm) > 0777 || ((perm) & 2)); \
static char __param_str_##name[] = prefix #name; \
static struct kernel_param const __param_##name \
__attribute_used__ \
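
The BUILD_BUG_ON_ZERO() line added above turns common permission mistakes into compile-time errors. A sketch of what it accepts and rejects (the parameter name is illustrative):

#include <linux/moduleparam.h>

static int debug;
module_param(debug, int, 0644);		/* OK: octal mode, not world-writable */

/* Both of the following would now fail to build:
 *   module_param(debug, int, 644);	missing leading 0, so 644 > 0777
 *   module_param(debug, int, 0666);	world-writable ((perm) & 2)
 */
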
diff --git a/include/linux/msg.h b/include/linux/msg.h
index acc7c174ff0..f1b60740d64 100644
--- a/include/linux/msg.h
+++ b/include/linux/msg.h
@@ -92,6 +92,12 @@ struct msg_queue {
struct list_head q_senders;
};
+/* Helper routines for sys_msgsnd and sys_msgrcv */
+extern long do_msgsnd(int msqid, long mtype, void __user *mtext,
+ size_t msgsz, int msgflg);
+extern long do_msgrcv(int msqid, long *pmtype, void __user *mtext,
+ size_t msgsz, long msgtyp, int msgflg);
+
#endif /* __KERNEL__ */
#endif /* _LINUX_MSG_H */
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 27c48daa318..b2b91c47756 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -94,7 +94,7 @@ do { \
#define __MUTEX_INITIALIZER(lockname) \
{ .count = ATOMIC_INIT(1) \
- , .wait_lock = SPIN_LOCK_UNLOCKED \
+ , .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \
, .wait_list = LIST_HEAD_INIT(lockname.wait_list) \
__DEBUG_MUTEX_INITIALIZER(lockname) \
__DEP_MAP_MUTEX_INITIALIZER(lockname) }
diff --git a/include/linux/nbd.h b/include/linux/nbd.h
index d6b6dc09ad9..0f3e6930254 100644
--- a/include/linux/nbd.h
+++ b/include/linux/nbd.h
@@ -64,6 +64,7 @@ struct nbd_device {
struct gendisk *disk;
int blksize;
u64 bytesize;
+ pid_t pid; /* pid of nbd-client, if attached */
};
#endif
diff --git a/include/linux/netfilter/nf_conntrack_pptp.h b/include/linux/netfilter/nf_conntrack_pptp.h
index fb049ec11ff..9d8144a488c 100644
--- a/include/linux/netfilter/nf_conntrack_pptp.h
+++ b/include/linux/netfilter/nf_conntrack_pptp.h
@@ -2,6 +2,8 @@
#ifndef _NF_CONNTRACK_PPTP_H
#define _NF_CONNTRACK_PPTP_H
+#include <linux/netfilter/nf_conntrack_common.h>
+
/* state of the control session */
enum pptp_ctrlsess_state {
PPTP_SESSION_NONE, /* no session present */
@@ -295,7 +297,6 @@ union pptp_ctrl_union {
/* crap needed for nf_conntrack_compat.h */
struct nf_conn;
struct nf_conntrack_expect;
-enum ip_conntrack_info;
extern int
(*nf_nat_pptp_hook_outbound)(struct sk_buff **pskb,
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index e16904e28c3..acb4ed13024 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -15,9 +15,14 @@
* disables interrupts for a long time. This call is stateless.
*/
#ifdef ARCH_HAS_NMI_WATCHDOG
+#include <asm/nmi.h>
extern void touch_nmi_watchdog(void);
#else
# define touch_nmi_watchdog() touch_softlockup_watchdog()
#endif
+#ifndef trigger_all_cpu_backtrace
+#define trigger_all_cpu_backtrace() do { } while (0)
+#endif
+
#endif
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index c09da1e30c5..4d972bbef31 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -390,7 +390,7 @@
#define PCI_DEVICE_ID_NS_CS5535_IDE 0x002d
#define PCI_DEVICE_ID_NS_CS5535_AUDIO 0x002e
#define PCI_DEVICE_ID_NS_CS5535_USB 0x002f
-#define PCI_DEVICE_ID_NS_CS5535_VIDEO 0x0030
+#define PCI_DEVICE_ID_NS_GX_VIDEO 0x0030
#define PCI_DEVICE_ID_NS_SATURN 0x0035
#define PCI_DEVICE_ID_NS_SCx200_BRIDGE 0x0500
#define PCI_DEVICE_ID_NS_SCx200_SMI 0x0501
@@ -403,8 +403,7 @@
#define PCI_DEVICE_ID_NS_SC1100_XBUS 0x0515
#define PCI_DEVICE_ID_NS_87410 0xd001
-#define PCI_DEVICE_ID_NS_CS5535_HOST_BRIDGE 0x0028
-#define PCI_DEVICE_ID_NS_CS5535_ISA_BRIDGE 0x002b
+#define PCI_DEVICE_ID_NS_GX_HOST_BRIDGE 0x0028
#define PCI_VENDOR_ID_TSENG 0x100c
#define PCI_DEVICE_ID_TSENG_W32P_2 0x3202
@@ -1864,6 +1863,7 @@
#define PCI_DEVICE_ID_OXSEMI_16PCI95N 0x9511
#define PCI_DEVICE_ID_OXSEMI_16PCI954PP 0x9513
#define PCI_DEVICE_ID_OXSEMI_16PCI952 0x9521
+#define PCI_DEVICE_ID_OXSEMI_16PCI952PP 0x9523
#define PCI_VENDOR_ID_SAMSUNG 0x144d
@@ -1931,6 +1931,7 @@
#define PCI_DEVICE_ID_TIGON3_5750M 0x167c
#define PCI_DEVICE_ID_TIGON3_5751M 0x167d
#define PCI_DEVICE_ID_TIGON3_5751F 0x167e
+#define PCI_DEVICE_ID_TIGON3_5787F 0x167f
#define PCI_DEVICE_ID_TIGON3_5787M 0x1693
#define PCI_DEVICE_ID_TIGON3_5782 0x1696
#define PCI_DEVICE_ID_TIGON3_5786 0x169a
@@ -2002,6 +2003,8 @@
#define PCI_DEVICE_ID_FARSITE_TE1 0x1610
#define PCI_DEVICE_ID_FARSITE_TE1C 0x1612
+#define PCI_VENDOR_ID_ARIMA 0x161f
+
#define PCI_VENDOR_ID_SIBYTE 0x166d
#define PCI_DEVICE_ID_BCM1250_PCI 0x0001
#define PCI_DEVICE_ID_BCM1250_HT 0x0002
diff --git a/include/linux/pfkeyv2.h b/include/linux/pfkeyv2.h
index 0f0b880c428..265bafab649 100644
--- a/include/linux/pfkeyv2.h
+++ b/include/linux/pfkeyv2.h
@@ -285,6 +285,7 @@ struct sadb_x_sec_ctx {
#define SADB_X_AALG_SHA2_384HMAC 6
#define SADB_X_AALG_SHA2_512HMAC 7
#define SADB_X_AALG_RIPEMD160HMAC 8
+#define SADB_X_AALG_AES_XCBC_MAC 9
#define SADB_X_AALG_NULL 251 /* kame */
#define SADB_AALG_MAX 251
diff --git a/include/linux/profile.h b/include/linux/profile.h
index acce53fd38b..5670b340c4e 100644
--- a/include/linux/profile.h
+++ b/include/linux/profile.h
@@ -6,10 +6,15 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/cpumask.h>
+#include <linux/cache.h>
+
#include <asm/errno.h>
+extern int prof_on __read_mostly;
+
#define CPU_PROFILING 1
#define SCHED_PROFILING 2
+#define SLEEP_PROFILING 3
struct proc_dir_entry;
struct pt_regs;
@@ -18,7 +23,24 @@ struct notifier_block;
/* init basic kernel profiler */
void __init profile_init(void);
void profile_tick(int);
-void profile_hit(int, void *);
+
+/*
+ * Add multiple profiler hits to a given address:
+ */
+void profile_hits(int, void *ip, unsigned int nr_hits);
+
+/*
+ * Single profiler hit:
+ */
+static inline void profile_hit(int type, void *ip)
+{
+ /*
+ * Speedup for the common (no profiling enabled) case:
+ */
+ if (unlikely(prof_on == type))
+ profile_hits(type, ip, 1);
+}
+
#ifdef CONFIG_PROC_FS
void create_prof_cpu_mask(struct proc_dir_entry *);
#else
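
A sketch of feeding samples through the new profiler fast path; profile_pc() is the arch helper the core profiler itself uses to turn a register set into an instruction pointer, and the callers here are illustrative:

#include <linux/profile.h>
#include <asm/ptrace.h>

/* One hit per tick-like event.  With profiling off, profile_hit()
 * reduces to a single compare on prof_on. */
static void note_sample(struct pt_regs *regs)
{
	profile_hit(CPU_PROFILING, (void *)profile_pc(regs));
}

/* Batched variant: attribute 'nr' hits to the same address at once. */
static void note_samples(void *ip, unsigned int nr)
{
	profile_hits(SLEEP_PROFILING, ip, nr);
}
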
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index 5110201a415..90c23f690c0 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -37,6 +37,9 @@ extern int dquot_release(struct dquot *dquot);
extern int dquot_commit_info(struct super_block *sb, int type);
extern int dquot_mark_dquot_dirty(struct dquot *dquot);
+int remove_inode_dquot_ref(struct inode *inode, int type,
+ struct list_head *tofree_head);
+
extern int vfs_quota_on(struct super_block *sb, int type, int format_id, char *path);
extern int vfs_quota_on_mount(struct super_block *sb, char *qf_name,
int format_id, int type);
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index cbfa1153742..0deb842541a 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -1,6 +1,7 @@
/*
* Copyright (C) 2001 Momchil Velikov
* Portions Copyright (C) 2001 Christoph Hellwig
+ * Copyright (C) 2006 Nick Piggin
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
@@ -21,6 +22,35 @@
#include <linux/preempt.h>
#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/rcupdate.h>
+
+/*
+ * A direct pointer (root->rnode pointing directly to a data item,
+ * rather than another radix_tree_node) is signalled by the low bit
+ * set in the root->rnode pointer.
+ *
+ * In this case root->height is also 0, but the direct pointer tests are
+ * needed for RCU lookups when root->height is unreliable.
+ */
+#define RADIX_TREE_DIRECT_PTR 1
+
+static inline void *radix_tree_ptr_to_direct(void *ptr)
+{
+ return (void *)((unsigned long)ptr | RADIX_TREE_DIRECT_PTR);
+}
+
+static inline void *radix_tree_direct_to_ptr(void *ptr)
+{
+ return (void *)((unsigned long)ptr & ~RADIX_TREE_DIRECT_PTR);
+}
+
+static inline int radix_tree_is_direct_ptr(void *ptr)
+{
+ return (int)((unsigned long)ptr & RADIX_TREE_DIRECT_PTR);
+}
+
+/*** radix-tree API starts here ***/
#define RADIX_TREE_MAX_TAGS 2
@@ -47,6 +77,77 @@ do { \
(root)->rnode = NULL; \
} while (0)
+/**
+ * Radix-tree synchronization
+ *
+ * The radix-tree API requires that users provide all synchronisation (with
+ * specific exceptions, noted below).
+ *
+ * Synchronization of access to the data items being stored in the tree, and
+ * management of their lifetimes must be completely managed by API users.
+ *
+ * For API usage, in general,
+ * - any function _modifying_ the tree or tags (inserting or deleting
+ * items, setting or clearing tags) must exclude other modifications, and
+ * exclude any functions reading the tree.
+ * - any function _reading_ the tree or tags (looking up items or tags,
+ * gang lookups) must exclude modifications to the tree, but may occur
+ * concurrently with other readers.
+ *
+ * The notable exceptions to this rule are the following functions:
+ * radix_tree_lookup
+ * radix_tree_tag_get
+ * radix_tree_gang_lookup
+ * radix_tree_gang_lookup_tag
+ * radix_tree_tagged
+ *
+ * The first 4 functions are able to be called locklessly, using RCU. The
+ * caller must ensure calls to these functions are made within rcu_read_lock()
+ * regions. Other readers (lock-free or otherwise) and modifications may be
+ * running concurrently.
+ *
+ * It is still required that the caller manage the synchronization and lifetimes
+ * of the items. So if RCU lock-free lookups are used, typically this would mean
+ * that the items have their own locks, or are amenable to lock-free access; and
+ * that the items are freed by RCU (or only freed after having been deleted from
+ * the radix tree *and* a synchronize_rcu() grace period).
+ *
+ * (Note, rcu_assign_pointer and rcu_dereference are not needed to control
+ * access to data items when inserting into or looking up from the radix tree)
+ *
+ * radix_tree_tagged is able to be called without locking or RCU.
+ */
+
+/**
+ * radix_tree_deref_slot - dereference a slot
+ * @pslot: pointer to slot, returned by radix_tree_lookup_slot
+ * Returns: item that was stored in that slot with any direct pointer flag
+ * removed.
+ *
+ * For use with radix_tree_lookup_slot(). Caller must hold tree at least read
+ * locked across slot lookup and dereference. More likely, will be used with
+ * radix_tree_replace_slot(), as well, so caller will hold tree write locked.
+ */
+static inline void *radix_tree_deref_slot(void **pslot)
+{
+ return radix_tree_direct_to_ptr(*pslot);
+}
+/**
+ * radix_tree_replace_slot - replace item in a slot
+ * @pslot: pointer to slot, returned by radix_tree_lookup_slot
+ * @item: new item to store in the slot.
+ *
+ * For use with radix_tree_lookup_slot(). Caller must hold tree write locked
+ * across slot lookup and replacement.
+ */
+static inline void radix_tree_replace_slot(void **pslot, void *item)
+{
+ BUG_ON(radix_tree_is_direct_ptr(item));
+ rcu_assign_pointer(*pslot,
+ (void *)((unsigned long)item |
+ ((unsigned long)*pslot & RADIX_TREE_DIRECT_PTR)));
+}
+
int radix_tree_insert(struct radix_tree_root *, unsigned long, void *);
void *radix_tree_lookup(struct radix_tree_root *, unsigned long);
void **radix_tree_lookup_slot(struct radix_tree_root *, unsigned long);
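
A sketch of the locking rules spelled out above: lock-free RCU lookups on the read side, an external lock for updates. The tree, the lock, and the item lifetime policy are assumptions of the example:

#include <linux/radix-tree.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>

static RADIX_TREE(my_tree, GFP_ATOMIC);
static DEFINE_SPINLOCK(my_lock);

/* Read side: no tree lock, just an RCU read-side critical section. */
static void *lookup_item(unsigned long index)
{
	void *item;

	rcu_read_lock();
	item = radix_tree_lookup(&my_tree, index);
	rcu_read_unlock();
	return item;		/* item lifetime still managed by the caller */
}

/* Write side: the tree lock excludes other modifications and readers
 * that are not using RCU. */
static void replace_item(unsigned long index, void *new)
{
	void **slot;

	spin_lock(&my_lock);
	slot = radix_tree_lookup_slot(&my_tree, index);
	if (slot)
		radix_tree_replace_slot(slot, new);
	spin_unlock(&my_lock);
}
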
diff --git a/include/linux/raid/raid5.h b/include/linux/raid/raid5.h
index f13299a1559..03636d7918f 100644
--- a/include/linux/raid/raid5.h
+++ b/include/linux/raid/raid5.h
@@ -235,7 +235,7 @@ struct raid5_private_data {
*/
int active_name;
char cache_name[2][20];
- kmem_cache_t *slab_cache; /* for allocating stripes */
+ struct kmem_cache *slab_cache; /* for allocating stripes */
int seq_flush, seq_write;
int quiesce;
diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
index 7bc6bfb8625..d0e4dce33ad 100644
--- a/include/linux/reiserfs_fs.h
+++ b/include/linux/reiserfs_fs.h
@@ -739,7 +739,7 @@ struct block_head {
#define PUT_B_FREE_SPACE(p_s_bh,val) do { set_blkh_free_space(B_BLK_HEAD(p_s_bh),val); } while (0)
/* Get right delimiting key. -- little endian */
-#define B_PRIGHT_DELIM_KEY(p_s_bh) (&(blk_right_delim_key(B_BLK_HEAD(p_s_bh))
+#define B_PRIGHT_DELIM_KEY(p_s_bh) (&(blk_right_delim_key(B_BLK_HEAD(p_s_bh))))
/* Does the buffer contain a disk leaf. */
#define B_IS_ITEMS_LEVEL(p_s_bh) (B_LEVEL(p_s_bh) == DISK_LEAF_NODE_LEVEL)
diff --git a/include/linux/relay.h b/include/linux/relay.h
index 0e3d91b7699..c6a48bfc8b1 100644
--- a/include/linux/relay.h
+++ b/include/linux/relay.h
@@ -274,7 +274,7 @@ static inline void subbuf_start_reserve(struct rchan_buf *buf,
/*
* exported relay file operations, kernel/relay.c
*/
-extern struct file_operations relay_file_operations;
+extern const struct file_operations relay_file_operations;
#endif /* _LINUX_RELAY_H */
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index db2c1df4fef..36f850373d2 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -30,11 +30,11 @@ struct anon_vma {
#ifdef CONFIG_MMU
-extern kmem_cache_t *anon_vma_cachep;
+extern struct kmem_cache *anon_vma_cachep;
static inline struct anon_vma *anon_vma_alloc(void)
{
- return kmem_cache_alloc(anon_vma_cachep, SLAB_KERNEL);
+ return kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
}
static inline void anon_vma_free(struct anon_vma *anon_vma)
diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h
index 5d41dee82f8..b0090e9f788 100644
--- a/include/linux/rtmutex.h
+++ b/include/linux/rtmutex.h
@@ -63,7 +63,7 @@ struct hrtimer_sleeper;
#endif
#define __RT_MUTEX_INITIALIZER(mutexname) \
- { .wait_lock = SPIN_LOCK_UNLOCKED \
+ { .wait_lock = __SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
, .wait_list = PLIST_HEAD_INIT(mutexname.wait_list, mutexname.wait_lock) \
, .owner = NULL \
__DEBUG_RT_MUTEX_INITIALIZER(mutexname)}
diff --git a/include/linux/rwsem-spinlock.h b/include/linux/rwsem-spinlock.h
index ae1fcadd598..813cee13da0 100644
--- a/include/linux/rwsem-spinlock.h
+++ b/include/linux/rwsem-spinlock.h
@@ -44,7 +44,8 @@ struct rw_semaphore {
#endif
#define __RWSEM_INITIALIZER(name) \
-{ 0, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) }
+{ 0, __SPIN_LOCK_UNLOCKED(name.wait_lock), LIST_HEAD_INIT((name).wait_list) \
+ __RWSEM_DEP_MAP_INIT(name) }
#define DECLARE_RWSEM(name) \
struct rw_semaphore name = __RWSEM_INITIALIZER(name)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index eafe4a7b823..dede82c6344 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -194,7 +194,16 @@ extern void init_idle(struct task_struct *idle, int cpu);
extern cpumask_t nohz_cpu_mask;
-extern void show_state(void);
+/*
+ * Only dump TASK_* tasks. (-1 for all tasks)
+ */
+extern void show_state_filter(unsigned long state_filter);
+
+static inline void show_state(void)
+{
+ show_state_filter(-1);
+}
+
extern void show_regs(struct pt_regs *);
/*
@@ -338,15 +347,23 @@ struct mm_struct {
unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */
- unsigned dumpable:2;
cpumask_t cpu_vm_mask;
/* Architecture-specific MM context */
mm_context_t context;
- /* Token based thrashing protection. */
- unsigned long swap_token_time;
- char recent_pagein;
+ /* Swap token stuff */
+ /*
+ * Last value of global fault stamp as seen by this process.
+ * In other words, this value gives an indication of how long
+ * it has been since this task got the token.
+ * Look at mm/thrash.c
+ */
+ unsigned int faultstamp;
+ unsigned int token_priority;
+ unsigned int last_interval;
+
+ unsigned char dumpable:2;
/* coredumping support */
int core_waiters;
@@ -556,7 +573,7 @@ struct sched_info {
#endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */
#ifdef CONFIG_SCHEDSTATS
-extern struct file_operations proc_schedstat_operations;
+extern const struct file_operations proc_schedstat_operations;
#endif /* CONFIG_SCHEDSTATS */
#ifdef CONFIG_TASK_DELAY_ACCT
@@ -1288,7 +1305,6 @@ extern int kill_pgrp(struct pid *pid, int sig, int priv);
extern int kill_pid(struct pid *pid, int sig, int priv);
extern int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp);
extern int kill_pg_info(int, struct siginfo *, pid_t);
-extern int kill_proc_info(int, struct siginfo *, pid_t);
extern void do_notify_parent(struct task_struct *, int);
extern void force_sig(int, struct task_struct *);
extern void force_sig_specific(int, struct task_struct *);
@@ -1610,87 +1626,6 @@ extern int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls);
extern void normalize_rt_tasks(void);
-#ifdef CONFIG_PM
-/*
- * Check if a process has been frozen
- */
-static inline int frozen(struct task_struct *p)
-{
- return p->flags & PF_FROZEN;
-}
-
-/*
- * Check if there is a request to freeze a process
- */
-static inline int freezing(struct task_struct *p)
-{
- return p->flags & PF_FREEZE;
-}
-
-/*
- * Request that a process be frozen
- * FIXME: SMP problem. We may not modify other process' flags!
- */
-static inline void freeze(struct task_struct *p)
-{
- p->flags |= PF_FREEZE;
-}
-
-/*
- * Sometimes we may need to cancel the previous 'freeze' request
- */
-static inline void do_not_freeze(struct task_struct *p)
-{
- p->flags &= ~PF_FREEZE;
-}
-
-/*
- * Wake up a frozen process
- */
-static inline int thaw_process(struct task_struct *p)
-{
- if (frozen(p)) {
- p->flags &= ~PF_FROZEN;
- wake_up_process(p);
- return 1;
- }
- return 0;
-}
-
-/*
- * freezing is complete, mark process as frozen
- */
-static inline void frozen_process(struct task_struct *p)
-{
- p->flags = (p->flags & ~PF_FREEZE) | PF_FROZEN;
-}
-
-extern void refrigerator(void);
-extern int freeze_processes(void);
-extern void thaw_processes(void);
-
-static inline int try_to_freeze(void)
-{
- if (freezing(current)) {
- refrigerator();
- return 1;
- } else
- return 0;
-}
-#else
-static inline int frozen(struct task_struct *p) { return 0; }
-static inline int freezing(struct task_struct *p) { return 0; }
-static inline void freeze(struct task_struct *p) { BUG(); }
-static inline int thaw_process(struct task_struct *p) { return 1; }
-static inline void frozen_process(struct task_struct *p) { BUG(); }
-
-static inline void refrigerator(void) {}
-static inline int freeze_processes(void) { BUG(); return 0; }
-static inline void thaw_processes(void) {}
-
-static inline int try_to_freeze(void) { return 0; }
-
-#endif /* CONFIG_PM */
#endif /* __KERNEL__ */
#endif
diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h
index 2925e66a673..b02308ee766 100644
--- a/include/linux/screen_info.h
+++ b/include/linux/screen_info.h
@@ -42,7 +42,8 @@ struct screen_info {
u16 pages; /* 0x32 */
u16 vesa_attributes; /* 0x34 */
u32 capabilities; /* 0x36 */
- /* 0x3a -- 0x3f reserved for future expansion */
+ /* 0x3a -- 0x3b reserved for future expansion */
+ /* 0x3c -- 0x3f micro stack for relocatable kernels */
};
extern struct screen_info screen_info;
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
index b95f6eb7254..3e3cccbb1ca 100644
--- a/include/linux/seq_file.h
+++ b/include/linux/seq_file.h
@@ -20,7 +20,7 @@ struct seq_file {
loff_t index;
loff_t version;
struct mutex lock;
- struct seq_operations *op;
+ const struct seq_operations *op;
void *private;
};
@@ -31,7 +31,7 @@ struct seq_operations {
int (*show) (struct seq_file *m, void *v);
};
-int seq_open(struct file *, struct seq_operations *);
+int seq_open(struct file *, const struct seq_operations *);
ssize_t seq_read(struct file *, char __user *, size_t, loff_t *);
loff_t seq_lseek(struct file *, loff_t, int);
int seq_release(struct inode *, struct file *);
diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h
index 8e968141372..71310d80c09 100644
--- a/include/linux/serial_8250.h
+++ b/include/linux/serial_8250.h
@@ -41,6 +41,7 @@ enum {
PLAT8250_DEV_FOURPORT,
PLAT8250_DEV_ACCENT,
PLAT8250_DEV_BOCA,
+ PLAT8250_DEV_EXAR_ST16C554,
PLAT8250_DEV_HUB6,
PLAT8250_DEV_MCA,
PLAT8250_DEV_AU1X00,
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index 463ab953b09..82767213664 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -132,6 +132,8 @@
#define PORT_S3C2412 73
+/* Xilinx uartlite */
+#define PORT_UARTLITE 74
#ifdef __KERNEL__
diff --git a/include/linux/signal.h b/include/linux/signal.h
index 117135e33d6..14749056dd6 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -241,6 +241,8 @@ extern int sigprocmask(int, sigset_t *, sigset_t *);
struct pt_regs;
extern int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka, struct pt_regs *regs, void *cookie);
+extern struct kmem_cache *sighand_cachep;
+
#endif /* __KERNEL__ */
#endif /* _LINUX_SIGNAL_H */
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index a05a5f7c0b7..4ff3940210d 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -332,20 +332,20 @@ struct sk_buff {
extern void kfree_skb(struct sk_buff *skb);
extern void __kfree_skb(struct sk_buff *skb);
extern struct sk_buff *__alloc_skb(unsigned int size,
- gfp_t priority, int fclone);
+ gfp_t priority, int fclone, int node);
static inline struct sk_buff *alloc_skb(unsigned int size,
gfp_t priority)
{
- return __alloc_skb(size, priority, 0);
+ return __alloc_skb(size, priority, 0, -1);
}
static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
gfp_t priority)
{
- return __alloc_skb(size, priority, 1);
+ return __alloc_skb(size, priority, 1, -1);
}
-extern struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp,
+extern struct sk_buff *alloc_skb_from_cache(struct kmem_cache *cp,
unsigned int size,
gfp_t priority);
extern void kfree_skbmem(struct sk_buff *skb);
diff --git a/include/linux/slab.h b/include/linux/slab.h
index c4947b8a2c0..2271886744f 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -7,27 +7,17 @@
#ifndef _LINUX_SLAB_H
#define _LINUX_SLAB_H
-#if defined(__KERNEL__)
+#ifdef __KERNEL__
-typedef struct kmem_cache kmem_cache_t;
+#include <linux/gfp.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <asm/page.h> /* kmalloc_sizes.h needs PAGE_SIZE */
+#include <asm/cache.h> /* kmalloc_sizes.h needs L1_CACHE_BYTES */
+#include <linux/compiler.h>
-#include <linux/gfp.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <asm/page.h> /* kmalloc_sizes.h needs PAGE_SIZE */
-#include <asm/cache.h> /* kmalloc_sizes.h needs L1_CACHE_BYTES */
-
-/* flags for kmem_cache_alloc() */
-#define SLAB_NOFS GFP_NOFS
-#define SLAB_NOIO GFP_NOIO
-#define SLAB_ATOMIC GFP_ATOMIC
-#define SLAB_USER GFP_USER
-#define SLAB_KERNEL GFP_KERNEL
-#define SLAB_DMA GFP_DMA
-
-#define SLAB_LEVEL_MASK GFP_LEVEL_MASK
-
-#define SLAB_NO_GROW __GFP_NO_GROW /* don't grow a cache */
+/* kmem_cache_t exists for legacy reasons and is not used by code in mm */
+typedef struct kmem_cache kmem_cache_t __deprecated;
/* flags to pass to kmem_cache_create().
* The first 3 are only valid when the allocator as been build
@@ -57,22 +47,23 @@ typedef struct kmem_cache kmem_cache_t;
/* prototypes */
extern void __init kmem_cache_init(void);
-extern kmem_cache_t *kmem_cache_create(const char *, size_t, size_t, unsigned long,
- void (*)(void *, kmem_cache_t *, unsigned long),
- void (*)(void *, kmem_cache_t *, unsigned long));
-extern void kmem_cache_destroy(kmem_cache_t *);
-extern int kmem_cache_shrink(kmem_cache_t *);
-extern void *kmem_cache_alloc(kmem_cache_t *, gfp_t);
+extern struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
+ unsigned long,
+ void (*)(void *, struct kmem_cache *, unsigned long),
+ void (*)(void *, struct kmem_cache *, unsigned long));
+extern void kmem_cache_destroy(struct kmem_cache *);
+extern int kmem_cache_shrink(struct kmem_cache *);
+extern void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
extern void *kmem_cache_zalloc(struct kmem_cache *, gfp_t);
-extern void kmem_cache_free(kmem_cache_t *, void *);
-extern unsigned int kmem_cache_size(kmem_cache_t *);
-extern const char *kmem_cache_name(kmem_cache_t *);
+extern void kmem_cache_free(struct kmem_cache *, void *);
+extern unsigned int kmem_cache_size(struct kmem_cache *);
+extern const char *kmem_cache_name(struct kmem_cache *);
/* Size description struct for general caches. */
struct cache_sizes {
- size_t cs_size;
- kmem_cache_t *cs_cachep;
- kmem_cache_t *cs_dmacachep;
+ size_t cs_size;
+ struct kmem_cache *cs_cachep;
+ struct kmem_cache *cs_dmacachep;
};
extern struct cache_sizes malloc_sizes[];
@@ -211,7 +202,7 @@ extern unsigned int ksize(const void *);
extern int slab_is_available(void);
#ifdef CONFIG_NUMA
-extern void *kmem_cache_alloc_node(kmem_cache_t *, gfp_t flags, int node);
+extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
@@ -236,8 +227,27 @@ found:
}
return __kmalloc_node(size, flags, node);
}
+
+/*
+ * kmalloc_node_track_caller is a special version of kmalloc_node that
+ * records the calling function of the routine calling it for slab leak
+ * tracking instead of just the calling function (confusing, eh?).
+ * It's useful when the call to kmalloc_node comes from a widely-used
+ * standard allocator where we care about the real place the memory
+ * allocation request comes from.
+ */
+#ifndef CONFIG_DEBUG_SLAB
+#define kmalloc_node_track_caller(size, flags, node) \
+ __kmalloc_node(size, flags, node)
#else
-static inline void *kmem_cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int node)
+extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, void *);
+#define kmalloc_node_track_caller(size, flags, node) \
+ __kmalloc_node_track_caller(size, flags, node, \
+ __builtin_return_address(0))
+#endif
+#else /* CONFIG_NUMA */
+static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
+ gfp_t flags, int node)
{
return kmem_cache_alloc(cachep, flags);
}
@@ -245,10 +255,13 @@ static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
return kmalloc(size, flags);
}
+
+#define kmalloc_node_track_caller(size, flags, node) \
+ kmalloc_track_caller(size, flags)
#endif
extern int FASTCALL(kmem_cache_reap(int));
-extern int FASTCALL(kmem_ptr_validate(kmem_cache_t *cachep, void *ptr));
+extern int FASTCALL(kmem_ptr_validate(struct kmem_cache *cachep, void *ptr));
#else /* CONFIG_SLOB */
@@ -283,16 +296,9 @@ static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
#define kzalloc(s, f) __kzalloc(s, f)
#define kmalloc_track_caller kmalloc
-#endif /* CONFIG_SLOB */
+#define kmalloc_node_track_caller kmalloc_node
-/* System wide caches */
-extern kmem_cache_t *vm_area_cachep;
-extern kmem_cache_t *names_cachep;
-extern kmem_cache_t *files_cachep;
-extern kmem_cache_t *filp_cachep;
-extern kmem_cache_t *fs_cachep;
-extern kmem_cache_t *sighand_cachep;
-extern kmem_cache_t *bio_cachep;
+#endif /* CONFIG_SLOB */
#endif /* __KERNEL__ */
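
A sketch of the wrapper-allocator case the kmalloc_node_track_caller comment above describes: a small node-aware helper that wants CONFIG_DEBUG_SLAB leak reports to point at its caller rather than at the helper itself (the helper is illustrative):

#include <linux/slab.h>
#include <linux/string.h>

static void *my_zalloc_node(size_t size, int node)
{
	/* The macro records __builtin_return_address(0) here, i.e. the
	 * function that called my_zalloc_node(), as the allocation site. */
	void *p = kmalloc_node_track_caller(size, GFP_KERNEL, node);

	if (p)
		memset(p, 0, size);
	return p;
}
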
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 51649987f69..7ba23ec8211 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -99,6 +99,13 @@ static inline int up_smp_call_function(void)
static inline void smp_send_reschedule(int cpu) { }
#define num_booting_cpus() 1
#define smp_prepare_boot_cpu() do {} while (0)
+static inline int smp_call_function_single(int cpuid, void (*func) (void *info),
+ void *info, int retry, int wait)
+{
+ /* Disable interrupts here? */
+ func(info);
+ return 0;
+}
#endif /* !SMP */
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 8451052ca66..94b767d6427 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -52,6 +52,7 @@
#include <linux/thread_info.h>
#include <linux/kernel.h>
#include <linux/stringify.h>
+#include <linux/bottom_half.h>
#include <asm/system.h>
diff --git a/include/linux/start_kernel.h b/include/linux/start_kernel.h
new file mode 100644
index 00000000000..d3e5f275654
--- /dev/null
+++ b/include/linux/start_kernel.h
@@ -0,0 +1,12 @@
+#ifndef _LINUX_START_KERNEL_H
+#define _LINUX_START_KERNEL_H
+
+#include <linux/linkage.h>
+#include <linux/init.h>
+
+/* Define the prototype for start_kernel here, rather than cluttering
+ up something else. */
+
+extern asmlinkage void __init start_kernel(void);
+
+#endif /* _LINUX_START_KERNEL_H */
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index b6b6ad6253b..97c76165258 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -217,7 +217,7 @@ struct rpc_wait_queue {
#ifndef RPC_DEBUG
# define RPC_WAITQ_INIT(var,qname) { \
- .lock = SPIN_LOCK_UNLOCKED, \
+ .lock = __SPIN_LOCK_UNLOCKED(var.lock), \
.tasks = { \
[0] = LIST_HEAD_INIT(var.tasks[0]), \
[1] = LIST_HEAD_INIT(var.tasks[1]), \
@@ -226,7 +226,7 @@ struct rpc_wait_queue {
}
#else
# define RPC_WAITQ_INIT(var,qname) { \
- .lock = SPIN_LOCK_UNLOCKED, \
+ .lock = __SPIN_LOCK_UNLOCKED(var.lock), \
.tasks = { \
[0] = LIST_HEAD_INIT(var.tasks[0]), \
[1] = LIST_HEAD_INIT(var.tasks[1]), \
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index b1237f16ecd..bf99bd49f8e 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -9,10 +9,13 @@
#include <linux/init.h>
#include <linux/pm.h>
-/* page backup entry */
+/* struct pbe is used for creating lists of pages that should be restored
+ * atomically during the resume from disk, because the page frames they
+ * occupied before the suspend are now in use.
+ */
struct pbe {
- unsigned long address; /* address of the copy */
- unsigned long orig_address; /* original address of page */
+ void *address; /* address of the copy */
+ void *orig_address; /* original address of a page */
struct pbe *next;
};
diff --git a/include/linux/swap.h b/include/linux/swap.h
index e7c36ba2a2d..add51cebc8d 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -218,8 +218,6 @@ extern void swap_unplug_io_fn(struct backing_dev_info *, struct page *);
/* linux/mm/page_io.c */
extern int swap_readpage(struct file *, struct page *);
extern int swap_writepage(struct page *page, struct writeback_control *wbc);
-extern int rw_swap_page_sync(int rw, swp_entry_t entry, struct page *page,
- struct bio **bio_chain);
extern int end_swap_bio_read(struct bio *bio, unsigned int bytes_done, int err);
/* linux/mm/swap_state.c */
@@ -247,9 +245,10 @@ extern int swap_duplicate(swp_entry_t);
extern int valid_swaphandles(swp_entry_t, unsigned long *);
extern void swap_free(swp_entry_t);
extern void free_swap_and_cache(swp_entry_t);
-extern int swap_type_of(dev_t);
+extern int swap_type_of(dev_t, sector_t);
extern unsigned int count_swap_pages(int, int);
extern sector_t map_swap_page(struct swap_info_struct *, pgoff_t);
+extern sector_t swapdev_block(int, pgoff_t);
extern struct swap_info_struct *get_swap_info_struct(unsigned);
extern int can_share_swap_page(struct page *);
extern int remove_exclusive_swap_page(struct page *);
@@ -259,7 +258,6 @@ extern spinlock_t swap_lock;
/* linux/mm/thrash.c */
extern struct mm_struct * swap_token_mm;
-extern unsigned long swap_token_default_timeout;
extern void grab_swap_token(void);
extern void __put_swap_token(struct mm_struct *);
diff --git a/include/linux/taskstats_kern.h b/include/linux/taskstats_kern.h
index 6562a2050a2..7e9680f4afd 100644
--- a/include/linux/taskstats_kern.h
+++ b/include/linux/taskstats_kern.h
@@ -12,64 +12,27 @@
#include <net/genetlink.h>
#ifdef CONFIG_TASKSTATS
-extern kmem_cache_t *taskstats_cache;
+extern struct kmem_cache *taskstats_cache;
extern struct mutex taskstats_exit_mutex;
-static inline void taskstats_exit_free(struct taskstats *tidstats)
-{
- if (tidstats)
- kmem_cache_free(taskstats_cache, tidstats);
-}
-
static inline void taskstats_tgid_init(struct signal_struct *sig)
{
sig->stats = NULL;
}
-static inline void taskstats_tgid_alloc(struct task_struct *tsk)
-{
- struct signal_struct *sig = tsk->signal;
- struct taskstats *stats;
-
- if (sig->stats != NULL)
- return;
-
- /* No problem if kmem_cache_zalloc() fails */
- stats = kmem_cache_zalloc(taskstats_cache, SLAB_KERNEL);
-
- spin_lock_irq(&tsk->sighand->siglock);
- if (!sig->stats) {
- sig->stats = stats;
- stats = NULL;
- }
- spin_unlock_irq(&tsk->sighand->siglock);
-
- if (stats)
- kmem_cache_free(taskstats_cache, stats);
-}
-
static inline void taskstats_tgid_free(struct signal_struct *sig)
{
if (sig->stats)
kmem_cache_free(taskstats_cache, sig->stats);
}
-extern void taskstats_exit_alloc(struct taskstats **, unsigned int *);
-extern void taskstats_exit_send(struct task_struct *, struct taskstats *, int, unsigned int);
+extern void taskstats_exit(struct task_struct *, int group_dead);
extern void taskstats_init_early(void);
#else
-static inline void taskstats_exit_alloc(struct taskstats **ptidstats, unsigned int *mycpu)
-{}
-static inline void taskstats_exit_free(struct taskstats *ptidstats)
-{}
-static inline void taskstats_exit_send(struct task_struct *tsk,
- struct taskstats *tidstats,
- int group_dead, unsigned int cpu)
+static inline void taskstats_exit(struct task_struct *tsk, int group_dead)
{}
static inline void taskstats_tgid_init(struct signal_struct *sig)
{}
-static inline void taskstats_tgid_alloc(struct task_struct *tsk)
-{}
static inline void taskstats_tgid_free(struct signal_struct *sig)
{}
static inline void taskstats_init_early(void)
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index a48d7f11c7b..975c963e578 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -1,8 +1,43 @@
#ifndef __LINUX_UACCESS_H__
#define __LINUX_UACCESS_H__
+#include <linux/preempt.h>
#include <asm/uaccess.h>
+/*
+ * These routines enable/disable the pagefault handler in that
+ * it will not take any locks and go straight to the fixup table.
+ *
+ * They have great resemblance to the preempt_disable/enable calls
+ * and in fact they are identical; this is because currently there is
+ * no other way to make the pagefault handlers do this. So we do
+ * disable preemption but we don't necessarily care about that.
+ */
+static inline void pagefault_disable(void)
+{
+ inc_preempt_count();
+ /*
+ * make sure to have issued the store before a pagefault
+ * can hit.
+ */
+ barrier();
+}
+
+static inline void pagefault_enable(void)
+{
+ /*
+ * make sure to issue those last loads/stores before enabling
+ * the pagefault handler again.
+ */
+ barrier();
+ dec_preempt_count();
+ /*
+ * make sure we do..
+ */
+ barrier();
+ preempt_check_resched();
+}
+
#ifndef ARCH_HAS_NOCACHE_UACCESS
static inline unsigned long __copy_from_user_inatomic_nocache(void *to,
@@ -30,14 +65,22 @@ static inline unsigned long __copy_from_user_nocache(void *to,
* do_page_fault() doesn't attempt to take mmap_sem. This makes
* probe_kernel_address() suitable for use within regions where the caller
* already holds mmap_sem, or other locks which nest inside mmap_sem.
+ * This must be a macro because __get_user() needs to know the types of the
+ * args.
+ *
+ * The macro switches to KERNEL_DS around the access and restores the
+ * caller's previous segment afterwards, so callers need not set_fs().
*/
#define probe_kernel_address(addr, retval) \
({ \
long ret; \
+ mm_segment_t old_fs = get_fs(); \
\
- inc_preempt_count(); \
- ret = __get_user(retval, addr); \
- dec_preempt_count(); \
+ set_fs(KERNEL_DS); \
+ pagefault_disable(); \
+ ret = __get_user(retval, (__force typeof(retval) __user *)(addr)); \
+ pagefault_enable(); \
+ set_fs(old_fs); \
ret; \
})
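
A sketch of the typical use for probe_kernel_address(): peeking at a possibly-unmapped kernel address, for example from a debugging or oops path, without risking a recursive fault. The function and its output are illustrative:

#include <linux/kernel.h>
#include <linux/uaccess.h>

static void show_first_word(const unsigned long *addr)
{
	unsigned long word;

	if (probe_kernel_address(addr, word))
		printk(KERN_INFO "  <address %p not mapped>\n", addr);
	else
		printk(KERN_INFO "  first word: %08lx\n", word);
}
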
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 4a3ea83c6d1..edef8d50b26 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -147,9 +147,11 @@ struct execute_work {
extern struct workqueue_struct *__create_workqueue(const char *name,
- int singlethread);
-#define create_workqueue(name) __create_workqueue((name), 0)
-#define create_singlethread_workqueue(name) __create_workqueue((name), 1)
+ int singlethread,
+ int freezeable);
+#define create_workqueue(name) __create_workqueue((name), 0, 0)
+#define create_freezeable_workqueue(name) __create_workqueue((name), 0, 1)
+#define create_singlethread_workqueue(name) __create_workqueue((name), 1, 0)
extern void destroy_workqueue(struct workqueue_struct *wq);
@@ -160,6 +162,7 @@ extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
extern void FASTCALL(flush_workqueue(struct workqueue_struct *wq));
extern int FASTCALL(schedule_work(struct work_struct *work));
+extern int FASTCALL(run_scheduled_work(struct work_struct *work));
extern int FASTCALL(schedule_delayed_work(struct delayed_work *work, unsigned long delay));
extern int schedule_delayed_work_on(int cpu, struct delayed_work *work, unsigned long delay);
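
A sketch of the new freezeable variant: work queued here runs on a worker thread that the freezer stops across suspend, so the handler cannot race with the hibernation snapshot. The names and the work item are assumptions of the example; destroy_workqueue() would undo the setup on unload:

#include <linux/workqueue.h>
#include <linux/errno.h>

static struct workqueue_struct *my_wq;
static struct work_struct my_work;	/* INIT_WORK()ed with its handler elsewhere */

static int __init my_init(void)
{
	my_wq = create_freezeable_workqueue("mydrv");
	if (!my_wq)
		return -ENOMEM;
	queue_work(my_wq, &my_work);
	return 0;
}
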
diff --git a/include/net/dst.h b/include/net/dst.h
index e156e38e4ac..62b7e7598e9 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -98,7 +98,7 @@ struct dst_ops
int entry_size;
atomic_t entries;
- kmem_cache_t *kmem_cachep;
+ struct kmem_cache *kmem_cachep;
};
#ifdef __KERNEL__
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
index a9eb2eaf094..34cc76e3ddb 100644
--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
@@ -125,7 +125,7 @@ struct inet_hashinfo {
rwlock_t lhash_lock ____cacheline_aligned;
atomic_t lhash_users;
wait_queue_head_t lhash_wait;
- kmem_cache_t *bind_bucket_cachep;
+ struct kmem_cache *bind_bucket_cachep;
};
static inline struct inet_ehash_bucket *inet_ehash_bucket(
@@ -136,10 +136,10 @@ static inline struct inet_ehash_bucket *inet_ehash_bucket(
}
extern struct inet_bind_bucket *
- inet_bind_bucket_create(kmem_cache_t *cachep,
+ inet_bind_bucket_create(struct kmem_cache *cachep,
struct inet_bind_hashbucket *head,
const unsigned short snum);
-extern void inet_bind_bucket_destroy(kmem_cache_t *cachep,
+extern void inet_bind_bucket_destroy(struct kmem_cache *cachep,
struct inet_bind_bucket *tb);
static inline int inet_bhashfn(const __u16 lport, const int bhash_size)
diff --git a/include/net/irda/irlan_filter.h b/include/net/irda/irlan_filter.h
index 492dedaa8ac..1720539ac2c 100644
--- a/include/net/irda/irlan_filter.h
+++ b/include/net/irda/irlan_filter.h
@@ -28,6 +28,8 @@
void irlan_check_command_param(struct irlan_cb *self, char *param,
char *value);
void irlan_filter_request(struct irlan_cb *self, struct sk_buff *skb);
+#ifdef CONFIG_PROC_FS
void irlan_print_filter(struct seq_file *seq, int filter_type);
+#endif
#endif /* IRLAN_FILTER_H */
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index c8aacbd2e33..23967031ddb 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -160,7 +160,7 @@ struct neigh_table
atomic_t entries;
rwlock_t lock;
unsigned long last_rand;
- kmem_cache_t *kmem_cachep;
+ struct kmem_cache *kmem_cachep;
struct neigh_statistics *stats;
struct neighbour **hash_buckets;
unsigned int hash_mask;
diff --git a/include/net/netfilter/nf_conntrack_expect.h b/include/net/netfilter/nf_conntrack_expect.h
index cef3136e22a..41bcc9eb420 100644
--- a/include/net/netfilter/nf_conntrack_expect.h
+++ b/include/net/netfilter/nf_conntrack_expect.h
@@ -7,7 +7,7 @@
#include <net/netfilter/nf_conntrack.h>
extern struct list_head nf_conntrack_expect_list;
-extern kmem_cache_t *nf_conntrack_expect_cachep;
+extern struct kmem_cache *nf_conntrack_expect_cachep;
extern struct file_operations exp_file_ops;
struct nf_conntrack_expect
diff --git a/include/net/request_sock.h b/include/net/request_sock.h
index e37baaf2080..7aed02ce2b6 100644
--- a/include/net/request_sock.h
+++ b/include/net/request_sock.h
@@ -29,7 +29,7 @@ struct proto;
struct request_sock_ops {
int family;
int obj_size;
- kmem_cache_t *slab;
+ struct kmem_cache *slab;
int (*rtx_syn_ack)(struct sock *sk,
struct request_sock *req,
struct dst_entry *dst);
@@ -60,7 +60,7 @@ struct request_sock {
static inline struct request_sock *reqsk_alloc(const struct request_sock_ops *ops)
{
- struct request_sock *req = kmem_cache_alloc(ops->slab, SLAB_ATOMIC);
+ struct request_sock *req = kmem_cache_alloc(ops->slab, GFP_ATOMIC);
if (req != NULL)
req->rsk_ops = ops;
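The reqsk_alloc() hunk above also swaps the legacy SLAB_ATOMIC flag for GFP_ATOMIC, the plain gfp_t that kmem_cache_alloc() actually expects; the equivalent call pattern in caller code looks like this (function name is illustrative):

#include <linux/slab.h>

/*
 * kmem_cache_alloc() takes a gfp_t; GFP_ATOMIC is used when the caller
 * may be in (soft)irq or other atomic context and must not sleep.
 */
static void *alloc_from_cache_atomic(struct kmem_cache *cachep)
{
	return kmem_cache_alloc(cachep, GFP_ATOMIC);
}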
diff --git a/include/net/sock.h b/include/net/sock.h
index fe3a33fad03..03684e702d1 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -571,7 +571,7 @@ struct proto {
int *sysctl_rmem;
int max_header;
- kmem_cache_t *slab;
+ struct kmem_cache *slab;
unsigned int obj_size;
atomic_t *orphan_count;
@@ -746,6 +746,25 @@ static inline int sk_stream_wmem_schedule(struct sock *sk, int size)
*/
#define sock_owned_by_user(sk) ((sk)->sk_lock.owner)
+/*
+ * Macro so as to not evaluate some arguments when
+ * lockdep is not enabled.
+ *
+ * Mark both the sk_lock and the sk_lock.slock as a
+ * per-address-family lock class.
+ */
+#define sock_lock_init_class_and_name(sk, sname, skey, name, key) \
+do { \
+ sk->sk_lock.owner = NULL; \
+ init_waitqueue_head(&sk->sk_lock.wq); \
+ spin_lock_init(&(sk)->sk_lock.slock); \
+ debug_check_no_locks_freed((void *)&(sk)->sk_lock, \
+ sizeof((sk)->sk_lock)); \
+ lockdep_set_class_and_name(&(sk)->sk_lock.slock, \
+ (skey), (sname)); \
+ lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0); \
+} while (0)
+
extern void FASTCALL(lock_sock_nested(struct sock *sk, int subclass));
static inline void lock_sock(struct sock *sk)
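A sketch of how a protocol might use the new macro to give its socket locks a distinct lockdep class; the AF_MYPROTO strings and key names here are illustrative placeholders, not taken from any in-tree user:

#include <net/sock.h>

/* one lock class per address family avoids lockdep false positives
 * when sockets of different families nest their locks */
static struct lock_class_key my_proto_slock_key;
static struct lock_class_key my_proto_sk_lock_key;

static void my_proto_sock_lock_init(struct sock *sk)
{
	sock_lock_init_class_and_name(sk,
			"slock-AF_MYPROTO", &my_proto_slock_key,
			"sk_lock-AF_MYPROTO", &my_proto_sk_lock_key);
}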
diff --git a/include/net/timewait_sock.h b/include/net/timewait_sock.h
index d7a306ea560..1e1ee3253fd 100644
--- a/include/net/timewait_sock.h
+++ b/include/net/timewait_sock.h
@@ -15,7 +15,7 @@
#include <net/sock.h>
struct timewait_sock_ops {
- kmem_cache_t *twsk_slab;
+ struct kmem_cache *twsk_slab;
unsigned int twsk_obj_size;
int (*twsk_unique)(struct sock *sk,
struct sock *sktw, void *twp);
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 15ec19dcf9c..e4765413cf8 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -392,6 +392,20 @@ extern int xfrm_unregister_km(struct xfrm_mgr *km);
extern unsigned int xfrm_policy_count[XFRM_POLICY_MAX*2];
+/* Audit Information */
+struct xfrm_audit
+{
+ uid_t loginuid;
+ u32 secid;
+};
+
+#ifdef CONFIG_AUDITSYSCALL
+extern void xfrm_audit_log(uid_t auid, u32 secid, int type, int result,
+ struct xfrm_policy *xp, struct xfrm_state *x);
+#else
+#define xfrm_audit_log(a,s,t,r,p,x) do { ; } while (0)
+#endif /* CONFIG_AUDITSYSCALL */
+
static inline void xfrm_pol_hold(struct xfrm_policy *policy)
{
if (likely(policy != NULL))
@@ -906,7 +920,7 @@ static inline int xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **s
#endif
extern struct xfrm_state *xfrm_find_acq_byseq(u32 seq);
extern int xfrm_state_delete(struct xfrm_state *x);
-extern void xfrm_state_flush(u8 proto);
+extern void xfrm_state_flush(u8 proto, struct xfrm_audit *audit_info);
extern int xfrm_replay_check(struct xfrm_state *x, __be32 seq);
extern void xfrm_replay_advance(struct xfrm_state *x, __be32 seq);
extern void xfrm_replay_notify(struct xfrm_state *x, int event);
@@ -959,13 +973,13 @@ struct xfrm_policy *xfrm_policy_bysel_ctx(u8 type, int dir,
struct xfrm_selector *sel,
struct xfrm_sec_ctx *ctx, int delete);
struct xfrm_policy *xfrm_policy_byid(u8, int dir, u32 id, int delete);
-void xfrm_policy_flush(u8 type);
+void xfrm_policy_flush(u8 type, struct xfrm_audit *audit_info);
u32 xfrm_get_acqseq(void);
void xfrm_alloc_spi(struct xfrm_state *x, __be32 minspi, __be32 maxspi);
-struct xfrm_state * xfrm_find_acq(u8 mode, u32 reqid, u8 proto,
- xfrm_address_t *daddr, xfrm_address_t *saddr,
+struct xfrm_state * xfrm_find_acq(u8 mode, u32 reqid, u8 proto,
+ xfrm_address_t *daddr, xfrm_address_t *saddr,
int create, unsigned short family);
-extern void xfrm_policy_flush(u8 type);
+extern void xfrm_policy_flush(u8 type, struct xfrm_audit *audit_info);
extern int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol);
extern int xfrm_bundle_ok(struct xfrm_policy *pol, struct xfrm_dst *xdst,
struct flowi *fl, int family, int strict);
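With the audit hook above, the flush operations now carry the requester's audit information; a sketch of how a caller might fill in and pass struct xfrm_audit, grounded only in the declarations shown in this hunk (the wrapper function and parameter names are illustrative):

#include <net/xfrm.h>

/* Flush both SAs and policies on behalf of an administrative request,
 * attributing the operation to the requesting user for the audit trail. */
static void flush_ipsec_state_and_policy(u8 proto, u8 type,
					 uid_t loginuid, u32 secid)
{
	struct xfrm_audit audit_info = {
		.loginuid = loginuid,
		.secid	  = secid,
	};

	xfrm_state_flush(proto, &audit_info);
	xfrm_policy_flush(type, &audit_info);
}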
diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h
index 9233ed5de66..0c775fceb67 100644
--- a/include/scsi/libsas.h
+++ b/include/scsi/libsas.h
@@ -557,7 +557,7 @@ struct sas_task {
static inline struct sas_task *sas_alloc_task(gfp_t flags)
{
- extern kmem_cache_t *sas_task_cache;
+ extern struct kmem_cache *sas_task_cache;
struct sas_task *task = kmem_cache_alloc(sas_task_cache, flags);
if (task) {
@@ -575,7 +575,7 @@ static inline struct sas_task *sas_alloc_task(gfp_t flags)
static inline void sas_free_task(struct sas_task *task)
{
if (task) {
- extern kmem_cache_t *sas_task_cache;
+ extern struct kmem_cache *sas_task_cache;
BUG_ON(!list_empty(&task->list));
kmem_cache_free(sas_task_cache, task);
}
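The libsas helpers above now reference the task cache through struct kmem_cache as well; their usage is unchanged, roughly as sketched below (error handling trimmed, and the task->dev assignment is only an illustrative part of task setup):

#include <scsi/libsas.h>

static struct sas_task *prepare_sas_task(struct domain_device *dev, gfp_t gfp)
{
	struct sas_task *task = sas_alloc_task(gfp);

	if (!task)
		return NULL;
	task->dev = dev;	/* caller fills in the rest of the task */
	return task;
}

static void discard_sas_task(struct sas_task *task)
{
	sas_free_task(task);	/* tolerates NULL, frees back to sas_task_cache */
}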