Diffstat (limited to 'include')
-rw-r--r--  include/acpi/acpi_bus.h | 3
-rw-r--r--  include/asm-alpha/atomic.h | 64
-rw-r--r--  include/asm-alpha/kdebug.h | 1
-rw-r--r--  include/asm-alpha/local.h | 126
-rw-r--r--  include/asm-alpha/pgtable.h | 4
-rw-r--r--  include/asm-alpha/system.h | 226
-rw-r--r--  include/asm-arm/atomic.h | 1
-rw-r--r--  include/asm-arm/kdebug.h | 1
-rw-r--r--  include/asm-arm/kexec.h | 2
-rw-r--r--  include/asm-arm/pgtable-nommu.h | 4
-rw-r--r--  include/asm-arm/pgtable.h | 4
-rw-r--r--  include/asm-arm/system.h | 4
-rw-r--r--  include/asm-arm26/atomic.h | 1
-rw-r--r--  include/asm-arm26/kdebug.h | 1
-rw-r--r--  include/asm-arm26/pgtable.h | 4
-rw-r--r--  include/asm-arm26/system.h | 2
-rw-r--r--  include/asm-avr32/kdebug.h | 25
-rw-r--r--  include/asm-avr32/pgtable.h | 4
-rw-r--r--  include/asm-blackfin/system.h | 1
-rw-r--r--  include/asm-cris/kdebug.h | 1
-rw-r--r--  include/asm-frv/atomic.h | 91
-rw-r--r--  include/asm-frv/kdebug.h | 1
-rw-r--r--  include/asm-frv/pgtable.h | 4
-rw-r--r--  include/asm-frv/semaphore.h | 14
-rw-r--r--  include/asm-frv/system.h | 70
-rw-r--r--  include/asm-generic/atomic.h | 140
-rw-r--r--  include/asm-generic/kdebug.h | 8
-rw-r--r--  include/asm-generic/local.h | 33
-rw-r--r--  include/asm-h8300/kdebug.h | 1
-rw-r--r--  include/asm-h8300/pgtable.h | 4
-rw-r--r--  include/asm-h8300/system.h | 1
-rw-r--r--  include/asm-i386/atomic.h | 45
-rw-r--r--  include/asm-i386/cmpxchg.h | 293
-rw-r--r--  include/asm-i386/elf.h | 2
-rw-r--r--  include/asm-i386/ioctls.h | 4
-rw-r--r--  include/asm-i386/kdebug.h | 25
-rw-r--r--  include/asm-i386/kexec.h | 2
-rw-r--r--  include/asm-i386/local.h | 205
-rw-r--r--  include/asm-i386/pgtable.h | 6
-rw-r--r--  include/asm-i386/serial.h | 16
-rw-r--r--  include/asm-i386/system.h | 234
-rw-r--r--  include/asm-i386/termbits.h | 14
-rw-r--r--  include/asm-i386/termios.h | 6
-rw-r--r--  include/asm-i386/unistd.h | 3
-rw-r--r--  include/asm-ia64/atomic.h | 53
-rw-r--r--  include/asm-ia64/kdebug.h | 27
-rw-r--r--  include/asm-ia64/kexec.h | 2
-rw-r--r--  include/asm-ia64/local.h | 51
-rw-r--r--  include/asm-ia64/pgtable.h | 4
-rw-r--r--  include/asm-m32r/atomic.h | 23
-rw-r--r--  include/asm-m32r/kdebug.h | 1
-rw-r--r--  include/asm-m32r/pgtable.h | 4
-rw-r--r--  include/asm-m32r/system.h | 2
-rw-r--r--  include/asm-m68k/atomic.h | 31
-rw-r--r--  include/asm-m68k/kdebug.h | 1
-rw-r--r--  include/asm-m68k/pgtable.h | 4
-rw-r--r--  include/asm-m68k/system.h | 1
-rw-r--r--  include/asm-m68knommu/atomic.h | 25
-rw-r--r--  include/asm-m68knommu/kdebug.h | 1
-rw-r--r--  include/asm-m68knommu/pgtable.h | 4
-rw-r--r--  include/asm-m68knommu/system.h | 1
-rw-r--r--  include/asm-mips/atomic.h | 57
-rw-r--r--  include/asm-mips/kdebug.h | 1
-rw-r--r--  include/asm-mips/kexec.h | 2
-rw-r--r--  include/asm-mips/local.h | 304
-rw-r--r--  include/asm-mips/mach-au1x00/au1550_spi.h | 16
-rw-r--r--  include/asm-mips/pgtable.h | 4
-rw-r--r--  include/asm-mips/system.h | 126
-rw-r--r--  include/asm-parisc/atomic.h | 56
-rw-r--r--  include/asm-parisc/kdebug.h | 1
-rw-r--r--  include/asm-parisc/local.h | 41
-rw-r--r--  include/asm-parisc/pgtable.h | 4
-rw-r--r--  include/asm-powerpc/atomic.h | 7
-rw-r--r--  include/asm-powerpc/bitops.h | 1
-rw-r--r--  include/asm-powerpc/iommu.h | 14
-rw-r--r--  include/asm-powerpc/kdebug.h | 34
-rw-r--r--  include/asm-powerpc/kexec.h | 2
-rw-r--r--  include/asm-powerpc/kprobes.h | 7
-rw-r--r--  include/asm-powerpc/local.h | 201
-rw-r--r--  include/asm-powerpc/machdep.h | 19
-rw-r--r--  include/asm-powerpc/mmu-44x.h | 78
-rw-r--r--  include/asm-powerpc/mmu.h | 7
-rw-r--r--  include/asm-powerpc/mpc52xx.h | 11
-rw-r--r--  include/asm-powerpc/mpic.h | 20
-rw-r--r--  include/asm-powerpc/of_device.h | 2
-rw-r--r--  include/asm-powerpc/page.h | 10
-rw-r--r--  include/asm-powerpc/page_32.h | 2
-rw-r--r--  include/asm-powerpc/parport.h | 5
-rw-r--r--  include/asm-powerpc/pgalloc-32.h | 41
-rw-r--r--  include/asm-powerpc/pgalloc-64.h | 152
-rw-r--r--  include/asm-powerpc/pgalloc.h | 154
-rw-r--r--  include/asm-powerpc/pgtable-4k.h | 3
-rw-r--r--  include/asm-powerpc/pgtable-64k.h | 5
-rw-r--r--  include/asm-powerpc/pgtable-ppc32.h | 813
-rw-r--r--  include/asm-powerpc/pgtable-ppc64.h | 492
-rw-r--r--  include/asm-powerpc/pgtable.h | 493
-rw-r--r--  include/asm-powerpc/pmac_feature.h | 2
-rw-r--r--  include/asm-powerpc/prom.h | 34
-rw-r--r--  include/asm-powerpc/ps3.h | 33
-rw-r--r--  include/asm-powerpc/suspend.h | 9
-rw-r--r--  include/asm-powerpc/system.h | 130
-rw-r--r--  include/asm-powerpc/tsi108.h | 12
-rw-r--r--  include/asm-powerpc/tsi108_pci.h | 45
-rw-r--r--  include/asm-powerpc/udbg.h | 1
-rw-r--r--  include/asm-ppc/kdebug.h | 1
-rw-r--r--  include/asm-ppc/pgtable.h | 4
-rw-r--r--  include/asm-ppc/system.h | 2
-rw-r--r--  include/asm-s390/kdebug.h | 30
-rw-r--r--  include/asm-s390/kexec.h | 2
-rw-r--r--  include/asm-sh/kdebug.h | 1
-rw-r--r--  include/asm-sh/kexec.h | 2
-rw-r--r--  include/asm-sh/pgtable.h | 4
-rw-r--r--  include/asm-sh/system.h | 10
-rw-r--r--  include/asm-sh64/kdebug.h | 1
-rw-r--r--  include/asm-sh64/pgtable.h | 4
-rw-r--r--  include/asm-sh64/system.h | 2
-rw-r--r--  include/asm-sparc/kdebug.h | 4
-rw-r--r--  include/asm-sparc/system.h | 1
-rw-r--r--  include/asm-sparc64/Kbuild | 1
-rw-r--r--  include/asm-sparc64/atomic.h | 53
-rw-r--r--  include/asm-sparc64/const.h | 19
-rw-r--r--  include/asm-sparc64/kdebug.h | 23
-rw-r--r--  include/asm-sparc64/local.h | 41
-rw-r--r--  include/asm-sparc64/lsu.h | 2
-rw-r--r--  include/asm-sparc64/mmu.h | 2
-rw-r--r--  include/asm-sparc64/page.h | 2
-rw-r--r--  include/asm-sparc64/pgtable.h | 2
-rw-r--r--  include/asm-sparc64/pstate.h | 2
-rw-r--r--  include/asm-sparc64/sfafsr.h | 2
-rw-r--r--  include/asm-sparc64/system.h | 1
-rw-r--r--  include/asm-um/cmpxchg.h | 6
-rw-r--r--  include/asm-um/kdebug.h | 1
-rw-r--r--  include/asm-v850/kdebug.h | 1
-rw-r--r--  include/asm-v850/system.h | 1
-rw-r--r--  include/asm-x86_64/Kbuild | 1
-rw-r--r--  include/asm-x86_64/atomic.h | 65
-rw-r--r--  include/asm-x86_64/cmpxchg.h | 134
-rw-r--r--  include/asm-x86_64/kdebug.h | 25
-rw-r--r--  include/asm-x86_64/kexec.h | 2
-rw-r--r--  include/asm-x86_64/local.h | 196
-rw-r--r--  include/asm-x86_64/page.h | 9
-rw-r--r--  include/asm-x86_64/pgtable.h | 17
-rw-r--r--  include/asm-x86_64/serial.h | 16
-rw-r--r--  include/asm-x86_64/system.h | 96
-rw-r--r--  include/asm-x86_64/termbits.h | 2
-rw-r--r--  include/asm-x86_64/unistd.h | 2
-rw-r--r--  include/asm-xtensa/atomic.h | 23
-rw-r--r--  include/asm-xtensa/kdebug.h | 1
-rw-r--r--  include/asm-xtensa/system.h | 2
-rw-r--r--  include/linux/Kbuild | 7
-rw-r--r--  include/linux/awe_voice.h | 525
-rw-r--r--  include/linux/byteorder/generic.h | 25
-rw-r--r--  include/linux/byteorder/swab.h | 108
-rw-r--r--  include/linux/clockchips.h | 10
-rw-r--r--  include/linux/clocksource.h | 15
-rw-r--r--  include/linux/compat_ioctl.h | 830
-rw-r--r--  include/linux/console.h | 7
-rw-r--r--  include/linux/console_struct.h | 3
-rw-r--r--  include/linux/const.h (renamed from include/asm-x86_64/const.h) | 9
-rw-r--r--  include/linux/cpu.h | 3
-rw-r--r--  include/linux/cyclades.h | 229
-rw-r--r--  include/linux/dcache.h | 6
-rw-r--r--  include/linux/device.h | 4
-rw-r--r--  include/linux/display.h | 61
-rw-r--r--  include/linux/ds1wm.h | 11
-rw-r--r--  include/linux/efi.h | 1
-rw-r--r--  include/linux/ext3_fs.h | 1
-rw-r--r--  include/linux/fb.h | 57
-rw-r--r--  include/linux/font.h | 3
-rw-r--r--  include/linux/fs.h | 15
-rw-r--r--  include/linux/futex.h | 29
-rw-r--r--  include/linux/init.h | 3
-rw-r--r--  include/linux/init_task.h | 2
-rw-r--r--  include/linux/interrupt.h | 38
-rw-r--r--  include/linux/ioctl32.h | 17
-rw-r--r--  include/linux/ipc.h | 11
-rw-r--r--  include/linux/irq.h | 4
-rw-r--r--  include/linux/isdn/capiutil.h | 1
-rw-r--r--  include/linux/isdn_divertif.h | 5
-rw-r--r--  include/linux/kallsyms.h | 13
-rw-r--r--  include/linux/kdebug.h | 20
-rw-r--r--  include/linux/kexec.h | 17
-rw-r--r--  include/linux/kprobes.h | 22
-rw-r--r--  include/linux/list.h | 11
-rw-r--r--  include/linux/loop.h | 2
-rw-r--r--  include/linux/mc146818rtc.h | 7
-rw-r--r--  include/linux/mnt_namespace.h | 5
-rw-r--r--  include/linux/module.h | 30
-rw-r--r--  include/linux/msdos_fs.h | 3
-rw-r--r--  include/linux/nfs_fs_sb.h | 2
-rw-r--r--  include/linux/nsproxy.h | 3
-rw-r--r--  include/linux/pagemap.h | 11
-rw-r--r--  include/linux/parport.h | 8
-rw-r--r--  include/linux/parport_pc.h | 3
-rw-r--r--  include/linux/phantom.h | 42
-rw-r--r--  include/linux/pid_namespace.h | 2
-rw-r--r--  include/linux/pmu.h | 8
-rw-r--r--  include/linux/pnp.h | 3
-rw-r--r--  include/linux/poison.h | 4
-rw-r--r--  include/linux/proc_fs.h | 3
-rw-r--r--  include/linux/quota.h | 4
-rw-r--r--  include/linux/quotaops.h | 3
-rw-r--r--  include/linux/reiserfs_fs_sb.h | 3
-rw-r--r--  include/linux/rtc.h | 39
-rw-r--r--  include/linux/sched.h | 35
-rw-r--r--  include/linux/spi/Kbuild | 1
-rw-r--r--  include/linux/spi/spi.h | 82
-rw-r--r--  include/linux/spi/spidev.h | 124
-rw-r--r--  include/linux/spinlock_types.h | 6
-rw-r--r--  include/linux/stacktrace.h | 6
-rw-r--r--  include/linux/stat.h | 3
-rw-r--r--  include/linux/suspend.h | 2
-rw-r--r--  include/linux/svga.h | 1
-rw-r--r--  include/linux/sysdev.h | 1
-rw-r--r--  include/linux/time.h | 3
-rw-r--r--  include/linux/timer.h | 1
-rw-r--r--  include/linux/tty.h | 2
-rw-r--r--  include/linux/uinput.h | 2
-rw-r--r--  include/linux/utsname.h | 19
-rw-r--r--  include/linux/vmalloc.h | 1
-rw-r--r--  include/linux/vt_kern.h | 3
-rw-r--r--  include/linux/workqueue.h | 6
-rw-r--r--  include/math-emu/extended.h | 396
-rw-r--r--  include/net/sock.h | 9
-rw-r--r--  include/video/mach64.h | 1
-rw-r--r--  include/video/permedia2.h | 4
-rw-r--r--  include/video/tgafb.h | 47
227 files changed, 5235 insertions, 4033 deletions
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 16c3c441256..9cfd5b1a48e 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -303,6 +303,9 @@ struct acpi_device {
#define to_acpi_device(d) container_of(d, struct acpi_device, dev)
#define to_acpi_driver(d) container_of(d, struct acpi_driver, drv)
+/* acpi_device.dev.bus == &acpi_bus_type */
+extern struct bus_type acpi_bus_type;
+
/*
* Events
* ------
diff --git a/include/asm-alpha/atomic.h b/include/asm-alpha/atomic.h
index fc77f741308..f5cb7b878af 100644
--- a/include/asm-alpha/atomic.h
+++ b/include/asm-alpha/atomic.h
@@ -2,6 +2,7 @@
#define _ALPHA_ATOMIC_H
#include <asm/barrier.h>
+#include <asm/system.h>
/*
* Atomic operations that C can't guarantee us. Useful for
@@ -175,19 +176,64 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
return result;
}
-#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+#define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
+#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
+
+#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-#define atomic_add_unless(v, a, u) \
-({ \
- int c, old; \
- c = atomic_read(v); \
- while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
- c = old; \
- c != (u); \
-})
+/**
+ * atomic_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as it was not @u.
+ * Returns non-zero if @v was not @u, and zero otherwise.
+ */
+static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+{
+ int c, old;
+ c = atomic_read(v);
+ for (;;) {
+ if (unlikely(c == (u)))
+ break;
+ old = atomic_cmpxchg((v), c, c + (a));
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+ return c != (u);
+}
+
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+/**
+ * atomic64_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic64_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as it was not @u.
+ * Returns non-zero if @v was not @u, and zero otherwise.
+ */
+static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+{
+ long c, old;
+ c = atomic64_read(v);
+ for (;;) {
+ if (unlikely(c == (u)))
+ break;
+ old = atomic64_cmpxchg((v), c, c + (a));
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+ return c != (u);
+}
+
+#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
+
#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
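
The kernel-doc above only states the return-value contract of atomic_add_unless(); a minimal usage sketch of the common atomic_inc_not_zero() "take a reference only while the object is still live" idiom follows (the refcounted my_object structure and my_object_tryget() helper are hypothetical, not part of this patch):

#include <asm/atomic.h>

struct my_object {			/* hypothetical refcounted object */
	atomic_t refcount;
};

/* Succeeds only while refcount > 0, i.e. before teardown has begun;
 * atomic_inc_not_zero(v) is atomic_add_unless(v, 1, 0). */
static int my_object_tryget(struct my_object *obj)
{
	return atomic_inc_not_zero(&obj->refcount);
}
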
diff --git a/include/asm-alpha/kdebug.h b/include/asm-alpha/kdebug.h
new file mode 100644
index 00000000000..6ece1b03766
--- /dev/null
+++ b/include/asm-alpha/kdebug.h
@@ -0,0 +1 @@
+#include <asm-generic/kdebug.h>
diff --git a/include/asm-alpha/local.h b/include/asm-alpha/local.h
index 90a510fa358..6ad3ea69642 100644
--- a/include/asm-alpha/local.h
+++ b/include/asm-alpha/local.h
@@ -4,37 +4,115 @@
#include <linux/percpu.h>
#include <asm/atomic.h>
-typedef atomic64_t local_t;
+typedef struct
+{
+ atomic_long_t a;
+} local_t;
-#define LOCAL_INIT(i) ATOMIC64_INIT(i)
-#define local_read(v) atomic64_read(v)
-#define local_set(v,i) atomic64_set(v,i)
+#define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
+#define local_read(l) atomic_long_read(&(l)->a)
+#define local_set(l,i) atomic_long_set(&(l)->a, (i))
+#define local_inc(l) atomic_long_inc(&(l)->a)
+#define local_dec(l) atomic_long_dec(&(l)->a)
+#define local_add(i,l) atomic_long_add((i),(&(l)->a))
+#define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
-#define local_inc(v) atomic64_inc(v)
-#define local_dec(v) atomic64_dec(v)
-#define local_add(i, v) atomic64_add(i, v)
-#define local_sub(i, v) atomic64_sub(i, v)
+static __inline__ long local_add_return(long i, local_t * l)
+{
+ long temp, result;
+ __asm__ __volatile__(
+ "1: ldq_l %0,%1\n"
+ " addq %0,%3,%2\n"
+ " addq %0,%3,%0\n"
+ " stq_c %0,%1\n"
+ " beq %0,2f\n"
+ ".subsection 2\n"
+ "2: br 1b\n"
+ ".previous"
+ :"=&r" (temp), "=m" (l->a.counter), "=&r" (result)
+ :"Ir" (i), "m" (l->a.counter) : "memory");
+ return result;
+}
-#define __local_inc(v) ((v)->counter++)
-#define __local_dec(v) ((v)->counter++)
-#define __local_add(i,v) ((v)->counter+=(i))
-#define __local_sub(i,v) ((v)->counter-=(i))
+static __inline__ long local_sub_return(long i, local_t * l)
+{
+ long temp, result;
+ __asm__ __volatile__(
+ "1: ldq_l %0,%1\n"
+ " subq %0,%3,%2\n"
+ " subq %0,%3,%0\n"
+ " stq_c %0,%1\n"
+ " beq %0,2f\n"
+ ".subsection 2\n"
+ "2: br 1b\n"
+ ".previous"
+ :"=&r" (temp), "=m" (l->a.counter), "=&r" (result)
+ :"Ir" (i), "m" (l->a.counter) : "memory");
+ return result;
+}
+
+#define local_cmpxchg(l, o, n) \
+ (cmpxchg_local(&((l)->a.counter), (o), (n)))
+#define local_xchg(l, n) (xchg_local(&((l)->a.counter), (n)))
+
+/**
+ * local_add_unless - add unless the number is a given value
+ * @l: pointer of type local_t
+ * @a: the amount to add to l...
+ * @u: ...unless l is equal to u.
+ *
+ * Atomically adds @a to @l, so long as it was not @u.
+ * Returns non-zero if @l was not @u, and zero otherwise.
+ */
+#define local_add_unless(l, a, u) \
+({ \
+ long c, old; \
+ c = local_read(l); \
+ for (;;) { \
+ if (unlikely(c == (u))) \
+ break; \
+ old = local_cmpxchg((l), c, c + (a)); \
+ if (likely(old == c)) \
+ break; \
+ c = old; \
+ } \
+ c != (u); \
+})
+#define local_inc_not_zero(l) local_add_unless((l), 1, 0)
+
+#define local_add_negative(a, l) (local_add_return((a), (l)) < 0)
+
+#define local_dec_return(l) local_sub_return(1,(l))
+
+#define local_inc_return(l) local_add_return(1,(l))
+
+#define local_sub_and_test(i,l) (local_sub_return((i), (l)) == 0)
+
+#define local_inc_and_test(l) (local_add_return(1, (l)) == 0)
+
+#define local_dec_and_test(l) (local_sub_return(1, (l)) == 0)
+
+/* Verify if faster than atomic ops */
+#define __local_inc(l) ((l)->a.counter++)
+#define __local_dec(l)	((l)->a.counter--)
+#define __local_add(i,l) ((l)->a.counter+=(i))
+#define __local_sub(i,l) ((l)->a.counter-=(i))
/* Use these for per-cpu local_t variables: on some archs they are
* much more efficient than these naive implementations. Note they take
* a variable, not an address.
*/
-#define cpu_local_read(v) local_read(&__get_cpu_var(v))
-#define cpu_local_set(v, i) local_set(&__get_cpu_var(v), (i))
-
-#define cpu_local_inc(v) local_inc(&__get_cpu_var(v))
-#define cpu_local_dec(v) local_dec(&__get_cpu_var(v))
-#define cpu_local_add(i, v) local_add((i), &__get_cpu_var(v))
-#define cpu_local_sub(i, v) local_sub((i), &__get_cpu_var(v))
-
-#define __cpu_local_inc(v) __local_inc(&__get_cpu_var(v))
-#define __cpu_local_dec(v) __local_dec(&__get_cpu_var(v))
-#define __cpu_local_add(i, v) __local_add((i), &__get_cpu_var(v))
-#define __cpu_local_sub(i, v) __local_sub((i), &__get_cpu_var(v))
+#define cpu_local_read(l) local_read(&__get_cpu_var(l))
+#define cpu_local_set(l, i) local_set(&__get_cpu_var(l), (i))
+
+#define cpu_local_inc(l) local_inc(&__get_cpu_var(l))
+#define cpu_local_dec(l) local_dec(&__get_cpu_var(l))
+#define cpu_local_add(i, l) local_add((i), &__get_cpu_var(l))
+#define cpu_local_sub(i, l) local_sub((i), &__get_cpu_var(l))
+
+#define __cpu_local_inc(l) __local_inc(&__get_cpu_var(l))
+#define __cpu_local_dec(l) __local_dec(&__get_cpu_var(l))
+#define __cpu_local_add(i, l) __local_add((i), &__get_cpu_var(l))
+#define __cpu_local_sub(i, l) __local_sub((i), &__get_cpu_var(l))
#endif /* _ALPHA_LOCAL_H */
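
As a rough illustration of the reworked local_t interface (the per-CPU packet counter below is hypothetical, not taken from this patch), note that the cpu_local_* helpers take the per-CPU variable itself rather than its address:

#include <linux/percpu.h>
#include <asm/local.h>

static DEFINE_PER_CPU(local_t, pkt_count) = LOCAL_INIT(0);	/* hypothetical */

static void count_packet(void)
{
	/* safe against interrupts on this CPU, cheaper than a global atomic_inc() */
	cpu_local_inc(pkt_count);
}

static long packets_on(int cpu)
{
	return local_read(&per_cpu(pkt_count, cpu));
}
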
diff --git a/include/asm-alpha/pgtable.h b/include/asm-alpha/pgtable.h
index 49ac9bee7ce..616d20662ff 100644
--- a/include/asm-alpha/pgtable.h
+++ b/include/asm-alpha/pgtable.h
@@ -345,10 +345,6 @@ extern inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
#define io_remap_pfn_range(vma, start, pfn, size, prot) \
remap_pfn_range(vma, start, pfn, size, prot)
-#define MK_IOSPACE_PFN(space, pfn) (pfn)
-#define GET_IOSPACE(pfn) 0
-#define GET_PFN(pfn) (pfn)
-
#define pte_ERROR(e) \
printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
diff --git a/include/asm-alpha/system.h b/include/asm-alpha/system.h
index 03e9c0e5ed7..cf1021a97b2 100644
--- a/include/asm-alpha/system.h
+++ b/include/asm-alpha/system.h
@@ -443,8 +443,110 @@ extern void __xchg_called_with_bad_pointer(void);
(__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
})
-#define tas(ptr) (xchg((ptr),1))
+static inline unsigned long
+__xchg_u8_local(volatile char *m, unsigned long val)
+{
+ unsigned long ret, tmp, addr64;
+
+ __asm__ __volatile__(
+ " andnot %4,7,%3\n"
+ " insbl %1,%4,%1\n"
+ "1: ldq_l %2,0(%3)\n"
+ " extbl %2,%4,%0\n"
+ " mskbl %2,%4,%2\n"
+ " or %1,%2,%2\n"
+ " stq_c %2,0(%3)\n"
+ " beq %2,2f\n"
+ ".subsection 2\n"
+ "2: br 1b\n"
+ ".previous"
+ : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64)
+ : "r" ((long)m), "1" (val) : "memory");
+ return ret;
+}
+
+static inline unsigned long
+__xchg_u16_local(volatile short *m, unsigned long val)
+{
+ unsigned long ret, tmp, addr64;
+
+ __asm__ __volatile__(
+ " andnot %4,7,%3\n"
+ " inswl %1,%4,%1\n"
+ "1: ldq_l %2,0(%3)\n"
+ " extwl %2,%4,%0\n"
+ " mskwl %2,%4,%2\n"
+ " or %1,%2,%2\n"
+ " stq_c %2,0(%3)\n"
+ " beq %2,2f\n"
+ ".subsection 2\n"
+ "2: br 1b\n"
+ ".previous"
+ : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64)
+ : "r" ((long)m), "1" (val) : "memory");
+
+ return ret;
+}
+
+static inline unsigned long
+__xchg_u32_local(volatile int *m, unsigned long val)
+{
+ unsigned long dummy;
+
+ __asm__ __volatile__(
+ "1: ldl_l %0,%4\n"
+ " bis $31,%3,%1\n"
+ " stl_c %1,%2\n"
+ " beq %1,2f\n"
+ ".subsection 2\n"
+ "2: br 1b\n"
+ ".previous"
+ : "=&r" (val), "=&r" (dummy), "=m" (*m)
+ : "rI" (val), "m" (*m) : "memory");
+
+ return val;
+}
+
+static inline unsigned long
+__xchg_u64_local(volatile long *m, unsigned long val)
+{
+ unsigned long dummy;
+
+ __asm__ __volatile__(
+ "1: ldq_l %0,%4\n"
+ " bis $31,%3,%1\n"
+ " stq_c %1,%2\n"
+ " beq %1,2f\n"
+ ".subsection 2\n"
+ "2: br 1b\n"
+ ".previous"
+ : "=&r" (val), "=&r" (dummy), "=m" (*m)
+ : "rI" (val), "m" (*m) : "memory");
+
+ return val;
+}
+
+#define __xchg_local(ptr, x, size) \
+({ \
+ unsigned long __xchg__res; \
+ volatile void *__xchg__ptr = (ptr); \
+ switch (size) { \
+ case 1: __xchg__res = __xchg_u8_local(__xchg__ptr, x); break; \
+ case 2: __xchg__res = __xchg_u16_local(__xchg__ptr, x); break; \
+ case 4: __xchg__res = __xchg_u32_local(__xchg__ptr, x); break; \
+ case 8: __xchg__res = __xchg_u64_local(__xchg__ptr, x); break; \
+ default: __xchg_called_with_bad_pointer(); __xchg__res = x; \
+ } \
+ __xchg__res; \
+})
+
+#define xchg_local(ptr,x) \
+ ({ \
+ __typeof__(*(ptr)) _x_ = (x); \
+ (__typeof__(*(ptr))) __xchg_local((ptr), (unsigned long)_x_, \
+ sizeof(*(ptr))); \
+ })
/*
* Atomic compare and exchange. Compare OLD with MEM, if identical,
@@ -596,6 +698,128 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
(unsigned long)_n_, sizeof(*(ptr))); \
})
+static inline unsigned long
+__cmpxchg_u8_local(volatile char *m, long old, long new)
+{
+ unsigned long prev, tmp, cmp, addr64;
+
+ __asm__ __volatile__(
+ " andnot %5,7,%4\n"
+ " insbl %1,%5,%1\n"
+ "1: ldq_l %2,0(%4)\n"
+ " extbl %2,%5,%0\n"
+ " cmpeq %0,%6,%3\n"
+ " beq %3,2f\n"
+ " mskbl %2,%5,%2\n"
+ " or %1,%2,%2\n"
+ " stq_c %2,0(%4)\n"
+ " beq %2,3f\n"
+ "2:\n"
+ ".subsection 2\n"
+ "3: br 1b\n"
+ ".previous"
+ : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64)
+ : "r" ((long)m), "Ir" (old), "1" (new) : "memory");
+
+ return prev;
+}
+
+static inline unsigned long
+__cmpxchg_u16_local(volatile short *m, long old, long new)
+{
+ unsigned long prev, tmp, cmp, addr64;
+
+ __asm__ __volatile__(
+ " andnot %5,7,%4\n"
+ " inswl %1,%5,%1\n"
+ "1: ldq_l %2,0(%4)\n"
+ " extwl %2,%5,%0\n"
+ " cmpeq %0,%6,%3\n"
+ " beq %3,2f\n"
+ " mskwl %2,%5,%2\n"
+ " or %1,%2,%2\n"
+ " stq_c %2,0(%4)\n"
+ " beq %2,3f\n"
+ "2:\n"
+ ".subsection 2\n"
+ "3: br 1b\n"
+ ".previous"
+ : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64)
+ : "r" ((long)m), "Ir" (old), "1" (new) : "memory");
+
+ return prev;
+}
+
+static inline unsigned long
+__cmpxchg_u32_local(volatile int *m, int old, int new)
+{
+ unsigned long prev, cmp;
+
+ __asm__ __volatile__(
+ "1: ldl_l %0,%5\n"
+ " cmpeq %0,%3,%1\n"
+ " beq %1,2f\n"
+ " mov %4,%1\n"
+ " stl_c %1,%2\n"
+ " beq %1,3f\n"
+ "2:\n"
+ ".subsection 2\n"
+ "3: br 1b\n"
+ ".previous"
+ : "=&r"(prev), "=&r"(cmp), "=m"(*m)
+ : "r"((long) old), "r"(new), "m"(*m) : "memory");
+
+ return prev;
+}
+
+static inline unsigned long
+__cmpxchg_u64_local(volatile long *m, unsigned long old, unsigned long new)
+{
+ unsigned long prev, cmp;
+
+ __asm__ __volatile__(
+ "1: ldq_l %0,%5\n"
+ " cmpeq %0,%3,%1\n"
+ " beq %1,2f\n"
+ " mov %4,%1\n"
+ " stq_c %1,%2\n"
+ " beq %1,3f\n"
+ "2:\n"
+ ".subsection 2\n"
+ "3: br 1b\n"
+ ".previous"
+ : "=&r"(prev), "=&r"(cmp), "=m"(*m)
+ : "r"((long) old), "r"(new), "m"(*m) : "memory");
+
+ return prev;
+}
+
+static __always_inline unsigned long
+__cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
+ int size)
+{
+ switch (size) {
+ case 1:
+ return __cmpxchg_u8_local(ptr, old, new);
+ case 2:
+ return __cmpxchg_u16_local(ptr, old, new);
+ case 4:
+ return __cmpxchg_u32_local(ptr, old, new);
+ case 8:
+ return __cmpxchg_u64_local(ptr, old, new);
+ }
+ __cmpxchg_called_with_bad_pointer();
+ return old;
+}
+
+#define cmpxchg_local(ptr,o,n) \
+ ({ \
+ __typeof__(*(ptr)) _o_ = (o); \
+ __typeof__(*(ptr)) _n_ = (n); \
+ (__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_, \
+ (unsigned long)_n_, sizeof(*(ptr))); \
+ })
+
#endif /* __ASSEMBLY__ */
#define arch_align_stack(x) (x)
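
cmpxchg_local() and xchg_local() skip the memory barriers implied by the SMP-safe versions, so they are only appropriate for data that other CPUs never modify; a hypothetical sketch (not from this patch) of the usual compare-and-swap retry loop:

/* Counter touched from process and interrupt context on one CPU only:
 * cmpxchg_local() is atomic on that CPU but avoids the barrier cost of
 * the SMP-safe cmpxchg(). */
static unsigned long bump_generation(unsigned long *gen)
{
	unsigned long old;

	do {
		old = *gen;
	} while (cmpxchg_local(gen, old, old + 1) != old);

	return old + 1;
}
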
diff --git a/include/asm-arm/atomic.h b/include/asm-arm/atomic.h
index f266c279512..3b59f94b5a3 100644
--- a/include/asm-arm/atomic.h
+++ b/include/asm-arm/atomic.h
@@ -12,6 +12,7 @@
#define __ASM_ARM_ATOMIC_H
#include <linux/compiler.h>
+#include <asm/system.h>
typedef struct { volatile int counter; } atomic_t;
diff --git a/include/asm-arm/kdebug.h b/include/asm-arm/kdebug.h
new file mode 100644
index 00000000000..6ece1b03766
--- /dev/null
+++ b/include/asm-arm/kdebug.h
@@ -0,0 +1 @@
+#include <asm-generic/kdebug.h>
diff --git a/include/asm-arm/kexec.h b/include/asm-arm/kexec.h
index 8c1c6162a80..b5b030ef633 100644
--- a/include/asm-arm/kexec.h
+++ b/include/asm-arm/kexec.h
@@ -16,8 +16,6 @@
#ifndef __ASSEMBLY__
-#define MAX_NOTE_BYTES 1024
-
struct kimage;
/* Provide a dummy definition to avoid build failures. */
static inline void crash_setup_regs(struct pt_regs *newregs,
diff --git a/include/asm-arm/pgtable-nommu.h b/include/asm-arm/pgtable-nommu.h
index 7b1c9acdf79..0c8be19fd66 100644
--- a/include/asm-arm/pgtable-nommu.h
+++ b/include/asm-arm/pgtable-nommu.h
@@ -83,10 +83,6 @@ extern int is_in_rom(unsigned long);
#define io_remap_page_range remap_page_range
#define io_remap_pfn_range remap_pfn_range
-#define MK_IOSPACE_PFN(space, pfn) (pfn)
-#define GET_IOSPACE(pfn) 0
-#define GET_PFN(pfn) (pfn)
-
/*
* All 32bit addresses are effectively valid for vmalloc...
diff --git a/include/asm-arm/pgtable.h b/include/asm-arm/pgtable.h
index 7b2bafce21a..21dec9f258d 100644
--- a/include/asm-arm/pgtable.h
+++ b/include/asm-arm/pgtable.h
@@ -395,10 +395,6 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
#define io_remap_pfn_range(vma,from,pfn,size,prot) \
remap_pfn_range(vma, from, pfn, size, prot)
-#define MK_IOSPACE_PFN(space, pfn) (pfn)
-#define GET_IOSPACE(pfn) 0
-#define GET_PFN(pfn) (pfn)
-
#define pgtable_cache_init() do { } while (0)
#endif /* !__ASSEMBLY__ */
diff --git a/include/asm-arm/system.h b/include/asm-arm/system.h
index 63b3080bdac..f2da3b6e3a8 100644
--- a/include/asm-arm/system.h
+++ b/include/asm-arm/system.h
@@ -93,7 +93,7 @@ void die(const char *msg, struct pt_regs *regs, int err)
__attribute__((noreturn));
struct siginfo;
-void notify_die(const char *str, struct pt_regs *regs, struct siginfo *info,
+void arm_notify_die(const char *str, struct pt_regs *regs, struct siginfo *info,
unsigned long err, unsigned long trap);
void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
@@ -103,8 +103,6 @@ void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
#define xchg(ptr,x) \
((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
-#define tas(ptr) (xchg((ptr),1))
-
extern asmlinkage void __backtrace(void);
extern asmlinkage void c_backtrace(unsigned long fp, int pmode);
diff --git a/include/asm-arm26/atomic.h b/include/asm-arm26/atomic.h
index 97e944fe1cf..d6dd42374cf 100644
--- a/include/asm-arm26/atomic.h
+++ b/include/asm-arm26/atomic.h
@@ -20,7 +20,6 @@
#ifndef __ASM_ARM_ATOMIC_H
#define __ASM_ARM_ATOMIC_H
-
#ifdef CONFIG_SMP
#error SMP is NOT supported
#endif
diff --git a/include/asm-arm26/kdebug.h b/include/asm-arm26/kdebug.h
new file mode 100644
index 00000000000..6ece1b03766
--- /dev/null
+++ b/include/asm-arm26/kdebug.h
@@ -0,0 +1 @@
+#include <asm-generic/kdebug.h>
diff --git a/include/asm-arm26/pgtable.h b/include/asm-arm26/pgtable.h
index 63a8881fae1..2b20e9f0885 100644
--- a/include/asm-arm26/pgtable.h
+++ b/include/asm-arm26/pgtable.h
@@ -297,10 +297,6 @@ static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
#define io_remap_pfn_range(vma,from,pfn,size,prot) \
remap_pfn_range(vma, from, pfn, size, prot)
-#define MK_IOSPACE_PFN(space, pfn) (pfn)
-#define GET_IOSPACE(pfn) 0
-#define GET_PFN(pfn) (pfn)
-
#endif /* !__ASSEMBLY__ */
#endif /* _ASMARM_PGTABLE_H */
diff --git a/include/asm-arm26/system.h b/include/asm-arm26/system.h
index 00ae32aa1db..4703593b3bb 100644
--- a/include/asm-arm26/system.h
+++ b/include/asm-arm26/system.h
@@ -52,8 +52,6 @@ void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
#define xchg(ptr,x) \
((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
-#define tas(ptr) (xchg((ptr),1))
-
extern asmlinkage void __backtrace(void);
#define set_cr(x) \
diff --git a/include/asm-avr32/kdebug.h b/include/asm-avr32/kdebug.h
index f583b643ffb..de419278fc3 100644
--- a/include/asm-avr32/kdebug.h
+++ b/include/asm-avr32/kdebug.h
@@ -3,19 +3,6 @@
#include <linux/notifier.h>
-struct pt_regs;
-
-struct die_args {
- struct pt_regs *regs;
- int trapnr;
-};
-
-int register_die_notifier(struct notifier_block *nb);
-int unregister_die_notifier(struct notifier_block *nb);
-int register_page_fault_notifier(struct notifier_block *nb);
-int unregister_page_fault_notifier(struct notifier_block *nb);
-extern struct atomic_notifier_head avr32_die_chain;
-
/* Grossly misnamed. */
enum die_val {
DIE_FAULT,
@@ -24,15 +11,7 @@ enum die_val {
DIE_PAGE_FAULT,
};
-static inline int notify_die(enum die_val val, struct pt_regs *regs,
- int trap, int sig)
-{
- struct die_args args = {
- .regs = regs,
- .trapnr = trap,
- };
-
- return atomic_notifier_call_chain(&avr32_die_chain, val, &args);
-}
+int register_page_fault_notifier(struct notifier_block *nb);
+int unregister_page_fault_notifier(struct notifier_block *nb);
#endif /* __ASM_AVR32_KDEBUG_H */
diff --git a/include/asm-avr32/pgtable.h b/include/asm-avr32/pgtable.h
index 6b8ca9db2bd..f6cc2b0f75c 100644
--- a/include/asm-avr32/pgtable.h
+++ b/include/asm-avr32/pgtable.h
@@ -394,10 +394,6 @@ typedef pte_t *pte_addr_t;
#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
remap_pfn_range(vma, vaddr, pfn, size, prot)
-#define MK_IOSPACE_PFN(space, pfn) (pfn)
-#define GET_IOSPACE(pfn) 0
-#define GET_PFN(pfn) (pfn)
-
/* No page table caches to initialize (?) */
#define pgtable_cache_init() do { } while(0)
diff --git a/include/asm-blackfin/system.h b/include/asm-blackfin/system.h
index 758bac7c1e7..b5bf6e7cb5e 100644
--- a/include/asm-blackfin/system.h
+++ b/include/asm-blackfin/system.h
@@ -138,7 +138,6 @@ extern unsigned long irq_flags;
#endif
#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
-#define tas(ptr) ((void)xchg((ptr),1))
struct __xchg_dummy {
unsigned long a[100];
diff --git a/include/asm-cris/kdebug.h b/include/asm-cris/kdebug.h
new file mode 100644
index 00000000000..6ece1b03766
--- /dev/null
+++ b/include/asm-cris/kdebug.h
@@ -0,0 +1 @@
+#include <asm-generic/kdebug.h>
diff --git a/include/asm-frv/atomic.h b/include/asm-frv/atomic.h
index 066386ac238..d425d8d0ad7 100644
--- a/include/asm-frv/atomic.h
+++ b/include/asm-frv/atomic.h
@@ -16,6 +16,7 @@
#include <linux/types.h>
#include <asm/spr-regs.h>
+#include <asm/system.h>
#ifdef CONFIG_SMP
#error not SMP safe
@@ -258,85 +259,23 @@ extern uint32_t __xchg_32(uint32_t i, volatile void *v);
#define tas(ptr) (xchg((ptr), 1))
-/*****************************************************************************/
-/*
- * compare and conditionally exchange value with memory
- * - if (*ptr == test) then orig = *ptr; *ptr = test;
- * - if (*ptr != test) then orig = *ptr;
- */
-#ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
-
-#define cmpxchg(ptr, test, new) \
-({ \
- __typeof__(ptr) __xg_ptr = (ptr); \
- __typeof__(*(ptr)) __xg_orig, __xg_tmp; \
- __typeof__(*(ptr)) __xg_test = (test); \
- __typeof__(*(ptr)) __xg_new = (new); \
- \
- switch (sizeof(__xg_orig)) { \
- case 4: \
- asm volatile( \
- "0: \n" \
- " orcc gr0,gr0,gr0,icc3 \n" \
- " ckeq icc3,cc7 \n" \
- " ld.p %M0,%1 \n" \
- " orcr cc7,cc7,cc3 \n" \
- " sub%I4cc %1,%4,%2,icc0 \n" \
- " bne icc0,#0,1f \n" \
- " cst.p %3,%M0 ,cc3,#1 \n" \
- " corcc gr29,gr29,gr0 ,cc3,#1 \n" \
- " beq icc3,#0,0b \n" \
- "1: \n" \
- : "+U"(*__xg_ptr), "=&r"(__xg_orig), "=&r"(__xg_tmp) \
- : "r"(__xg_new), "NPr"(__xg_test) \
- : "memory", "cc7", "cc3", "icc3", "icc0" \
- ); \
- break; \
- \
- default: \
- __xg_orig = 0; \
- asm volatile("break"); \
- break; \
- } \
- \
- __xg_orig; \
-})
-
-#else
-
-extern uint32_t __cmpxchg_32(uint32_t *v, uint32_t test, uint32_t new);
-
-#define cmpxchg(ptr, test, new) \
-({ \
- __typeof__(ptr) __xg_ptr = (ptr); \
- __typeof__(*(ptr)) __xg_orig; \
- __typeof__(*(ptr)) __xg_test = (test); \
- __typeof__(*(ptr)) __xg_new = (new); \
- \
- switch (sizeof(__xg_orig)) { \
- case 4: __xg_orig = __cmpxchg_32(__xg_ptr, __xg_test, __xg_new); break; \
- default: \
- __xg_orig = 0; \
- asm volatile("break"); \
- break; \
- } \
- \
- __xg_orig; \
-})
-
-#endif
-
#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-#define atomic_add_unless(v, a, u) \
-({ \
- int c, old; \
- c = atomic_read(v); \
- while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
- c = old; \
- c != (u); \
-})
+static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+{
+ int c, old;
+ c = atomic_read(v);
+ for (;;) {
+ if (unlikely(c == (u)))
+ break;
+ old = atomic_cmpxchg((v), c, c + (a));
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+ return c != (u);
+}
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
diff --git a/include/asm-frv/kdebug.h b/include/asm-frv/kdebug.h
new file mode 100644
index 00000000000..6ece1b03766
--- /dev/null
+++ b/include/asm-frv/kdebug.h
@@ -0,0 +1 @@
+#include <asm-generic/kdebug.h>
diff --git a/include/asm-frv/pgtable.h b/include/asm-frv/pgtable.h
index 8a05aa16861..2687c771512 100644
--- a/include/asm-frv/pgtable.h
+++ b/include/asm-frv/pgtable.h
@@ -509,10 +509,6 @@ static inline int pte_file(pte_t pte)
#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
remap_pfn_range(vma, vaddr, pfn, size, prot)
-#define MK_IOSPACE_PFN(space, pfn) (pfn)
-#define GET_IOSPACE(pfn) 0
-#define GET_PFN(pfn) (pfn)
-
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
diff --git a/include/asm-frv/semaphore.h b/include/asm-frv/semaphore.h
index 907c5c3643c..09586528e00 100644
--- a/include/asm-frv/semaphore.h
+++ b/include/asm-frv/semaphore.h
@@ -20,8 +20,6 @@
#include <linux/spinlock.h>
#include <linux/rwsem.h>
-#define SEMAPHORE_DEBUG 0
-
/*
* the semaphore definition
* - if counter is >0 then there are tokens available on the semaphore for down to collect
@@ -32,12 +30,12 @@ struct semaphore {
unsigned counter;
spinlock_t wait_lock;
struct list_head wait_list;
-#if SEMAPHORE_DEBUG
+#ifdef CONFIG_DEBUG_SEMAPHORE
unsigned __magic;
#endif
};
-#if SEMAPHORE_DEBUG
+#ifdef CONFIG_DEBUG_SEMAPHORE
# define __SEM_DEBUG_INIT(name) , (long)&(name).__magic
#else
# define __SEM_DEBUG_INIT(name)
@@ -76,7 +74,7 @@ static inline void down(struct semaphore *sem)
{
unsigned long flags;
-#if SEMAPHORE_DEBUG
+#ifdef CONFIG_DEBUG_SEMAPHORE
CHECK_MAGIC(sem->__magic);
#endif
@@ -95,7 +93,7 @@ static inline int down_interruptible(struct semaphore *sem)
unsigned long flags;
int ret = 0;
-#if SEMAPHORE_DEBUG
+#ifdef CONFIG_DEBUG_SEMAPHORE
CHECK_MAGIC(sem->__magic);
#endif
@@ -119,7 +117,7 @@ static inline int down_trylock(struct semaphore *sem)
unsigned long flags;
int success = 0;
-#if SEMAPHORE_DEBUG
+#ifdef CONFIG_DEBUG_SEMAPHORE
CHECK_MAGIC(sem->__magic);
#endif
@@ -136,7 +134,7 @@ static inline void up(struct semaphore *sem)
{
unsigned long flags;
-#if SEMAPHORE_DEBUG
+#ifdef CONFIG_DEBUG_SEMAPHORE
CHECK_MAGIC(sem->__magic);
#endif
diff --git a/include/asm-frv/system.h b/include/asm-frv/system.h
index 1166899317d..be303b3eef4 100644
--- a/include/asm-frv/system.h
+++ b/include/asm-frv/system.h
@@ -13,7 +13,6 @@
#define _ASM_SYSTEM_H
#include <linux/linkage.h>
-#include <asm/atomic.h>
struct thread_struct;
@@ -197,4 +196,73 @@ extern void free_initmem(void);
#define arch_align_stack(x) (x)
+/*****************************************************************************/
+/*
+ * compare and conditionally exchange value with memory
+ * - if (*ptr == test) then orig = *ptr; *ptr = new;
+ * - if (*ptr != test) then orig = *ptr;
+ */
+#ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
+
+#define cmpxchg(ptr, test, new) \
+({ \
+ __typeof__(ptr) __xg_ptr = (ptr); \
+ __typeof__(*(ptr)) __xg_orig, __xg_tmp; \
+ __typeof__(*(ptr)) __xg_test = (test); \
+ __typeof__(*(ptr)) __xg_new = (new); \
+ \
+ switch (sizeof(__xg_orig)) { \
+ case 4: \
+ asm volatile( \
+ "0: \n" \
+ " orcc gr0,gr0,gr0,icc3 \n" \
+ " ckeq icc3,cc7 \n" \
+ " ld.p %M0,%1 \n" \
+ " orcr cc7,cc7,cc3 \n" \
+ " sub%I4cc %1,%4,%2,icc0 \n" \
+ " bne icc0,#0,1f \n" \
+ " cst.p %3,%M0 ,cc3,#1 \n" \
+ " corcc gr29,gr29,gr0 ,cc3,#1 \n" \
+ " beq icc3,#0,0b \n" \
+ "1: \n" \
+ : "+U"(*__xg_ptr), "=&r"(__xg_orig), "=&r"(__xg_tmp) \
+ : "r"(__xg_new), "NPr"(__xg_test) \
+ : "memory", "cc7", "cc3", "icc3", "icc0" \
+ ); \
+ break; \
+ \
+ default: \
+ __xg_orig = 0; \
+ asm volatile("break"); \
+ break; \
+ } \
+ \
+ __xg_orig; \
+})
+
+#else
+
+extern uint32_t __cmpxchg_32(uint32_t *v, uint32_t test, uint32_t new);
+
+#define cmpxchg(ptr, test, new) \
+({ \
+ __typeof__(ptr) __xg_ptr = (ptr); \
+ __typeof__(*(ptr)) __xg_orig; \
+ __typeof__(*(ptr)) __xg_test = (test); \
+ __typeof__(*(ptr)) __xg_new = (new); \
+ \
+ switch (sizeof(__xg_orig)) { \
+ case 4: __xg_orig = __cmpxchg_32(__xg_ptr, __xg_test, __xg_new); break; \
+ default: \
+ __xg_orig = 0; \
+ asm volatile("break"); \
+ break; \
+ } \
+ \
+ __xg_orig; \
+})
+
+#endif
+
+
#endif /* _ASM_SYSTEM_H */
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
index b7e4a0467cb..85fd0aa27a8 100644
--- a/include/asm-generic/atomic.h
+++ b/include/asm-generic/atomic.h
@@ -66,6 +66,76 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
atomic64_sub(i, v);
}
+static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
+{
+ atomic64_t *v = (atomic64_t *)l;
+
+ return atomic64_sub_and_test(i, v);
+}
+
+static inline int atomic_long_dec_and_test(atomic_long_t *l)
+{
+ atomic64_t *v = (atomic64_t *)l;
+
+ return atomic64_dec_and_test(v);
+}
+
+static inline int atomic_long_inc_and_test(atomic_long_t *l)
+{
+ atomic64_t *v = (atomic64_t *)l;
+
+ return atomic64_inc_and_test(v);
+}
+
+static inline int atomic_long_add_negative(long i, atomic_long_t *l)
+{
+ atomic64_t *v = (atomic64_t *)l;
+
+ return atomic64_add_negative(i, v);
+}
+
+static inline long atomic_long_add_return(long i, atomic_long_t *l)
+{
+ atomic64_t *v = (atomic64_t *)l;
+
+ return (long)atomic64_add_return(i, v);
+}
+
+static inline long atomic_long_sub_return(long i, atomic_long_t *l)
+{
+ atomic64_t *v = (atomic64_t *)l;
+
+ return (long)atomic64_sub_return(i, v);
+}
+
+static inline long atomic_long_inc_return(atomic_long_t *l)
+{
+ atomic64_t *v = (atomic64_t *)l;
+
+ return (long)atomic64_inc_return(v);
+}
+
+static inline long atomic_long_dec_return(atomic_long_t *l)
+{
+ atomic64_t *v = (atomic64_t *)l;
+
+ return (long)atomic64_dec_return(v);
+}
+
+static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
+{
+ atomic64_t *v = (atomic64_t *)l;
+
+ return (long)atomic64_add_unless(v, a, u);
+}
+
+#define atomic_long_inc_not_zero(l) atomic64_inc_not_zero((atomic64_t *)(l))
+
+#define atomic_long_cmpxchg(l, old, new) \
+	(atomic64_cmpxchg((atomic64_t *)(l), (old), (new)))
+#define atomic_long_xchg(l, new) \
+	(atomic64_xchg((atomic64_t *)(l), (new)))
+
#else /* BITS_PER_LONG == 64 */
typedef atomic_t atomic_long_t;
@@ -113,6 +183,76 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
atomic_sub(i, v);
}
+static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
+{
+ atomic_t *v = (atomic_t *)l;
+
+ return atomic_sub_and_test(i, v);
+}
+
+static inline int atomic_long_dec_and_test(atomic_long_t *l)
+{
+ atomic_t *v = (atomic_t *)l;
+
+ return atomic_dec_and_test(v);
+}
+
+static inline int atomic_long_inc_and_test(atomic_long_t *l)
+{
+ atomic_t *v = (atomic_t *)l;
+
+ return atomic_inc_and_test(v);
+}
+
+static inline int atomic_long_add_negative(long i, atomic_long_t *l)
+{
+ atomic_t *v = (atomic_t *)l;
+
+ return atomic_add_negative(i, v);
+}
+
+static inline long atomic_long_add_return(long i, atomic_long_t *l)
+{
+ atomic_t *v = (atomic_t *)l;
+
+ return (long)atomic_add_return(i, v);
+}
+
+static inline long atomic_long_sub_return(long i, atomic_long_t *l)
+{
+ atomic_t *v = (atomic_t *)l;
+
+ return (long)atomic_sub_return(i, v);
+}
+
+static inline long atomic_long_inc_return(atomic_long_t *l)
+{
+ atomic_t *v = (atomic_t *)l;
+
+ return (long)atomic_inc_return(v);
+}
+
+static inline long atomic_long_dec_return(atomic_long_t *l)
+{
+ atomic_t *v = (atomic_t *)l;
+
+ return (long)atomic_dec_return(v);
+}
+
+static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
+{
+ atomic_t *v = (atomic_t *)l;
+
+ return (long)atomic_add_unless(v, a, u);
+}
+
+#define atomic_long_inc_not_zero(l) atomic_inc_not_zero((atomic_t *)(l))
+
+#define atomic_long_cmpxchg(l, old, new) \
+ (atomic_cmpxchg((atomic_t *)(l), (old), (new)))
+#define atomic_long_xchg(l, new) \
+	(atomic_xchg((atomic_t *)(l), (new)))
+
#endif /* BITS_PER_LONG == 64 */
#endif /* _ASM_GENERIC_ATOMIC_H */
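
The point of the atomic_long_* wrappers is that a single piece of code compiles down to atomic64_* operations when BITS_PER_LONG == 64 and to plain atomic_* operations otherwise; a hypothetical counter as illustration (not from this patch; each arch's <asm/atomic.h> pulls in this generic layer):

#include <asm/atomic.h>

static atomic_long_t bytes_copied = ATOMIC_LONG_INIT(0);	/* hypothetical */

static long account_copy(long n)
{
	/* same source, native word size on both 32-bit and 64-bit kernels */
	return atomic_long_add_return(n, &bytes_copied);
}
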
diff --git a/include/asm-generic/kdebug.h b/include/asm-generic/kdebug.h
new file mode 100644
index 00000000000..2b799c90b2d
--- /dev/null
+++ b/include/asm-generic/kdebug.h
@@ -0,0 +1,8 @@
+#ifndef _ASM_GENERIC_KDEBUG_H
+#define _ASM_GENERIC_KDEBUG_H
+
+enum die_val {
+ DIE_UNUSED,
+};
+
+#endif /* _ASM_GENERIC_KDEBUG_H */
diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
index ab469297272..33d7d04e411 100644
--- a/include/asm-generic/local.h
+++ b/include/asm-generic/local.h
@@ -33,6 +33,19 @@ typedef struct
#define local_add(i,l) atomic_long_add((i),(&(l)->a))
#define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
+#define local_sub_and_test(i, l) atomic_long_sub_and_test((i), (&(l)->a))
+#define local_dec_and_test(l) atomic_long_dec_and_test(&(l)->a)
+#define local_inc_and_test(l) atomic_long_inc_and_test(&(l)->a)
+#define local_add_negative(i, l) atomic_long_add_negative((i), (&(l)->a))
+#define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
+#define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
+#define local_inc_return(l) atomic_long_inc_return(&(l)->a)
+
+#define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
+#define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
+#define local_add_unless(l, a, u) atomic_long_add_unless((&(l)->a), (a), (u))
+#define local_inc_not_zero(l) atomic_long_inc_not_zero(&(l)->a)
+
/* Non-atomic variants, ie. preemption disabled and won't be touched
* in interrupt, etc. Some archs can optimize this case well. */
#define __local_inc(l) local_set((l), local_read(l) + 1)
@@ -44,19 +57,19 @@ typedef struct
* much more efficient than these naive implementations. Note they take
* a variable (eg. mystruct.foo), not an address.
*/
-#define cpu_local_read(v) local_read(&__get_cpu_var(v))
-#define cpu_local_set(v, i) local_set(&__get_cpu_var(v), (i))
-#define cpu_local_inc(v) local_inc(&__get_cpu_var(v))
-#define cpu_local_dec(v) local_dec(&__get_cpu_var(v))
-#define cpu_local_add(i, v) local_add((i), &__get_cpu_var(v))
-#define cpu_local_sub(i, v) local_sub((i), &__get_cpu_var(v))
+#define cpu_local_read(l) local_read(&__get_cpu_var(l))
+#define cpu_local_set(l, i) local_set(&__get_cpu_var(l), (i))
+#define cpu_local_inc(l) local_inc(&__get_cpu_var(l))
+#define cpu_local_dec(l) local_dec(&__get_cpu_var(l))
+#define cpu_local_add(i, l) local_add((i), &__get_cpu_var(l))
+#define cpu_local_sub(i, l) local_sub((i), &__get_cpu_var(l))
/* Non-atomic increments, ie. preemption disabled and won't be touched
* in interrupt, etc. Some archs can optimize this case well.
*/
-#define __cpu_local_inc(v) __local_inc(&__get_cpu_var(v))
-#define __cpu_local_dec(v) __local_dec(&__get_cpu_var(v))
-#define __cpu_local_add(i, v) __local_add((i), &__get_cpu_var(v))
-#define __cpu_local_sub(i, v) __local_sub((i), &__get_cpu_var(v))
+#define __cpu_local_inc(l) __local_inc(&__get_cpu_var(l))
+#define __cpu_local_dec(l) __local_dec(&__get_cpu_var(l))
+#define __cpu_local_add(i, l) __local_add((i), &__get_cpu_var(l))
+#define __cpu_local_sub(i, l) __local_sub((i), &__get_cpu_var(l))
#endif /* _ASM_GENERIC_LOCAL_H */
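
With this generic fallback the whole local_t API maps onto atomic_long_t, so an architecture without a hand-tuned local.h still gets the new return-value helpers; a hypothetical per-CPU sequence counter as a sketch (caller assumed to run with preemption disabled):

#include <linux/percpu.h>
#include <asm/local.h>

static DEFINE_PER_CPU(local_t, seq);	/* hypothetical */

static long next_local_seq(void)
{
	/* local_inc_return() is safe against interrupts on the local CPU */
	return local_inc_return(&__get_cpu_var(seq));
}
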
diff --git a/include/asm-h8300/kdebug.h b/include/asm-h8300/kdebug.h
new file mode 100644
index 00000000000..6ece1b03766
--- /dev/null
+++ b/include/asm-h8300/kdebug.h
@@ -0,0 +1 @@
+#include <asm-generic/kdebug.h>
diff --git a/include/asm-h8300/pgtable.h b/include/asm-h8300/pgtable.h
index ddd07f485dd..a09230a08e0 100644
--- a/include/asm-h8300/pgtable.h
+++ b/include/asm-h8300/pgtable.h
@@ -55,10 +55,6 @@ extern int is_in_rom(unsigned long);
#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
remap_pfn_range(vma, vaddr, pfn, size, prot)
-#define MK_IOSPACE_PFN(space, pfn) (pfn)
-#define GET_IOSPACE(pfn) 0
-#define GET_PFN(pfn) (pfn)
-
/*
* All 32bit addresses are effectively valid for vmalloc...
* Sort of meaningless for non-VM targets.
diff --git a/include/asm-h8300/system.h b/include/asm-h8300/system.h
index 5084a9d4292..7807018f850 100644
--- a/include/asm-h8300/system.h
+++ b/include/asm-h8300/system.h
@@ -98,7 +98,6 @@ asmlinkage void resume(void);
#endif
#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
-#define tas(ptr) (xchg((ptr),1))
struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((volatile struct __xchg_dummy *)(x))
diff --git a/include/asm-i386/atomic.h b/include/asm-i386/atomic.h
index 4dd27233136..0baa2f89463 100644
--- a/include/asm-i386/atomic.h
+++ b/include/asm-i386/atomic.h
@@ -3,6 +3,7 @@
#include <linux/compiler.h>
#include <asm/processor.h>
+#include <asm/cmpxchg.h>
/*
* Atomic operations that C can't guarantee us. Useful for
@@ -51,7 +52,7 @@ static __inline__ void atomic_add(int i, atomic_t *v)
}
/**
- * atomic_sub - subtract the atomic variable
+ * atomic_sub - subtract integer from atomic variable
* @i: integer value to subtract
* @v: pointer of type atomic_t
*
@@ -170,7 +171,7 @@ static __inline__ int atomic_add_negative(int i, atomic_t *v)
}
/**
- * atomic_add_return - add and return
+ * atomic_add_return - add integer and return
* @v: pointer of type atomic_t
* @i: integer value to add
*
@@ -202,13 +203,20 @@ no_xadd: /* Legacy 386 processor */
#endif
}
+/**
+ * atomic_sub_return - subtract integer and return
+ * @v: pointer of type atomic_t
+ * @i: integer value to subtract
+ *
+ * Atomically subtracts @i from @v and returns @v - @i
+ */
static __inline__ int atomic_sub_return(int i, atomic_t *v)
{
return atomic_add_return(-i,v);
}
-#define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new)))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
/**
* atomic_add_unless - add unless the number is already a given value
@@ -219,20 +227,21 @@ static __inline__ int atomic_sub_return(int i, atomic_t *v)
* Atomically adds @a to @v, so long as @v was not already @u.
* Returns non-zero if @v was not @u, and zero otherwise.
*/
-#define atomic_add_unless(v, a, u) \
-({ \
- int c, old; \
- c = atomic_read(v); \
- for (;;) { \
- if (unlikely(c == (u))) \
- break; \
- old = atomic_cmpxchg((v), c, c + (a)); \
- if (likely(old == c)) \
- break; \
- c = old; \
- } \
- c != (u); \
-})
+static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+{
+ int c, old;
+ c = atomic_read(v);
+ for (;;) {
+ if (unlikely(c == (u)))
+ break;
+ old = atomic_cmpxchg((v), c, c + (a));
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+ return c != (u);
+}
+
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
#define atomic_inc_return(v) (atomic_add_return(1,v))
diff --git a/include/asm-i386/cmpxchg.h b/include/asm-i386/cmpxchg.h
new file mode 100644
index 00000000000..7adcef0cd53
--- /dev/null
+++ b/include/asm-i386/cmpxchg.h
@@ -0,0 +1,293 @@
+#ifndef __ASM_CMPXCHG_H
+#define __ASM_CMPXCHG_H
+
+#include <linux/bitops.h> /* for LOCK_PREFIX */
+
+#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
+
+struct __xchg_dummy { unsigned long a[100]; };
+#define __xg(x) ((struct __xchg_dummy *)(x))
+
+
+#ifdef CONFIG_X86_CMPXCHG64
+
+/*
+ * The semantics of CMPXCHG8B are a bit strange, which is why
+ * there is a loop and the loading of %%eax and %%edx has to
+ * be inside. This inlines well in most cases, the cached
+ * cost is around ~38 cycles. (in the future we might want
+ * to do an SIMD/3DNOW!/MMX/FPU 64-bit store here, but that
+ * might have an implicit FPU-save as a cost, so it's not
+ * clear which path to go.)
+ *
+ * cmpxchg8b must be used with the lock prefix here to allow
+ * the instruction to be executed atomically, see page 3-102
+ * of the instruction set reference 24319102.pdf. We need
+ * the reader side to see the coherent 64bit value.
+ */
+static inline void __set_64bit (unsigned long long * ptr,
+ unsigned int low, unsigned int high)
+{
+ __asm__ __volatile__ (
+ "\n1:\t"
+ "movl (%0), %%eax\n\t"
+ "movl 4(%0), %%edx\n\t"
+ "lock cmpxchg8b (%0)\n\t"
+ "jnz 1b"
+ : /* no outputs */
+ : "D"(ptr),
+ "b"(low),
+ "c"(high)
+ : "ax","dx","memory");
+}
+
+static inline void __set_64bit_constant (unsigned long long *ptr,
+ unsigned long long value)
+{
+ __set_64bit(ptr,(unsigned int)(value), (unsigned int)((value)>>32ULL));
+}
+#define ll_low(x) *(((unsigned int*)&(x))+0)
+#define ll_high(x) *(((unsigned int*)&(x))+1)
+
+static inline void __set_64bit_var (unsigned long long *ptr,
+ unsigned long long value)
+{
+ __set_64bit(ptr,ll_low(value), ll_high(value));
+}
+
+#define set_64bit(ptr,value) \
+(__builtin_constant_p(value) ? \
+ __set_64bit_constant(ptr, value) : \
+ __set_64bit_var(ptr, value) )
+
+#define _set_64bit(ptr,value) \
+(__builtin_constant_p(value) ? \
+ __set_64bit(ptr, (unsigned int)(value), (unsigned int)((value)>>32ULL) ) : \
+ __set_64bit(ptr, ll_low(value), ll_high(value)) )
+
+#endif
+
+/*
+ * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
+ * Note 2: xchg has side effect, so that attribute volatile is necessary,
+ * but generally the primitive is invalid, *ptr is output argument. --ANK
+ */
+static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
+{
+ switch (size) {
+ case 1:
+ __asm__ __volatile__("xchgb %b0,%1"
+ :"=q" (x)
+ :"m" (*__xg(ptr)), "0" (x)
+ :"memory");
+ break;
+ case 2:
+ __asm__ __volatile__("xchgw %w0,%1"
+ :"=r" (x)
+ :"m" (*__xg(ptr)), "0" (x)
+ :"memory");
+ break;
+ case 4:
+ __asm__ __volatile__("xchgl %0,%1"
+ :"=r" (x)
+ :"m" (*__xg(ptr)), "0" (x)
+ :"memory");
+ break;
+ }
+ return x;
+}
+
+/*
+ * Atomic compare and exchange. Compare OLD with MEM, if identical,
+ * store NEW in MEM. Return the initial value in MEM. Success is
+ * indicated by comparing RETURN with OLD.
+ */
+
+#ifdef CONFIG_X86_CMPXCHG
+#define __HAVE_ARCH_CMPXCHG 1
+#define cmpxchg(ptr,o,n)\
+ ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
+ (unsigned long)(n),sizeof(*(ptr))))
+#define sync_cmpxchg(ptr,o,n)\
+ ((__typeof__(*(ptr)))__sync_cmpxchg((ptr),(unsigned long)(o),\
+ (unsigned long)(n),sizeof(*(ptr))))
+#define cmpxchg_local(ptr,o,n)\
+ ((__typeof__(*(ptr)))__cmpxchg_local((ptr),(unsigned long)(o),\
+ (unsigned long)(n),sizeof(*(ptr))))
+#endif
+
+static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
+ unsigned long new, int size)
+{
+ unsigned long prev;
+ switch (size) {
+ case 1:
+ __asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
+ : "=a"(prev)
+ : "q"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
+ return prev;
+ case 2:
+ __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
+ : "=a"(prev)
+ : "r"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
+ return prev;
+ case 4:
+ __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2"
+ : "=a"(prev)
+ : "r"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
+ return prev;
+ }
+ return old;
+}
+
+/*
+ * Always use locked operations when touching memory shared with a
+ * hypervisor, since the system may be SMP even if the guest kernel
+ * isn't.
+ */
+static inline unsigned long __sync_cmpxchg(volatile void *ptr,
+ unsigned long old,
+ unsigned long new, int size)
+{
+ unsigned long prev;
+ switch (size) {
+ case 1:
+ __asm__ __volatile__("lock; cmpxchgb %b1,%2"
+ : "=a"(prev)
+ : "q"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
+ return prev;
+ case 2:
+ __asm__ __volatile__("lock; cmpxchgw %w1,%2"
+ : "=a"(prev)
+ : "r"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
+ return prev;
+ case 4:
+ __asm__ __volatile__("lock; cmpxchgl %1,%2"
+ : "=a"(prev)
+ : "r"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
+ return prev;
+ }
+ return old;
+}
+
+static inline unsigned long __cmpxchg_local(volatile void *ptr,
+ unsigned long old, unsigned long new, int size)
+{
+ unsigned long prev;
+ switch (size) {
+ case 1:
+ __asm__ __volatile__("cmpxchgb %b1,%2"
+ : "=a"(prev)
+ : "q"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
+ return prev;
+ case 2:
+ __asm__ __volatile__("cmpxchgw %w1,%2"
+ : "=a"(prev)
+ : "r"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
+ return prev;
+ case 4:
+ __asm__ __volatile__("cmpxchgl %1,%2"
+ : "=a"(prev)
+ : "r"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
+ return prev;
+ }
+ return old;
+}
+
+#ifndef CONFIG_X86_CMPXCHG
+/*
+ * Building a kernel capable of running on an 80386. It may be necessary to
+ * simulate the cmpxchg on the 80386 CPU. For that purpose we define
+ * a function for each of the sizes we support.
+ */
+
+extern unsigned long cmpxchg_386_u8(volatile void *, u8, u8);
+extern unsigned long cmpxchg_386_u16(volatile void *, u16, u16);
+extern unsigned long cmpxchg_386_u32(volatile void *, u32, u32);
+
+static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
+ unsigned long new, int size)
+{
+ switch (size) {
+ case 1:
+ return cmpxchg_386_u8(ptr, old, new);
+ case 2:
+ return cmpxchg_386_u16(ptr, old, new);
+ case 4:
+ return cmpxchg_386_u32(ptr, old, new);
+ }
+ return old;
+}
+
+#define cmpxchg(ptr,o,n) \
+({ \
+ __typeof__(*(ptr)) __ret; \
+ if (likely(boot_cpu_data.x86 > 3)) \
+ __ret = __cmpxchg((ptr), (unsigned long)(o), \
+ (unsigned long)(n), sizeof(*(ptr))); \
+ else \
+ __ret = cmpxchg_386((ptr), (unsigned long)(o), \
+ (unsigned long)(n), sizeof(*(ptr))); \
+ __ret; \
+})
+#define cmpxchg_local(ptr,o,n) \
+({ \
+ __typeof__(*(ptr)) __ret; \
+ if (likely(boot_cpu_data.x86 > 3)) \
+ __ret = __cmpxchg_local((ptr), (unsigned long)(o), \
+ (unsigned long)(n), sizeof(*(ptr))); \
+ else \
+ __ret = cmpxchg_386((ptr), (unsigned long)(o), \
+ (unsigned long)(n), sizeof(*(ptr))); \
+ __ret; \
+})
+#endif
+
+#ifdef CONFIG_X86_CMPXCHG64
+
+static inline unsigned long long __cmpxchg64(volatile void *ptr, unsigned long long old,
+ unsigned long long new)
+{
+ unsigned long long prev;
+ __asm__ __volatile__(LOCK_PREFIX "cmpxchg8b %3"
+ : "=A"(prev)
+ : "b"((unsigned long)new),
+ "c"((unsigned long)(new >> 32)),
+ "m"(*__xg(ptr)),
+ "0"(old)
+ : "memory");
+ return prev;
+}
+
+static inline unsigned long long __cmpxchg64_local(volatile void *ptr,
+ unsigned long long old, unsigned long long new)
+{
+ unsigned long long prev;
+ __asm__ __volatile__("cmpxchg8b %3"
+ : "=A"(prev)
+ : "b"((unsigned long)new),
+ "c"((unsigned long)(new >> 32)),
+ "m"(*__xg(ptr)),
+ "0"(old)
+ : "memory");
+ return prev;
+}
+
+#define cmpxchg64(ptr,o,n)\
+ ((__typeof__(*(ptr)))__cmpxchg64((ptr),(unsigned long long)(o),\
+ (unsigned long long)(n)))
+#define cmpxchg64_local(ptr,o,n)\
+ ((__typeof__(*(ptr)))__cmpxchg64_local((ptr),(unsigned long long)(o),\
+ (unsigned long long)(n)))
+#endif
+
+#endif
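
On CONFIG_X86_CMPXCHG64 kernels the new header also exports 64-bit helpers; a hedged sketch of how they combine (the last_stamp variable and helper functions are hypothetical, not from this patch):

/* Publish and advance a 64-bit timestamp from 32-bit code: set_64bit()
 * stores atomically via cmpxchg8b, cmpxchg64() detects racing updaters. */
static unsigned long long last_stamp;

static void publish_stamp(unsigned long long now)
{
	set_64bit(&last_stamp, now);
}

static int advance_stamp(unsigned long long old, unsigned long long new)
{
	/* succeeds only if nobody updated last_stamp since 'old' was read */
	return cmpxchg64(&last_stamp, old, new) == old;
}
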
diff --git a/include/asm-i386/elf.h b/include/asm-i386/elf.h
index d304ab4161f..b32df3a332d 100644
--- a/include/asm-i386/elf.h
+++ b/include/asm-i386/elf.h
@@ -9,8 +9,6 @@
#include <asm/user.h>
#include <asm/auxvec.h>
-#include <linux/utsname.h>
-
#define R_386_NONE 0
#define R_386_32 1
#define R_386_PC32 2
diff --git a/include/asm-i386/ioctls.h b/include/asm-i386/ioctls.h
index f962fadab0f..ef5878762dc 100644
--- a/include/asm-i386/ioctls.h
+++ b/include/asm-i386/ioctls.h
@@ -47,6 +47,10 @@
#define TIOCSBRK 0x5427 /* BSD compatibility */
#define TIOCCBRK 0x5428 /* BSD compatibility */
#define TIOCGSID 0x5429 /* Return the session ID of FD */
+#define TCGETS2 _IOR('T',0x2A, struct termios2)
+#define TCSETS2 _IOW('T',0x2B, struct termios2)
+#define TCSETSW2 _IOW('T',0x2C, struct termios2)
+#define TCSETSF2 _IOW('T',0x2D, struct termios2)
#define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
#define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */
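
TCGETS2/TCSETS2 operate on the new struct termios2 (added by the termbits.h change listed in the diffstat above), whose main use is arbitrary baud rates via BOTHER; a user-space sketch, assuming the usual termios2 layout with c_ispeed/c_ospeed fields:

#include <sys/ioctl.h>
#include <asm/termbits.h>	/* struct termios2, TCGETS2/TCSETS2, BOTHER, CBAUD */

/* Request a non-standard baud rate on an already-open tty fd. */
static int set_custom_baud(int fd, int baud)
{
	struct termios2 tio;

	if (ioctl(fd, TCGETS2, &tio) < 0)
		return -1;
	tio.c_cflag &= ~CBAUD;
	tio.c_cflag |= BOTHER;
	tio.c_ispeed = baud;
	tio.c_ospeed = baud;
	return ioctl(fd, TCSETS2, &tio);
}
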
diff --git a/include/asm-i386/kdebug.h b/include/asm-i386/kdebug.h
index d18cdb9fc9a..05c3117788b 100644
--- a/include/asm-i386/kdebug.h
+++ b/include/asm-i386/kdebug.h
@@ -9,19 +9,8 @@
struct pt_regs;
-struct die_args {
- struct pt_regs *regs;
- const char *str;
- long err;
- int trapnr;
- int signr;
-};
-
-extern int register_die_notifier(struct notifier_block *);
-extern int unregister_die_notifier(struct notifier_block *);
extern int register_page_fault_notifier(struct notifier_block *);
extern int unregister_page_fault_notifier(struct notifier_block *);
-extern struct atomic_notifier_head i386die_chain;
/* Grossly misnamed. */
@@ -38,20 +27,8 @@ enum die_val {
DIE_GPF,
DIE_CALL,
DIE_NMI_IPI,
+ DIE_NMI_POST,
DIE_PAGE_FAULT,
};
-static inline int notify_die(enum die_val val, const char *str,
- struct pt_regs *regs, long err, int trap, int sig)
-{
- struct die_args args = {
- .regs = regs,
- .str = str,
- .err = err,
- .trapnr = trap,
- .signr = sig
- };
- return atomic_notifier_call_chain(&i386die_chain, val, &args);
-}
-
#endif
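
struct die_args, register_die_notifier() and notify_die() now live in the new <linux/kdebug.h> (see the diffstat above), so the arch header only keeps its die_val values; a hypothetical consumer keeps the same shape as before (handler and notifier_block names are made up for illustration):

#include <linux/kernel.h>
#include <linux/kdebug.h>
#include <linux/notifier.h>

static int my_die_handler(struct notifier_block *nb, unsigned long val, void *data)
{
	struct die_args *args = data;

	if (val == DIE_OOPS)
		printk(KERN_INFO "oops: trap %d, err %ld\n", args->trapnr, args->err);
	return NOTIFY_DONE;
}

static struct notifier_block my_die_nb = {
	.notifier_call	= my_die_handler,
};

/* register_die_notifier(&my_die_nb) at init time,
 * unregister_die_notifier(&my_die_nb) on teardown. */
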
diff --git a/include/asm-i386/kexec.h b/include/asm-i386/kexec.h
index bcb5b21de2d..4b9dc9e6b70 100644
--- a/include/asm-i386/kexec.h
+++ b/include/asm-i386/kexec.h
@@ -45,8 +45,6 @@
/* We can also handle crash dumps from 64 bit kernel. */
#define vmcore_elf_check_arch_cross(x) ((x)->e_machine == EM_X86_64)
-#define MAX_NOTE_BYTES 1024
-
/* CPU does not save ss and esp on stack if execution is already
* running in kernel mode at the time of NMI occurrence. This code
* fixes it.
diff --git a/include/asm-i386/local.h b/include/asm-i386/local.h
index 12060e22f7e..e13d3e98823 100644
--- a/include/asm-i386/local.h
+++ b/include/asm-i386/local.h
@@ -2,47 +2,198 @@
#define _ARCH_I386_LOCAL_H
#include <linux/percpu.h>
+#include <asm/system.h>
+#include <asm/atomic.h>
typedef struct
{
- volatile long counter;
+ atomic_long_t a;
} local_t;
-#define LOCAL_INIT(i) { (i) }
+#define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
-#define local_read(v) ((v)->counter)
-#define local_set(v,i) (((v)->counter) = (i))
+#define local_read(l) atomic_long_read(&(l)->a)
+#define local_set(l,i) atomic_long_set(&(l)->a, (i))
-static __inline__ void local_inc(local_t *v)
+static __inline__ void local_inc(local_t *l)
{
__asm__ __volatile__(
"incl %0"
- :"+m" (v->counter));
+ :"+m" (l->a.counter));
}
-static __inline__ void local_dec(local_t *v)
+static __inline__ void local_dec(local_t *l)
{
__asm__ __volatile__(
"decl %0"
- :"+m" (v->counter));
+ :"+m" (l->a.counter));
}
-static __inline__ void local_add(long i, local_t *v)
+static __inline__ void local_add(long i, local_t *l)
{
__asm__ __volatile__(
"addl %1,%0"
- :"+m" (v->counter)
+ :"+m" (l->a.counter)
:"ir" (i));
}
-static __inline__ void local_sub(long i, local_t *v)
+static __inline__ void local_sub(long i, local_t *l)
{
__asm__ __volatile__(
"subl %1,%0"
- :"+m" (v->counter)
+ :"+m" (l->a.counter)
:"ir" (i));
}
+/**
+ * local_sub_and_test - subtract value from variable and test result
+ * @i: integer value to subtract
+ * @l: pointer of type local_t
+ *
+ * Atomically subtracts @i from @l and returns
+ * true if the result is zero, or false for all
+ * other cases.
+ */
+static __inline__ int local_sub_and_test(long i, local_t *l)
+{
+ unsigned char c;
+
+ __asm__ __volatile__(
+ "subl %2,%0; sete %1"
+ :"+m" (l->a.counter), "=qm" (c)
+ :"ir" (i) : "memory");
+ return c;
+}
+
+/**
+ * local_dec_and_test - decrement and test
+ * @l: pointer of type local_t
+ *
+ * Atomically decrements @l by 1 and
+ * returns true if the result is 0, or false for all other
+ * cases.
+ */
+static __inline__ int local_dec_and_test(local_t *l)
+{
+ unsigned char c;
+
+ __asm__ __volatile__(
+ "decl %0; sete %1"
+ :"+m" (l->a.counter), "=qm" (c)
+ : : "memory");
+ return c != 0;
+}
+
+/**
+ * local_inc_and_test - increment and test
+ * @l: pointer of type local_t
+ *
+ * Atomically increments @l by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+static __inline__ int local_inc_and_test(local_t *l)
+{
+ unsigned char c;
+
+ __asm__ __volatile__(
+ "incl %0; sete %1"
+ :"+m" (l->a.counter), "=qm" (c)
+ : : "memory");
+ return c != 0;
+}
+
+/**
+ * local_add_negative - add and test if negative
+ * @l: pointer of type local_t
+ * @i: integer value to add
+ *
+ * Atomically adds @i to @l and returns true
+ * if the result is negative, or false when
+ * result is greater than or equal to zero.
+ */
+static __inline__ int local_add_negative(long i, local_t *l)
+{
+ unsigned char c;
+
+ __asm__ __volatile__(
+ "addl %2,%0; sets %1"
+ :"+m" (l->a.counter), "=qm" (c)
+ :"ir" (i) : "memory");
+ return c;
+}
+
+/**
+ * local_add_return - add and return
+ * @l: pointer of type local_t
+ * @i: integer value to add
+ *
+ * Atomically adds @i to @l and returns @i + @l
+ */
+static __inline__ long local_add_return(long i, local_t *l)
+{
+ long __i;
+#ifdef CONFIG_M386
+ unsigned long flags;
+	if (unlikely(boot_cpu_data.x86 == 3))
+ goto no_xadd;
+#endif
+ /* Modern 486+ processor */
+ __i = i;
+ __asm__ __volatile__(
+ "xaddl %0, %1;"
+ :"+r" (i), "+m" (l->a.counter)
+ : : "memory");
+ return i + __i;
+
+#ifdef CONFIG_M386
+no_xadd: /* Legacy 386 processor */
+ local_irq_save(flags);
+ __i = local_read(l);
+ local_set(l, i + __i);
+ local_irq_restore(flags);
+ return i + __i;
+#endif
+}
+
+static __inline__ long local_sub_return(long i, local_t *l)
+{
+ return local_add_return(-i,l);
+}
+
+#define local_inc_return(l) (local_add_return(1,l))
+#define local_dec_return(l) (local_sub_return(1,l))
+
+#define local_cmpxchg(l, o, n) \
+ (cmpxchg_local(&((l)->a.counter), (o), (n)))
+/* Always has a lock prefix */
+#define local_xchg(l, n) (xchg(&((l)->a.counter), (n)))
+
+/**
+ * local_add_unless - add unless the number is a given value
+ * @l: pointer of type local_t
+ * @a: the amount to add to l...
+ * @u: ...unless l is equal to u.
+ *
+ * Atomically adds @a to @l, so long as it was not @u.
+ * Returns non-zero if @l was not @u, and zero otherwise.
+ */
+#define local_add_unless(l, a, u) \
+({ \
+ long c, old; \
+ c = local_read(l); \
+ for (;;) { \
+ if (unlikely(c == (u))) \
+ break; \
+ old = local_cmpxchg((l), c, c + (a)); \
+ if (likely(old == c)) \
+ break; \
+ c = old; \
+ } \
+ c != (u); \
+})
+#define local_inc_not_zero(l) local_add_unless((l), 1, 0)
+
/* On x86, these are no better than the atomic variants. */
#define __local_inc(l) local_inc(l)
#define __local_dec(l) local_dec(l)
@@ -56,27 +207,27 @@ static __inline__ void local_sub(long i, local_t *v)
 /* Need to disable preemption for the cpu local counters; otherwise we could
    still access a variable of a previous CPU in a non-atomic way. */
-#define cpu_local_wrap_v(v) \
+#define cpu_local_wrap_v(l) \
({ local_t res__; \
preempt_disable(); \
- res__ = (v); \
+ res__ = (l); \
preempt_enable(); \
res__; })
-#define cpu_local_wrap(v) \
+#define cpu_local_wrap(l) \
({ preempt_disable(); \
- v; \
+ l; \
preempt_enable(); }) \
-#define cpu_local_read(v) cpu_local_wrap_v(local_read(&__get_cpu_var(v)))
-#define cpu_local_set(v, i) cpu_local_wrap(local_set(&__get_cpu_var(v), (i)))
-#define cpu_local_inc(v) cpu_local_wrap(local_inc(&__get_cpu_var(v)))
-#define cpu_local_dec(v) cpu_local_wrap(local_dec(&__get_cpu_var(v)))
-#define cpu_local_add(i, v) cpu_local_wrap(local_add((i), &__get_cpu_var(v)))
-#define cpu_local_sub(i, v) cpu_local_wrap(local_sub((i), &__get_cpu_var(v)))
-
-#define __cpu_local_inc(v) cpu_local_inc(v)
-#define __cpu_local_dec(v) cpu_local_dec(v)
-#define __cpu_local_add(i, v) cpu_local_add((i), (v))
-#define __cpu_local_sub(i, v) cpu_local_sub((i), (v))
+#define cpu_local_read(l) cpu_local_wrap_v(local_read(&__get_cpu_var(l)))
+#define cpu_local_set(l, i) cpu_local_wrap(local_set(&__get_cpu_var(l), (i)))
+#define cpu_local_inc(l) cpu_local_wrap(local_inc(&__get_cpu_var(l)))
+#define cpu_local_dec(l) cpu_local_wrap(local_dec(&__get_cpu_var(l)))
+#define cpu_local_add(i, l) cpu_local_wrap(local_add((i), &__get_cpu_var(l)))
+#define cpu_local_sub(i, l) cpu_local_wrap(local_sub((i), &__get_cpu_var(l)))
+
+#define __cpu_local_inc(l) cpu_local_inc(l)
+#define __cpu_local_dec(l) cpu_local_dec(l)
+#define __cpu_local_add(i, l) cpu_local_add((i), (l))
+#define __cpu_local_sub(i, l) cpu_local_sub((i), (l))
#endif /* _ARCH_I386_LOCAL_H */
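
The intended consumer of the local_t operations above is a fast per-CPU counter that is only ever updated from its owning CPU. A brief sketch under that assumption; the counter test_events and the helpers below are hypothetical, not part of this patch.

#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <asm/local.h>

static DEFINE_PER_CPU(local_t, test_events) = LOCAL_INIT(0);

/* Cheap per-CPU increment: no lock prefix, preemption disabled briefly. */
static void count_event(void)
{
	cpu_local_inc(test_events);
}

/* Approximate total across all CPUs, e.g. for a statistics read-out. */
static unsigned long sum_events(void)
{
	unsigned long sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += local_read(&per_cpu(test_events, cpu));
	return sum;
}
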
diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h
index e16359f81a4..edce9d51a67 100644
--- a/include/asm-i386/pgtable.h
+++ b/include/asm-i386/pgtable.h
@@ -243,8 +243,6 @@ static inline pte_t pte_mkyoung(pte_t pte) { (pte).pte_low |= _PAGE_ACCESSED; re
static inline pte_t pte_mkwrite(pte_t pte) { (pte).pte_low |= _PAGE_RW; return pte; }
static inline pte_t pte_mkhuge(pte_t pte) { (pte).pte_low |= _PAGE_PSE; return pte; }
-extern void vmalloc_sync_all(void);
-
#ifdef CONFIG_X86_PAE
# include <asm/pgtable-3level.h>
#else
@@ -544,10 +542,6 @@ static inline void paravirt_pagetable_setup_done(pgd_t *base)
#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
remap_pfn_range(vma, vaddr, pfn, size, prot)
-#define MK_IOSPACE_PFN(space, pfn) (pfn)
-#define GET_IOSPACE(pfn) 0
-#define GET_PFN(pfn) (pfn)
-
#include <asm-generic/pgtable.h>
#endif /* _I386_PGTABLE_H */
diff --git a/include/asm-i386/serial.h b/include/asm-i386/serial.h
index bd67480ca10..57a4306cdf6 100644
--- a/include/asm-i386/serial.h
+++ b/include/asm-i386/serial.h
@@ -11,19 +11,3 @@
* megabits/second; but this requires the faster clock.
*/
#define BASE_BAUD ( 1843200 / 16 )
-
-/* Standard COM flags (except for COM4, because of the 8514 problem) */
-#ifdef CONFIG_SERIAL_DETECT_IRQ
-#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST | ASYNC_AUTO_IRQ)
-#define STD_COM4_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_AUTO_IRQ)
-#else
-#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST)
-#define STD_COM4_FLAGS ASYNC_BOOT_AUTOCONF
-#endif
-
-#define SERIAL_PORT_DFNS \
- /* UART CLK PORT IRQ FLAGS */ \
- { 0, BASE_BAUD, 0x3F8, 4, STD_COM_FLAGS }, /* ttyS0 */ \
- { 0, BASE_BAUD, 0x2F8, 3, STD_COM_FLAGS }, /* ttyS1 */ \
- { 0, BASE_BAUD, 0x3E8, 4, STD_COM_FLAGS }, /* ttyS2 */ \
- { 0, BASE_BAUD, 0x2E8, 3, STD_COM4_FLAGS }, /* ttyS3 */
diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h
index c3a58c08c49..94ed3686a5f 100644
--- a/include/asm-i386/system.h
+++ b/include/asm-i386/system.h
@@ -4,7 +4,7 @@
#include <linux/kernel.h>
#include <asm/segment.h>
#include <asm/cpufeature.h>
-#include <linux/bitops.h> /* for LOCK_PREFIX */
+#include <asm/cmpxchg.h>
#ifdef __KERNEL__
@@ -195,238 +195,6 @@ static inline unsigned long get_limit(unsigned long segment)
#define nop() __asm__ __volatile__ ("nop")
-#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
-
-#define tas(ptr) (xchg((ptr),1))
-
-struct __xchg_dummy { unsigned long a[100]; };
-#define __xg(x) ((struct __xchg_dummy *)(x))
-
-
-#ifdef CONFIG_X86_CMPXCHG64
-
-/*
- * The semantics of XCHGCMP8B are a bit strange, this is why
- * there is a loop and the loading of %%eax and %%edx has to
- * be inside. This inlines well in most cases, the cached
- * cost is around ~38 cycles. (in the future we might want
- * to do an SIMD/3DNOW!/MMX/FPU 64-bit store here, but that
- * might have an implicit FPU-save as a cost, so it's not
- * clear which path to go.)
- *
- * cmpxchg8b must be used with the lock prefix here to allow
- * the instruction to be executed atomically, see page 3-102
- * of the instruction set reference 24319102.pdf. We need
- * the reader side to see the coherent 64bit value.
- */
-static inline void __set_64bit (unsigned long long * ptr,
- unsigned int low, unsigned int high)
-{
- __asm__ __volatile__ (
- "\n1:\t"
- "movl (%0), %%eax\n\t"
- "movl 4(%0), %%edx\n\t"
- "lock cmpxchg8b (%0)\n\t"
- "jnz 1b"
- : /* no outputs */
- : "D"(ptr),
- "b"(low),
- "c"(high)
- : "ax","dx","memory");
-}
-
-static inline void __set_64bit_constant (unsigned long long *ptr,
- unsigned long long value)
-{
- __set_64bit(ptr,(unsigned int)(value), (unsigned int)((value)>>32ULL));
-}
-#define ll_low(x) *(((unsigned int*)&(x))+0)
-#define ll_high(x) *(((unsigned int*)&(x))+1)
-
-static inline void __set_64bit_var (unsigned long long *ptr,
- unsigned long long value)
-{
- __set_64bit(ptr,ll_low(value), ll_high(value));
-}
-
-#define set_64bit(ptr,value) \
-(__builtin_constant_p(value) ? \
- __set_64bit_constant(ptr, value) : \
- __set_64bit_var(ptr, value) )
-
-#define _set_64bit(ptr,value) \
-(__builtin_constant_p(value) ? \
- __set_64bit(ptr, (unsigned int)(value), (unsigned int)((value)>>32ULL) ) : \
- __set_64bit(ptr, ll_low(value), ll_high(value)) )
-
-#endif
-
-/*
- * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
- * Note 2: xchg has side effect, so that attribute volatile is necessary,
- * but generally the primitive is invalid, *ptr is output argument. --ANK
- */
-static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
-{
- switch (size) {
- case 1:
- __asm__ __volatile__("xchgb %b0,%1"
- :"=q" (x)
- :"m" (*__xg(ptr)), "0" (x)
- :"memory");
- break;
- case 2:
- __asm__ __volatile__("xchgw %w0,%1"
- :"=r" (x)
- :"m" (*__xg(ptr)), "0" (x)
- :"memory");
- break;
- case 4:
- __asm__ __volatile__("xchgl %0,%1"
- :"=r" (x)
- :"m" (*__xg(ptr)), "0" (x)
- :"memory");
- break;
- }
- return x;
-}
-
-/*
- * Atomic compare and exchange. Compare OLD with MEM, if identical,
- * store NEW in MEM. Return the initial value in MEM. Success is
- * indicated by comparing RETURN with OLD.
- */
-
-#ifdef CONFIG_X86_CMPXCHG
-#define __HAVE_ARCH_CMPXCHG 1
-#define cmpxchg(ptr,o,n)\
- ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
- (unsigned long)(n),sizeof(*(ptr))))
-#define sync_cmpxchg(ptr,o,n)\
- ((__typeof__(*(ptr)))__sync_cmpxchg((ptr),(unsigned long)(o),\
- (unsigned long)(n),sizeof(*(ptr))))
-#endif
-
-static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
- unsigned long new, int size)
-{
- unsigned long prev;
- switch (size) {
- case 1:
- __asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
- : "=a"(prev)
- : "q"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
- return prev;
- case 2:
- __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
- : "=a"(prev)
- : "r"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
- return prev;
- case 4:
- __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2"
- : "=a"(prev)
- : "r"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
- return prev;
- }
- return old;
-}
-
-/*
- * Always use locked operations when touching memory shared with a
- * hypervisor, since the system may be SMP even if the guest kernel
- * isn't.
- */
-static inline unsigned long __sync_cmpxchg(volatile void *ptr,
- unsigned long old,
- unsigned long new, int size)
-{
- unsigned long prev;
- switch (size) {
- case 1:
- __asm__ __volatile__("lock; cmpxchgb %b1,%2"
- : "=a"(prev)
- : "q"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
- return prev;
- case 2:
- __asm__ __volatile__("lock; cmpxchgw %w1,%2"
- : "=a"(prev)
- : "r"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
- return prev;
- case 4:
- __asm__ __volatile__("lock; cmpxchgl %1,%2"
- : "=a"(prev)
- : "r"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
- return prev;
- }
- return old;
-}
-
-#ifndef CONFIG_X86_CMPXCHG
-/*
- * Building a kernel capable running on 80386. It may be necessary to
- * simulate the cmpxchg on the 80386 CPU. For that purpose we define
- * a function for each of the sizes we support.
- */
-
-extern unsigned long cmpxchg_386_u8(volatile void *, u8, u8);
-extern unsigned long cmpxchg_386_u16(volatile void *, u16, u16);
-extern unsigned long cmpxchg_386_u32(volatile void *, u32, u32);
-
-static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
- unsigned long new, int size)
-{
- switch (size) {
- case 1:
- return cmpxchg_386_u8(ptr, old, new);
- case 2:
- return cmpxchg_386_u16(ptr, old, new);
- case 4:
- return cmpxchg_386_u32(ptr, old, new);
- }
- return old;
-}
-
-#define cmpxchg(ptr,o,n) \
-({ \
- __typeof__(*(ptr)) __ret; \
- if (likely(boot_cpu_data.x86 > 3)) \
- __ret = __cmpxchg((ptr), (unsigned long)(o), \
- (unsigned long)(n), sizeof(*(ptr))); \
- else \
- __ret = cmpxchg_386((ptr), (unsigned long)(o), \
- (unsigned long)(n), sizeof(*(ptr))); \
- __ret; \
-})
-#endif
-
-#ifdef CONFIG_X86_CMPXCHG64
-
-static inline unsigned long long __cmpxchg64(volatile void *ptr, unsigned long long old,
- unsigned long long new)
-{
- unsigned long long prev;
- __asm__ __volatile__(LOCK_PREFIX "cmpxchg8b %3"
- : "=A"(prev)
- : "b"((unsigned long)new),
- "c"((unsigned long)(new >> 32)),
- "m"(*__xg(ptr)),
- "0"(old)
- : "memory");
- return prev;
-}
-
-#define cmpxchg64(ptr,o,n)\
- ((__typeof__(*(ptr)))__cmpxchg64((ptr),(unsigned long long)(o),\
- (unsigned long long)(n)))
-
-#endif
-
/*
* Force strict CPU ordering.
* And yes, this is required on UP too when we're talking
diff --git a/include/asm-i386/termbits.h b/include/asm-i386/termbits.h
index 2e623769381..a21700352e7 100644
--- a/include/asm-i386/termbits.h
+++ b/include/asm-i386/termbits.h
@@ -17,6 +17,17 @@ struct termios {
cc_t c_cc[NCCS]; /* control characters */
};
+struct termios2 {
+ tcflag_t c_iflag; /* input mode flags */
+ tcflag_t c_oflag; /* output mode flags */
+ tcflag_t c_cflag; /* control mode flags */
+ tcflag_t c_lflag; /* local mode flags */
+ cc_t c_line; /* line discipline */
+ cc_t c_cc[NCCS]; /* control characters */
+ speed_t c_ispeed; /* input speed */
+ speed_t c_ospeed; /* output speed */
+};
+
struct ktermios {
tcflag_t c_iflag; /* input mode flags */
tcflag_t c_oflag; /* output mode flags */
@@ -129,6 +140,7 @@ struct ktermios {
#define HUPCL 0002000
#define CLOCAL 0004000
#define CBAUDEX 0010000
+#define BOTHER 0010000
#define B57600 0010001
#define B115200 0010002
#define B230400 0010003
@@ -148,6 +160,8 @@ struct ktermios {
#define CMSPAR 010000000000 /* mark or space (stick) parity */
#define CRTSCTS 020000000000 /* flow control */
+#define IBSHIFT 16 /* Shift from CBAUD to CIBAUD */
+
/* c_lflag bits */
#define ISIG 0000001
#define ICANON 0000002
diff --git a/include/asm-i386/termios.h b/include/asm-i386/termios.h
index 7c99678a8f8..f520b7c16fa 100644
--- a/include/asm-i386/termios.h
+++ b/include/asm-i386/termios.h
@@ -81,8 +81,10 @@ struct termio {
copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \
})
-#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios))
-#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios))
+#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios2))
+#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios2))
+#define user_termios_to_kernel_termios_1(k, u) copy_from_user(k, u, sizeof(struct termios))
+#define kernel_termios_to_user_termios_1(u, k) copy_to_user(u, k, sizeof(struct termios))
#endif /* __KERNEL__ */
diff --git a/include/asm-i386/unistd.h b/include/asm-i386/unistd.h
index 833fa1704ff..bd21e795197 100644
--- a/include/asm-i386/unistd.h
+++ b/include/asm-i386/unistd.h
@@ -325,10 +325,11 @@
#define __NR_move_pages 317
#define __NR_getcpu 318
#define __NR_epoll_pwait 319
+#define __NR_utimensat 320
#ifdef __KERNEL__
-#define NR_syscalls 320
+#define NR_syscalls 321
#define __ARCH_WANT_IPC_PARSE_VERSION
#define __ARCH_WANT_OLD_READDIR
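
Until the C library provides a wrapper, the new syscall number can be exercised directly. A hypothetical user-space sketch; passing a NULL times array asks the kernel to set both timestamps to the current time, and the helper name touch_now() is made up.

#include <stddef.h>
#include <fcntl.h>		/* AT_FDCWD */
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/unistd.h>		/* __NR_utimensat, added above */

/* Update atime and mtime of path, resolved relative to the CWD. */
static int touch_now(const char *path)
{
	return syscall(__NR_utimensat, AT_FDCWD, path, NULL, 0);
}
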
diff --git a/include/asm-ia64/atomic.h b/include/asm-ia64/atomic.h
index 569ec7574ba..1fc3b83325d 100644
--- a/include/asm-ia64/atomic.h
+++ b/include/asm-ia64/atomic.h
@@ -15,6 +15,7 @@
#include <linux/types.h>
#include <asm/intrinsics.h>
+#include <asm/system.h>
/*
 * On IA-64, counter must always be volatile to ensure that the
@@ -88,25 +89,47 @@ ia64_atomic64_sub (__s64 i, atomic64_t *v)
return new;
}
-#define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))
+#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-#define atomic_add_unless(v, a, u) \
-({ \
- int c, old; \
- c = atomic_read(v); \
- for (;;) { \
- if (unlikely(c == (u))) \
- break; \
- old = atomic_cmpxchg((v), c, c + (a)); \
- if (likely(old == c)) \
- break; \
- c = old; \
- } \
- c != (u); \
-})
+#define atomic64_cmpxchg(v, old, new) \
+ (cmpxchg(&((v)->counter), old, new))
+#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
+
+static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+{
+ int c, old;
+ c = atomic_read(v);
+ for (;;) {
+ if (unlikely(c == (u)))
+ break;
+ old = atomic_cmpxchg((v), c, c + (a));
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+ return c != (u);
+}
+
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+{
+ long c, old;
+ c = atomic64_read(v);
+ for (;;) {
+ if (unlikely(c == (u)))
+ break;
+ old = atomic64_cmpxchg((v), c, c + (a));
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+ return c != (u);
+}
+
+#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
+
#define atomic_add_return(i,v) \
({ \
int __ia64_aar_i = (i); \
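
atomic_add_unless()/atomic_inc_not_zero() exist mainly for the "take a reference only while the object is still live" pattern. A short sketch of that use, assuming a hypothetical refcounted struct obj; this is illustration, not code from the patch.

#include <linux/kernel.h>
#include <asm/atomic.h>

struct obj {
	atomic_t refcount;
};

/* Return o with an extra reference, or NULL if it is already being freed. */
static struct obj *obj_get(struct obj *o)
{
	if (!atomic_inc_not_zero(&o->refcount))
		return NULL;
	return o;
}
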
diff --git a/include/asm-ia64/kdebug.h b/include/asm-ia64/kdebug.h
index aed7142f9e4..ba211e011a1 100644
--- a/include/asm-ia64/kdebug.h
+++ b/include/asm-ia64/kdebug.h
@@ -28,21 +28,8 @@
*/
#include <linux/notifier.h>
-struct pt_regs;
-
-struct die_args {
- struct pt_regs *regs;
- const char *str;
- long err;
- int trapnr;
- int signr;
-};
-
-extern int register_die_notifier(struct notifier_block *);
-extern int unregister_die_notifier(struct notifier_block *);
extern int register_page_fault_notifier(struct notifier_block *);
extern int unregister_page_fault_notifier(struct notifier_block *);
-extern struct atomic_notifier_head ia64die_chain;
enum die_val {
DIE_BREAK = 1,
@@ -74,18 +61,4 @@ enum die_val {
DIE_KDUMP_LEAVE,
};
-static inline int notify_die(enum die_val val, char *str, struct pt_regs *regs,
- long err, int trap, int sig)
-{
- struct die_args args = {
- .regs = regs,
- .str = str,
- .err = err,
- .trapnr = trap,
- .signr = sig
- };
-
- return atomic_notifier_call_chain(&ia64die_chain, val, &args);
-}
-
#endif
diff --git a/include/asm-ia64/kexec.h b/include/asm-ia64/kexec.h
index 41299ddfee3..541be835fc5 100644
--- a/include/asm-ia64/kexec.h
+++ b/include/asm-ia64/kexec.h
@@ -14,8 +14,6 @@
/* The native architecture */
#define KEXEC_ARCH KEXEC_ARCH_IA_64
-#define MAX_NOTE_BYTES 1024
-
#define kexec_flush_icache_page(page) do { \
unsigned long page_addr = (unsigned long)page_address(page); \
flush_icache_range(page_addr, page_addr + PAGE_SIZE); \
diff --git a/include/asm-ia64/local.h b/include/asm-ia64/local.h
index dc519092ef4..c11c530f74d 100644
--- a/include/asm-ia64/local.h
+++ b/include/asm-ia64/local.h
@@ -1,50 +1 @@
-#ifndef _ASM_IA64_LOCAL_H
-#define _ASM_IA64_LOCAL_H
-
-/*
- * Copyright (C) 2003 Hewlett-Packard Co
- * David Mosberger-Tang <davidm@hpl.hp.com>
- */
-
-#include <linux/percpu.h>
-
-typedef struct {
- atomic64_t val;
-} local_t;
-
-#define LOCAL_INIT(i) ((local_t) { { (i) } })
-#define local_read(l) atomic64_read(&(l)->val)
-#define local_set(l, i) atomic64_set(&(l)->val, i)
-#define local_inc(l) atomic64_inc(&(l)->val)
-#define local_dec(l) atomic64_dec(&(l)->val)
-#define local_add(i, l) atomic64_add((i), &(l)->val)
-#define local_sub(i, l) atomic64_sub((i), &(l)->val)
-
-/* Non-atomic variants, i.e., preemption disabled and won't be touched in interrupt, etc. */
-
-#define __local_inc(l) (++(l)->val.counter)
-#define __local_dec(l) (--(l)->val.counter)
-#define __local_add(i,l) ((l)->val.counter += (i))
-#define __local_sub(i,l) ((l)->val.counter -= (i))
-
-/*
- * Use these for per-cpu local_t variables. Note they take a variable (eg. mystruct.foo),
- * not an address.
- */
-#define cpu_local_read(v) local_read(&__ia64_per_cpu_var(v))
-#define cpu_local_set(v, i) local_set(&__ia64_per_cpu_var(v), (i))
-#define cpu_local_inc(v) local_inc(&__ia64_per_cpu_var(v))
-#define cpu_local_dec(v) local_dec(&__ia64_per_cpu_var(v))
-#define cpu_local_add(i, v) local_add((i), &__ia64_per_cpu_var(v))
-#define cpu_local_sub(i, v) local_sub((i), &__ia64_per_cpu_var(v))
-
-/*
- * Non-atomic increments, i.e., preemption disabled and won't be touched in interrupt,
- * etc.
- */
-#define __cpu_local_inc(v) __local_inc(&__ia64_per_cpu_var(v))
-#define __cpu_local_dec(v) __local_dec(&__ia64_per_cpu_var(v))
-#define __cpu_local_add(i, v) __local_add((i), &__ia64_per_cpu_var(v))
-#define __cpu_local_sub(i, v) __local_sub((i), &__ia64_per_cpu_var(v))
-
-#endif /* _ASM_IA64_LOCAL_H */
+#include <asm-generic/local.h>
diff --git a/include/asm-ia64/pgtable.h b/include/asm-ia64/pgtable.h
index 55318274772..670b706411b 100644
--- a/include/asm-ia64/pgtable.h
+++ b/include/asm-ia64/pgtable.h
@@ -485,10 +485,6 @@ extern void paging_init (void);
#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
remap_pfn_range(vma, vaddr, pfn, size, prot)
-#define MK_IOSPACE_PFN(space, pfn) (pfn)
-#define GET_IOSPACE(pfn) 0
-#define GET_PFN(pfn) (pfn)
-
/*
* ZERO_PAGE is a global shared page that is always zero: used
* for zero-mapped memory areas etc..
diff --git a/include/asm-m32r/atomic.h b/include/asm-m32r/atomic.h
index f5a7d7301c7..3a38ffe4a4f 100644
--- a/include/asm-m32r/atomic.h
+++ b/include/asm-m32r/atomic.h
@@ -253,14 +253,21 @@ static __inline__ int atomic_dec_return(atomic_t *v)
* Atomically adds @a to @v, so long as it was not @u.
* Returns non-zero if @v was not @u, and zero otherwise.
*/
-#define atomic_add_unless(v, a, u) \
-({ \
- int c, old; \
- c = atomic_read(v); \
- while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
- c = old; \
- c != (u); \
-})
+static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+{
+ int c, old;
+ c = atomic_read(v);
+ for (;;) {
+ if (unlikely(c == (u)))
+ break;
+ old = atomic_cmpxchg((v), c, c + (a));
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+ return c != (u);
+}
+
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
static __inline__ void atomic_clear_mask(unsigned long mask, atomic_t *addr)
diff --git a/include/asm-m32r/kdebug.h b/include/asm-m32r/kdebug.h
new file mode 100644
index 00000000000..6ece1b03766
--- /dev/null
+++ b/include/asm-m32r/kdebug.h
@@ -0,0 +1 @@
+#include <asm-generic/kdebug.h>
diff --git a/include/asm-m32r/pgtable.h b/include/asm-m32r/pgtable.h
index 1c15ba7ce31..8b2a2f17e69 100644
--- a/include/asm-m32r/pgtable.h
+++ b/include/asm-m32r/pgtable.h
@@ -381,10 +381,6 @@ static inline void pmd_set(pmd_t * pmdp, pte_t * ptep)
#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
remap_pfn_range(vma, vaddr, pfn, size, prot)
-#define MK_IOSPACE_PFN(space, pfn) (pfn)
-#define GET_IOSPACE(pfn) 0
-#define GET_PFN(pfn) (pfn)
-
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
diff --git a/include/asm-m32r/system.h b/include/asm-m32r/system.h
index 99ee09889ff..06cdece3586 100644
--- a/include/asm-m32r/system.h
+++ b/include/asm-m32r/system.h
@@ -122,8 +122,6 @@ static inline void local_irq_disable(void)
#define xchg(ptr,x) \
((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
-#define tas(ptr) (xchg((ptr),1))
-
#ifdef CONFIG_SMP
extern void __xchg_called_with_bad_pointer(void);
#endif
diff --git a/include/asm-m68k/atomic.h b/include/asm-m68k/atomic.h
index d5eed64cb83..4915294fea6 100644
--- a/include/asm-m68k/atomic.h
+++ b/include/asm-m68k/atomic.h
@@ -2,7 +2,7 @@
#define __ARCH_M68K_ATOMIC__
-#include <asm/system.h> /* local_irq_XXX() */
+#include <asm/system.h>
/*
* Atomic operations that C can't guarantee us. Useful for
@@ -170,20 +170,21 @@ static inline void atomic_set_mask(unsigned long mask, unsigned long *v)
__asm__ __volatile__("orl %1,%0" : "+m" (*v) : "id" (mask));
}
-#define atomic_add_unless(v, a, u) \
-({ \
- int c, old; \
- c = atomic_read(v); \
- for (;;) { \
- if (unlikely(c == (u))) \
- break; \
- old = atomic_cmpxchg((v), c, c + (a)); \
- if (likely(old == c)) \
- break; \
- c = old; \
- } \
- c != (u); \
-})
+static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+{
+ int c, old;
+ c = atomic_read(v);
+ for (;;) {
+ if (unlikely(c == (u)))
+ break;
+ old = atomic_cmpxchg((v), c, c + (a));
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+ return c != (u);
+}
+
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
/* Atomic operations are already serializing */
diff --git a/include/asm-m68k/kdebug.h b/include/asm-m68k/kdebug.h
new file mode 100644
index 00000000000..6ece1b03766
--- /dev/null
+++ b/include/asm-m68k/kdebug.h
@@ -0,0 +1 @@
+#include <asm-generic/kdebug.h>
diff --git a/include/asm-m68k/pgtable.h b/include/asm-m68k/pgtable.h
index f3aa0537798..555b87a1f7e 100644
--- a/include/asm-m68k/pgtable.h
+++ b/include/asm-m68k/pgtable.h
@@ -143,10 +143,6 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
remap_pfn_range(vma, vaddr, pfn, size, prot)
-#define MK_IOSPACE_PFN(space, pfn) (pfn)
-#define GET_IOSPACE(pfn) 0
-#define GET_PFN(pfn) (pfn)
-
/* MMU-specific headers */
#ifdef CONFIG_SUN3
diff --git a/include/asm-m68k/system.h b/include/asm-m68k/system.h
index 243dd13e6bf..198878b53a6 100644
--- a/include/asm-m68k/system.h
+++ b/include/asm-m68k/system.h
@@ -88,7 +88,6 @@ static inline int irqs_disabled(void)
#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
-#define tas(ptr) (xchg((ptr),1))
struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((volatile struct __xchg_dummy *)(x))
diff --git a/include/asm-m68knommu/atomic.h b/include/asm-m68knommu/atomic.h
index 6c4e4b63e45..d5632a305da 100644
--- a/include/asm-m68knommu/atomic.h
+++ b/include/asm-m68knommu/atomic.h
@@ -1,7 +1,7 @@
#ifndef __ARCH_M68KNOMMU_ATOMIC__
#define __ARCH_M68KNOMMU_ATOMIC__
-#include <asm/system.h> /* local_irq_XXX() */
+#include <asm/system.h>
/*
* Atomic operations that C can't guarantee us. Useful for
@@ -131,14 +131,21 @@ static inline int atomic_sub_return(int i, atomic_t * v)
#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-#define atomic_add_unless(v, a, u) \
-({ \
- int c, old; \
- c = atomic_read(v); \
- while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
- c = old; \
- c != (u); \
-})
+static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+{
+ int c, old;
+ c = atomic_read(v);
+ for (;;) {
+ if (unlikely(c == (u)))
+ break;
+ old = atomic_cmpxchg((v), c, c + (a));
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+ return c != (u);
+}
+
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
#define atomic_dec_return(v) atomic_sub_return(1,(v))
diff --git a/include/asm-m68knommu/kdebug.h b/include/asm-m68knommu/kdebug.h
new file mode 100644
index 00000000000..6ece1b03766
--- /dev/null
+++ b/include/asm-m68knommu/kdebug.h
@@ -0,0 +1 @@
+#include <asm-generic/kdebug.h>
diff --git a/include/asm-m68knommu/pgtable.h b/include/asm-m68knommu/pgtable.h
index 549ad231efa..9dfbbc24aa7 100644
--- a/include/asm-m68knommu/pgtable.h
+++ b/include/asm-m68knommu/pgtable.h
@@ -59,10 +59,6 @@ extern int is_in_rom(unsigned long);
#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
remap_pfn_range(vma, vaddr, pfn, size, prot)
-#define MK_IOSPACE_PFN(space, pfn) (pfn)
-#define GET_IOSPACE(pfn) 0
-#define GET_PFN(pfn) (pfn)
-
/*
* All 32bit addresses are effectively valid for vmalloc...
* Sort of meaningless for non-VM targets.
diff --git a/include/asm-m68knommu/system.h b/include/asm-m68knommu/system.h
index 2a814498672..5e5ed18bb78 100644
--- a/include/asm-m68knommu/system.h
+++ b/include/asm-m68knommu/system.h
@@ -120,7 +120,6 @@ asmlinkage void resume(void);
#endif
#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
-#define tas(ptr) (xchg((ptr),1))
struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((volatile struct __xchg_dummy *)(x))
diff --git a/include/asm-mips/atomic.h b/include/asm-mips/atomic.h
index 1ac50b6c47a..62daa746a9c 100644
--- a/include/asm-mips/atomic.h
+++ b/include/asm-mips/atomic.h
@@ -18,6 +18,7 @@
#include <asm/barrier.h>
#include <asm/cpu-features.h>
#include <asm/war.h>
+#include <asm/system.h>
typedef struct { volatile int counter; } atomic_t;
@@ -306,8 +307,8 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
return result;
}
-#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
/**
* atomic_add_unless - add unless the number is a given value
@@ -318,14 +319,20 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
* Atomically adds @a to @v, so long as it was not @u.
* Returns non-zero if @v was not @u, and zero otherwise.
*/
-#define atomic_add_unless(v, a, u) \
-({ \
- int c, old; \
- c = atomic_read(v); \
- while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
- c = old; \
- c != (u); \
-})
+static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+{
+ int c, old;
+ c = atomic_read(v);
+ for (;;) {
+ if (unlikely(c == (u)))
+ break;
+ old = atomic_cmpxchg((v), c, c + (a));
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+ return c != (u);
+}
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
#define atomic_dec_return(v) atomic_sub_return(1,(v))
@@ -681,6 +688,36 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
return result;
}
+#define atomic64_cmpxchg(v, o, n) \
+	((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
+#define atomic64_xchg(v, new) (xchg(&((v)->counter), (new)))
+
+/**
+ * atomic64_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic64_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as it was not @u.
+ * Returns non-zero if @v was not @u, and zero otherwise.
+ */
+static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+{
+ long c, old;
+ c = atomic64_read(v);
+ for (;;) {
+ if (unlikely(c == (u)))
+ break;
+ old = atomic64_cmpxchg((v), c, c + (a));
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+ return c != (u);
+}
+
+#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
+
#define atomic64_dec_return(v) atomic64_sub_return(1,(v))
#define atomic64_inc_return(v) atomic64_add_return(1,(v))
diff --git a/include/asm-mips/kdebug.h b/include/asm-mips/kdebug.h
new file mode 100644
index 00000000000..6ece1b03766
--- /dev/null
+++ b/include/asm-mips/kdebug.h
@@ -0,0 +1 @@
+#include <asm-generic/kdebug.h>
diff --git a/include/asm-mips/kexec.h b/include/asm-mips/kexec.h
index b25267ebcb0..cdbab43b7d3 100644
--- a/include/asm-mips/kexec.h
+++ b/include/asm-mips/kexec.h
@@ -21,8 +21,6 @@
/* The native architecture */
#define KEXEC_ARCH KEXEC_ARCH_MIPS
-#define MAX_NOTE_BYTES 1024
-
static inline void crash_setup_regs(struct pt_regs *newregs,
struct pt_regs *oldregs)
{
diff --git a/include/asm-mips/local.h b/include/asm-mips/local.h
index 9e2d43bae38..ed882c88e0c 100644
--- a/include/asm-mips/local.h
+++ b/include/asm-mips/local.h
@@ -1,60 +1,288 @@
-#ifndef _ASM_LOCAL_H
-#define _ASM_LOCAL_H
+#ifndef _ARCH_MIPS_LOCAL_H
+#define _ARCH_MIPS_LOCAL_H
#include <linux/percpu.h>
+#include <linux/bitops.h>
#include <asm/atomic.h>
+#include <asm/war.h>
-#ifdef CONFIG_32BIT
+typedef struct
+{
+ atomic_long_t a;
+} local_t;
-typedef atomic_t local_t;
+#define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
-#define LOCAL_INIT(i) ATOMIC_INIT(i)
-#define local_read(v) atomic_read(v)
-#define local_set(v,i) atomic_set(v,i)
+#define local_read(l) atomic_long_read(&(l)->a)
+#define local_set(l,i) atomic_long_set(&(l)->a, (i))
-#define local_inc(v) atomic_inc(v)
-#define local_dec(v) atomic_dec(v)
-#define local_add(i, v) atomic_add(i, v)
-#define local_sub(i, v) atomic_sub(i, v)
+#define local_add(i,l) atomic_long_add((i),(&(l)->a))
+#define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
+#define local_inc(l) atomic_long_inc(&(l)->a)
+#define local_dec(l) atomic_long_dec(&(l)->a)
-#endif
+/*
+ * Same as above, but return the result value
+ */
+static __inline__ long local_add_return(long i, local_t * l)
+{
+ unsigned long result;
+
+ if (cpu_has_llsc && R10000_LLSC_WAR) {
+ unsigned long temp;
+
+ __asm__ __volatile__(
+ " .set mips3 \n"
+ "1:" __LL "%1, %2 # local_add_return \n"
+ " addu %0, %1, %3 \n"
+ __SC "%0, %2 \n"
+ " beqzl %0, 1b \n"
+ " addu %0, %1, %3 \n"
+ " .set mips0 \n"
+ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
+ : "Ir" (i), "m" (l->a.counter)
+ : "memory");
+ } else if (cpu_has_llsc) {
+ unsigned long temp;
+
+ __asm__ __volatile__(
+ " .set mips3 \n"
+ "1:" __LL "%1, %2 # local_add_return \n"
+ " addu %0, %1, %3 \n"
+ __SC "%0, %2 \n"
+ " beqz %0, 1b \n"
+ " addu %0, %1, %3 \n"
+ " .set mips0 \n"
+ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
+ : "Ir" (i), "m" (l->a.counter)
+ : "memory");
+ } else {
+ unsigned long flags;
-#ifdef CONFIG_64BIT
+ local_irq_save(flags);
+ result = l->a.counter;
+ result += i;
+ l->a.counter = result;
+ local_irq_restore(flags);
+ }
-typedef atomic64_t local_t;
+ return result;
+}
-#define LOCAL_INIT(i) ATOMIC64_INIT(i)
-#define local_read(v) atomic64_read(v)
-#define local_set(v,i) atomic64_set(v,i)
+static __inline__ long local_sub_return(long i, local_t * l)
+{
+ unsigned long result;
-#define local_inc(v) atomic64_inc(v)
-#define local_dec(v) atomic64_dec(v)
-#define local_add(i, v) atomic64_add(i, v)
-#define local_sub(i, v) atomic64_sub(i, v)
+ if (cpu_has_llsc && R10000_LLSC_WAR) {
+ unsigned long temp;
-#endif
+ __asm__ __volatile__(
+ " .set mips3 \n"
+ "1:" __LL "%1, %2 # local_sub_return \n"
+ " subu %0, %1, %3 \n"
+ __SC "%0, %2 \n"
+ " beqzl %0, 1b \n"
+ " subu %0, %1, %3 \n"
+ " .set mips0 \n"
+ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
+ : "Ir" (i), "m" (l->a.counter)
+ : "memory");
+ } else if (cpu_has_llsc) {
+ unsigned long temp;
-#define __local_inc(v) ((v)->counter++)
-#define __local_dec(v) ((v)->counter--)
-#define __local_add(i,v) ((v)->counter+=(i))
-#define __local_sub(i,v) ((v)->counter-=(i))
+ __asm__ __volatile__(
+ " .set mips3 \n"
+ "1:" __LL "%1, %2 # local_sub_return \n"
+ " subu %0, %1, %3 \n"
+ __SC "%0, %2 \n"
+ " beqz %0, 1b \n"
+ " subu %0, %1, %3 \n"
+ " .set mips0 \n"
+ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
+ : "Ir" (i), "m" (l->a.counter)
+ : "memory");
+ } else {
+ unsigned long flags;
+
+ local_irq_save(flags);
+ result = l->a.counter;
+ result -= i;
+ l->a.counter = result;
+ local_irq_restore(flags);
+ }
+
+ return result;
+}
/*
- * Use these for per-cpu local_t variables: on some archs they are
+ * local_sub_if_positive - conditionally subtract integer from local_t variable
+ * @i: integer value to subtract
+ * @l: pointer of type local_t
+ *
+ * Atomically test @l and subtract @i if @l is greater than or equal to @i.
+ * The function returns the old value of @l minus @i.
+ */
+static __inline__ long local_sub_if_positive(long i, local_t * l)
+{
+ unsigned long result;
+
+ if (cpu_has_llsc && R10000_LLSC_WAR) {
+ unsigned long temp;
+
+ __asm__ __volatile__(
+ " .set mips3 \n"
+ "1:" __LL "%1, %2 # local_sub_if_positive\n"
+ " dsubu %0, %1, %3 \n"
+ " bltz %0, 1f \n"
+ __SC "%0, %2 \n"
+ " .set noreorder \n"
+ " beqzl %0, 1b \n"
+ " dsubu %0, %1, %3 \n"
+ " .set reorder \n"
+ "1: \n"
+ " .set mips0 \n"
+ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
+ : "Ir" (i), "m" (l->a.counter)
+ : "memory");
+ } else if (cpu_has_llsc) {
+ unsigned long temp;
+
+ __asm__ __volatile__(
+ " .set mips3 \n"
+ "1:" __LL "%1, %2 # local_sub_if_positive\n"
+ " dsubu %0, %1, %3 \n"
+ " bltz %0, 1f \n"
+ __SC "%0, %2 \n"
+ " .set noreorder \n"
+ " beqz %0, 1b \n"
+ " dsubu %0, %1, %3 \n"
+ " .set reorder \n"
+ "1: \n"
+ " .set mips0 \n"
+ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
+ : "Ir" (i), "m" (l->a.counter)
+ : "memory");
+ } else {
+ unsigned long flags;
+
+ local_irq_save(flags);
+ result = l->a.counter;
+ result -= i;
+ if (result >= 0)
+ l->a.counter = result;
+ local_irq_restore(flags);
+ }
+
+ return result;
+}
+
+#define local_cmpxchg(l, o, n) \
+ ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
+#define local_xchg(l, n) (xchg_local(&((l)->a.counter),(n)))
+
+/**
+ * local_add_unless - add unless the number is a given value
+ * @l: pointer of type local_t
+ * @a: the amount to add to l...
+ * @u: ...unless l is equal to u.
+ *
+ * Atomically adds @a to @l, so long as it was not @u.
+ * Returns non-zero if @l was not @u, and zero otherwise.
+ */
+#define local_add_unless(l, a, u) \
+({ \
+ long c, old; \
+ c = local_read(l); \
+ while (c != (u) && (old = local_cmpxchg((l), c, c + (a))) != c) \
+ c = old; \
+ c != (u); \
+})
+#define local_inc_not_zero(l) local_add_unless((l), 1, 0)
+
+#define local_dec_return(l) local_sub_return(1,(l))
+#define local_inc_return(l) local_add_return(1,(l))
+
+/*
+ * local_sub_and_test - subtract value from variable and test result
+ * @i: integer value to subtract
+ * @l: pointer of type local_t
+ *
+ * Atomically subtracts @i from @l and returns
+ * true if the result is zero, or false for all
+ * other cases.
+ */
+#define local_sub_and_test(i,l) (local_sub_return((i), (l)) == 0)
+
+/*
+ * local_inc_and_test - increment and test
+ * @l: pointer of type local_t
+ *
+ * Atomically increments @l by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+#define local_inc_and_test(l) (local_inc_return(l) == 0)
+
+/*
+ * local_dec_and_test - decrement by 1 and test
+ * @l: pointer of type local_t
+ *
+ * Atomically decrements @l by 1 and
+ * returns true if the result is 0, or false for all other
+ * cases.
+ */
+#define local_dec_and_test(l) (local_sub_return(1, (l)) == 0)
+
+/*
+ * local_dec_if_positive - decrement by 1 if old value positive
+ * @l: pointer of type local_t
+ */
+#define local_dec_if_positive(l) local_sub_if_positive(1, l)
+
+/*
+ * local_add_negative - add and test if negative
+ * @l: pointer of type local_t
+ * @i: integer value to add
+ *
+ * Atomically adds @i to @l and returns true
+ * if the result is negative, or false when
+ * result is greater than or equal to zero.
+ */
+#define local_add_negative(i,l) (local_add_return(i, (l)) < 0)
+
+/* Use these for per-cpu local_t variables: on some archs they are
* much more efficient than these naive implementations. Note they take
* a variable, not an address.
*/
-#define cpu_local_read(v) local_read(&__get_cpu_var(v))
-#define cpu_local_set(v, i) local_set(&__get_cpu_var(v), (i))
-#define cpu_local_inc(v) local_inc(&__get_cpu_var(v))
-#define cpu_local_dec(v) local_dec(&__get_cpu_var(v))
-#define cpu_local_add(i, v) local_add((i), &__get_cpu_var(v))
-#define cpu_local_sub(i, v) local_sub((i), &__get_cpu_var(v))
+#define __local_inc(l) ((l)->a.counter++)
+#define __local_dec(l) ((l)->a.counter--)
+#define __local_add(i,l) ((l)->a.counter+=(i))
+#define __local_sub(i,l) ((l)->a.counter-=(i))
+
+/* Need to disable preemption for the cpu local counters; otherwise we could
+   still access a variable of a previous CPU in a non-atomic way. */
+#define cpu_local_wrap_v(l) \
+ ({ local_t res__; \
+ preempt_disable(); \
+ res__ = (l); \
+ preempt_enable(); \
+ res__; })
+#define cpu_local_wrap(l) \
+ ({ preempt_disable(); \
+ l; \
+ preempt_enable(); }) \
+
+#define cpu_local_read(l) cpu_local_wrap_v(local_read(&__get_cpu_var(l)))
+#define cpu_local_set(l, i) cpu_local_wrap(local_set(&__get_cpu_var(l), (i)))
+#define cpu_local_inc(l) cpu_local_wrap(local_inc(&__get_cpu_var(l)))
+#define cpu_local_dec(l) cpu_local_wrap(local_dec(&__get_cpu_var(l)))
+#define cpu_local_add(i, l) cpu_local_wrap(local_add((i), &__get_cpu_var(l)))
+#define cpu_local_sub(i, l) cpu_local_wrap(local_sub((i), &__get_cpu_var(l)))
-#define __cpu_local_inc(v) __local_inc(&__get_cpu_var(v))
-#define __cpu_local_dec(v) __local_dec(&__get_cpu_var(v))
-#define __cpu_local_add(i, v) __local_add((i), &__get_cpu_var(v))
-#define __cpu_local_sub(i, v) __local_sub((i), &__get_cpu_var(v))
+#define __cpu_local_inc(l) cpu_local_inc(l)
+#define __cpu_local_dec(l) cpu_local_dec(l)
+#define __cpu_local_add(i, l) cpu_local_add((i), (l))
+#define __cpu_local_sub(i, l) cpu_local_sub((i), (l))
-#endif /* _ASM_LOCAL_H */
+#endif /* _ARCH_MIPS_LOCAL_H */
diff --git a/include/asm-mips/mach-au1x00/au1550_spi.h b/include/asm-mips/mach-au1x00/au1550_spi.h
new file mode 100644
index 00000000000..c2f0466523e
--- /dev/null
+++ b/include/asm-mips/mach-au1x00/au1550_spi.h
@@ -0,0 +1,16 @@
+/*
+ * au1550_spi.h - au1550 psc spi controller driver - platform data struct
+ */
+
+#ifndef _AU1550_SPI_H_
+#define _AU1550_SPI_H_
+
+struct au1550_spi_info {
+ s16 bus_num; /* defines which PSC and IRQ to use */
+ u32 mainclk_hz; /* main input clock frequency of PSC */
+ u16 num_chipselect; /* number of chipselects supported */
+ void (*activate_cs)(struct au1550_spi_info *spi, int cs, int polarity);
+ void (*deactivate_cs)(struct au1550_spi_info *spi, int cs, int polarity);
+};
+
+#endif
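
Board code is expected to fill one of these structures and pass it to the au1550 SPI driver as platform data. A hypothetical sketch; the clock rate and chip-select count below are assumptions for illustration, not values taken from this patch.

#include <asm/mach-au1x00/au1550_spi.h>

static struct au1550_spi_info au1550_spi0_info = {
	.bus_num	= 0,		/* use PSC0 and its IRQ */
	.mainclk_hz	= 48000000,	/* PSC input clock on this board */
	.num_chipselect	= 2,
	/* .activate_cs / .deactivate_cs could toggle GPIO chip selects */
};
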
diff --git a/include/asm-mips/pgtable.h b/include/asm-mips/pgtable.h
index 0d3295f57a9..27d77d98193 100644
--- a/include/asm-mips/pgtable.h
+++ b/include/asm-mips/pgtable.h
@@ -387,10 +387,6 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma,
remap_pfn_range(vma, vaddr, pfn, size, prot)
#endif
-#define MK_IOSPACE_PFN(space, pfn) (pfn)
-#define GET_IOSPACE(pfn) 0
-#define GET_PFN(pfn) (pfn)
-
#include <asm-generic/pgtable.h>
/*
diff --git a/include/asm-mips/system.h b/include/asm-mips/system.h
index 290887077e4..30f23a2b46c 100644
--- a/include/asm-mips/system.h
+++ b/include/asm-mips/system.h
@@ -201,7 +201,6 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
}
#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
-#define tas(ptr) (xchg((ptr),1))
#define __HAVE_ARCH_CMPXCHG 1
@@ -262,6 +261,58 @@ static inline unsigned long __cmpxchg_u32(volatile int * m, unsigned long old,
return retval;
}
+static inline unsigned long __cmpxchg_u32_local(volatile int * m,
+ unsigned long old, unsigned long new)
+{
+ __u32 retval;
+
+ if (cpu_has_llsc && R10000_LLSC_WAR) {
+ __asm__ __volatile__(
+ " .set push \n"
+ " .set noat \n"
+ " .set mips3 \n"
+ "1: ll %0, %2 # __cmpxchg_u32 \n"
+ " bne %0, %z3, 2f \n"
+ " .set mips0 \n"
+ " move $1, %z4 \n"
+ " .set mips3 \n"
+ " sc $1, %1 \n"
+ " beqzl $1, 1b \n"
+ "2: \n"
+ " .set pop \n"
+ : "=&r" (retval), "=R" (*m)
+ : "R" (*m), "Jr" (old), "Jr" (new)
+ : "memory");
+ } else if (cpu_has_llsc) {
+ __asm__ __volatile__(
+ " .set push \n"
+ " .set noat \n"
+ " .set mips3 \n"
+ "1: ll %0, %2 # __cmpxchg_u32 \n"
+ " bne %0, %z3, 2f \n"
+ " .set mips0 \n"
+ " move $1, %z4 \n"
+ " .set mips3 \n"
+ " sc $1, %1 \n"
+ " beqz $1, 1b \n"
+ "2: \n"
+ " .set pop \n"
+ : "=&r" (retval), "=R" (*m)
+ : "R" (*m), "Jr" (old), "Jr" (new)
+ : "memory");
+ } else {
+ unsigned long flags;
+
+ local_irq_save(flags);
+ retval = *m;
+ if (retval == old)
+ *m = new;
+ local_irq_restore(flags); /* implies memory barrier */
+ }
+
+ return retval;
+}
+
#ifdef CONFIG_64BIT
static inline unsigned long __cmpxchg_u64(volatile int * m, unsigned long old,
unsigned long new)
@@ -315,10 +366,62 @@ static inline unsigned long __cmpxchg_u64(volatile int * m, unsigned long old,
return retval;
}
+
+static inline unsigned long __cmpxchg_u64_local(volatile int * m,
+ unsigned long old, unsigned long new)
+{
+ __u64 retval;
+
+ if (cpu_has_llsc && R10000_LLSC_WAR) {
+ __asm__ __volatile__(
+ " .set push \n"
+ " .set noat \n"
+ " .set mips3 \n"
+ "1: lld %0, %2 # __cmpxchg_u64 \n"
+ " bne %0, %z3, 2f \n"
+ " move $1, %z4 \n"
+ " scd $1, %1 \n"
+ " beqzl $1, 1b \n"
+ "2: \n"
+ " .set pop \n"
+ : "=&r" (retval), "=R" (*m)
+ : "R" (*m), "Jr" (old), "Jr" (new)
+ : "memory");
+ } else if (cpu_has_llsc) {
+ __asm__ __volatile__(
+ " .set push \n"
+ " .set noat \n"
+ " .set mips3 \n"
+ "1: lld %0, %2 # __cmpxchg_u64 \n"
+ " bne %0, %z3, 2f \n"
+ " move $1, %z4 \n"
+ " scd $1, %1 \n"
+ " beqz $1, 1b \n"
+ "2: \n"
+ " .set pop \n"
+ : "=&r" (retval), "=R" (*m)
+ : "R" (*m), "Jr" (old), "Jr" (new)
+ : "memory");
+ } else {
+ unsigned long flags;
+
+ local_irq_save(flags);
+ retval = *m;
+ if (retval == old)
+ *m = new;
+ local_irq_restore(flags); /* implies memory barrier */
+ }
+
+ return retval;
+}
+
#else
extern unsigned long __cmpxchg_u64_unsupported_on_32bit_kernels(
volatile int * m, unsigned long old, unsigned long new);
#define __cmpxchg_u64 __cmpxchg_u64_unsupported_on_32bit_kernels
+extern unsigned long __cmpxchg_u64_local_unsupported_on_32bit_kernels(
+ volatile int * m, unsigned long old, unsigned long new);
+#define __cmpxchg_u64_local __cmpxchg_u64_local_unsupported_on_32bit_kernels
#endif
/* This function doesn't exist, so you'll get a linker error
@@ -338,7 +441,26 @@ static inline unsigned long __cmpxchg(volatile void * ptr, unsigned long old,
return old;
}
-#define cmpxchg(ptr,old,new) ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(old), (unsigned long)(new),sizeof(*(ptr))))
+static inline unsigned long __cmpxchg_local(volatile void * ptr,
+ unsigned long old, unsigned long new, int size)
+{
+ switch (size) {
+ case 4:
+ return __cmpxchg_u32_local(ptr, old, new);
+ case 8:
+ return __cmpxchg_u64_local(ptr, old, new);
+ }
+ __cmpxchg_called_with_bad_pointer();
+ return old;
+}
+
+#define cmpxchg(ptr,old,new) \
+ ((__typeof__(*(ptr)))__cmpxchg((ptr), \
+ (unsigned long)(old), (unsigned long)(new),sizeof(*(ptr))))
+
+#define cmpxchg_local(ptr,old,new) \
+ ((__typeof__(*(ptr)))__cmpxchg_local((ptr), \
+ (unsigned long)(old), (unsigned long)(new),sizeof(*(ptr))))
extern void set_handler (unsigned long offset, void *addr, unsigned long len);
extern void set_uncached_handler (unsigned long offset, void *addr, unsigned long len);
diff --git a/include/asm-parisc/atomic.h b/include/asm-parisc/atomic.h
index 7d57d34fcca..e894ee35074 100644
--- a/include/asm-parisc/atomic.h
+++ b/include/asm-parisc/atomic.h
@@ -163,7 +163,7 @@ static __inline__ int atomic_read(const atomic_t *v)
}
/* exported interface */
-#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
/**
@@ -175,14 +175,21 @@ static __inline__ int atomic_read(const atomic_t *v)
* Atomically adds @a to @v, so long as it was not @u.
* Returns non-zero if @v was not @u, and zero otherwise.
*/
-#define atomic_add_unless(v, a, u) \
-({ \
- int c, old; \
- c = atomic_read(v); \
- while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
- c = old; \
- c != (u); \
-})
+static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+{
+ int c, old;
+ c = atomic_read(v);
+ for (;;) {
+ if (unlikely(c == (u)))
+ break;
+ old = atomic_cmpxchg((v), c, c + (a));
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+ return c != (u);
+}
+
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
#define atomic_add(i,v) ((void)(__atomic_add_return( ((int)i),(v))))
@@ -270,6 +277,37 @@ atomic64_read(const atomic64_t *v)
#define atomic64_dec_and_test(v) (atomic64_dec_return(v) == 0)
#define atomic64_sub_and_test(i,v) (atomic64_sub_return((i),(v)) == 0)
+/* exported interface */
+#define atomic64_cmpxchg(v, o, n) \
+ ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
+#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
+
+/**
+ * atomic64_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic64_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as it was not @u.
+ * Returns non-zero if @v was not @u, and zero otherwise.
+ */
+static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+{
+ long c, old;
+ c = atomic64_read(v);
+ for (;;) {
+ if (unlikely(c == (u)))
+ break;
+ old = atomic64_cmpxchg((v), c, c + (a));
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+ return c != (u);
+}
+
+#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
+
#endif /* CONFIG_64BIT */
#include <asm-generic/atomic.h>
diff --git a/include/asm-parisc/kdebug.h b/include/asm-parisc/kdebug.h
new file mode 100644
index 00000000000..6ece1b03766
--- /dev/null
+++ b/include/asm-parisc/kdebug.h
@@ -0,0 +1 @@
+#include <asm-generic/kdebug.h>
diff --git a/include/asm-parisc/local.h b/include/asm-parisc/local.h
index d0f55091275..c11c530f74d 100644
--- a/include/asm-parisc/local.h
+++ b/include/asm-parisc/local.h
@@ -1,40 +1 @@
-#ifndef _ARCH_PARISC_LOCAL_H
-#define _ARCH_PARISC_LOCAL_H
-
-#include <linux/percpu.h>
-#include <asm/atomic.h>
-
-typedef atomic_long_t local_t;
-
-#define LOCAL_INIT(i) ATOMIC_LONG_INIT(i)
-#define local_read(v) atomic_long_read(v)
-#define local_set(v,i) atomic_long_set(v,i)
-
-#define local_inc(v) atomic_long_inc(v)
-#define local_dec(v) atomic_long_dec(v)
-#define local_add(i, v) atomic_long_add(i, v)
-#define local_sub(i, v) atomic_long_sub(i, v)
-
-#define __local_inc(v) ((v)->counter++)
-#define __local_dec(v) ((v)->counter--)
-#define __local_add(i,v) ((v)->counter+=(i))
-#define __local_sub(i,v) ((v)->counter-=(i))
-
-/* Use these for per-cpu local_t variables: on some archs they are
- * much more efficient than these naive implementations. Note they take
- * a variable, not an address.
- */
-#define cpu_local_read(v) local_read(&__get_cpu_var(v))
-#define cpu_local_set(v, i) local_set(&__get_cpu_var(v), (i))
-
-#define cpu_local_inc(v) local_inc(&__get_cpu_var(v))
-#define cpu_local_dec(v) local_dec(&__get_cpu_var(v))
-#define cpu_local_add(i, v) local_add((i), &__get_cpu_var(v))
-#define cpu_local_sub(i, v) local_sub((i), &__get_cpu_var(v))
-
-#define __cpu_local_inc(v) __local_inc(&__get_cpu_var(v))
-#define __cpu_local_dec(v) __local_dec(&__get_cpu_var(v))
-#define __cpu_local_add(i, v) __local_add((i), &__get_cpu_var(v))
-#define __cpu_local_sub(i, v) __local_sub((i), &__get_cpu_var(v))
-
-#endif /* _ARCH_PARISC_LOCAL_H */
+#include <asm-generic/local.h>
diff --git a/include/asm-parisc/pgtable.h b/include/asm-parisc/pgtable.h
index d7e1b10da5c..beb2adb979d 100644
--- a/include/asm-parisc/pgtable.h
+++ b/include/asm-parisc/pgtable.h
@@ -528,10 +528,6 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
#define pgprot_noncached(prot) __pgprot(pgprot_val(prot) | _PAGE_NO_CACHE)
-#define MK_IOSPACE_PFN(space, pfn) (pfn)
-#define GET_IOSPACE(pfn) 0
-#define GET_PFN(pfn) (pfn)
-
/* We provide our own get_unmapped_area to provide cache coherency */
#define HAVE_ARCH_UNMAPPED_AREA
diff --git a/include/asm-powerpc/atomic.h b/include/asm-powerpc/atomic.h
index 2ce4b6b7b34..c44810b9d32 100644
--- a/include/asm-powerpc/atomic.h
+++ b/include/asm-powerpc/atomic.h
@@ -11,6 +11,7 @@ typedef struct { volatile int counter; } atomic_t;
#include <linux/compiler.h>
#include <asm/synch.h>
#include <asm/asm-compat.h>
+#include <asm/system.h>
#define ATOMIC_INIT(i) { (i) }
@@ -165,8 +166,7 @@ static __inline__ int atomic_dec_return(atomic_t *v)
return t;
}
-#define atomic_cmpxchg(v, o, n) \
- ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
+#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
/**
@@ -414,8 +414,7 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
return t;
}
-#define atomic64_cmpxchg(v, o, n) \
- ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
+#define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
/**
diff --git a/include/asm-powerpc/bitops.h b/include/asm-powerpc/bitops.h
index 8f757f6246e..8144a2788db 100644
--- a/include/asm-powerpc/bitops.h
+++ b/include/asm-powerpc/bitops.h
@@ -39,7 +39,6 @@
#ifdef __KERNEL__
#include <linux/compiler.h>
-#include <asm/atomic.h>
#include <asm/asm-compat.h>
#include <asm/synch.h>
diff --git a/include/asm-powerpc/iommu.h b/include/asm-powerpc/iommu.h
index b2e56b30306..870967e4720 100644
--- a/include/asm-powerpc/iommu.h
+++ b/include/asm-powerpc/iommu.h
@@ -26,6 +26,7 @@
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
+#include <asm/machdep.h>
#include <asm/types.h>
#include <asm/bitops.h>
@@ -109,6 +110,19 @@ static inline void pci_iommu_init(void) { }
#endif
extern void alloc_dart_table(void);
+#if defined(CONFIG_PPC64) && defined(CONFIG_PM)
+static inline void iommu_save(void)
+{
+ if (ppc_md.iommu_save)
+ ppc_md.iommu_save();
+}
+
+static inline void iommu_restore(void)
+{
+ if (ppc_md.iommu_restore)
+ ppc_md.iommu_restore();
+}
+#endif
#endif /* __KERNEL__ */
#endif /* _ASM_IOMMU_H */
diff --git a/include/asm-powerpc/kdebug.h b/include/asm-powerpc/kdebug.h
index 532bfee934f..295f0162c60 100644
--- a/include/asm-powerpc/kdebug.h
+++ b/include/asm-powerpc/kdebug.h
@@ -6,20 +6,19 @@
#include <linux/notifier.h>
-struct pt_regs;
-
-struct die_args {
- struct pt_regs *regs;
- const char *str;
- long err;
- int trapnr;
- int signr;
-};
-
-extern int register_die_notifier(struct notifier_block *);
-extern int unregister_die_notifier(struct notifier_block *);
-extern int register_page_fault_notifier(struct notifier_block *);
-extern int unregister_page_fault_notifier(struct notifier_block *);
+/*
+ * These are only here because kprobes.c wants them to implement a
+ * blatant layering violation. Will hopefully go away soon once all
+ * architectures are updated.
+ */
+static inline int register_page_fault_notifier(struct notifier_block *nb)
+{
+ return 0;
+}
+static inline int unregister_page_fault_notifier(struct notifier_block *nb)
+{
+ return 0;
+}
extern struct atomic_notifier_head powerpc_die_chain;
/* Grossly misnamed. */
@@ -29,14 +28,7 @@ enum die_val {
DIE_DABR_MATCH,
DIE_BPT,
DIE_SSTEP,
- DIE_PAGE_FAULT,
};
-static inline int notify_die(enum die_val val,char *str,struct pt_regs *regs,long err,int trap, int sig)
-{
- struct die_args args = { .regs=regs, .str=str, .err=err, .trapnr=trap,.signr=sig };
- return atomic_notifier_call_chain(&powerpc_die_chain, val, &args);
-}
-
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_KDEBUG_H */
diff --git a/include/asm-powerpc/kexec.h b/include/asm-powerpc/kexec.h
index 11cbdf81fd2..b6f817b8ba3 100644
--- a/include/asm-powerpc/kexec.h
+++ b/include/asm-powerpc/kexec.h
@@ -108,8 +108,6 @@ static inline void crash_setup_regs(struct pt_regs *newregs,
struct pt_regs *oldregs) { }
#endif /* !__powerpc64__ */
-#define MAX_NOTE_BYTES 1024
-
extern void kexec_smp_wait(void); /* get and clear naca physid, wait for
master to copy new code to 0 */
extern int crashing_cpu;
diff --git a/include/asm-powerpc/kprobes.h b/include/asm-powerpc/kprobes.h
index f850ca7020e..b0e40ff32ee 100644
--- a/include/asm-powerpc/kprobes.h
+++ b/include/asm-powerpc/kprobes.h
@@ -64,6 +64,12 @@ typedef unsigned int kprobe_opcode_t;
addr = *(kprobe_opcode_t **)addr; \
} else if (name[0] != '.') \
addr = *(kprobe_opcode_t **)addr; \
+ } else { \
+ char dot_name[KSYM_NAME_LEN+1]; \
+ dot_name[0] = '.'; \
+ dot_name[1] = '\0'; \
+ strncat(dot_name, name, KSYM_NAME_LEN); \
+ addr = (kprobe_opcode_t *)kallsyms_lookup_name(dot_name); \
} \
}
@@ -110,5 +116,6 @@ struct kprobe_ctlblk {
extern int kprobe_exceptions_notify(struct notifier_block *self,
unsigned long val, void *data);
+extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_KPROBES_H */
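Editorial note: on 64-bit powerpc the plain symbol name refers to the function descriptor while the dot-prefixed name refers to the code itself, which is why the added branch retries the lookup with a leading '.'. A hedged, standalone sketch of the same idea (helper name invented):

#include <linux/kallsyms.h>
#include <linux/string.h>
#include <asm/kprobes.h>

static kprobe_opcode_t *example_lookup_text(const char *name)
{
	/* +2: room for the leading '.' and the terminating NUL */
	char dot_name[KSYM_NAME_LEN + 2] = ".";

	strncat(dot_name, name, KSYM_NAME_LEN);
	return (kprobe_opcode_t *)kallsyms_lookup_name(dot_name);
}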
diff --git a/include/asm-powerpc/local.h b/include/asm-powerpc/local.h
index c11c530f74d..612d8327665 100644
--- a/include/asm-powerpc/local.h
+++ b/include/asm-powerpc/local.h
@@ -1 +1,200 @@
-#include <asm-generic/local.h>
+#ifndef _ARCH_POWERPC_LOCAL_H
+#define _ARCH_POWERPC_LOCAL_H
+
+#include <linux/percpu.h>
+#include <asm/atomic.h>
+
+typedef struct
+{
+ atomic_long_t a;
+} local_t;
+
+#define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
+
+#define local_read(l) atomic_long_read(&(l)->a)
+#define local_set(l,i) atomic_long_set(&(l)->a, (i))
+
+#define local_add(i,l) atomic_long_add((i),(&(l)->a))
+#define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
+#define local_inc(l) atomic_long_inc(&(l)->a)
+#define local_dec(l) atomic_long_dec(&(l)->a)
+
+static __inline__ long local_add_return(long a, local_t *l)
+{
+ long t;
+
+ __asm__ __volatile__(
+"1:" PPC_LLARX "%0,0,%2 # local_add_return\n\
+ add %0,%1,%0\n"
+ PPC405_ERR77(0,%2)
+ PPC_STLCX "%0,0,%2 \n\
+ bne- 1b"
+ : "=&r" (t)
+ : "r" (a), "r" (&(l->a.counter))
+ : "cc", "memory");
+
+ return t;
+}
+
+#define local_add_negative(a, l) (local_add_return((a), (l)) < 0)
+
+static __inline__ long local_sub_return(long a, local_t *l)
+{
+ long t;
+
+ __asm__ __volatile__(
+"1:" PPC_LLARX "%0,0,%2 # local_sub_return\n\
+ subf %0,%1,%0\n"
+ PPC405_ERR77(0,%2)
+ PPC_STLCX "%0,0,%2 \n\
+ bne- 1b"
+ : "=&r" (t)
+ : "r" (a), "r" (&(l->a.counter))
+ : "cc", "memory");
+
+ return t;
+}
+
+static __inline__ long local_inc_return(local_t *l)
+{
+ long t;
+
+ __asm__ __volatile__(
+"1:" PPC_LLARX "%0,0,%1 # local_inc_return\n\
+ addic %0,%0,1\n"
+ PPC405_ERR77(0,%1)
+ PPC_STLCX "%0,0,%1 \n\
+ bne- 1b"
+ : "=&r" (t)
+ : "r" (&(l->a.counter))
+ : "cc", "memory");
+
+ return t;
+}
+
+/*
+ * local_inc_and_test - increment and test
+ * @l: pointer of type local_t
+ *
+ * Atomically increments @l by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+#define local_inc_and_test(l) (local_inc_return(l) == 0)
+
+static __inline__ long local_dec_return(local_t *l)
+{
+ long t;
+
+ __asm__ __volatile__(
+"1:" PPC_LLARX "%0,0,%1 # local_dec_return\n\
+ addic %0,%0,-1\n"
+ PPC405_ERR77(0,%1)
+ PPC_STLCX "%0,0,%1\n\
+ bne- 1b"
+ : "=&r" (t)
+ : "r" (&(l->a.counter))
+ : "cc", "memory");
+
+ return t;
+}
+
+#define local_cmpxchg(l, o, n) \
+ (cmpxchg_local(&((l)->a.counter), (o), (n)))
+#define local_xchg(l, n) (xchg_local(&((l)->a.counter), (n)))
+
+/**
+ * local_add_unless - add unless the number is a given value
+ * @l: pointer of type local_t
+ * @a: the amount to add to @l...
+ * @u: ...unless @l is equal to @u.
+ *
+ * Atomically adds @a to @l, so long as it was not @u.
+ * Returns non-zero if @l was not @u, and zero otherwise.
+ */
+static __inline__ int local_add_unless(local_t *l, long a, long u)
+{
+ long t;
+
+ __asm__ __volatile__ (
+"1:" PPC_LLARX "%0,0,%1 # local_add_unless\n\
+ cmpw 0,%0,%3 \n\
+ beq- 2f \n\
+ add %0,%2,%0 \n"
+ PPC405_ERR77(0,%2)
+ PPC_STLCX "%0,0,%1 \n\
+ bne- 1b \n"
+" subf %0,%2,%0 \n\
+2:"
+ : "=&r" (t)
+ : "r" (&(l->a.counter)), "r" (a), "r" (u)
+ : "cc", "memory");
+
+ return t != u;
+}
+
+#define local_inc_not_zero(l) local_add_unless((l), 1, 0)
+
+#define local_sub_and_test(a, l) (local_sub_return((a), (l)) == 0)
+#define local_dec_and_test(l) (local_dec_return((l)) == 0)
+
+/*
+ * Atomically test *l and decrement if it is greater than 0.
+ * The function returns the old value of *l minus 1.
+ */
+static __inline__ long local_dec_if_positive(local_t *l)
+{
+ long t;
+
+ __asm__ __volatile__(
+"1:" PPC_LLARX "%0,0,%1 # local_dec_if_positive\n\
+ cmpwi %0,1\n\
+ addi %0,%0,-1\n\
+ blt- 2f\n"
+ PPC405_ERR77(0,%1)
+ PPC_STLCX "%0,0,%1\n\
+ bne- 1b"
+ "\n\
+2:" : "=&b" (t)
+ : "r" (&(l->a.counter))
+ : "cc", "memory");
+
+ return t;
+}
+
+/* Use these for per-cpu local_t variables: on some archs they are
+ * much more efficient than these naive implementations. Note they take
+ * a variable, not an address.
+ */
+
+#define __local_inc(l) ((l)->a.counter++)
+#define __local_dec(l) ((l)->a.counter--)
+#define __local_add(i,l) ((l)->a.counter+=(i))
+#define __local_sub(i,l) ((l)->a.counter-=(i))
+
+/* Need to disable preemption for the cpu local counters otherwise we could
+ still access a variable of a previous CPU in a non atomic way. */
+#define cpu_local_wrap_v(l) \
+ ({ local_t res__; \
+ preempt_disable(); \
+ res__ = (l); \
+ preempt_enable(); \
+ res__; })
+#define cpu_local_wrap(l) \
+ ({ preempt_disable(); \
+ l; \
+ preempt_enable(); }) \
+
+#define cpu_local_read(l) cpu_local_wrap_v(local_read(&__get_cpu_var(l)))
+#define cpu_local_set(l, i) cpu_local_wrap(local_set(&__get_cpu_var(l), (i)))
+#define cpu_local_inc(l) cpu_local_wrap(local_inc(&__get_cpu_var(l)))
+#define cpu_local_dec(l) cpu_local_wrap(local_dec(&__get_cpu_var(l)))
+#define cpu_local_add(i, l) cpu_local_wrap(local_add((i), &__get_cpu_var(l)))
+#define cpu_local_sub(i, l) cpu_local_wrap(local_sub((i), &__get_cpu_var(l)))
+
+#define __cpu_local_inc(l) cpu_local_inc(l)
+#define __cpu_local_dec(l) cpu_local_dec(l)
+#define __cpu_local_add(i, l) cpu_local_add((i), (l))
+#define __cpu_local_sub(i, l) cpu_local_sub((i), (l))
+
+#endif /* _ARCH_POWERPC_LOCAL_H */
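Illustrative sketch (not part of the patch): a per-cpu event counter built on the new primitives. cpu_local_inc() takes the per-cpu variable by name and handles preemption itself; the variable and helper names below are invented.

#include <linux/percpu.h>
#include <asm/local.h>

static DEFINE_PER_CPU(local_t, example_events) = LOCAL_INIT(0);

static void example_count_event(void)
{
	cpu_local_inc(example_events);	/* preempt-safe; larx/stcx. makes it IRQ-safe */
}

static long example_events_on(int cpu)
{
	return local_read(&per_cpu(example_events, cpu));
}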
diff --git a/include/asm-powerpc/machdep.h b/include/asm-powerpc/machdep.h
index b204926ce91..6cf1a831f55 100644
--- a/include/asm-powerpc/machdep.h
+++ b/include/asm-powerpc/machdep.h
@@ -91,6 +91,11 @@ struct machdep_calls {
void __iomem * (*ioremap)(phys_addr_t addr, unsigned long size,
unsigned long flags);
void (*iounmap)(volatile void __iomem *token);
+
+#ifdef CONFIG_PM
+ void (*iommu_save)(void);
+ void (*iommu_restore)(void);
+#endif
#endif /* CONFIG_PPC64 */
int (*probe)(void);
@@ -115,6 +120,14 @@ struct machdep_calls {
/* To setup PHBs when using automatic OF platform driver for PCI */
int (*pci_setup_phb)(struct pci_controller *host);
+#ifdef CONFIG_PCI_MSI
+ int (*msi_check_device)(struct pci_dev* dev,
+ int nvec, int type);
+ int (*setup_msi_irqs)(struct pci_dev *dev,
+ int nvec, int type);
+ void (*teardown_msi_irqs)(struct pci_dev *dev);
+#endif
+
void (*restart)(char *cmd);
void (*power_off)(void);
void (*halt)(void);
@@ -240,14 +253,10 @@ struct machdep_calls {
*/
void (*machine_kexec)(struct kimage *image);
#endif /* CONFIG_KEXEC */
-
-#ifdef CONFIG_PCI_MSI
- int (*enable_msi)(struct pci_dev *pdev);
- void (*disable_msi)(struct pci_dev *pdev);
-#endif /* CONFIG_PCI_MSI */
};
extern void power4_idle(void);
+extern void power4_cpu_offline_powersave(void);
extern void ppc6xx_idle(void);
/*
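Illustrative sketch (assumes CONFIG_PCI_MSI; all names invented): a platform back-end fills in the new MSI hooks, typically by assigning to ppc_md at init time. A real implementation would allocate and map hardware interrupts in setup_msi_irqs().

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <asm/machdep.h>

static int example_msi_check_device(struct pci_dev *dev, int nvec, int type)
{
	return 0;			/* allow MSI for every device */
}

static int example_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
	return -ENOSYS;			/* a real back-end allocates hwirqs here */
}

static void example_teardown_msi_irqs(struct pci_dev *dev)
{
}

static void __init example_msi_init(void)
{
	ppc_md.msi_check_device  = example_msi_check_device;
	ppc_md.setup_msi_irqs    = example_setup_msi_irqs;
	ppc_md.teardown_msi_irqs = example_teardown_msi_irqs;
}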
diff --git a/include/asm-powerpc/mmu-44x.h b/include/asm-powerpc/mmu-44x.h
new file mode 100644
index 00000000000..d5ce7a8dfe9
--- /dev/null
+++ b/include/asm-powerpc/mmu-44x.h
@@ -0,0 +1,78 @@
+#ifndef _ASM_POWERPC_MMU_44X_H_
+#define _ASM_POWERPC_MMU_44X_H_
+/*
+ * PPC440 support
+ */
+
+#define PPC44x_MMUCR_TID 0x000000ff
+#define PPC44x_MMUCR_STS 0x00010000
+
+#define PPC44x_TLB_PAGEID 0
+#define PPC44x_TLB_XLAT 1
+#define PPC44x_TLB_ATTRIB 2
+
+/* Page identification fields */
+#define PPC44x_TLB_EPN_MASK 0xfffffc00 /* Effective Page Number */
+#define PPC44x_TLB_VALID 0x00000200 /* Valid flag */
+#define PPC44x_TLB_TS 0x00000100 /* Translation address space */
+#define PPC44x_TLB_1K 0x00000000 /* Page sizes */
+#define PPC44x_TLB_4K 0x00000010
+#define PPC44x_TLB_16K 0x00000020
+#define PPC44x_TLB_64K 0x00000030
+#define PPC44x_TLB_256K 0x00000040
+#define PPC44x_TLB_1M 0x00000050
+#define PPC44x_TLB_16M 0x00000070
+#define PPC44x_TLB_256M 0x00000090
+
+/* Translation fields */
+#define PPC44x_TLB_RPN_MASK 0xfffffc00 /* Real Page Number */
+#define PPC44x_TLB_ERPN_MASK 0x0000000f
+
+/* Storage attribute and access control fields */
+#define PPC44x_TLB_ATTR_MASK 0x0000ff80
+#define PPC44x_TLB_U0 0x00008000 /* User 0 */
+#define PPC44x_TLB_U1 0x00004000 /* User 1 */
+#define PPC44x_TLB_U2 0x00002000 /* User 2 */
+#define PPC44x_TLB_U3 0x00001000 /* User 3 */
+#define PPC44x_TLB_W 0x00000800 /* Caching is write-through */
+#define PPC44x_TLB_I 0x00000400 /* Caching is inhibited */
+#define PPC44x_TLB_M 0x00000200 /* Memory is coherent */
+#define PPC44x_TLB_G 0x00000100 /* Memory is guarded */
+#define PPC44x_TLB_E 0x00000080 /* Memory is little endian */
+
+#define PPC44x_TLB_PERM_MASK 0x0000003f
+#define PPC44x_TLB_UX 0x00000020 /* User execution */
+#define PPC44x_TLB_UW 0x00000010 /* User write */
+#define PPC44x_TLB_UR 0x00000008 /* User read */
+#define PPC44x_TLB_SX 0x00000004 /* Super execution */
+#define PPC44x_TLB_SW 0x00000002 /* Super write */
+#define PPC44x_TLB_SR 0x00000001 /* Super read */
+
+/* Number of TLB entries */
+#define PPC44x_TLB_SIZE 64
+
+#ifndef __ASSEMBLY__
+
+typedef unsigned long long phys_addr_t;
+
+extern phys_addr_t fixup_bigphys_addr(phys_addr_t, phys_addr_t);
+
+typedef struct {
+ unsigned long id;
+ unsigned long vdso_base;
+} mm_context_t;
+
+#endif /* !__ASSEMBLY__ */
+
+#ifndef CONFIG_PPC_EARLY_DEBUG_44x
+#define PPC44x_EARLY_TLBS 1
+#else
+#define PPC44x_EARLY_TLBS 2
+#define PPC44x_EARLY_DEBUG_VIRTADDR (ASM_CONST(0xf0000000) \
+ | (ASM_CONST(CONFIG_PPC_EARLY_DEBUG_44x_PHYSLOW) & 0xffff))
+#endif
+
+/* Size of the TLBs used for pinning in lowmem */
+#define PPC_PIN_SIZE (1 << 28) /* 256M */
+
+#endif /* _ASM_POWERPC_MMU_44X_H_ */
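Editorial illustration: the PAGEID/ATTRIB constants above are OR-combined into the words loaded into a 440 TLB entry. A plausible (invented) encoding for a pinned, coherent, supervisor read/write/execute lowmem mapping is sketched below; it is not taken from the patch.

#include <asm/mmu.h>	/* brings in mmu-44x.h on 44x builds */

static inline unsigned long example_pinned_pageid(unsigned long vaddr)
{
	/* word 0: effective page number, valid bit, 256MB page, TS = 0 */
	return (vaddr & PPC44x_TLB_EPN_MASK) | PPC44x_TLB_VALID | PPC44x_TLB_256M;
}

static inline unsigned long example_pinned_attrib(void)
{
	/* word 2: coherent memory, supervisor read/write/execute */
	return PPC44x_TLB_M | PPC44x_TLB_SR | PPC44x_TLB_SW | PPC44x_TLB_SX;
}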
diff --git a/include/asm-powerpc/mmu.h b/include/asm-powerpc/mmu.h
index 06b3e6d336c..fe510fff890 100644
--- a/include/asm-powerpc/mmu.h
+++ b/include/asm-powerpc/mmu.h
@@ -5,9 +5,12 @@
#ifdef CONFIG_PPC64
/* 64-bit classic hash table MMU */
# include <asm/mmu-hash64.h>
+#elif defined(CONFIG_44x)
+/* 44x-style software loaded TLB */
+# include <asm/mmu-44x.h>
#else
-/* 32-bit. FIXME: split up the 32-bit MMU types, and revise for
- * arch/powerpc */
+/* Other 32-bit. FIXME: split up the other 32-bit MMU types, and
+ * revise for arch/powerpc */
# include <asm-ppc/mmu.h>
#endif
diff --git a/include/asm-powerpc/mpc52xx.h b/include/asm-powerpc/mpc52xx.h
index 7afd5bf9452..c4631f6dd4f 100644
--- a/include/asm-powerpc/mpc52xx.h
+++ b/include/asm-powerpc/mpc52xx.h
@@ -253,5 +253,16 @@ extern int __init mpc52xx_add_bridge(struct device_node *node);
#endif /* __ASSEMBLY__ */
+#ifdef CONFIG_PM
+struct mpc52xx_suspend {
+ void (*board_suspend_prepare)(void __iomem *mbar);
+ void (*board_resume_finish)(void __iomem *mbar);
+};
+
+extern struct mpc52xx_suspend mpc52xx_suspend;
+extern int __init mpc52xx_pm_init(void);
+extern int mpc52xx_set_wakeup_gpio(u8 pin, u8 level);
+#endif /* CONFIG_PM */
+
#endif /* __ASM_POWERPC_MPC52xx_H__ */
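Illustrative sketch (assumes CONFIG_PM; the board names are invented): platform code hooks the new mpc52xx_suspend callbacks before calling mpc52xx_pm_init().

#include <linux/init.h>
#include <asm/mpc52xx.h>

static void example_board_suspend_prepare(void __iomem *mbar)
{
	/* quiesce board-specific peripherals before deep sleep */
}

static int __init example_board_pm_init(void)
{
	mpc52xx_suspend.board_suspend_prepare = example_board_suspend_prepare;
	return mpc52xx_pm_init();
}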
diff --git a/include/asm-powerpc/mpic.h b/include/asm-powerpc/mpic.h
index e4d5fc5362a..2ffb06abe88 100644
--- a/include/asm-powerpc/mpic.h
+++ b/include/asm-powerpc/mpic.h
@@ -3,6 +3,7 @@
#ifdef __KERNEL__
#include <linux/irq.h>
+#include <linux/sysdev.h>
#include <asm/dcr.h>
/*
@@ -228,6 +229,14 @@ struct mpic_reg_bank {
#endif /* CONFIG_PPC_DCR */
};
+struct mpic_irq_save {
+ u32 vecprio,
+ dest;
+#ifdef CONFIG_MPIC_U3_HT_IRQS
+ u32 fixup_data;
+#endif
+};
+
/* The instance data of a given MPIC */
struct mpic
{
@@ -292,8 +301,19 @@ struct mpic
u32 *hw_set;
#endif
+#ifdef CONFIG_PCI_MSI
+ spinlock_t bitmap_lock;
+ unsigned long *hwirq_bitmap;
+#endif
+
/* link */
struct mpic *next;
+
+ struct sys_device sysdev;
+
+#ifdef CONFIG_PM
+ struct mpic_irq_save *save_data;
+#endif
};
/*
diff --git a/include/asm-powerpc/of_device.h b/include/asm-powerpc/of_device.h
index 4f1aabe0ce7..e9af49eb1aa 100644
--- a/include/asm-powerpc/of_device.h
+++ b/include/asm-powerpc/of_device.h
@@ -32,6 +32,8 @@ extern int of_device_register(struct of_device *ofdev);
extern void of_device_unregister(struct of_device *ofdev);
extern void of_release_dev(struct device *dev);
+extern ssize_t of_device_get_modalias(struct of_device *ofdev,
+ char *str, ssize_t len);
extern int of_device_uevent(struct device *dev,
char **envp, int num_envp, char *buffer, int buffer_size);
diff --git a/include/asm-powerpc/page.h b/include/asm-powerpc/page.h
index b4d38b0b15f..10c51f457d4 100644
--- a/include/asm-powerpc/page.h
+++ b/include/asm-powerpc/page.h
@@ -121,6 +121,7 @@ typedef struct { pte_t pte; } real_pte_t;
#endif
/* PMD level */
+#ifdef CONFIG_PPC64
typedef struct { unsigned long pmd; } pmd_t;
#define pmd_val(x) ((x).pmd)
#define __pmd(x) ((pmd_t) { (x) })
@@ -130,7 +131,8 @@ typedef struct { unsigned long pmd; } pmd_t;
typedef struct { unsigned long pud; } pud_t;
#define pud_val(x) ((x).pud)
#define __pud(x) ((pud_t) { (x) })
-#endif
+#endif /* !CONFIG_PPC_64K_PAGES */
+#endif /* CONFIG_PPC64 */
/* PGD level */
typedef struct { unsigned long pgd; } pgd_t;
@@ -159,15 +161,17 @@ typedef unsigned long real_pte_t;
#endif
+#ifdef CONFIG_PPC64
typedef unsigned long pmd_t;
#define pmd_val(x) (x)
#define __pmd(x) (x)
-#if defined(CONFIG_PPC64) && !defined(CONFIG_PPC_64K_PAGES)
+#ifndef CONFIG_PPC_64K_PAGES
typedef unsigned long pud_t;
#define pud_val(x) (x)
#define __pud(x) (x)
-#endif
+#endif /* !CONFIG_PPC_64K_PAGES */
+#endif /* CONFIG_PPC64 */
typedef unsigned long pgd_t;
#define pgd_val(x) (x)
diff --git a/include/asm-powerpc/page_32.h b/include/asm-powerpc/page_32.h
index 07f6d3cf5e5..374d0db37e1 100644
--- a/include/asm-powerpc/page_32.h
+++ b/include/asm-powerpc/page_32.h
@@ -14,11 +14,9 @@
#ifdef CONFIG_PTE_64BIT
typedef unsigned long long pte_basic_t;
#define PTE_SHIFT (PAGE_SHIFT - 3) /* 512 ptes per page */
-#define PTE_FMT "%16Lx"
#else
typedef unsigned long pte_basic_t;
#define PTE_SHIFT (PAGE_SHIFT - 2) /* 1024 ptes per page */
-#define PTE_FMT "%.8lx"
#endif
struct page;
diff --git a/include/asm-powerpc/parport.h b/include/asm-powerpc/parport.h
index b37b81e3727..414c50e2e88 100644
--- a/include/asm-powerpc/parport.h
+++ b/include/asm-powerpc/parport.h
@@ -12,11 +12,6 @@
#include <asm/prom.h>
-extern struct parport *parport_pc_probe_port (unsigned long int base,
- unsigned long int base_hi,
- int irq, int dma,
- struct pci_dev *dev);
-
static int __devinit parport_pc_find_nonpci_ports (int autoirq, int autodma)
{
struct device_node *np;
diff --git a/include/asm-powerpc/pgalloc-32.h b/include/asm-powerpc/pgalloc-32.h
new file mode 100644
index 00000000000..e1307432163
--- /dev/null
+++ b/include/asm-powerpc/pgalloc-32.h
@@ -0,0 +1,41 @@
+#ifndef _ASM_POWERPC_PGALLOC_32_H
+#define _ASM_POWERPC_PGALLOC_32_H
+
+#include <linux/threads.h>
+
+extern void __bad_pte(pmd_t *pmd);
+
+extern pgd_t *pgd_alloc(struct mm_struct *mm);
+extern void pgd_free(pgd_t *pgd);
+
+/*
+ * We don't have any real pmd's, and this code never triggers because
+ * the pgd will always be present..
+ */
+/* #define pmd_alloc_one(mm,address) ({ BUG(); ((pmd_t *)2); }) */
+#define pmd_free(x) do { } while (0)
+#define __pmd_free_tlb(tlb,x) do { } while (0)
+/* #define pgd_populate(mm, pmd, pte) BUG() */
+
+#ifndef CONFIG_BOOKE
+#define pmd_populate_kernel(mm, pmd, pte) \
+ (pmd_val(*(pmd)) = __pa(pte) | _PMD_PRESENT)
+#define pmd_populate(mm, pmd, pte) \
+ (pmd_val(*(pmd)) = (page_to_pfn(pte) << PAGE_SHIFT) | _PMD_PRESENT)
+#else
+#define pmd_populate_kernel(mm, pmd, pte) \
+ (pmd_val(*(pmd)) = (unsigned long)pte | _PMD_PRESENT)
+#define pmd_populate(mm, pmd, pte) \
+ (pmd_val(*(pmd)) = (unsigned long)lowmem_page_address(pte) | _PMD_PRESENT)
+#endif
+
+extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr);
+extern struct page *pte_alloc_one(struct mm_struct *mm, unsigned long addr);
+extern void pte_free_kernel(pte_t *pte);
+extern void pte_free(struct page *pte);
+
+#define __pte_free_tlb(tlb, pte) pte_free((pte))
+
+#define check_pgt_cache() do { } while (0)
+
+#endif /* _ASM_POWERPC_PGALLOC_32_H */
diff --git a/include/asm-powerpc/pgalloc-64.h b/include/asm-powerpc/pgalloc-64.h
new file mode 100644
index 00000000000..30b50cf56e2
--- /dev/null
+++ b/include/asm-powerpc/pgalloc-64.h
@@ -0,0 +1,152 @@
+#ifndef _ASM_POWERPC_PGALLOC_64_H
+#define _ASM_POWERPC_PGALLOC_64_H
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/cpumask.h>
+#include <linux/percpu.h>
+
+extern struct kmem_cache *pgtable_cache[];
+
+#ifdef CONFIG_PPC_64K_PAGES
+#define PTE_CACHE_NUM 0
+#define PMD_CACHE_NUM 1
+#define PGD_CACHE_NUM 2
+#define HUGEPTE_CACHE_NUM 3
+#else
+#define PTE_CACHE_NUM 0
+#define PMD_CACHE_NUM 1
+#define PUD_CACHE_NUM 1
+#define PGD_CACHE_NUM 0
+#define HUGEPTE_CACHE_NUM 2
+#endif
+
+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+ return kmem_cache_alloc(pgtable_cache[PGD_CACHE_NUM], GFP_KERNEL);
+}
+
+static inline void pgd_free(pgd_t *pgd)
+{
+ kmem_cache_free(pgtable_cache[PGD_CACHE_NUM], pgd);
+}
+
+#ifndef CONFIG_PPC_64K_PAGES
+
+#define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD)
+
+static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
+{
+ return kmem_cache_alloc(pgtable_cache[PUD_CACHE_NUM],
+ GFP_KERNEL|__GFP_REPEAT);
+}
+
+static inline void pud_free(pud_t *pud)
+{
+ kmem_cache_free(pgtable_cache[PUD_CACHE_NUM], pud);
+}
+
+static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+{
+ pud_set(pud, (unsigned long)pmd);
+}
+
+#define pmd_populate(mm, pmd, pte_page) \
+ pmd_populate_kernel(mm, pmd, page_address(pte_page))
+#define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
+
+
+#else /* CONFIG_PPC_64K_PAGES */
+
+#define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
+
+static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
+ pte_t *pte)
+{
+ pmd_set(pmd, (unsigned long)pte);
+}
+
+#define pmd_populate(mm, pmd, pte_page) \
+ pmd_populate_kernel(mm, pmd, page_address(pte_page))
+
+#endif /* CONFIG_PPC_64K_PAGES */
+
+static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
+{
+ return kmem_cache_alloc(pgtable_cache[PMD_CACHE_NUM],
+ GFP_KERNEL|__GFP_REPEAT);
+}
+
+static inline void pmd_free(pmd_t *pmd)
+{
+ kmem_cache_free(pgtable_cache[PMD_CACHE_NUM], pmd);
+}
+
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
+ unsigned long address)
+{
+ return kmem_cache_alloc(pgtable_cache[PTE_CACHE_NUM],
+ GFP_KERNEL|__GFP_REPEAT);
+}
+
+static inline struct page *pte_alloc_one(struct mm_struct *mm,
+ unsigned long address)
+{
+ return virt_to_page(pte_alloc_one_kernel(mm, address));
+}
+
+static inline void pte_free_kernel(pte_t *pte)
+{
+ kmem_cache_free(pgtable_cache[PTE_CACHE_NUM], pte);
+}
+
+static inline void pte_free(struct page *ptepage)
+{
+ pte_free_kernel(page_address(ptepage));
+}
+
+#define PGF_CACHENUM_MASK 0x3
+
+typedef struct pgtable_free {
+ unsigned long val;
+} pgtable_free_t;
+
+static inline pgtable_free_t pgtable_free_cache(void *p, int cachenum,
+ unsigned long mask)
+{
+ BUG_ON(cachenum > PGF_CACHENUM_MASK);
+
+ return (pgtable_free_t){.val = ((unsigned long) p & ~mask) | cachenum};
+}
+
+static inline void pgtable_free(pgtable_free_t pgf)
+{
+ void *p = (void *)(pgf.val & ~PGF_CACHENUM_MASK);
+ int cachenum = pgf.val & PGF_CACHENUM_MASK;
+
+ kmem_cache_free(pgtable_cache[cachenum], p);
+}
+
+extern void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf);
+
+#define __pte_free_tlb(tlb, ptepage) \
+ pgtable_free_tlb(tlb, pgtable_free_cache(page_address(ptepage), \
+ PTE_CACHE_NUM, PTE_TABLE_SIZE-1))
+#define __pmd_free_tlb(tlb, pmd) \
+ pgtable_free_tlb(tlb, pgtable_free_cache(pmd, \
+ PMD_CACHE_NUM, PMD_TABLE_SIZE-1))
+#ifndef CONFIG_PPC_64K_PAGES
+#define __pud_free_tlb(tlb, pud) \
+ pgtable_free_tlb(tlb, pgtable_free_cache(pud, \
+ PUD_CACHE_NUM, PUD_TABLE_SIZE-1))
+#endif /* CONFIG_PPC_64K_PAGES */
+
+#define check_pgt_cache() do { } while (0)
+
+#endif /* _ASM_POWERPC_PGALLOC_64_H */
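Editorial note: pgtable_free_cache() stashes the cache index in the two low bits of the (table-aligned) pointer and pgtable_free() recovers it, so one deferred-free path serves every table level. A hedged sketch of the immediate-free variant for a kernel PTE page follows; the helper name is invented.

static inline void example_free_kernel_pte(pte_t *pte)
{
	/* the table is PTE_TABLE_SIZE aligned, so the low bits are free */
	pgtable_free(pgtable_free_cache(pte, PTE_CACHE_NUM,
					PTE_TABLE_SIZE - 1));
}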
diff --git a/include/asm-powerpc/pgalloc.h b/include/asm-powerpc/pgalloc.h
index b0830db68f8..b4505ed0f0f 100644
--- a/include/asm-powerpc/pgalloc.h
+++ b/include/asm-powerpc/pgalloc.h
@@ -2,159 +2,11 @@
#define _ASM_POWERPC_PGALLOC_H
#ifdef __KERNEL__
-#ifndef CONFIG_PPC64
-#include <asm-ppc/pgalloc.h>
+#ifdef CONFIG_PPC64
+#include <asm/pgalloc-64.h>
#else
-
-#include <linux/mm.h>
-#include <linux/slab.h>
-#include <linux/cpumask.h>
-#include <linux/percpu.h>
-
-extern struct kmem_cache *pgtable_cache[];
-
-#ifdef CONFIG_PPC_64K_PAGES
-#define PTE_CACHE_NUM 0
-#define PMD_CACHE_NUM 1
-#define PGD_CACHE_NUM 2
-#define HUGEPTE_CACHE_NUM 3
-#else
-#define PTE_CACHE_NUM 0
-#define PMD_CACHE_NUM 1
-#define PUD_CACHE_NUM 1
-#define PGD_CACHE_NUM 0
-#define HUGEPTE_CACHE_NUM 2
+#include <asm/pgalloc-32.h>
#endif
-/*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-static inline pgd_t *pgd_alloc(struct mm_struct *mm)
-{
- return kmem_cache_alloc(pgtable_cache[PGD_CACHE_NUM], GFP_KERNEL);
-}
-
-static inline void pgd_free(pgd_t *pgd)
-{
- kmem_cache_free(pgtable_cache[PGD_CACHE_NUM], pgd);
-}
-
-#ifndef CONFIG_PPC_64K_PAGES
-
-#define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD)
-
-static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
-{
- return kmem_cache_alloc(pgtable_cache[PUD_CACHE_NUM],
- GFP_KERNEL|__GFP_REPEAT);
-}
-
-static inline void pud_free(pud_t *pud)
-{
- kmem_cache_free(pgtable_cache[PUD_CACHE_NUM], pud);
-}
-
-static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
-{
- pud_set(pud, (unsigned long)pmd);
-}
-
-#define pmd_populate(mm, pmd, pte_page) \
- pmd_populate_kernel(mm, pmd, page_address(pte_page))
-#define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
-
-
-#else /* CONFIG_PPC_64K_PAGES */
-
-#define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
-
-static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
- pte_t *pte)
-{
- pmd_set(pmd, (unsigned long)pte);
-}
-
-#define pmd_populate(mm, pmd, pte_page) \
- pmd_populate_kernel(mm, pmd, page_address(pte_page))
-
-#endif /* CONFIG_PPC_64K_PAGES */
-
-static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
-{
- return kmem_cache_alloc(pgtable_cache[PMD_CACHE_NUM],
- GFP_KERNEL|__GFP_REPEAT);
-}
-
-static inline void pmd_free(pmd_t *pmd)
-{
- kmem_cache_free(pgtable_cache[PMD_CACHE_NUM], pmd);
-}
-
-static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
- unsigned long address)
-{
- return kmem_cache_alloc(pgtable_cache[PTE_CACHE_NUM],
- GFP_KERNEL|__GFP_REPEAT);
-}
-
-static inline struct page *pte_alloc_one(struct mm_struct *mm,
- unsigned long address)
-{
- return virt_to_page(pte_alloc_one_kernel(mm, address));
-}
-
-static inline void pte_free_kernel(pte_t *pte)
-{
- kmem_cache_free(pgtable_cache[PTE_CACHE_NUM], pte);
-}
-
-static inline void pte_free(struct page *ptepage)
-{
- pte_free_kernel(page_address(ptepage));
-}
-
-#define PGF_CACHENUM_MASK 0x3
-
-typedef struct pgtable_free {
- unsigned long val;
-} pgtable_free_t;
-
-static inline pgtable_free_t pgtable_free_cache(void *p, int cachenum,
- unsigned long mask)
-{
- BUG_ON(cachenum > PGF_CACHENUM_MASK);
-
- return (pgtable_free_t){.val = ((unsigned long) p & ~mask) | cachenum};
-}
-
-static inline void pgtable_free(pgtable_free_t pgf)
-{
- void *p = (void *)(pgf.val & ~PGF_CACHENUM_MASK);
- int cachenum = pgf.val & PGF_CACHENUM_MASK;
-
- kmem_cache_free(pgtable_cache[cachenum], p);
-}
-
-extern void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf);
-
-#define __pte_free_tlb(tlb, ptepage) \
- pgtable_free_tlb(tlb, pgtable_free_cache(page_address(ptepage), \
- PTE_CACHE_NUM, PTE_TABLE_SIZE-1))
-#define __pmd_free_tlb(tlb, pmd) \
- pgtable_free_tlb(tlb, pgtable_free_cache(pmd, \
- PMD_CACHE_NUM, PMD_TABLE_SIZE-1))
-#ifndef CONFIG_PPC_64K_PAGES
-#define __pud_free_tlb(tlb, pud) \
- pgtable_free_tlb(tlb, pgtable_free_cache(pud, \
- PUD_CACHE_NUM, PUD_TABLE_SIZE-1))
-#endif /* CONFIG_PPC_64K_PAGES */
-
-#define check_pgt_cache() do { } while (0)
-
-#endif /* CONFIG_PPC64 */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_PGALLOC_H */
diff --git a/include/asm-powerpc/pgtable-4k.h b/include/asm-powerpc/pgtable-4k.h
index a28fa8bc01d..1744d6ac12a 100644
--- a/include/asm-powerpc/pgtable-4k.h
+++ b/include/asm-powerpc/pgtable-4k.h
@@ -1,3 +1,5 @@
+#ifndef _ASM_POWERPC_PGTABLE_4K_H
+#define _ASM_POWERPC_PGTABLE_4K_H
/*
* Entries per page directory level. The PTE level must use a 64b record
* for each page table entry. The PMD and PGD level use a 32b record for
@@ -100,3 +102,4 @@
#define remap_4k_pfn(vma, addr, pfn, prot) \
remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE, (prot))
+#endif /* _ASM_POWERPC_PGTABLE_4K_H */
diff --git a/include/asm-powerpc/pgtable-64k.h b/include/asm-powerpc/pgtable-64k.h
index 5e84f070eaf..16ef4978520 100644
--- a/include/asm-powerpc/pgtable-64k.h
+++ b/include/asm-powerpc/pgtable-64k.h
@@ -1,6 +1,5 @@
#ifndef _ASM_POWERPC_PGTABLE_64K_H
#define _ASM_POWERPC_PGTABLE_64K_H
-#ifdef __KERNEL__
#include <asm-generic/pgtable-nopud.h>
@@ -65,8 +64,6 @@
/* Bits to mask out from a PGD/PUD to get to the PMD page */
#define PUD_MASKED_BITS 0x1ff
-#ifndef __ASSEMBLY__
-
/* Manipulate "rpte" values */
#define __real_pte(e,p) ((real_pte_t) { \
(e), pte_val(*((p) + PTRS_PER_PTE)) })
@@ -98,6 +95,4 @@
remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE, \
__pgprot(pgprot_val((prot)) | _PAGE_4K_PFN))
-#endif /* __ASSEMBLY__ */
-#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_PGTABLE_64K_H */
diff --git a/include/asm-powerpc/pgtable-ppc32.h b/include/asm-powerpc/pgtable-ppc32.h
new file mode 100644
index 00000000000..09662a24f22
--- /dev/null
+++ b/include/asm-powerpc/pgtable-ppc32.h
@@ -0,0 +1,813 @@
+#ifndef _ASM_POWERPC_PGTABLE_PPC32_H
+#define _ASM_POWERPC_PGTABLE_PPC32_H
+
+#include <asm-generic/pgtable-nopmd.h>
+
+#ifndef __ASSEMBLY__
+#include <linux/sched.h>
+#include <linux/threads.h>
+#include <asm/processor.h> /* For TASK_SIZE */
+#include <asm/mmu.h>
+#include <asm/page.h>
+#include <asm/io.h> /* For sub-arch specific PPC_PIN_SIZE */
+struct mm_struct;
+
+extern unsigned long va_to_phys(unsigned long address);
+extern pte_t *va_to_pte(unsigned long address);
+extern unsigned long ioremap_bot, ioremap_base;
+#endif /* __ASSEMBLY__ */
+
+/*
+ * The PowerPC MMU uses a hash table containing PTEs, together with
+ * a set of 16 segment registers (on 32-bit implementations), to define
+ * the virtual to physical address mapping.
+ *
+ * We use the hash table as an extended TLB, i.e. a cache of currently
+ * active mappings. We maintain a two-level page table tree, much
+ * like that used by the i386, for the sake of the Linux memory
+ * management code. Low-level assembler code in hashtable.S
+ * (procedure hash_page) is responsible for extracting ptes from the
+ * tree and putting them into the hash table when necessary, and
+ * updating the accessed and modified bits in the page table tree.
+ */
+
+/*
+ * The PowerPC MPC8xx uses a TLB with hardware assisted, software tablewalk.
+ * We also use the two level tables, but we can put the real bits in them
+ * needed for the TLB and tablewalk. These definitions require Mx_CTR.PPM = 0,
+ * Mx_CTR.PPCS = 0, and MD_CTR.TWAM = 1. The level 2 descriptor has
+ * additional page protection (when Mx_CTR.PPCS = 1) that allows TLB hit
+ * based upon user/super access. The TLB does not have accessed nor write
+ * protect. We assume that if the TLB get loaded with an entry it is
+ * accessed, and overload the changed bit for write protect. We use
+ * two bits in the software pte that are supposed to be set to zero in
+ * the TLB entry (24 and 25) for these indicators. Although the level 1
+ * descriptor contains the guarded and writethrough/copyback bits, we can
+ * set these at the page level since they get copied from the Mx_TWC
+ * register when the TLB entry is loaded. We will use bit 27 for guard, since
+ * that is where it exists in the MD_TWC, and bit 26 for writethrough.
+ * These will get masked from the level 2 descriptor at TLB load time, and
+ * copied to the MD_TWC before it gets loaded.
+ * Large page sizes added. We currently support two sizes, 4K and 8M.
+ * This also allows a TLB handler optimization because we can directly
+ * load the PMD into MD_TWC. The 8M pages are only used for kernel
+ * mapping of well known areas. The PMD (PGD) entries contain control
+ * flags in addition to the address, so care must be taken that the
+ * software no longer assumes these are only pointers.
+ */
+
+/*
+ * At present, all PowerPC 400-class processors share a similar TLB
+ * architecture. The instruction and data sides share a unified,
+ * 64-entry, fully-associative TLB which is maintained totally under
+ * software control. In addition, the instruction side has a
+ * hardware-managed, 4-entry, fully-associative TLB which serves as a
+ * first level to the shared TLB. These two TLBs are known as the UTLB
+ * and ITLB, respectively (see "mmu.h" for definitions).
+ */
+
+/*
+ * The normal case is that PTEs are 32-bits and we have a 1-page
+ * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages. -- paulus
+ *
+ * For any >32-bit physical address platform, we can use the following
+ * two level page table layout where the pgdir is 8KB and the MS 13 bits
+ * are an index to the second level table. The combined pgdir/pmd first
+ * level has 2048 entries and the second level has 512 64-bit PTE entries.
+ * -Matt
+ */
+/* PGDIR_SHIFT determines what a top-level page table entry can map */
+#define PGDIR_SHIFT (PAGE_SHIFT + PTE_SHIFT)
+#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE-1))
+
+/*
+ * entries per page directory level: our page-table tree is two-level, so
+ * we don't really have any PMD directory.
+ */
+#define PTRS_PER_PTE (1 << PTE_SHIFT)
+#define PTRS_PER_PMD 1
+#define PTRS_PER_PGD (1 << (32 - PGDIR_SHIFT))
+
+#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
+#define FIRST_USER_ADDRESS 0
+
+#define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
+#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)
+
+#define pte_ERROR(e) \
+ printk("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \
+ (unsigned long long)pte_val(e))
+#define pgd_ERROR(e) \
+ printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
+
+/*
+ * Just any arbitrary offset to the start of the vmalloc VM area: the
+ * current 64MB value just means that there will be a 64MB "hole" after the
+ * physical memory until the kernel virtual memory starts. That means that
+ * any out-of-bounds memory accesses will hopefully be caught.
+ * The vmalloc() routines leave a hole of 4kB between each vmalloced
+ * area for the same reason. ;)
+ *
+ * We no longer map larger than phys RAM with the BATs so we don't have
+ * to worry about the VMALLOC_OFFSET causing problems. We do have to worry
+ * about clashes between our early calls to ioremap() that start growing down
+ * from ioremap_base being run into the VM area allocations (growing upwards
+ * from VMALLOC_START). For this reason we have ioremap_bot to check when
+ * we actually run into our mappings setup in the early boot with the VM
+ * system. This really does become a problem for machines with good amounts
+ * of RAM. -- Cort
+ */
+#define VMALLOC_OFFSET (0x1000000) /* 16M */
+#ifdef PPC_PIN_SIZE
+#define VMALLOC_START (((_ALIGN((long)high_memory, PPC_PIN_SIZE) + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
+#else
+#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
+#endif
+#define VMALLOC_END ioremap_bot
+
+/*
+ * Bits in a linux-style PTE. These match the bits in the
+ * (hardware-defined) PowerPC PTE as closely as possible.
+ */
+
+#if defined(CONFIG_40x)
+
+/* There are several potential gotchas here. The 40x hardware TLBLO
+ field looks like this:
+
+ 0 1 2 3 4 ... 18 19 20 21 22 23 24 25 26 27 28 29 30 31
+ RPN..................... 0 0 EX WR ZSEL....... W I M G
+
+ Where possible we make the Linux PTE bits match up with this
+
+ - bits 20 and 21 must be cleared, because we use 4k pages (40x can
+ support down to 1k pages), this is done in the TLBMiss exception
+ handler.
+ - We use only zones 0 (for kernel pages) and 1 (for user pages)
+ * of the 16 available. Bits 24-26 of the TLB are cleared in the TLB
+ miss handler. Bit 27 is PAGE_USER, thus selecting the correct
+ zone.
+ - PRESENT *must* be in the bottom two bits because swap cache
+ entries use the top 30 bits. Because 40x doesn't support SMP
+ anyway, M is irrelevant so we borrow it for PAGE_PRESENT. Bit 30
+ is cleared in the TLB miss handler before the TLB entry is loaded.
+ - All other bits of the PTE are loaded into TLBLO without
+ modification, leaving us only the bits 20, 21, 24, 25, 26, 30 for
+ software PTE bits. We actually use use bits 21, 24, 25, and
+ 30 respectively for the software bits: ACCESSED, DIRTY, RW, and
+ PRESENT.
+*/
+
+/* Definitions for 40x embedded chips. */
+#define _PAGE_GUARDED 0x001 /* G: page is guarded from prefetch */
+#define _PAGE_FILE 0x001 /* when !present: nonlinear file mapping */
+#define _PAGE_PRESENT 0x002 /* software: PTE contains a translation */
+#define _PAGE_NO_CACHE 0x004 /* I: caching is inhibited */
+#define _PAGE_WRITETHRU 0x008 /* W: caching is write-through */
+#define _PAGE_USER 0x010 /* matches one of the zone permission bits */
+#define _PAGE_RW 0x040 /* software: Writes permitted */
+#define _PAGE_DIRTY 0x080 /* software: dirty page */
+#define _PAGE_HWWRITE 0x100 /* hardware: Dirty & RW, set in exception */
+#define _PAGE_HWEXEC 0x200 /* hardware: EX permission */
+#define _PAGE_ACCESSED 0x400 /* software: R: page referenced */
+
+#define _PMD_PRESENT 0x400 /* PMD points to page of PTEs */
+#define _PMD_BAD 0x802
+#define _PMD_SIZE 0x0e0 /* size field, != 0 for large-page PMD entry */
+#define _PMD_SIZE_4M 0x0c0
+#define _PMD_SIZE_16M 0x0e0
+#define PMD_PAGE_SIZE(pmdval) (1024 << (((pmdval) & _PMD_SIZE) >> 4))
+
+#elif defined(CONFIG_44x)
+/*
+ * Definitions for PPC440
+ *
+ * Because of the 3 word TLB entries to support 36-bit addressing,
+ * the attributes are difficult to map in such a fashion that they
+ * are easily loaded during exception processing. I decided to
+ * organize the entry so the ERPN is the only portion in the
+ * upper word of the PTE and the attribute bits below are packed
+ * in as sensibly as they can be in the area below a 4KB page size
+ * oriented RPN. This at least makes it easy to load the RPN and
+ * ERPN fields in the TLB. -Matt
+ *
+ * Note that these bits preclude future use of a page size
+ * less than 4KB.
+ *
+ *
+ * PPC 440 core has following TLB attribute fields;
+ *
+ * TLB1:
+ * 0 1 2 3 4 ... 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
+ * RPN................................. - - - - - - ERPN.......
+ *
+ * TLB2:
+ * 0 1 2 3 4 ... 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
+ * - - - - - - U0 U1 U2 U3 W I M G E - UX UW UR SX SW SR
+ *
+ * There are some constraints and options to consider when deciding how to
+ * map software bits into a TLB entry.
+ *
+ * - PRESENT *must* be in the bottom three bits because swap cache
+ * entries use the top 29 bits for TLB2.
+ *
+ * - FILE *must* be in the bottom three bits because swap cache
+ * entries use the top 29 bits for TLB2.
+ *
+ * - CACHE COHERENT bit (M) has no effect on PPC440 core, because it
+ * doesn't support SMP. So we can use this as software bit, like
+ * DIRTY.
+ *
+ * With the PPC 44x Linux implementation, the 0-11th LSBs of the PTE are used
+ * for memory protection related functions (see PTE structure in
+ * include/asm-ppc/mmu.h). The _PAGE_XXX definitions in this file map to the
+ * above bits. Note that the bit values are CPU specific, not architecture
+ * specific.
+ *
+ * The kernel PTE entry holds an arch-dependent swp_entry structure under
+ * certain situations. In other words, in such situations some portion of
+ * the PTE bits are used as a swp_entry. In the PPC implementation, the
+ * 3-24th LSB are shared with swp_entry, however the 0-2nd three LSB still
+ * hold protection values. That means the three protection bits are
+ * reserved for both PTE and SWAP entry at the most significant three
+ * LSBs.
+ *
+ * There are three protection bits available for SWAP entry:
+ * _PAGE_PRESENT
+ * _PAGE_FILE
+ * _PAGE_HASHPTE (if HW has)
+ *
+ * So those three bits have to be inside of 0-2nd LSB of PTE.
+ *
+ */
+
+#define _PAGE_PRESENT 0x00000001 /* S: PTE valid */
+#define _PAGE_RW 0x00000002 /* S: Write permission */
+#define _PAGE_FILE 0x00000004 /* S: nonlinear file mapping */
+#define _PAGE_ACCESSED 0x00000008 /* S: Page referenced */
+#define _PAGE_HWWRITE 0x00000010 /* H: Dirty & RW */
+#define _PAGE_HWEXEC 0x00000020 /* H: Execute permission */
+#define _PAGE_USER 0x00000040 /* S: User page */
+#define _PAGE_ENDIAN 0x00000080 /* H: E bit */
+#define _PAGE_GUARDED 0x00000100 /* H: G bit */
+#define _PAGE_DIRTY 0x00000200 /* S: Page dirty */
+#define _PAGE_NO_CACHE 0x00000400 /* H: I bit */
+#define _PAGE_WRITETHRU 0x00000800 /* H: W bit */
+
+/* TODO: Add large page lowmem mapping support */
+#define _PMD_PRESENT 0
+#define _PMD_PRESENT_MASK (PAGE_MASK)
+#define _PMD_BAD (~PAGE_MASK)
+
+/* ERPN in a PTE never gets cleared, ignore it */
+#define _PTE_NONE_MASK 0xffffffff00000000ULL
+
+#elif defined(CONFIG_FSL_BOOKE)
+/*
+ MMU Assist Register 3:
+
+ 32 33 34 35 36 ... 50 51 52 53 54 55 56 57 58 59 60 61 62 63
+ RPN...................... 0 0 U0 U1 U2 U3 UX SX UW SW UR SR
+
+ - PRESENT *must* be in the bottom three bits because swap cache
+ entries use the top 29 bits.
+
+ - FILE *must* be in the bottom three bits because swap cache
+ entries use the top 29 bits.
+*/
+
+/* Definitions for FSL Book-E Cores */
+#define _PAGE_PRESENT 0x00001 /* S: PTE contains a translation */
+#define _PAGE_USER 0x00002 /* S: User page (maps to UR) */
+#define _PAGE_FILE 0x00002 /* S: when !present: nonlinear file mapping */
+#define _PAGE_ACCESSED 0x00004 /* S: Page referenced */
+#define _PAGE_HWWRITE 0x00008 /* H: Dirty & RW, set in exception */
+#define _PAGE_RW 0x00010 /* S: Write permission */
+#define _PAGE_HWEXEC 0x00020 /* H: UX permission */
+
+#define _PAGE_ENDIAN 0x00040 /* H: E bit */
+#define _PAGE_GUARDED 0x00080 /* H: G bit */
+#define _PAGE_COHERENT 0x00100 /* H: M bit */
+#define _PAGE_NO_CACHE 0x00200 /* H: I bit */
+#define _PAGE_WRITETHRU 0x00400 /* H: W bit */
+
+#ifdef CONFIG_PTE_64BIT
+#define _PAGE_DIRTY 0x08000 /* S: Page dirty */
+
+/* ERPN in a PTE never gets cleared, ignore it */
+#define _PTE_NONE_MASK 0xffffffffffff0000ULL
+#else
+#define _PAGE_DIRTY 0x00800 /* S: Page dirty */
+#endif
+
+#define _PMD_PRESENT 0
+#define _PMD_PRESENT_MASK (PAGE_MASK)
+#define _PMD_BAD (~PAGE_MASK)
+
+#elif defined(CONFIG_8xx)
+/* Definitions for 8xx embedded chips. */
+#define _PAGE_PRESENT 0x0001 /* Page is valid */
+#define _PAGE_FILE 0x0002 /* when !present: nonlinear file mapping */
+#define _PAGE_NO_CACHE 0x0002 /* I: cache inhibit */
+#define _PAGE_SHARED 0x0004 /* No ASID (context) compare */
+
+/* These five software bits must be masked out when the entry is loaded
+ * into the TLB.
+ */
+#define _PAGE_EXEC 0x0008 /* software: i-cache coherency required */
+#define _PAGE_GUARDED 0x0010 /* software: guarded access */
+#define _PAGE_DIRTY 0x0020 /* software: page changed */
+#define _PAGE_RW 0x0040 /* software: user write access allowed */
+#define _PAGE_ACCESSED 0x0080 /* software: page referenced */
+
+/* Setting any bits in the nibble with the follow two controls will
+ * require a TLB exception handler change. It is assumed unused bits
+ * are always zero.
+ */
+#define _PAGE_HWWRITE 0x0100 /* h/w write enable: never set in Linux PTE */
+#define _PAGE_USER 0x0800 /* One of the PP bits, the other is USER&~RW */
+
+#define _PMD_PRESENT 0x0001
+#define _PMD_BAD 0x0ff0
+#define _PMD_PAGE_MASK 0x000c
+#define _PMD_PAGE_8M 0x000c
+
+/*
+ * The 8xx TLB miss handler allegedly sets _PAGE_ACCESSED in the PTE
+ * for an address even if _PAGE_PRESENT is not set, as a performance
+ * optimization. This is a bug if you ever want to use swap unless
+ * _PAGE_ACCESSED is 2, which it isn't, or unless you have 8xx-specific
+ * definitions for __swp_entry etc. below, which would be gross.
+ * -- paulus
+ */
+#define _PTE_NONE_MASK _PAGE_ACCESSED
+
+#else /* CONFIG_6xx */
+/* Definitions for 60x, 740/750, etc. */
+#define _PAGE_PRESENT 0x001 /* software: pte contains a translation */
+#define _PAGE_HASHPTE 0x002 /* hash_page has made an HPTE for this pte */
+#define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
+#define _PAGE_USER 0x004 /* usermode access allowed */
+#define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
+#define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
+#define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
+#define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
+#define _PAGE_DIRTY 0x080 /* C: page changed */
+#define _PAGE_ACCESSED 0x100 /* R: page referenced */
+#define _PAGE_EXEC 0x200 /* software: i-cache coherency required */
+#define _PAGE_RW 0x400 /* software: user write access allowed */
+
+#define _PTE_NONE_MASK _PAGE_HASHPTE
+
+#define _PMD_PRESENT 0
+#define _PMD_PRESENT_MASK (PAGE_MASK)
+#define _PMD_BAD (~PAGE_MASK)
+#endif
+
+/*
+ * Some bits are only used on some cpu families...
+ */
+#ifndef _PAGE_HASHPTE
+#define _PAGE_HASHPTE 0
+#endif
+#ifndef _PTE_NONE_MASK
+#define _PTE_NONE_MASK 0
+#endif
+#ifndef _PAGE_SHARED
+#define _PAGE_SHARED 0
+#endif
+#ifndef _PAGE_HWWRITE
+#define _PAGE_HWWRITE 0
+#endif
+#ifndef _PAGE_HWEXEC
+#define _PAGE_HWEXEC 0
+#endif
+#ifndef _PAGE_EXEC
+#define _PAGE_EXEC 0
+#endif
+#ifndef _PMD_PRESENT_MASK
+#define _PMD_PRESENT_MASK _PMD_PRESENT
+#endif
+#ifndef _PMD_SIZE
+#define _PMD_SIZE 0
+#define PMD_PAGE_SIZE(pmd) bad_call_to_PMD_PAGE_SIZE()
+#endif
+
+#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
+
+/*
+ * Note: the _PAGE_COHERENT bit automatically gets set in the hardware
+ * PTE if CONFIG_SMP is defined (hash_page does this); there is no need
+ * to have it in the Linux PTE, and in fact the bit could be reused for
+ * another purpose. -- paulus.
+ */
+
+#ifdef CONFIG_44x
+#define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_GUARDED)
+#else
+#define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED)
+#endif
+#define _PAGE_WRENABLE (_PAGE_RW | _PAGE_DIRTY | _PAGE_HWWRITE)
+#define _PAGE_KERNEL (_PAGE_BASE | _PAGE_SHARED | _PAGE_WRENABLE)
+
+#ifdef CONFIG_PPC_STD_MMU
+/* On standard PPC MMU, no user access implies kernel read/write access,
+ * so to write-protect kernel memory we must turn on user access */
+#define _PAGE_KERNEL_RO (_PAGE_BASE | _PAGE_SHARED | _PAGE_USER)
+#else
+#define _PAGE_KERNEL_RO (_PAGE_BASE | _PAGE_SHARED)
+#endif
+
+#define _PAGE_IO (_PAGE_KERNEL | _PAGE_NO_CACHE | _PAGE_GUARDED)
+#define _PAGE_RAM (_PAGE_KERNEL | _PAGE_HWEXEC)
+
+#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH)
+/* We want the debuggers to be able to set breakpoints anywhere, so
+ * don't write protect the kernel text */
+#define _PAGE_RAM_TEXT _PAGE_RAM
+#else
+#define _PAGE_RAM_TEXT (_PAGE_KERNEL_RO | _PAGE_HWEXEC)
+#endif
+
+#define PAGE_NONE __pgprot(_PAGE_BASE)
+#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER)
+#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
+#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
+#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
+#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER)
+#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
+
+#define PAGE_KERNEL __pgprot(_PAGE_RAM)
+#define PAGE_KERNEL_NOCACHE __pgprot(_PAGE_IO)
+
+/*
+ * The PowerPC can only do execute protection on a segment (256MB) basis,
+ * not on a page basis. So we consider execute permission the same as read.
+ * Also, write permissions imply read permissions.
+ * This is the closest we can get..
+ */
+#define __P000 PAGE_NONE
+#define __P001 PAGE_READONLY_X
+#define __P010 PAGE_COPY
+#define __P011 PAGE_COPY_X
+#define __P100 PAGE_READONLY
+#define __P101 PAGE_READONLY_X
+#define __P110 PAGE_COPY
+#define __P111 PAGE_COPY_X
+
+#define __S000 PAGE_NONE
+#define __S001 PAGE_READONLY_X
+#define __S010 PAGE_SHARED
+#define __S011 PAGE_SHARED_X
+#define __S100 PAGE_READONLY
+#define __S101 PAGE_READONLY_X
+#define __S110 PAGE_SHARED
+#define __S111 PAGE_SHARED_X
+
+#ifndef __ASSEMBLY__
+/* Make sure we get a link error if PMD_PAGE_SIZE is ever called on a
+ * kernel without large page PMD support */
+extern unsigned long bad_call_to_PMD_PAGE_SIZE(void);
+
+/*
+ * Conversions between PTE values and page frame numbers.
+ */
+
+/* In some cases we want to additionally adjust where the PFN sits in the PTE
+ * to allow room for more flags */
+#if defined(CONFIG_FSL_BOOKE) && defined(CONFIG_PTE_64BIT)
+#define PFN_SHIFT_OFFSET (PAGE_SHIFT + 8)
+#else
+#define PFN_SHIFT_OFFSET (PAGE_SHIFT)
+#endif
+
+#define pte_pfn(x) (pte_val(x) >> PFN_SHIFT_OFFSET)
+#define pte_page(x) pfn_to_page(pte_pfn(x))
+
+#define pfn_pte(pfn, prot) __pte(((pte_basic_t)(pfn) << PFN_SHIFT_OFFSET) |\
+ pgprot_val(prot))
+#define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot)
+
+/*
+ * ZERO_PAGE is a global shared page that is always zero: used
+ * for zero-mapped memory areas etc..
+ */
+extern unsigned long empty_zero_page[1024];
+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
+
+#endif /* __ASSEMBLY__ */
+
+#define pte_none(pte) ((pte_val(pte) & ~_PTE_NONE_MASK) == 0)
+#define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT)
+#define pte_clear(mm,addr,ptep) do { set_pte_at((mm), (addr), (ptep), __pte(0)); } while (0)
+
+#define pmd_none(pmd) (!pmd_val(pmd))
+#define pmd_bad(pmd) (pmd_val(pmd) & _PMD_BAD)
+#define pmd_present(pmd) (pmd_val(pmd) & _PMD_PRESENT_MASK)
+#define pmd_clear(pmdp) do { pmd_val(*(pmdp)) = 0; } while (0)
+
+#ifndef __ASSEMBLY__
+/*
+ * The following only work if pte_present() is true.
+ * Undefined behaviour if not..
+ */
+static inline int pte_read(pte_t pte) { return pte_val(pte) & _PAGE_USER; }
+static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; }
+static inline int pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_EXEC; }
+static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
+static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
+static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
+
+static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; }
+static inline void pte_cache(pte_t pte) { pte_val(pte) &= ~_PAGE_NO_CACHE; }
+
+static inline pte_t pte_rdprotect(pte_t pte) {
+ pte_val(pte) &= ~_PAGE_USER; return pte; }
+static inline pte_t pte_wrprotect(pte_t pte) {
+ pte_val(pte) &= ~(_PAGE_RW | _PAGE_HWWRITE); return pte; }
+static inline pte_t pte_exprotect(pte_t pte) {
+ pte_val(pte) &= ~_PAGE_EXEC; return pte; }
+static inline pte_t pte_mkclean(pte_t pte) {
+ pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HWWRITE); return pte; }
+static inline pte_t pte_mkold(pte_t pte) {
+ pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
+
+static inline pte_t pte_mkread(pte_t pte) {
+ pte_val(pte) |= _PAGE_USER; return pte; }
+static inline pte_t pte_mkexec(pte_t pte) {
+ pte_val(pte) |= _PAGE_USER | _PAGE_EXEC; return pte; }
+static inline pte_t pte_mkwrite(pte_t pte) {
+ pte_val(pte) |= _PAGE_RW; return pte; }
+static inline pte_t pte_mkdirty(pte_t pte) {
+ pte_val(pte) |= _PAGE_DIRTY; return pte; }
+static inline pte_t pte_mkyoung(pte_t pte) {
+ pte_val(pte) |= _PAGE_ACCESSED; return pte; }
+
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+ pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
+ return pte;
+}
+
+/*
+ * When flushing the tlb entry for a page, we also need to flush the hash
+ * table entry. flush_hash_pages is assembler (for speed) in hashtable.S.
+ */
+extern int flush_hash_pages(unsigned context, unsigned long va,
+ unsigned long pmdval, int count);
+
+/* Add an HPTE to the hash table */
+extern void add_hash_page(unsigned context, unsigned long va,
+ unsigned long pmdval);
+
+/*
+ * Atomic PTE updates.
+ *
+ * pte_update clears and sets bit atomically, and returns
+ * the old pte value. In the 64-bit PTE case we lock around the
+ * low PTE word since we expect ALL flag bits to be there
+ */
+#ifndef CONFIG_PTE_64BIT
+static inline unsigned long pte_update(pte_t *p, unsigned long clr,
+ unsigned long set)
+{
+ unsigned long old, tmp;
+
+ __asm__ __volatile__("\
+1: lwarx %0,0,%3\n\
+ andc %1,%0,%4\n\
+ or %1,%1,%5\n"
+ PPC405_ERR77(0,%3)
+" stwcx. %1,0,%3\n\
+ bne- 1b"
+ : "=&r" (old), "=&r" (tmp), "=m" (*p)
+ : "r" (p), "r" (clr), "r" (set), "m" (*p)
+ : "cc" );
+ return old;
+}
+#else
+static inline unsigned long long pte_update(pte_t *p, unsigned long clr,
+ unsigned long set)
+{
+ unsigned long long old;
+ unsigned long tmp;
+
+ __asm__ __volatile__("\
+1: lwarx %L0,0,%4\n\
+ lwzx %0,0,%3\n\
+ andc %1,%L0,%5\n\
+ or %1,%1,%6\n"
+ PPC405_ERR77(0,%3)
+" stwcx. %1,0,%4\n\
+ bne- 1b"
+ : "=&r" (old), "=&r" (tmp), "=m" (*p)
+ : "r" (p), "r" ((unsigned long)(p) + 4), "r" (clr), "r" (set), "m" (*p)
+ : "cc" );
+ return old;
+}
+#endif
+
+/*
+ * set_pte stores a linux PTE into the linux page table.
+ * On machines which use an MMU hash table we avoid changing the
+ * _PAGE_HASHPTE bit.
+ */
+static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte)
+{
+#if _PAGE_HASHPTE != 0
+ pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte) & ~_PAGE_HASHPTE);
+#else
+ *ptep = pte;
+#endif
+}
+
+/*
+ * 2.6 calls this without flushing the TLB entry; this is wrong for our
+ * hash-based implementation, so we fix that up here
+ */
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+static inline int __ptep_test_and_clear_young(unsigned int context, unsigned long addr, pte_t *ptep)
+{
+ unsigned long old;
+ old = pte_update(ptep, _PAGE_ACCESSED, 0);
+#if _PAGE_HASHPTE != 0
+ if (old & _PAGE_HASHPTE) {
+ unsigned long ptephys = __pa(ptep) & PAGE_MASK;
+ flush_hash_pages(context, addr, ptephys, 1);
+ }
+#endif
+ return (old & _PAGE_ACCESSED) != 0;
+}
+#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
+ __ptep_test_and_clear_young((__vma)->vm_mm->context.id, __addr, __ptep)
+
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
+static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep)
+{
+ return (pte_update(ptep, (_PAGE_DIRTY | _PAGE_HWWRITE), 0) & _PAGE_DIRTY) != 0;
+}
+
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
+static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep)
+{
+ return __pte(pte_update(ptep, ~_PAGE_HASHPTE, 0));
+}
+
+#define __HAVE_ARCH_PTEP_SET_WRPROTECT
+static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep)
+{
+ pte_update(ptep, (_PAGE_RW | _PAGE_HWWRITE), 0);
+}
+
+#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
+static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry, int dirty)
+{
+ unsigned long bits = pte_val(entry) &
+ (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW);
+ pte_update(ptep, 0, bits);
+}
+
+#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
+ do { \
+ __ptep_set_access_flags(__ptep, __entry, __dirty); \
+ flush_tlb_page_nohash(__vma, __address); \
+ } while(0)
+
+/*
+ * Macro to mark a page protection value as "uncacheable".
+ */
+#define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) | _PAGE_NO_CACHE | _PAGE_GUARDED))
+
+struct file;
+extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+ unsigned long size, pgprot_t vma_prot);
+#define __HAVE_PHYS_MEM_ACCESS_PROT
+
+#define __HAVE_ARCH_PTE_SAME
+#define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)
+
+/*
+ * Note that on Book E processors, the pmd contains the kernel virtual
+ * (lowmem) address of the pte page. The physical address is less useful
+ * because everything runs with translation enabled (even the TLB miss
+ * handler). On everything else the pmd contains the physical address
+ * of the pte page. -- paulus
+ */
+#ifndef CONFIG_BOOKE
+#define pmd_page_vaddr(pmd) \
+ ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
+#define pmd_page(pmd) \
+ (mem_map + (pmd_val(pmd) >> PAGE_SHIFT))
+#else
+#define pmd_page_vaddr(pmd) \
+ ((unsigned long) (pmd_val(pmd) & PAGE_MASK))
+#define pmd_page(pmd) \
+ (mem_map + (__pa(pmd_val(pmd)) >> PAGE_SHIFT))
+#endif
+
+/* to find an entry in a kernel page-table-directory */
+#define pgd_offset_k(address) pgd_offset(&init_mm, address)
+
+/* to find an entry in a page-table-directory */
+#define pgd_index(address) ((address) >> PGDIR_SHIFT)
+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
+
+/* Find an entry in the third-level page table.. */
+#define pte_index(address) \
+ (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+#define pte_offset_kernel(dir, addr) \
+ ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr))
+#define pte_offset_map(dir, addr) \
+ ((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE0) + pte_index(addr))
+#define pte_offset_map_nested(dir, addr) \
+ ((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE1) + pte_index(addr))
+
+#define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
+#define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1)
+
+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
+
+extern void paging_init(void);
+
+/*
+ * Encode and decode a swap entry.
+ * Note that the bits we use in a PTE for representing a swap entry
+ * must not include the _PAGE_PRESENT bit, the _PAGE_FILE bit, or the
+ * _PAGE_HASHPTE bit (if used). -- paulus
+ */
+#define __swp_type(entry) ((entry).val & 0x1f)
+#define __swp_offset(entry) ((entry).val >> 5)
+#define __swp_entry(type, offset) ((swp_entry_t) { (type) | ((offset) << 5) })
+#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) >> 3 })
+#define __swp_entry_to_pte(x) ((pte_t) { (x).val << 3 })
+
+/* Encode and decode a nonlinear file mapping entry */
+#define PTE_FILE_MAX_BITS 29
+#define pte_to_pgoff(pte) (pte_val(pte) >> 3)
+#define pgoff_to_pte(off) ((pte_t) { ((off) << 3) | _PAGE_FILE })
+
+/* CONFIG_APUS */
+/* For virtual address to physical address conversion */
+extern void cache_clear(__u32 addr, int length);
+extern void cache_push(__u32 addr, int length);
+extern int mm_end_of_chunk (unsigned long addr, int len);
+extern unsigned long iopa(unsigned long addr);
+extern unsigned long mm_ptov(unsigned long addr) __attribute_const__;
+
+/* Values for nocacheflag and cmode */
+/* These are not used by the APUS kernel_map, but prevent
+ compilation errors. */
+#define KERNELMAP_FULL_CACHING 0
+#define KERNELMAP_NOCACHE_SER 1
+#define KERNELMAP_NOCACHE_NONSER 2
+#define KERNELMAP_NO_COPYBACK 3
+
+/*
+ * Map some physical address range into the kernel address space.
+ */
+extern unsigned long kernel_map(unsigned long paddr, unsigned long size,
+ int nocacheflag, unsigned long *memavailp );
+
+/*
+ * Set cache mode of (kernel space) address range.
+ */
+extern void kernel_set_cachemode (unsigned long address, unsigned long size,
+ unsigned int cmode);
+
+/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
+#define kern_addr_valid(addr) (1)
+
+#ifdef CONFIG_PHYS_64BIT
+extern int remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
+ unsigned long paddr, unsigned long size, pgprot_t prot);
+
+static inline int io_remap_pfn_range(struct vm_area_struct *vma,
+ unsigned long vaddr,
+ unsigned long pfn,
+ unsigned long size,
+ pgprot_t prot)
+{
+ phys_addr_t paddr64 = fixup_bigphys_addr(pfn << PAGE_SHIFT, size);
+ return remap_pfn_range(vma, vaddr, paddr64 >> PAGE_SHIFT, size, prot);
+}
+#else
+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
+ remap_pfn_range(vma, vaddr, pfn, size, prot)
+#endif
+
+/*
+ * No page table caches to initialise
+ */
+#define pgtable_cache_init() do { } while (0)
+
+extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep,
+ pmd_t **pmdp);
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_POWERPC_PGTABLE_PPC32_H */
diff --git a/include/asm-powerpc/pgtable-ppc64.h b/include/asm-powerpc/pgtable-ppc64.h
new file mode 100644
index 00000000000..704c4e669fe
--- /dev/null
+++ b/include/asm-powerpc/pgtable-ppc64.h
@@ -0,0 +1,492 @@
+#ifndef _ASM_POWERPC_PGTABLE_PPC64_H_
+#define _ASM_POWERPC_PGTABLE_PPC64_H_
+/*
+ * This file contains the functions and defines necessary to modify and use
+ * the ppc64 hashed page table.
+ */
+
+#ifndef __ASSEMBLY__
+#include <linux/stddef.h>
+#include <asm/processor.h> /* For TASK_SIZE */
+#include <asm/mmu.h>
+#include <asm/page.h>
+#include <asm/tlbflush.h>
+struct mm_struct;
+#endif /* __ASSEMBLY__ */
+
+#ifdef CONFIG_PPC_64K_PAGES
+#include <asm/pgtable-64k.h>
+#else
+#include <asm/pgtable-4k.h>
+#endif
+
+#define FIRST_USER_ADDRESS 0
+
+/*
+ * Size of EA range mapped by our pagetables.
+ */
+#define PGTABLE_EADDR_SIZE (PTE_INDEX_SIZE + PMD_INDEX_SIZE + \
+ PUD_INDEX_SIZE + PGD_INDEX_SIZE + PAGE_SHIFT)
+#define PGTABLE_RANGE (1UL << PGTABLE_EADDR_SIZE)
+
+#if TASK_SIZE_USER64 > PGTABLE_RANGE
+#error TASK_SIZE_USER64 exceeds pagetable range
+#endif
+
+#if TASK_SIZE_USER64 > (1UL << (USER_ESID_BITS + SID_SHIFT))
+#error TASK_SIZE_USER64 exceeds user VSID range
+#endif
+
+/*
+ * Define the address range of the vmalloc VM area.
+ */
+#define VMALLOC_START ASM_CONST(0xD000000000000000)
+#define VMALLOC_SIZE ASM_CONST(0x80000000000)
+#define VMALLOC_END (VMALLOC_START + VMALLOC_SIZE)
+
+/*
+ * Define the address range of the imalloc VM area.
+ */
+#define PHBS_IO_BASE VMALLOC_END
+#define IMALLOC_BASE (PHBS_IO_BASE + 0x80000000ul) /* Reserve 2 gigs for PHBs */
+#define IMALLOC_END (VMALLOC_START + PGTABLE_RANGE)
+
+/*
+ * Region IDs
+ */
+#define REGION_SHIFT 60UL
+#define REGION_MASK (0xfUL << REGION_SHIFT)
+#define REGION_ID(ea) (((unsigned long)(ea)) >> REGION_SHIFT)
+
+#define VMALLOC_REGION_ID (REGION_ID(VMALLOC_START))
+#define KERNEL_REGION_ID (REGION_ID(PAGE_OFFSET))
+#define USER_REGION_ID (0UL)
+
+/*
+ * Common bits in a linux-style PTE. These match the bits in the
+ * (hardware-defined) PowerPC PTE as closely as possible. Additional
+ * bits may be defined in pgtable-*.h
+ */
+#define _PAGE_PRESENT 0x0001 /* software: pte contains a translation */
+#define _PAGE_USER 0x0002 /* matches one of the PP bits */
+#define _PAGE_FILE 0x0002 /* (!present only) software: pte holds file offset */
+#define _PAGE_EXEC 0x0004 /* No execute on POWER4 and newer (we invert) */
+#define _PAGE_GUARDED 0x0008
+#define _PAGE_COHERENT 0x0010 /* M: enforce memory coherence (SMP systems) */
+#define _PAGE_NO_CACHE 0x0020 /* I: cache inhibit */
+#define _PAGE_WRITETHRU 0x0040 /* W: cache write-through */
+#define _PAGE_DIRTY 0x0080 /* C: page changed */
+#define _PAGE_ACCESSED 0x0100 /* R: page referenced */
+#define _PAGE_RW 0x0200 /* software: user write access allowed */
+#define _PAGE_HASHPTE 0x0400 /* software: pte has an associated HPTE */
+#define _PAGE_BUSY 0x0800 /* software: PTE & hash are busy */
+
+#define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_COHERENT)
+
+#define _PAGE_WRENABLE (_PAGE_RW | _PAGE_DIRTY)
+
+/* __pgprot defined in asm-powerpc/page.h */
+#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)
+
+#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_RW | _PAGE_USER)
+#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_RW | _PAGE_USER | _PAGE_EXEC)
+#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER)
+#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
+#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER)
+#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
+#define PAGE_KERNEL __pgprot(_PAGE_BASE | _PAGE_WRENABLE)
+#define PAGE_KERNEL_CI __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
+ _PAGE_WRENABLE | _PAGE_NO_CACHE | _PAGE_GUARDED)
+#define PAGE_KERNEL_EXEC __pgprot(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_EXEC)
+
+#define PAGE_AGP __pgprot(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_NO_CACHE)
+#define HAVE_PAGE_AGP
+
+/* PTEIDX nibble */
+#define _PTEIDX_SECONDARY 0x8
+#define _PTEIDX_GROUP_IX 0x7
+
+
+/*
+ * POWER4 and newer have per page execute protection, older chips can only
+ * do this on a segment (256MB) basis.
+ *
+ * Also, write permissions imply read permissions.
+ * This is the closest we can get..
+ *
+ * Note due to the way vm flags are laid out, the bits are XWR
+ */
+#define __P000 PAGE_NONE
+#define __P001 PAGE_READONLY
+#define __P010 PAGE_COPY
+#define __P011 PAGE_COPY
+#define __P100 PAGE_READONLY_X
+#define __P101 PAGE_READONLY_X
+#define __P110 PAGE_COPY_X
+#define __P111 PAGE_COPY_X
+
+#define __S000 PAGE_NONE
+#define __S001 PAGE_READONLY
+#define __S010 PAGE_SHARED
+#define __S011 PAGE_SHARED
+#define __S100 PAGE_READONLY_X
+#define __S101 PAGE_READONLY_X
+#define __S110 PAGE_SHARED_X
+#define __S111 PAGE_SHARED_X
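(Aside, illustration only: these __Pxxx/__Sxxx values are consumed by the generic mm code, which roughly indexes a protection table with a mapping's read/write/exec VM flags -- the "XWR" index mentioned above -- with separate rows for private and shared mappings. A toy standalone lookup; the enum values are stand-ins, not real pgprots.)

        #include <stdio.h>

        enum { NONE_, READONLY, COPY, SHARED, READONLY_X, COPY_X, SHARED_X };

        static const int private_map[8] = {   /* __P000 .. __P111, index bits are XWR */
                NONE_, READONLY, COPY, COPY,
                READONLY_X, READONLY_X, COPY_X, COPY_X
        };
        static const int shared_map[8] = {    /* __S000 .. __S111 */
                NONE_, READONLY, SHARED, SHARED,
                READONLY_X, READONLY_X, SHARED_X, SHARED_X
        };

        int main(void)
        {
                /* a private PROT_READ|PROT_WRITE mapping indexes entry 0b011 and
                 * gets the COPY (copy-on-write) protection, not SHARED */
                printf("private rw -> %d, shared rw -> %d\n",
                       private_map[3], shared_map[3]);
                return 0;
        }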
+
+#ifndef __ASSEMBLY__
+
+/*
+ * ZERO_PAGE is a global shared page that is always zero: used
+ * for zero-mapped memory areas etc..
+ */
+extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
+#endif /* __ASSEMBLY__ */
+
+#ifdef CONFIG_HUGETLB_PAGE
+
+#define HAVE_ARCH_UNMAPPED_AREA
+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
+
+#endif
+
+#ifndef __ASSEMBLY__
+
+/*
+ * Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ *
+ * mk_pte takes a (struct page *) as input
+ */
+#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
+
+static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
+{
+ pte_t pte;
+
+
+ pte_val(pte) = (pfn << PTE_RPN_SHIFT) | pgprot_val(pgprot);
+ return pte;
+}
+
+#define pte_modify(_pte, newprot) \
+ (__pte((pte_val(_pte) & _PAGE_CHG_MASK) | pgprot_val(newprot)))
+
+#define pte_none(pte) ((pte_val(pte) & ~_PAGE_HPTEFLAGS) == 0)
+#define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT)
+
+/* pte_clear moved to later in this file */
+
+#define pte_pfn(x) ((unsigned long)((pte_val(x)>>PTE_RPN_SHIFT)))
+#define pte_page(x) pfn_to_page(pte_pfn(x))
+
+#define PMD_BAD_BITS (PTE_TABLE_SIZE-1)
+#define PUD_BAD_BITS (PMD_TABLE_SIZE-1)
+
+#define pmd_set(pmdp, pmdval) (pmd_val(*(pmdp)) = (pmdval))
+#define pmd_none(pmd) (!pmd_val(pmd))
+#define pmd_bad(pmd) (!is_kernel_addr(pmd_val(pmd)) \
+ || (pmd_val(pmd) & PMD_BAD_BITS))
+#define pmd_present(pmd) (pmd_val(pmd) != 0)
+#define pmd_clear(pmdp) (pmd_val(*(pmdp)) = 0)
+#define pmd_page_vaddr(pmd) (pmd_val(pmd) & ~PMD_MASKED_BITS)
+#define pmd_page(pmd) virt_to_page(pmd_page_vaddr(pmd))
+
+#define pud_set(pudp, pudval) (pud_val(*(pudp)) = (pudval))
+#define pud_none(pud) (!pud_val(pud))
+#define pud_bad(pud) (!is_kernel_addr(pud_val(pud)) \
+ || (pud_val(pud) & PUD_BAD_BITS))
+#define pud_present(pud) (pud_val(pud) != 0)
+#define pud_clear(pudp) (pud_val(*(pudp)) = 0)
+#define pud_page_vaddr(pud) (pud_val(pud) & ~PUD_MASKED_BITS)
+#define pud_page(pud) virt_to_page(pud_page_vaddr(pud))
+
+#define pgd_set(pgdp, pudp) ({pgd_val(*(pgdp)) = (unsigned long)(pudp);})
+
+/*
+ * Find an entry in a page-table-directory. We combine the address region
+ * (the high order N bits) and the pgd portion of the address.
+ */
+/* to avoid overflow in free_pgtables we don't use PTRS_PER_PGD here */
+#define pgd_index(address) (((address) >> (PGDIR_SHIFT)) & 0x1ff)
+
+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
+
+#define pmd_offset(pudp,addr) \
+ (((pmd_t *) pud_page_vaddr(*(pudp))) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))
+
+#define pte_offset_kernel(dir,addr) \
+ (((pte_t *) pmd_page_vaddr(*(dir))) + (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
+
+#define pte_offset_map(dir,addr) pte_offset_kernel((dir), (addr))
+#define pte_offset_map_nested(dir,addr) pte_offset_kernel((dir), (addr))
+#define pte_unmap(pte) do { } while(0)
+#define pte_unmap_nested(pte) do { } while(0)
+
+/* to find an entry in a kernel page-table-directory */
+/* This now only contains the vmalloc pages */
+#define pgd_offset_k(address) pgd_offset(&init_mm, address)
+
+/*
+ * The following only work if pte_present() is true.
+ * Undefined behaviour if not..
+ */
+static inline int pte_read(pte_t pte) { return pte_val(pte) & _PAGE_USER;}
+static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW;}
+static inline int pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_EXEC;}
+static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY;}
+static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED;}
+static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE;}
+
+static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; }
+static inline void pte_cache(pte_t pte) { pte_val(pte) &= ~_PAGE_NO_CACHE; }
+
+static inline pte_t pte_rdprotect(pte_t pte) {
+ pte_val(pte) &= ~_PAGE_USER; return pte; }
+static inline pte_t pte_exprotect(pte_t pte) {
+ pte_val(pte) &= ~_PAGE_EXEC; return pte; }
+static inline pte_t pte_wrprotect(pte_t pte) {
+ pte_val(pte) &= ~(_PAGE_RW); return pte; }
+static inline pte_t pte_mkclean(pte_t pte) {
+ pte_val(pte) &= ~(_PAGE_DIRTY); return pte; }
+static inline pte_t pte_mkold(pte_t pte) {
+ pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
+static inline pte_t pte_mkread(pte_t pte) {
+ pte_val(pte) |= _PAGE_USER; return pte; }
+static inline pte_t pte_mkexec(pte_t pte) {
+ pte_val(pte) |= _PAGE_USER | _PAGE_EXEC; return pte; }
+static inline pte_t pte_mkwrite(pte_t pte) {
+ pte_val(pte) |= _PAGE_RW; return pte; }
+static inline pte_t pte_mkdirty(pte_t pte) {
+ pte_val(pte) |= _PAGE_DIRTY; return pte; }
+static inline pte_t pte_mkyoung(pte_t pte) {
+ pte_val(pte) |= _PAGE_ACCESSED; return pte; }
+static inline pte_t pte_mkhuge(pte_t pte) {
+ return pte; }
+
+/* Atomic PTE updates */
+static inline unsigned long pte_update(struct mm_struct *mm,
+ unsigned long addr,
+ pte_t *ptep, unsigned long clr,
+ int huge)
+{
+ unsigned long old, tmp;
+
+ __asm__ __volatile__(
+ "1: ldarx %0,0,%3 # pte_update\n\
+ andi. %1,%0,%6\n\
+ bne- 1b \n\
+ andc %1,%0,%4 \n\
+ stdcx. %1,0,%3 \n\
+ bne- 1b"
+ : "=&r" (old), "=&r" (tmp), "=m" (*ptep)
+ : "r" (ptep), "r" (clr), "m" (*ptep), "i" (_PAGE_BUSY)
+ : "cc" );
+
+ if (old & _PAGE_HASHPTE)
+ hpte_need_flush(mm, addr, ptep, old, huge);
+ return old;
+}
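(Aside: the ldarx/stdcx. pair above implements "spin while _PAGE_BUSY is set, then atomically clear the requested bits and return the previous value"; the hash flush is handled separately afterwards. Below is a rough userspace analogue of just that retry loop, written with C11 atomics purely for illustration -- it is not the kernel code.)

        #include <stdatomic.h>
        #include <stdio.h>

        #define PAGE_BUSY     0x0800UL
        #define PAGE_ACCESSED 0x0100UL

        static unsigned long pte_update_sketch(_Atomic unsigned long *ptep,
                                               unsigned long clr)
        {
                unsigned long old;

                for (;;) {
                        old = atomic_load(ptep);
                        if (old & PAGE_BUSY)
                                continue;            /* the "andi.; bne- 1b" spin   */
                        /* try to store old & ~clr; retry on a race, like stdcx./bne- */
                        if (atomic_compare_exchange_weak(ptep, &old, old & ~clr))
                                return old;
                }
        }

        int main(void)
        {
                _Atomic unsigned long pte = 0x0301UL;   /* PRESENT | ACCESSED | RW */
                unsigned long old = pte_update_sketch(&pte, PAGE_ACCESSED);

                printf("old=%#lx new=%#lx\n", old, atomic_load(&pte));
                return 0;
        }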
+
+static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+{
+ unsigned long old;
+
+ if ((pte_val(*ptep) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0)
+ return 0;
+ old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0);
+ return (old & _PAGE_ACCESSED) != 0;
+}
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
+({ \
+ int __r; \
+ __r = __ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep); \
+ __r; \
+})
+
+/*
+ * On RW/DIRTY bit transitions we can avoid flushing the hpte. For the
+ * moment we always flush but we need to fix hpte_update and test if the
+ * optimisation is worth it.
+ */
+static inline int __ptep_test_and_clear_dirty(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+{
+ unsigned long old;
+
+ if ((pte_val(*ptep) & _PAGE_DIRTY) == 0)
+ return 0;
+ old = pte_update(mm, addr, ptep, _PAGE_DIRTY, 0);
+ return (old & _PAGE_DIRTY) != 0;
+}
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
+#define ptep_test_and_clear_dirty(__vma, __addr, __ptep) \
+({ \
+ int __r; \
+ __r = __ptep_test_and_clear_dirty((__vma)->vm_mm, __addr, __ptep); \
+ __r; \
+})
+
+#define __HAVE_ARCH_PTEP_SET_WRPROTECT
+static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep)
+{
+ unsigned long old;
+
+ if ((pte_val(*ptep) & _PAGE_RW) == 0)
+ return;
+ old = pte_update(mm, addr, ptep, _PAGE_RW, 0);
+}
+
+/*
+ * We currently remove entries from the hashtable regardless of whether
+ * the entry was young or dirty. The generic routines only flush if the
+ * entry was young or dirty which is not good enough.
+ *
+ * We should be more intelligent about this but for the moment we override
+ * these functions and force a tlb flush unconditionally
+ */
+#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
+#define ptep_clear_flush_young(__vma, __address, __ptep) \
+({ \
+ int __young = __ptep_test_and_clear_young((__vma)->vm_mm, __address, \
+ __ptep); \
+ __young; \
+})
+
+#define __HAVE_ARCH_PTEP_CLEAR_DIRTY_FLUSH
+#define ptep_clear_flush_dirty(__vma, __address, __ptep) \
+({ \
+ int __dirty = __ptep_test_and_clear_dirty((__vma)->vm_mm, __address, \
+ __ptep); \
+ __dirty; \
+})
+
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
+static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+{
+ unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0);
+ return __pte(old);
+}
+
+static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
+ pte_t * ptep)
+{
+ pte_update(mm, addr, ptep, ~0UL, 0);
+}
+
+/*
+ * set_pte stores a linux PTE into the linux page table.
+ */
+static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte)
+{
+ if (pte_present(*ptep))
+ pte_clear(mm, addr, ptep);
+ pte = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
+ *ptep = pte;
+}
+
+/* Set the dirty and/or accessed bits atomically in a linux PTE; this
+ * function doesn't need to flush the hash entry
+ */
+#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
+static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry, int dirty)
+{
+ unsigned long bits = pte_val(entry) &
+ (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
+ unsigned long old, tmp;
+
+ __asm__ __volatile__(
+ "1: ldarx %0,0,%4\n\
+ andi. %1,%0,%6\n\
+ bne- 1b \n\
+ or %0,%3,%0\n\
+ stdcx. %0,0,%4\n\
+ bne- 1b"
+ :"=&r" (old), "=&r" (tmp), "=m" (*ptep)
+ :"r" (bits), "r" (ptep), "m" (*ptep), "i" (_PAGE_BUSY)
+ :"cc");
+}
+#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
+ do { \
+ __ptep_set_access_flags(__ptep, __entry, __dirty); \
+ flush_tlb_page_nohash(__vma, __address); \
+ } while(0)
+
+/*
+ * Macro to mark a page protection value as "uncacheable".
+ */
+#define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) | _PAGE_NO_CACHE | _PAGE_GUARDED))
+
+struct file;
+extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+ unsigned long size, pgprot_t vma_prot);
+#define __HAVE_PHYS_MEM_ACCESS_PROT
+
+#define __HAVE_ARCH_PTE_SAME
+#define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HPTEFLAGS) == 0)
+
+#define pte_ERROR(e) \
+ printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
+#define pmd_ERROR(e) \
+ printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
+#define pgd_ERROR(e) \
+ printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
+
+extern pgd_t swapper_pg_dir[];
+
+extern void paging_init(void);
+
+/* Encode and de-code a swap entry */
+#define __swp_type(entry) (((entry).val >> 1) & 0x3f)
+#define __swp_offset(entry) ((entry).val >> 8)
+#define __swp_entry(type, offset) ((swp_entry_t){((type)<< 1)|((offset)<<8)})
+#define __pte_to_swp_entry(pte) ((swp_entry_t){pte_val(pte) >> PTE_RPN_SHIFT})
+#define __swp_entry_to_pte(x) ((pte_t) { (x).val << PTE_RPN_SHIFT })
+#define pte_to_pgoff(pte) (pte_val(pte) >> PTE_RPN_SHIFT)
+#define pgoff_to_pte(off) ((pte_t) {((off) << PTE_RPN_SHIFT)|_PAGE_FILE})
+#define PTE_FILE_MAX_BITS (BITS_PER_LONG - PTE_RPN_SHIFT)
+
+/*
+ * kern_addr_valid is intended to indicate whether an address is a valid
+ * kernel address. Most 32-bit archs define it as always true (like this)
+ * but most 64-bit archs actually perform a test. What should we do here?
+ * The only use is in fs/ncpfs/dir.c
+ */
+#define kern_addr_valid(addr) (1)
+
+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
+ remap_pfn_range(vma, vaddr, pfn, size, prot)
+
+void pgtable_cache_init(void);
+
+/*
+ * find_linux_pte returns the address of a linux pte for a given
+ * effective address and directory. If not found, it returns zero.
+ */
+static inline pte_t *find_linux_pte(pgd_t *pgdir, unsigned long ea)
+{
+ pgd_t *pg;
+ pud_t *pu;
+ pmd_t *pm;
+ pte_t *pt = NULL;
+
+ pg = pgdir + pgd_index(ea);
+ if (!pgd_none(*pg)) {
+ pu = pud_offset(pg, ea);
+ if (!pud_none(*pu)) {
+ pm = pmd_offset(pu, ea);
+ if (pmd_present(*pm))
+ pt = pte_offset_kernel(pm, ea);
+ }
+ }
+ return pt;
+}
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_POWERPC_PGTABLE_PPC64_H_ */
diff --git a/include/asm-powerpc/pgtable.h b/include/asm-powerpc/pgtable.h
index 19edb6982b8..78bf4ae712a 100644
--- a/include/asm-powerpc/pgtable.h
+++ b/include/asm-powerpc/pgtable.h
@@ -2,502 +2,15 @@
#define _ASM_POWERPC_PGTABLE_H
#ifdef __KERNEL__
-#ifndef CONFIG_PPC64
-#include <asm-ppc/pgtable.h>
+#if defined(CONFIG_PPC64)
+# include <asm/pgtable-ppc64.h>
#else
-
-/*
- * This file contains the functions and defines necessary to modify and use
- * the ppc64 hashed page table.
- */
-
-#ifndef __ASSEMBLY__
-#include <linux/stddef.h>
-#include <asm/processor.h> /* For TASK_SIZE */
-#include <asm/mmu.h>
-#include <asm/page.h>
-#include <asm/tlbflush.h>
-struct mm_struct;
-#endif /* __ASSEMBLY__ */
-
-#ifdef CONFIG_PPC_64K_PAGES
-#include <asm/pgtable-64k.h>
-#else
-#include <asm/pgtable-4k.h>
-#endif
-
-#define FIRST_USER_ADDRESS 0
-
-/*
- * Size of EA range mapped by our pagetables.
- */
-#define PGTABLE_EADDR_SIZE (PTE_INDEX_SIZE + PMD_INDEX_SIZE + \
- PUD_INDEX_SIZE + PGD_INDEX_SIZE + PAGE_SHIFT)
-#define PGTABLE_RANGE (1UL << PGTABLE_EADDR_SIZE)
-
-#if TASK_SIZE_USER64 > PGTABLE_RANGE
-#error TASK_SIZE_USER64 exceeds pagetable range
-#endif
-
-#if TASK_SIZE_USER64 > (1UL << (USER_ESID_BITS + SID_SHIFT))
-#error TASK_SIZE_USER64 exceeds user VSID range
+# include <asm/pgtable-ppc32.h>
#endif
-/*
- * Define the address range of the vmalloc VM area.
- */
-#define VMALLOC_START ASM_CONST(0xD000000000000000)
-#define VMALLOC_SIZE ASM_CONST(0x80000000000)
-#define VMALLOC_END (VMALLOC_START + VMALLOC_SIZE)
-
-/*
- * Define the address range of the imalloc VM area.
- */
-#define PHBS_IO_BASE VMALLOC_END
-#define IMALLOC_BASE (PHBS_IO_BASE + 0x80000000ul) /* Reserve 2 gigs for PHBs */
-#define IMALLOC_END (VMALLOC_START + PGTABLE_RANGE)
-
-/*
- * Region IDs
- */
-#define REGION_SHIFT 60UL
-#define REGION_MASK (0xfUL << REGION_SHIFT)
-#define REGION_ID(ea) (((unsigned long)(ea)) >> REGION_SHIFT)
-
-#define VMALLOC_REGION_ID (REGION_ID(VMALLOC_START))
-#define KERNEL_REGION_ID (REGION_ID(PAGE_OFFSET))
-#define USER_REGION_ID (0UL)
-
-/*
- * Common bits in a linux-style PTE. These match the bits in the
- * (hardware-defined) PowerPC PTE as closely as possible. Additional
- * bits may be defined in pgtable-*.h
- */
-#define _PAGE_PRESENT 0x0001 /* software: pte contains a translation */
-#define _PAGE_USER 0x0002 /* matches one of the PP bits */
-#define _PAGE_FILE 0x0002 /* (!present only) software: pte holds file offset */
-#define _PAGE_EXEC 0x0004 /* No execute on POWER4 and newer (we invert) */
-#define _PAGE_GUARDED 0x0008
-#define _PAGE_COHERENT 0x0010 /* M: enforce memory coherence (SMP systems) */
-#define _PAGE_NO_CACHE 0x0020 /* I: cache inhibit */
-#define _PAGE_WRITETHRU 0x0040 /* W: cache write-through */
-#define _PAGE_DIRTY 0x0080 /* C: page changed */
-#define _PAGE_ACCESSED 0x0100 /* R: page referenced */
-#define _PAGE_RW 0x0200 /* software: user write access allowed */
-#define _PAGE_HASHPTE 0x0400 /* software: pte has an associated HPTE */
-#define _PAGE_BUSY 0x0800 /* software: PTE & hash are busy */
-
-#define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_COHERENT)
-
-#define _PAGE_WRENABLE (_PAGE_RW | _PAGE_DIRTY)
-
-/* __pgprot defined in asm-powerpc/page.h */
-#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)
-
-#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_RW | _PAGE_USER)
-#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_RW | _PAGE_USER | _PAGE_EXEC)
-#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER)
-#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
-#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER)
-#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
-#define PAGE_KERNEL __pgprot(_PAGE_BASE | _PAGE_WRENABLE)
-#define PAGE_KERNEL_CI __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
- _PAGE_WRENABLE | _PAGE_NO_CACHE | _PAGE_GUARDED)
-#define PAGE_KERNEL_EXEC __pgprot(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_EXEC)
-
-#define PAGE_AGP __pgprot(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_NO_CACHE)
-#define HAVE_PAGE_AGP
-
-/* PTEIDX nibble */
-#define _PTEIDX_SECONDARY 0x8
-#define _PTEIDX_GROUP_IX 0x7
-
-
-/*
- * POWER4 and newer have per page execute protection, older chips can only
- * do this on a segment (256MB) basis.
- *
- * Also, write permissions imply read permissions.
- * This is the closest we can get..
- *
- * Note due to the way vm flags are laid out, the bits are XWR
- */
-#define __P000 PAGE_NONE
-#define __P001 PAGE_READONLY
-#define __P010 PAGE_COPY
-#define __P011 PAGE_COPY
-#define __P100 PAGE_READONLY_X
-#define __P101 PAGE_READONLY_X
-#define __P110 PAGE_COPY_X
-#define __P111 PAGE_COPY_X
-
-#define __S000 PAGE_NONE
-#define __S001 PAGE_READONLY
-#define __S010 PAGE_SHARED
-#define __S011 PAGE_SHARED
-#define __S100 PAGE_READONLY_X
-#define __S101 PAGE_READONLY_X
-#define __S110 PAGE_SHARED_X
-#define __S111 PAGE_SHARED_X
-
#ifndef __ASSEMBLY__
-
-/*
- * ZERO_PAGE is a global shared page that is always zero: used
- * for zero-mapped memory areas etc..
- */
-extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
-#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
-#endif /* __ASSEMBLY__ */
-
-#ifdef CONFIG_HUGETLB_PAGE
-
-#define HAVE_ARCH_UNMAPPED_AREA
-#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
-
-#endif
-
-#ifndef __ASSEMBLY__
-
-/*
- * Conversion functions: convert a page and protection to a page entry,
- * and a page entry and page directory to the page they refer to.
- *
- * mk_pte takes a (struct page *) as input
- */
-#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
-
-static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
-{
- pte_t pte;
-
-
- pte_val(pte) = (pfn << PTE_RPN_SHIFT) | pgprot_val(pgprot);
- return pte;
-}
-
-#define pte_modify(_pte, newprot) \
- (__pte((pte_val(_pte) & _PAGE_CHG_MASK) | pgprot_val(newprot)))
-
-#define pte_none(pte) ((pte_val(pte) & ~_PAGE_HPTEFLAGS) == 0)
-#define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT)
-
-/* pte_clear moved to later in this file */
-
-#define pte_pfn(x) ((unsigned long)((pte_val(x)>>PTE_RPN_SHIFT)))
-#define pte_page(x) pfn_to_page(pte_pfn(x))
-
-#define PMD_BAD_BITS (PTE_TABLE_SIZE-1)
-#define PUD_BAD_BITS (PMD_TABLE_SIZE-1)
-
-#define pmd_set(pmdp, pmdval) (pmd_val(*(pmdp)) = (pmdval))
-#define pmd_none(pmd) (!pmd_val(pmd))
-#define pmd_bad(pmd) (!is_kernel_addr(pmd_val(pmd)) \
- || (pmd_val(pmd) & PMD_BAD_BITS))
-#define pmd_present(pmd) (pmd_val(pmd) != 0)
-#define pmd_clear(pmdp) (pmd_val(*(pmdp)) = 0)
-#define pmd_page_vaddr(pmd) (pmd_val(pmd) & ~PMD_MASKED_BITS)
-#define pmd_page(pmd) virt_to_page(pmd_page_vaddr(pmd))
-
-#define pud_set(pudp, pudval) (pud_val(*(pudp)) = (pudval))
-#define pud_none(pud) (!pud_val(pud))
-#define pud_bad(pud) (!is_kernel_addr(pud_val(pud)) \
- || (pud_val(pud) & PUD_BAD_BITS))
-#define pud_present(pud) (pud_val(pud) != 0)
-#define pud_clear(pudp) (pud_val(*(pudp)) = 0)
-#define pud_page_vaddr(pud) (pud_val(pud) & ~PUD_MASKED_BITS)
-#define pud_page(pud) virt_to_page(pud_page_vaddr(pud))
-
-#define pgd_set(pgdp, pudp) ({pgd_val(*(pgdp)) = (unsigned long)(pudp);})
-
-/*
- * Find an entry in a page-table-directory. We combine the address region
- * (the high order N bits) and the pgd portion of the address.
- */
-/* to avoid overflow in free_pgtables we don't use PTRS_PER_PGD here */
-#define pgd_index(address) (((address) >> (PGDIR_SHIFT)) & 0x1ff)
-
-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
-
-#define pmd_offset(pudp,addr) \
- (((pmd_t *) pud_page_vaddr(*(pudp))) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))
-
-#define pte_offset_kernel(dir,addr) \
- (((pte_t *) pmd_page_vaddr(*(dir))) + (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
-
-#define pte_offset_map(dir,addr) pte_offset_kernel((dir), (addr))
-#define pte_offset_map_nested(dir,addr) pte_offset_kernel((dir), (addr))
-#define pte_unmap(pte) do { } while(0)
-#define pte_unmap_nested(pte) do { } while(0)
-
-/* to find an entry in a kernel page-table-directory */
-/* This now only contains the vmalloc pages */
-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
-
-/*
- * The following only work if pte_present() is true.
- * Undefined behaviour if not..
- */
-static inline int pte_read(pte_t pte) { return pte_val(pte) & _PAGE_USER;}
-static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW;}
-static inline int pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_EXEC;}
-static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY;}
-static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED;}
-static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE;}
-
-static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; }
-static inline void pte_cache(pte_t pte) { pte_val(pte) &= ~_PAGE_NO_CACHE; }
-
-static inline pte_t pte_rdprotect(pte_t pte) {
- pte_val(pte) &= ~_PAGE_USER; return pte; }
-static inline pte_t pte_exprotect(pte_t pte) {
- pte_val(pte) &= ~_PAGE_EXEC; return pte; }
-static inline pte_t pte_wrprotect(pte_t pte) {
- pte_val(pte) &= ~(_PAGE_RW); return pte; }
-static inline pte_t pte_mkclean(pte_t pte) {
- pte_val(pte) &= ~(_PAGE_DIRTY); return pte; }
-static inline pte_t pte_mkold(pte_t pte) {
- pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
-static inline pte_t pte_mkread(pte_t pte) {
- pte_val(pte) |= _PAGE_USER; return pte; }
-static inline pte_t pte_mkexec(pte_t pte) {
- pte_val(pte) |= _PAGE_USER | _PAGE_EXEC; return pte; }
-static inline pte_t pte_mkwrite(pte_t pte) {
- pte_val(pte) |= _PAGE_RW; return pte; }
-static inline pte_t pte_mkdirty(pte_t pte) {
- pte_val(pte) |= _PAGE_DIRTY; return pte; }
-static inline pte_t pte_mkyoung(pte_t pte) {
- pte_val(pte) |= _PAGE_ACCESSED; return pte; }
-static inline pte_t pte_mkhuge(pte_t pte) {
- return pte; }
-
-/* Atomic PTE updates */
-static inline unsigned long pte_update(struct mm_struct *mm,
- unsigned long addr,
- pte_t *ptep, unsigned long clr,
- int huge)
-{
- unsigned long old, tmp;
-
- __asm__ __volatile__(
- "1: ldarx %0,0,%3 # pte_update\n\
- andi. %1,%0,%6\n\
- bne- 1b \n\
- andc %1,%0,%4 \n\
- stdcx. %1,0,%3 \n\
- bne- 1b"
- : "=&r" (old), "=&r" (tmp), "=m" (*ptep)
- : "r" (ptep), "r" (clr), "m" (*ptep), "i" (_PAGE_BUSY)
- : "cc" );
-
- if (old & _PAGE_HASHPTE)
- hpte_need_flush(mm, addr, ptep, old, huge);
- return old;
-}
-
-static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
- unsigned long addr, pte_t *ptep)
-{
- unsigned long old;
-
- if ((pte_val(*ptep) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0)
- return 0;
- old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0);
- return (old & _PAGE_ACCESSED) != 0;
-}
-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
-({ \
- int __r; \
- __r = __ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep); \
- __r; \
-})
-
-/*
- * On RW/DIRTY bit transitions we can avoid flushing the hpte. For the
- * moment we always flush but we need to fix hpte_update and test if the
- * optimisation is worth it.
- */
-static inline int __ptep_test_and_clear_dirty(struct mm_struct *mm,
- unsigned long addr, pte_t *ptep)
-{
- unsigned long old;
-
- if ((pte_val(*ptep) & _PAGE_DIRTY) == 0)
- return 0;
- old = pte_update(mm, addr, ptep, _PAGE_DIRTY, 0);
- return (old & _PAGE_DIRTY) != 0;
-}
-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
-#define ptep_test_and_clear_dirty(__vma, __addr, __ptep) \
-({ \
- int __r; \
- __r = __ptep_test_and_clear_dirty((__vma)->vm_mm, __addr, __ptep); \
- __r; \
-})
-
-#define __HAVE_ARCH_PTEP_SET_WRPROTECT
-static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep)
-{
- unsigned long old;
-
- if ((pte_val(*ptep) & _PAGE_RW) == 0)
- return;
- old = pte_update(mm, addr, ptep, _PAGE_RW, 0);
-}
-
-/*
- * We currently remove entries from the hashtable regardless of whether
- * the entry was young or dirty. The generic routines only flush if the
- * entry was young or dirty which is not good enough.
- *
- * We should be more intelligent about this but for the moment we override
- * these functions and force a tlb flush unconditionally
- */
-#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
-#define ptep_clear_flush_young(__vma, __address, __ptep) \
-({ \
- int __young = __ptep_test_and_clear_young((__vma)->vm_mm, __address, \
- __ptep); \
- __young; \
-})
-
-#define __HAVE_ARCH_PTEP_CLEAR_DIRTY_FLUSH
-#define ptep_clear_flush_dirty(__vma, __address, __ptep) \
-({ \
- int __dirty = __ptep_test_and_clear_dirty((__vma)->vm_mm, __address, \
- __ptep); \
- __dirty; \
-})
-
-#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
-static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
- unsigned long addr, pte_t *ptep)
-{
- unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0);
- return __pte(old);
-}
-
-static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
- pte_t * ptep)
-{
- pte_update(mm, addr, ptep, ~0UL, 0);
-}
-
-/*
- * set_pte stores a linux PTE into the linux page table.
- */
-static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pte)
-{
- if (pte_present(*ptep))
- pte_clear(mm, addr, ptep);
- pte = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
- *ptep = pte;
-}
-
-/* Set the dirty and/or accessed bits atomically in a linux PTE, this
- * function doesn't need to flush the hash entry
- */
-#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
-static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry, int dirty)
-{
- unsigned long bits = pte_val(entry) &
- (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
- unsigned long old, tmp;
-
- __asm__ __volatile__(
- "1: ldarx %0,0,%4\n\
- andi. %1,%0,%6\n\
- bne- 1b \n\
- or %0,%3,%0\n\
- stdcx. %0,0,%4\n\
- bne- 1b"
- :"=&r" (old), "=&r" (tmp), "=m" (*ptep)
- :"r" (bits), "r" (ptep), "m" (*ptep), "i" (_PAGE_BUSY)
- :"cc");
-}
-#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
- do { \
- __ptep_set_access_flags(__ptep, __entry, __dirty); \
- flush_tlb_page_nohash(__vma, __address); \
- } while(0)
-
-/*
- * Macro to mark a page protection value as "uncacheable".
- */
-#define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) | _PAGE_NO_CACHE | _PAGE_GUARDED))
-
-struct file;
-extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
- unsigned long size, pgprot_t vma_prot);
-#define __HAVE_PHYS_MEM_ACCESS_PROT
-
-#define __HAVE_ARCH_PTE_SAME
-#define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HPTEFLAGS) == 0)
-
-#define pte_ERROR(e) \
- printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
-#define pmd_ERROR(e) \
- printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
-#define pgd_ERROR(e) \
- printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
-
-extern pgd_t swapper_pg_dir[];
-
-extern void paging_init(void);
-
-/* Encode and de-code a swap entry */
-#define __swp_type(entry) (((entry).val >> 1) & 0x3f)
-#define __swp_offset(entry) ((entry).val >> 8)
-#define __swp_entry(type, offset) ((swp_entry_t){((type)<< 1)|((offset)<<8)})
-#define __pte_to_swp_entry(pte) ((swp_entry_t){pte_val(pte) >> PTE_RPN_SHIFT})
-#define __swp_entry_to_pte(x) ((pte_t) { (x).val << PTE_RPN_SHIFT })
-#define pte_to_pgoff(pte) (pte_val(pte) >> PTE_RPN_SHIFT)
-#define pgoff_to_pte(off) ((pte_t) {((off) << PTE_RPN_SHIFT)|_PAGE_FILE})
-#define PTE_FILE_MAX_BITS (BITS_PER_LONG - PTE_RPN_SHIFT)
-
-/*
- * kern_addr_valid is intended to indicate whether an address is a valid
- * kernel address. Most 32-bit archs define it as always true (like this)
- * but most 64-bit archs actually perform a test. What should we do here?
- * The only use is in fs/ncpfs/dir.c
- */
-#define kern_addr_valid(addr) (1)
-
-#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
- remap_pfn_range(vma, vaddr, pfn, size, prot)
-
-void pgtable_cache_init(void);
-
-/*
- * find_linux_pte returns the address of a linux pte for a given
- * effective address and directory. If not found, it returns zero.
- */static inline pte_t *find_linux_pte(pgd_t *pgdir, unsigned long ea)
-{
- pgd_t *pg;
- pud_t *pu;
- pmd_t *pm;
- pte_t *pt = NULL;
-
- pg = pgdir + pgd_index(ea);
- if (!pgd_none(*pg)) {
- pu = pud_offset(pg, ea);
- if (!pud_none(*pu)) {
- pm = pmd_offset(pu, ea);
- if (pmd_present(*pm))
- pt = pte_offset_kernel(pm, ea);
- }
- }
- return pt;
-}
-
-
#include <asm-generic/pgtable.h>
-
#endif /* __ASSEMBLY__ */
-#endif /* CONFIG_PPC64 */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_PGTABLE_H */
diff --git a/include/asm-powerpc/pmac_feature.h b/include/asm-powerpc/pmac_feature.h
index d3599cc9aa7..d43d91beba9 100644
--- a/include/asm-powerpc/pmac_feature.h
+++ b/include/asm-powerpc/pmac_feature.h
@@ -146,7 +146,7 @@ struct device_node;
static inline long pmac_call_feature(int selector, struct device_node* node,
long param, long value)
{
- if (!ppc_md.feature_call)
+ if (!ppc_md.feature_call || !machine_is(powermac))
return -ENODEV;
return ppc_md.feature_call(selector, node, param, value);
}
diff --git a/include/asm-powerpc/prom.h b/include/asm-powerpc/prom.h
index ec400f608e1..6845af93ba9 100644
--- a/include/asm-powerpc/prom.h
+++ b/include/asm-powerpc/prom.h
@@ -20,7 +20,6 @@
#include <linux/platform_device.h>
#include <asm/irq.h>
#include <asm/atomic.h>
-#include <asm/io.h>
/* Definitions used by the flattened device tree */
#define OF_DT_HEADER 0xd00dfeed /* marker */
@@ -334,30 +333,17 @@ extern int of_irq_map_one(struct device_node *device, int index,
struct pci_dev;
extern int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq);
-static inline int of_irq_to_resource(struct device_node *dev, int index, struct resource *r)
-{
- int irq = irq_of_parse_and_map(dev, index);
-
- /* Only dereference the resource if both the
- * resource and the irq are valid. */
- if (r && irq != NO_IRQ) {
- r->start = r->end = irq;
- r->flags = IORESOURCE_IRQ;
- }
-
- return irq;
-}
-
-static inline void __iomem *of_iomap(struct device_node *np, int index)
-{
- struct resource res;
-
- if (of_address_to_resource(np, index, &res))
- return NULL;
-
- return ioremap(res.start, 1 + res.end - res.start);
-}
+extern int of_irq_to_resource(struct device_node *dev, int index,
+ struct resource *r);
+/**
+ * of_iomap - Maps the memory mapped IO for a given device_node
+ * @device: the device whose io range will be mapped
+ * @index: index of the io range
+ *
+ * Returns a pointer to the mapped memory
+ */
+extern void __iomem *of_iomap(struct device_node *device, int index);
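(Aside: the two helpers above were previously inline -- see the removed lines -- and now live out of line in the prom code. A hypothetical driver fragment showing the calling convention only; foo_probe_sketch, REG_OFFSET and the error values are invented for this sketch.)

        static int foo_probe_sketch(struct device_node *np)
        {
                void __iomem *regs;
                struct resource irq_res;

                regs = of_iomap(np, 0);        /* map register range 0 of the node */
                if (!regs)
                        return -ENOMEM;

                /* fills irq_res and returns the virq, or NO_IRQ on failure */
                if (of_irq_to_resource(np, 0, &irq_res) == NO_IRQ) {
                        iounmap(regs);
                        return -ENODEV;
                }

                /* ... readl(regs + REG_OFFSET), request_irq(irq_res.start, ...) ... */
                iounmap(regs);
                return 0;
        }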
#endif /* __KERNEL__ */
#endif /* _POWERPC_PROM_H */
diff --git a/include/asm-powerpc/ps3.h b/include/asm-powerpc/ps3.h
index 821581a8b64..13c372df99e 100644
--- a/include/asm-powerpc/ps3.h
+++ b/include/asm-powerpc/ps3.h
@@ -167,26 +167,31 @@ enum ps3_cpu_binding {
PS3_BINDING_CPU_1 = 1,
};
-int ps3_alloc_io_irq(enum ps3_cpu_binding cpu, unsigned int interrupt_id,
+int ps3_virq_setup(enum ps3_cpu_binding cpu, unsigned long outlet,
unsigned int *virq);
-int ps3_free_io_irq(unsigned int virq);
-int ps3_alloc_event_irq(enum ps3_cpu_binding cpu, unsigned int *virq);
-int ps3_free_event_irq(unsigned int virq);
+int ps3_virq_destroy(unsigned int virq);
+int ps3_irq_plug_setup(enum ps3_cpu_binding cpu, unsigned long outlet,
+ unsigned int *virq);
+int ps3_irq_plug_destroy(unsigned int virq);
+int ps3_event_receive_port_setup(enum ps3_cpu_binding cpu, unsigned int *virq);
+int ps3_event_receive_port_destroy(unsigned int virq);
int ps3_send_event_locally(unsigned int virq);
-int ps3_connect_event_irq(enum ps3_cpu_binding cpu,
- const struct ps3_device_id *did, unsigned int interrupt_id,
+
+int ps3_io_irq_setup(enum ps3_cpu_binding cpu, unsigned int interrupt_id,
unsigned int *virq);
-int ps3_disconnect_event_irq(const struct ps3_device_id *did,
- unsigned int interrupt_id, unsigned int virq);
-int ps3_alloc_vuart_irq(enum ps3_cpu_binding cpu, void* virt_addr_bmp,
+int ps3_io_irq_destroy(unsigned int virq);
+int ps3_vuart_irq_setup(enum ps3_cpu_binding cpu, void* virt_addr_bmp,
unsigned int *virq);
-int ps3_free_vuart_irq(unsigned int virq);
-int ps3_alloc_spe_irq(enum ps3_cpu_binding cpu, unsigned long spe_id,
+int ps3_vuart_irq_destroy(unsigned int virq);
+int ps3_spe_irq_setup(enum ps3_cpu_binding cpu, unsigned long spe_id,
unsigned int class, unsigned int *virq);
-int ps3_free_spe_irq(unsigned int virq);
-int ps3_alloc_irq(enum ps3_cpu_binding cpu, unsigned long outlet,
+int ps3_spe_irq_destroy(unsigned int virq);
+
+int ps3_sb_event_receive_port_setup(enum ps3_cpu_binding cpu,
+ const struct ps3_device_id *did, unsigned int interrupt_id,
unsigned int *virq);
-int ps3_free_irq(unsigned int virq);
+int ps3_sb_event_receive_port_destroy(const struct ps3_device_id *did,
+ unsigned int interrupt_id, unsigned int virq);
/* lv1 result codes */
diff --git a/include/asm-powerpc/suspend.h b/include/asm-powerpc/suspend.h
new file mode 100644
index 00000000000..cbf2c9404c3
--- /dev/null
+++ b/include/asm-powerpc/suspend.h
@@ -0,0 +1,9 @@
+#ifndef __ASM_POWERPC_SUSPEND_H
+#define __ASM_POWERPC_SUSPEND_H
+
+static inline int arch_prepare_suspend(void) { return 0; }
+
+void save_processor_state(void);
+void restore_processor_state(void);
+
+#endif /* __ASM_POWERPC_SUSPEND_H */
diff --git a/include/asm-powerpc/system.h b/include/asm-powerpc/system.h
index d3e0906ff2b..09621f611db 100644
--- a/include/asm-powerpc/system.h
+++ b/include/asm-powerpc/system.h
@@ -7,7 +7,6 @@
#include <linux/kernel.h>
#include <asm/hw_irq.h>
-#include <asm/atomic.h>
/*
* Memory barrier.
@@ -227,6 +226,29 @@ __xchg_u32(volatile void *p, unsigned long val)
return prev;
}
+/*
+ * Atomic exchange
+ *
+ * Changes the memory location '*ptr' to be val and returns
+ * the previous value stored there.
+ */
+static __inline__ unsigned long
+__xchg_u32_local(volatile void *p, unsigned long val)
+{
+ unsigned long prev;
+
+ __asm__ __volatile__(
+"1: lwarx %0,0,%2 \n"
+ PPC405_ERR77(0,%2)
+" stwcx. %3,0,%2 \n\
+ bne- 1b"
+ : "=&r" (prev), "+m" (*(volatile unsigned int *)p)
+ : "r" (p), "r" (val)
+ : "cc", "memory");
+
+ return prev;
+}
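(Aside: the _local variants added in this hunk deliberately omit the SMP memory barriers that the plain xchg()/cmpxchg() carry, since they only have to be atomic with respect to the current CPU. In C11 terms the contract of __xchg_u32_local is roughly a relaxed exchange; a standalone illustration:)

        #include <stdatomic.h>
        #include <stdio.h>

        int main(void)
        {
                _Atomic unsigned int v = 5;

                /* store 9, hand back whatever was there before, no ordering implied */
                unsigned int prev = atomic_exchange_explicit(&v, 9, memory_order_relaxed);

                printf("prev=%u now=%u\n", prev, atomic_load(&v));   /* prev=5 now=9 */
                return 0;
        }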
+
#ifdef CONFIG_PPC64
static __inline__ unsigned long
__xchg_u64(volatile void *p, unsigned long val)
@@ -246,6 +268,23 @@ __xchg_u64(volatile void *p, unsigned long val)
return prev;
}
+
+static __inline__ unsigned long
+__xchg_u64_local(volatile void *p, unsigned long val)
+{
+ unsigned long prev;
+
+ __asm__ __volatile__(
+"1: ldarx %0,0,%2 \n"
+ PPC405_ERR77(0,%2)
+" stdcx. %3,0,%2 \n\
+ bne- 1b"
+ : "=&r" (prev), "+m" (*(volatile unsigned long *)p)
+ : "r" (p), "r" (val)
+ : "cc", "memory");
+
+ return prev;
+}
#endif
/*
@@ -269,13 +308,32 @@ __xchg(volatile void *ptr, unsigned long x, unsigned int size)
return x;
}
+static __inline__ unsigned long
+__xchg_local(volatile void *ptr, unsigned long x, unsigned int size)
+{
+ switch (size) {
+ case 4:
+ return __xchg_u32_local(ptr, x);
+#ifdef CONFIG_PPC64
+ case 8:
+ return __xchg_u64_local(ptr, x);
+#endif
+ }
+ __xchg_called_with_bad_pointer();
+ return x;
+}
#define xchg(ptr,x) \
({ \
__typeof__(*(ptr)) _x_ = (x); \
(__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
})
-#define tas(ptr) (xchg((ptr),1))
+#define xchg_local(ptr,x) \
+ ({ \
+ __typeof__(*(ptr)) _x_ = (x); \
+ (__typeof__(*(ptr))) __xchg_local((ptr), \
+ (unsigned long)_x_, sizeof(*(ptr))); \
+ })
/*
* Compare and exchange - if *p == old, set it to new,
@@ -306,6 +364,28 @@ __cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
return prev;
}
+static __inline__ unsigned long
+__cmpxchg_u32_local(volatile unsigned int *p, unsigned long old,
+ unsigned long new)
+{
+ unsigned int prev;
+
+ __asm__ __volatile__ (
+"1: lwarx %0,0,%2 # __cmpxchg_u32\n\
+ cmpw 0,%0,%3\n\
+ bne- 2f\n"
+ PPC405_ERR77(0,%2)
+" stwcx. %4,0,%2\n\
+ bne- 1b"
+ "\n\
+2:"
+ : "=&r" (prev), "+m" (*p)
+ : "r" (p), "r" (old), "r" (new)
+ : "cc", "memory");
+
+ return prev;
+}
+
#ifdef CONFIG_PPC64
static __inline__ unsigned long
__cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
@@ -328,6 +408,27 @@ __cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
return prev;
}
+
+static __inline__ unsigned long
+__cmpxchg_u64_local(volatile unsigned long *p, unsigned long old,
+ unsigned long new)
+{
+ unsigned long prev;
+
+ __asm__ __volatile__ (
+"1: ldarx %0,0,%2 # __cmpxchg_u64\n\
+ cmpd 0,%0,%3\n\
+ bne- 2f\n\
+ stdcx. %4,0,%2\n\
+ bne- 1b"
+ "\n\
+2:"
+ : "=&r" (prev), "+m" (*p)
+ : "r" (p), "r" (old), "r" (new)
+ : "cc", "memory");
+
+ return prev;
+}
#endif
/* This function doesn't exist, so you'll get a linker error
@@ -350,6 +451,22 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
return old;
}
+static __inline__ unsigned long
+__cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
+ unsigned int size)
+{
+ switch (size) {
+ case 4:
+ return __cmpxchg_u32_local(ptr, old, new);
+#ifdef CONFIG_PPC64
+ case 8:
+ return __cmpxchg_u64_local(ptr, old, new);
+#endif
+ }
+ __cmpxchg_called_with_bad_pointer();
+ return old;
+}
+
#define cmpxchg(ptr,o,n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
@@ -358,6 +475,15 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
(unsigned long)_n_, sizeof(*(ptr))); \
})
+
+#define cmpxchg_local(ptr,o,n) \
+ ({ \
+ __typeof__(*(ptr)) _o_ = (o); \
+ __typeof__(*(ptr)) _n_ = (n); \
+ (__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_, \
+ (unsigned long)_n_, sizeof(*(ptr))); \
+ })
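(Aside: a typical consumer of the new cmpxchg_local() is a CPU-local counter update that can only race with interrupts on the same CPU. The pattern, sketched here with C11 atomics rather than the kernel macro -- relaxed ordering stands in for the barriers that the local variant intentionally drops.)

        #include <stdatomic.h>
        #include <stdio.h>

        static void local_add_sketch(_Atomic long *l, long i)
        {
                long old;

                do {
                        old = atomic_load_explicit(l, memory_order_relaxed);
                        /* cmpxchg_local(l, old, old + i): retry if the value moved */
                } while (!atomic_compare_exchange_weak_explicit(l, &old, old + i,
                                                                memory_order_relaxed,
                                                                memory_order_relaxed));
        }

        int main(void)
        {
                _Atomic long counter = 40;

                local_add_sketch(&counter, 2);
                printf("%ld\n", atomic_load(&counter));   /* 42 */
                return 0;
        }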
+
#ifdef CONFIG_PPC64
/*
* We handle most unaligned accesses in hardware. On the other hand
diff --git a/include/asm-powerpc/tsi108.h b/include/asm-powerpc/tsi108.h
index 4e95d153be8..f8b60793b7a 100644
--- a/include/asm-powerpc/tsi108.h
+++ b/include/asm-powerpc/tsi108.h
@@ -68,8 +68,17 @@
#define TSI108_PB_ERRCS_ES (1 << 1)
#define TSI108_PB_ISR_PBS_RD_ERR (1 << 8)
-#define TSI108_PCI_CFG_BASE_PHYS (0xfb000000)
#define TSI108_PCI_CFG_SIZE (0x01000000)
+
+/*
+ * PHY Configuration Options
+ *
+ * Specify "bcm54xx" in the compatible property of your device tree phy
+ * nodes if your board uses the Broadcom PHYs
+ */
+#define TSI108_PHY_MV88E 0 /* Marvell 88Exxxx PHY */
+#define TSI108_PHY_BCM54XX 1 /* Broadcom BCM54xx PHY */
+
/* Global variables */
extern u32 tsi108_pci_cfg_base;
@@ -93,6 +102,7 @@ typedef struct {
u16 phy; /* phy address */
u16 irq_num; /* irq number */
u8 mac_addr[6]; /* phy mac address */
+ u16 phy_type; /* type of phy on board */
} hw_info;
extern u32 get_vir_csrbase(void);
diff --git a/include/asm-powerpc/tsi108_pci.h b/include/asm-powerpc/tsi108_pci.h
new file mode 100644
index 00000000000..a9f92f73232
--- /dev/null
+++ b/include/asm-powerpc/tsi108_pci.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2007 IBM Corp
+ *
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef _ASM_PPC_TSI108_PCI_H
+#define _ASM_PPC_TSI108_PCI_H
+
+#include <asm/tsi108.h>
+
+/* Register definitions */
+#define TSI108_PCI_P2O_BAR0 (TSI108_PCI_OFFSET + 0x10)
+#define TSI108_PCI_P2O_BAR0_UPPER (TSI108_PCI_OFFSET + 0x14)
+#define TSI108_PCI_P2O_BAR2 (TSI108_PCI_OFFSET + 0x18)
+#define TSI108_PCI_P2O_BAR2_UPPER (TSI108_PCI_OFFSET + 0x1c)
+#define TSI108_PCI_P2O_PAGE_SIZES (TSI108_PCI_OFFSET + 0x4c)
+#define TSI108_PCI_PFAB_BAR0 (TSI108_PCI_OFFSET + 0x204)
+#define TSI108_PCI_PFAB_BAR0_UPPER (TSI108_PCI_OFFSET + 0x208)
+#define TSI108_PCI_PFAB_IO (TSI108_PCI_OFFSET + 0x20c)
+#define TSI108_PCI_PFAB_IO_UPPER (TSI108_PCI_OFFSET + 0x210)
+#define TSI108_PCI_PFAB_MEM32 (TSI108_PCI_OFFSET + 0x214)
+#define TSI108_PCI_PFAB_PFM3 (TSI108_PCI_OFFSET + 0x220)
+#define TSI108_PCI_PFAB_PFM4 (TSI108_PCI_OFFSET + 0x230)
+
+extern int tsi108_setup_pci(struct device_node *dev, u32 cfg_phys, int primary);
+extern void tsi108_pci_int_init(struct device_node *node);
+extern void tsi108_irq_cascade(unsigned int irq, struct irq_desc *desc);
+extern void tsi108_clear_pci_cfg_error(void);
+
+#endif /* _ASM_PPC_TSI108_PCI_H */
diff --git a/include/asm-powerpc/udbg.h b/include/asm-powerpc/udbg.h
index d03d8557f70..ce9d82fb7b6 100644
--- a/include/asm-powerpc/udbg.h
+++ b/include/asm-powerpc/udbg.h
@@ -47,6 +47,7 @@ extern void __init udbg_init_rtas_panel(void);
extern void __init udbg_init_rtas_console(void);
extern void __init udbg_init_debug_beat(void);
extern void __init udbg_init_btext(void);
+extern void __init udbg_init_44x_as1(void);
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_UDBG_H */
diff --git a/include/asm-ppc/kdebug.h b/include/asm-ppc/kdebug.h
new file mode 100644
index 00000000000..6ece1b03766
--- /dev/null
+++ b/include/asm-ppc/kdebug.h
@@ -0,0 +1 @@
+#include <asm-generic/kdebug.h>
diff --git a/include/asm-ppc/pgtable.h b/include/asm-ppc/pgtable.h
index b1fdbf40dba..bed452d4a5f 100644
--- a/include/asm-ppc/pgtable.h
+++ b/include/asm-ppc/pgtable.h
@@ -827,10 +827,6 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma,
remap_pfn_range(vma, vaddr, pfn, size, prot)
#endif
-#define MK_IOSPACE_PFN(space, pfn) (pfn)
-#define GET_IOSPACE(pfn) 0
-#define GET_PFN(pfn) (pfn)
-
/*
* No page table caches to initialise
*/
diff --git a/include/asm-ppc/system.h b/include/asm-ppc/system.h
index 738943584c0..d84a3cf4d03 100644
--- a/include/asm-ppc/system.h
+++ b/include/asm-ppc/system.h
@@ -6,7 +6,6 @@
#include <linux/kernel.h>
-#include <asm/atomic.h>
#include <asm/hw_irq.h>
/*
@@ -170,7 +169,6 @@ xchg_u32(volatile void *p, unsigned long val)
extern void __xchg_called_with_bad_pointer(void);
#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
-#define tas(ptr) (xchg((ptr),1))
static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
diff --git a/include/asm-s390/kdebug.h b/include/asm-s390/kdebug.h
index d2d7ad27614..04418af08f8 100644
--- a/include/asm-s390/kdebug.h
+++ b/include/asm-s390/kdebug.h
@@ -8,21 +8,6 @@
struct pt_regs;
-struct die_args {
- struct pt_regs *regs;
- const char *str;
- long err;
- int trapnr;
- int signr;
-};
-
-/* Note - you should never unregister because that can race with NMIs.
- * If you really want to do it first unregister - then synchronize_sched
- * - then free.
- */
-extern int register_die_notifier(struct notifier_block *);
-extern int unregister_die_notifier(struct notifier_block *);
-
/*
* These are only here because kprobes.c wants them to implement a
* blatant layering violation. Will hopefully go away soon once all
@@ -37,8 +22,6 @@ static inline int unregister_page_fault_notifier(struct notifier_block *nb)
return 0;
}
-extern struct atomic_notifier_head s390die_chain;
-
enum die_val {
DIE_OOPS = 1,
DIE_BPT,
@@ -54,19 +37,6 @@ enum die_val {
DIE_NMI_IPI,
};
-static inline int notify_die(enum die_val val, const char *str,
- struct pt_regs *regs, long err, int trap, int sig)
-{
- struct die_args args = {
- .regs = regs,
- .str = str,
- .err = err,
- .trapnr = trap,
- .signr = sig
- };
- return atomic_notifier_call_chain(&s390die_chain, val, &args);
-}
-
extern void die(const char *, struct pt_regs *, long);
#endif
diff --git a/include/asm-s390/kexec.h b/include/asm-s390/kexec.h
index 9c35c8ad1af..7592af708b4 100644
--- a/include/asm-s390/kexec.h
+++ b/include/asm-s390/kexec.h
@@ -34,8 +34,6 @@
/* The native architecture */
#define KEXEC_ARCH KEXEC_ARCH_S390
-#define MAX_NOTE_BYTES 1024
-
/* Provide a dummy definition to avoid build failures. */
static inline void crash_setup_regs(struct pt_regs *newregs,
struct pt_regs *oldregs) { }
diff --git a/include/asm-sh/kdebug.h b/include/asm-sh/kdebug.h
index ef009baf5a1..493c2062974 100644
--- a/include/asm-sh/kdebug.h
+++ b/include/asm-sh/kdebug.h
@@ -2,6 +2,7 @@
#define __ASM_SH_KDEBUG_H
#include <linux/notifier.h>
+#include <asm-generic/kdebug.h>
struct pt_regs;
diff --git a/include/asm-sh/kexec.h b/include/asm-sh/kexec.h
index da36a754860..00f4260ef09 100644
--- a/include/asm-sh/kexec.h
+++ b/include/asm-sh/kexec.h
@@ -26,8 +26,6 @@
/* The native architecture */
#define KEXEC_ARCH KEXEC_ARCH_SH
-#define MAX_NOTE_BYTES 1024
-
static inline void crash_setup_regs(struct pt_regs *newregs,
struct pt_regs *oldregs)
{
diff --git a/include/asm-sh/pgtable.h b/include/asm-sh/pgtable.h
index 184d7fcaaf1..5b523c7e7d9 100644
--- a/include/asm-sh/pgtable.h
+++ b/include/asm-sh/pgtable.h
@@ -568,10 +568,6 @@ typedef pte_t *pte_addr_t;
#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
remap_pfn_range(vma, vaddr, pfn, size, prot)
-#define MK_IOSPACE_PFN(space, pfn) (pfn)
-#define GET_IOSPACE(pfn) 0
-#define GET_PFN(pfn) (pfn)
-
struct mm_struct;
/*
diff --git a/include/asm-sh/system.h b/include/asm-sh/system.h
index 127af304865..e7e96ee0c8a 100644
--- a/include/asm-sh/system.h
+++ b/include/asm-sh/system.h
@@ -82,16 +82,6 @@ static inline void sched_cacheflush(void)
}
#endif
-static inline unsigned long tas(volatile int *m)
-{
- unsigned long retval;
-
- __asm__ __volatile__ ("tas.b @%1\n\t"
- "movt %0"
- : "=r" (retval): "r" (m): "t", "memory");
- return retval;
-}
-
/*
* A brief note on ctrl_barrier(), the control register write barrier.
*
diff --git a/include/asm-sh64/kdebug.h b/include/asm-sh64/kdebug.h
new file mode 100644
index 00000000000..6ece1b03766
--- /dev/null
+++ b/include/asm-sh64/kdebug.h
@@ -0,0 +1 @@
+#include <asm-generic/kdebug.h>
diff --git a/include/asm-sh64/pgtable.h b/include/asm-sh64/pgtable.h
index 6b97c4cb1d6..b875482eb59 100644
--- a/include/asm-sh64/pgtable.h
+++ b/include/asm-sh64/pgtable.h
@@ -485,10 +485,6 @@ extern void update_mmu_cache(struct vm_area_struct * vma,
#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
remap_pfn_range(vma, vaddr, pfn, size, prot)
-#define MK_IOSPACE_PFN(space, pfn) (pfn)
-#define GET_IOSPACE(pfn) 0
-#define GET_PFN(pfn) (pfn)
-
#endif /* !__ASSEMBLY__ */
/*
diff --git a/include/asm-sh64/system.h b/include/asm-sh64/system.h
index b1598c26fcb..5ff94644e8c 100644
--- a/include/asm-sh64/system.h
+++ b/include/asm-sh64/system.h
@@ -43,8 +43,6 @@ extern struct task_struct *sh64_switch_to(struct task_struct *prev,
#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
-#define tas(ptr) (xchg((ptr), 1))
-
extern void __xchg_called_with_bad_pointer(void);
#define mb() __asm__ __volatile__ ("synco": : :"memory")
diff --git a/include/asm-sparc/kdebug.h b/include/asm-sparc/kdebug.h
index fba92485fdb..404d8076732 100644
--- a/include/asm-sparc/kdebug.h
+++ b/include/asm-sparc/kdebug.h
@@ -66,4 +66,8 @@ static inline void sp_enter_debugger(void)
#define KDEBUG_DUNNO2_OFF 0x8
#define KDEBUG_TEACH_OFF 0xc
+enum die_val {
+ DIE_UNUSED,
+};
+
#endif /* !(_SPARC_KDEBUG_H) */
diff --git a/include/asm-sparc/system.h b/include/asm-sparc/system.h
index 100c3eaf3c1..8b6d9c9c8b9 100644
--- a/include/asm-sparc/system.h
+++ b/include/asm-sparc/system.h
@@ -241,7 +241,6 @@ static inline unsigned long xchg_u32(__volatile__ unsigned long *m, unsigned lon
}
#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
-#define tas(ptr) (xchg((ptr),1))
extern void __xchg_called_with_bad_pointer(void);
diff --git a/include/asm-sparc64/Kbuild b/include/asm-sparc64/Kbuild
index a7f44408c93..854fd3a65ac 100644
--- a/include/asm-sparc64/Kbuild
+++ b/include/asm-sparc64/Kbuild
@@ -8,7 +8,6 @@ header-y += apb.h
header-y += asi.h
header-y += bbc.h
header-y += bpp.h
-header-y += const.h
header-y += display7seg.h
header-y += envctrl.h
header-y += ipc.h
diff --git a/include/asm-sparc64/atomic.h b/include/asm-sparc64/atomic.h
index 2f0bec26a69..3fb4e1f7f18 100644
--- a/include/asm-sparc64/atomic.h
+++ b/include/asm-sparc64/atomic.h
@@ -9,6 +9,7 @@
#define __ARCH_SPARC64_ATOMIC__
#include <linux/types.h>
+#include <asm/system.h>
typedef struct { volatile int counter; } atomic_t;
typedef struct { volatile __s64 counter; } atomic64_t;
@@ -70,25 +71,47 @@ extern int atomic64_sub_ret(int, atomic64_t *);
#define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
#define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
-#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-#define atomic_add_unless(v, a, u) \
-({ \
- int c, old; \
- c = atomic_read(v); \
- for (;;) { \
- if (unlikely(c == (u))) \
- break; \
- old = atomic_cmpxchg((v), c, c + (a)); \
- if (likely(old == c)) \
- break; \
- c = old; \
- } \
- likely(c != (u)); \
-})
+static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+{
+ int c, old;
+ c = atomic_read(v);
+ for (;;) {
+ if (unlikely(c == (u)))
+ break;
+ old = atomic_cmpxchg((v), c, c + (a));
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+ return c != (u);
+}
+
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+#define atomic64_cmpxchg(v, o, n) \
+ ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
+#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
+
+static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+{
+ long c, old;
+ c = atomic64_read(v);
+ for (;;) {
+ if (unlikely(c == (u)))
+ break;
+ old = atomic64_cmpxchg((v), c, c + (a));
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+ return c != (u);
+}
+
+#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
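(Aside: converting the macro to an inline function above does not change the semantics -- atomic_add_unless()/atomic*_inc_not_zero() only perform the add if the counter has not already reached the "unless" value, the usual guard against resurrecting an object whose refcount hit zero. A standalone C11 rendition of the same loop, for illustration:)

        #include <stdatomic.h>
        #include <stdio.h>

        static int add_unless_sketch(_Atomic int *v, int a, int u)
        {
                int c = atomic_load(v);

                while (c != u) {
                        /* on failure, compare_exchange refreshes c with the live value */
                        if (atomic_compare_exchange_weak(v, &c, c + a))
                                break;
                }
                return c != u;              /* non-zero means the add happened */
        }

        int main(void)
        {
                _Atomic int live = 1, dead = 0;

                printf("live: %d  dead: %d\n",
                       add_unless_sketch(&live, 1, 0),   /* 1: refcount bumped */
                       add_unless_sketch(&dead, 1, 0));  /* 0: add was refused */
                return 0;
        }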
+
/* Atomic operations are already serializing */
#ifdef CONFIG_SMP
#define smp_mb__before_atomic_dec() membar_storeload_loadload();
diff --git a/include/asm-sparc64/const.h b/include/asm-sparc64/const.h
deleted file mode 100644
index 8ad902b2ce0..00000000000
--- a/include/asm-sparc64/const.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/* const.h: Macros for dealing with constants. */
-
-#ifndef _SPARC64_CONST_H
-#define _SPARC64_CONST_H
-
-/* Some constant macros are used in both assembler and
- * C code. Therefore we cannot annotate them always with
- * 'UL' and other type specificers unilaterally. We
- * use the following macros to deal with this.
- */
-
-#ifdef __ASSEMBLY__
-#define _AC(X,Y) X
-#else
-#define _AC(X,Y) (X##Y)
-#endif
-
-
-#endif /* !(_SPARC64_CONST_H) */
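(Aside: nothing is lost by this deletion -- _AC() now comes from linux/const.h with the same definition, and the sparc64 headers in the hunks below switch their includes accordingly. For reference, what the C branch of the macro buys you on an LP64 machine; the _AC_C name is local to this sketch.)

        #include <stdio.h>

        #define _AC_C(X, Y) (X##Y)    /* the !__ASSEMBLY__ branch of _AC() */

        int main(void)
        {
                /* same constant as LSU_CONTROL_PM below; the UL suffix is pasted
                 * on, so the value keeps its full 64-bit width in C expressions */
                unsigned long pm = _AC_C(0x000001fe00000000, UL);

                printf("sizeof=%zu value=%#lx\n", sizeof(pm), pm);
                return 0;
        }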
diff --git a/include/asm-sparc64/kdebug.h b/include/asm-sparc64/kdebug.h
index 11251bdd00c..f8032e73f38 100644
--- a/include/asm-sparc64/kdebug.h
+++ b/include/asm-sparc64/kdebug.h
@@ -7,19 +7,8 @@
struct pt_regs;
-struct die_args {
- struct pt_regs *regs;
- const char *str;
- long err;
- int trapnr;
- int signr;
-};
-
-extern int register_die_notifier(struct notifier_block *);
-extern int unregister_die_notifier(struct notifier_block *);
extern int register_page_fault_notifier(struct notifier_block *);
extern int unregister_page_fault_notifier(struct notifier_block *);
-extern struct atomic_notifier_head sparc64die_chain;
extern void bad_trap(struct pt_regs *, long);
@@ -36,16 +25,4 @@ enum die_val {
DIE_PAGE_FAULT,
};
-static inline int notify_die(enum die_val val,char *str, struct pt_regs *regs,
- long err, int trap, int sig)
-{
- struct die_args args = { .regs = regs,
- .str = str,
- .err = err,
- .trapnr = trap,
- .signr = sig };
-
- return atomic_notifier_call_chain(&sparc64die_chain, val, &args);
-}
-
#endif
diff --git a/include/asm-sparc64/local.h b/include/asm-sparc64/local.h
index dfde115ac89..c11c530f74d 100644
--- a/include/asm-sparc64/local.h
+++ b/include/asm-sparc64/local.h
@@ -1,40 +1 @@
-#ifndef _ARCH_SPARC64_LOCAL_H
-#define _ARCH_SPARC64_LOCAL_H
-
-#include <linux/percpu.h>
-#include <asm/atomic.h>
-
-typedef atomic64_t local_t;
-
-#define LOCAL_INIT(i) ATOMIC64_INIT(i)
-#define local_read(v) atomic64_read(v)
-#define local_set(v,i) atomic64_set(v,i)
-
-#define local_inc(v) atomic64_inc(v)
-#define local_dec(v) atomic64_dec(v)
-#define local_add(i, v) atomic64_add(i, v)
-#define local_sub(i, v) atomic64_sub(i, v)
-
-#define __local_inc(v) ((v)->counter++)
-#define __local_dec(v) ((v)->counter--)
-#define __local_add(i,v) ((v)->counter+=(i))
-#define __local_sub(i,v) ((v)->counter-=(i))
-
-/* Use these for per-cpu local_t variables: on some archs they are
- * much more efficient than these naive implementations. Note they take
- * a variable, not an address.
- */
-#define cpu_local_read(v) local_read(&__get_cpu_var(v))
-#define cpu_local_set(v, i) local_set(&__get_cpu_var(v), (i))
-
-#define cpu_local_inc(v) local_inc(&__get_cpu_var(v))
-#define cpu_local_dec(v) local_dec(&__get_cpu_var(v))
-#define cpu_local_add(i, v) local_add((i), &__get_cpu_var(v))
-#define cpu_local_sub(i, v) local_sub((i), &__get_cpu_var(v))
-
-#define __cpu_local_inc(v) __local_inc(&__get_cpu_var(v))
-#define __cpu_local_dec(v) __local_dec(&__get_cpu_var(v))
-#define __cpu_local_add(i, v) __local_add((i), &__get_cpu_var(v))
-#define __cpu_local_sub(i, v) __local_sub((i), &__get_cpu_var(v))
-
-#endif /* _ARCH_SPARC64_LOCAL_H */
+#include <asm-generic/local.h>
diff --git a/include/asm-sparc64/lsu.h b/include/asm-sparc64/lsu.h
index e5329c7f583..79f109840c3 100644
--- a/include/asm-sparc64/lsu.h
+++ b/include/asm-sparc64/lsu.h
@@ -2,7 +2,7 @@
#ifndef _SPARC64_LSU_H
#define _SPARC64_LSU_H
-#include <asm/const.h>
+#include <linux/const.h>
/* LSU Control Register */
#define LSU_CONTROL_PM _AC(0x000001fe00000000,UL) /* Phys-watchpoint byte mask*/
diff --git a/include/asm-sparc64/mmu.h b/include/asm-sparc64/mmu.h
index 70af4b6ce13..8abc58f0f9d 100644
--- a/include/asm-sparc64/mmu.h
+++ b/include/asm-sparc64/mmu.h
@@ -1,8 +1,8 @@
#ifndef __MMU_H
#define __MMU_H
+#include <linux/const.h>
#include <asm/page.h>
-#include <asm/const.h>
#include <asm/hypervisor.h>
#define CTX_NR_BITS 13
diff --git a/include/asm-sparc64/page.h b/include/asm-sparc64/page.h
index ff736eafa64..7af1077451f 100644
--- a/include/asm-sparc64/page.h
+++ b/include/asm-sparc64/page.h
@@ -5,7 +5,7 @@
#ifdef __KERNEL__
-#include <asm/const.h>
+#include <linux/const.h>
#if defined(CONFIG_SPARC64_PAGE_SIZE_8KB)
#define PAGE_SHIFT 13
diff --git a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h
index 46705ef47d2..9e80ad43b29 100644
--- a/include/asm-sparc64/pgtable.h
+++ b/include/asm-sparc64/pgtable.h
@@ -15,13 +15,13 @@
#include <asm-generic/pgtable-nopud.h>
#include <linux/compiler.h>
+#include <linux/const.h>
#include <asm/types.h>
#include <asm/spitfire.h>
#include <asm/asi.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/processor.h>
-#include <asm/const.h>
/* The kernel image occupies 0x4000000 to 0x1000000 (4MB --> 32MB).
* The page copy blockops can use 0x2000000 to 0x4000000.
diff --git a/include/asm-sparc64/pstate.h b/include/asm-sparc64/pstate.h
index 49a7924a89a..f3c45484c63 100644
--- a/include/asm-sparc64/pstate.h
+++ b/include/asm-sparc64/pstate.h
@@ -2,7 +2,7 @@
#ifndef _SPARC64_PSTATE_H
#define _SPARC64_PSTATE_H
-#include <asm/const.h>
+#include <linux/const.h>
/* The V9 PSTATE Register (with SpitFire extensions).
*
diff --git a/include/asm-sparc64/sfafsr.h b/include/asm-sparc64/sfafsr.h
index 2f792c20b53..e96137b04a4 100644
--- a/include/asm-sparc64/sfafsr.h
+++ b/include/asm-sparc64/sfafsr.h
@@ -1,7 +1,7 @@
#ifndef _SPARC64_SFAFSR_H
#define _SPARC64_SFAFSR_H
-#include <asm/const.h>
+#include <linux/const.h>
/* Spitfire Asynchronous Fault Status register, ASI=0x4C VA<63:0>=0x0 */
diff --git a/include/asm-sparc64/system.h b/include/asm-sparc64/system.h
index 32281acb878..8ba380ec6da 100644
--- a/include/asm-sparc64/system.h
+++ b/include/asm-sparc64/system.h
@@ -253,7 +253,6 @@ static inline unsigned long xchg64(__volatile__ unsigned long *m, unsigned long
}
#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
-#define tas(ptr) (xchg((ptr),1))
extern void __xchg_called_with_bad_pointer(void);
diff --git a/include/asm-um/cmpxchg.h b/include/asm-um/cmpxchg.h
new file mode 100644
index 00000000000..529376a9988
--- /dev/null
+++ b/include/asm-um/cmpxchg.h
@@ -0,0 +1,6 @@
+#ifndef __UM_CMPXCHG_H
+#define __UM_CMPXCHG_H
+
+#include "asm/arch/cmpxchg.h"
+
+#endif
diff --git a/include/asm-um/kdebug.h b/include/asm-um/kdebug.h
new file mode 100644
index 00000000000..6ece1b03766
--- /dev/null
+++ b/include/asm-um/kdebug.h
@@ -0,0 +1 @@
+#include <asm-generic/kdebug.h>
diff --git a/include/asm-v850/kdebug.h b/include/asm-v850/kdebug.h
new file mode 100644
index 00000000000..6ece1b03766
--- /dev/null
+++ b/include/asm-v850/kdebug.h
@@ -0,0 +1 @@
+#include <asm-generic/kdebug.h>
diff --git a/include/asm-v850/system.h b/include/asm-v850/system.h
index da39916f10b..0de2481fd99 100644
--- a/include/asm-v850/system.h
+++ b/include/asm-v850/system.h
@@ -76,7 +76,6 @@ static inline int irqs_disabled (void)
#define xchg(ptr, with) \
((__typeof__ (*(ptr)))__xchg ((unsigned long)(with), (ptr), sizeof (*(ptr))))
-#define tas(ptr) (xchg ((ptr), 1))
static inline unsigned long __xchg (unsigned long with,
__volatile__ void *ptr, int size)
diff --git a/include/asm-x86_64/Kbuild b/include/asm-x86_64/Kbuild
index 89ad1fc27c8..75a2deffca6 100644
--- a/include/asm-x86_64/Kbuild
+++ b/include/asm-x86_64/Kbuild
@@ -19,4 +19,3 @@ unifdef-y += mce.h
unifdef-y += msr.h
unifdef-y += mtrr.h
unifdef-y += vsyscall.h
-unifdef-y += const.h
diff --git a/include/asm-x86_64/atomic.h b/include/asm-x86_64/atomic.h
index 706ca4b6000..f2e64634fa4 100644
--- a/include/asm-x86_64/atomic.h
+++ b/include/asm-x86_64/atomic.h
@@ -2,6 +2,7 @@
#define __ARCH_X86_64_ATOMIC__
#include <asm/alternative.h>
+#include <asm/cmpxchg.h>
/* atomic_t should be 32 bit signed type */
@@ -375,8 +376,8 @@ static __inline__ long atomic64_add_return(long i, atomic64_t *v)
long __i = i;
__asm__ __volatile__(
LOCK_PREFIX "xaddq %0, %1;"
- :"=r"(i)
- :"m"(v->counter), "0"(i));
+ :"+r" (i), "+m" (v->counter)
+ : : "memory");
return i + __i;
}
@@ -388,7 +389,10 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t *v)
#define atomic64_inc_return(v) (atomic64_add_return(1,v))
#define atomic64_dec_return(v) (atomic64_sub_return(1,v))
-#define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))
+#define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
+#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
+
+#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
/**
@@ -400,22 +404,49 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t *v)
* Atomically adds @a to @v, so long as it was not @u.
* Returns non-zero if @v was not @u, and zero otherwise.
*/
-#define atomic_add_unless(v, a, u) \
-({ \
- int c, old; \
- c = atomic_read(v); \
- for (;;) { \
- if (unlikely(c == (u))) \
- break; \
- old = atomic_cmpxchg((v), c, c + (a)); \
- if (likely(old == c)) \
- break; \
- c = old; \
- } \
- c != (u); \
-})
+static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+{
+ int c, old;
+ c = atomic_read(v);
+ for (;;) {
+ if (unlikely(c == (u)))
+ break;
+ old = atomic_cmpxchg((v), c, c + (a));
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+ return c != (u);
+}
+
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+/**
+ * atomic64_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic64_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as it was not @u.
+ * Returns non-zero if @v was not @u, and zero otherwise.
+ */
+static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+{
+ long c, old;
+ c = atomic64_read(v);
+ for (;;) {
+ if (unlikely(c == (u)))
+ break;
+ old = atomic64_cmpxchg((v), c, c + (a));
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+ return c != (u);
+}
+
+#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
+
/* These are x86-specific, used by some header files */
#define atomic_clear_mask(mask, addr) \
__asm__ __volatile__(LOCK_PREFIX "andl %0,%1" \
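
atomic_add_unless() and the new atomic64 variants above follow the contract spelled out in the kernel-doc: add @a unless the counter already equals @u, and report whether the add happened. The classic consumer is atomic_inc_not_zero(), which takes a reference only while an object is still live. A sketch of that pattern, not taken from this patch; struct foo, foo_get(), foo_put() and the empty teardown are hypothetical:

#include <asm/atomic.h>

/* Sketch only: struct foo and its teardown are hypothetical. */
struct foo {
        atomic_t refcnt;
};

static void foo_release(struct foo *f)
{
        /* hypothetical teardown: free the object here */
}

static struct foo *foo_get(struct foo *f)
{
        /* Refuse to take a reference once the count has already hit zero. */
        if (!f || !atomic_inc_not_zero(&f->refcnt))
                return NULL;
        return f;
}

static void foo_put(struct foo *f)
{
        if (atomic_dec_and_test(&f->refcnt))
                foo_release(f);
}
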
diff --git a/include/asm-x86_64/cmpxchg.h b/include/asm-x86_64/cmpxchg.h
new file mode 100644
index 00000000000..09a6b6b6b74
--- /dev/null
+++ b/include/asm-x86_64/cmpxchg.h
@@ -0,0 +1,134 @@
+#ifndef __ASM_CMPXCHG_H
+#define __ASM_CMPXCHG_H
+
+#include <asm/alternative.h> /* Provides LOCK_PREFIX */
+
+#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
+
+#define __xg(x) ((volatile long *)(x))
+
+static inline void set_64bit(volatile unsigned long *ptr, unsigned long val)
+{
+ *ptr = val;
+}
+
+#define _set_64bit set_64bit
+
+/*
+ * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
+ * Note 2: xchg has side effect, so that attribute volatile is necessary,
+ * but generally the primitive is invalid, *ptr is output argument. --ANK
+ */
+static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
+{
+ switch (size) {
+ case 1:
+ __asm__ __volatile__("xchgb %b0,%1"
+ :"=q" (x)
+ :"m" (*__xg(ptr)), "0" (x)
+ :"memory");
+ break;
+ case 2:
+ __asm__ __volatile__("xchgw %w0,%1"
+ :"=r" (x)
+ :"m" (*__xg(ptr)), "0" (x)
+ :"memory");
+ break;
+ case 4:
+ __asm__ __volatile__("xchgl %k0,%1"
+ :"=r" (x)
+ :"m" (*__xg(ptr)), "0" (x)
+ :"memory");
+ break;
+ case 8:
+ __asm__ __volatile__("xchgq %0,%1"
+ :"=r" (x)
+ :"m" (*__xg(ptr)), "0" (x)
+ :"memory");
+ break;
+ }
+ return x;
+}
+
+/*
+ * Atomic compare and exchange. Compare OLD with MEM, if identical,
+ * store NEW in MEM. Return the initial value in MEM. Success is
+ * indicated by comparing RETURN with OLD.
+ */
+
+#define __HAVE_ARCH_CMPXCHG 1
+
+static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
+ unsigned long new, int size)
+{
+ unsigned long prev;
+ switch (size) {
+ case 1:
+ __asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
+ : "=a"(prev)
+ : "q"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
+ return prev;
+ case 2:
+ __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
+ : "=a"(prev)
+ : "r"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
+ return prev;
+ case 4:
+ __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %k1,%2"
+ : "=a"(prev)
+ : "r"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
+ return prev;
+ case 8:
+ __asm__ __volatile__(LOCK_PREFIX "cmpxchgq %1,%2"
+ : "=a"(prev)
+ : "r"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
+ return prev;
+ }
+ return old;
+}
+
+static inline unsigned long __cmpxchg_local(volatile void *ptr,
+ unsigned long old, unsigned long new, int size)
+{
+ unsigned long prev;
+ switch (size) {
+ case 1:
+ __asm__ __volatile__("cmpxchgb %b1,%2"
+ : "=a"(prev)
+ : "q"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
+ return prev;
+ case 2:
+ __asm__ __volatile__("cmpxchgw %w1,%2"
+ : "=a"(prev)
+ : "r"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
+ return prev;
+ case 4:
+ __asm__ __volatile__("cmpxchgl %k1,%2"
+ : "=a"(prev)
+ : "r"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
+ return prev;
+ case 8:
+ __asm__ __volatile__("cmpxchgq %1,%2"
+ : "=a"(prev)
+ : "r"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
+ return prev;
+ }
+ return old;
+}
+
+#define cmpxchg(ptr,o,n)\
+ ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
+ (unsigned long)(n),sizeof(*(ptr))))
+#define cmpxchg_local(ptr,o,n)\
+ ((__typeof__(*(ptr)))__cmpxchg_local((ptr),(unsigned long)(o),\
+ (unsigned long)(n),sizeof(*(ptr))))
+
+#endif
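
The comment in the new header states the cmpxchg() contract: the old value in memory is returned, and success is detected by comparing that return value with the expected old value. A minimal userspace sketch of the resulting retry idiom, using GCC's __sync_val_compare_and_swap() builtin (which follows the same return-the-old-value convention) in place of the kernel macro; the "atomic max" operation is illustrative only:

#include <stdio.h>

static unsigned long counter;

/* Atomically raise counter to at least "val". */
static void set_max(unsigned long val)
{
        unsigned long old = counter;

        for (;;) {
                unsigned long prev;

                if (old >= val)
                        return;                 /* nothing to do */
                prev = __sync_val_compare_and_swap(&counter, old, val);
                if (prev == old)
                        return;                 /* we installed val */
                old = prev;                     /* lost the race, retry */
        }
}

int main(void)
{
        set_max(5);
        set_max(3);
        printf("%lu\n", counter);               /* prints 5 */
        return 0;
}
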
diff --git a/include/asm-x86_64/kdebug.h b/include/asm-x86_64/kdebug.h
index 2b0c088e295..74feae945a2 100644
--- a/include/asm-x86_64/kdebug.h
+++ b/include/asm-x86_64/kdebug.h
@@ -5,19 +5,8 @@
struct pt_regs;
-struct die_args {
- struct pt_regs *regs;
- const char *str;
- long err;
- int trapnr;
- int signr;
-};
-
-extern int register_die_notifier(struct notifier_block *);
-extern int unregister_die_notifier(struct notifier_block *);
extern int register_page_fault_notifier(struct notifier_block *);
extern int unregister_page_fault_notifier(struct notifier_block *);
-extern struct atomic_notifier_head die_chain;
/* Grossly misnamed. */
enum die_val {
@@ -33,22 +22,10 @@ enum die_val {
DIE_GPF,
DIE_CALL,
DIE_NMI_IPI,
+ DIE_NMI_POST,
DIE_PAGE_FAULT,
};
-static inline int notify_die(enum die_val val, const char *str,
- struct pt_regs *regs, long err, int trap, int sig)
-{
- struct die_args args = {
- .regs = regs,
- .str = str,
- .err = err,
- .trapnr = trap,
- .signr = sig
- };
- return atomic_notifier_call_chain(&die_chain, val, &args);
-}
-
extern void printk_address(unsigned long address);
extern void die(const char *,struct pt_regs *,long);
extern void __die(const char *,struct pt_regs *,long);
diff --git a/include/asm-x86_64/kexec.h b/include/asm-x86_64/kexec.h
index 5fab957e109..738e581b67f 100644
--- a/include/asm-x86_64/kexec.h
+++ b/include/asm-x86_64/kexec.h
@@ -48,8 +48,6 @@
/* The native architecture */
#define KEXEC_ARCH KEXEC_ARCH_X86_64
-#define MAX_NOTE_BYTES 1024
-
/*
* Saving the registers of the cpu on which panic occurred in
* crash_kexec to save a valid sp. The registers of other cpus
diff --git a/include/asm-x86_64/local.h b/include/asm-x86_64/local.h
index e769e620022..e87492bb069 100644
--- a/include/asm-x86_64/local.h
+++ b/include/asm-x86_64/local.h
@@ -2,49 +2,183 @@
#define _ARCH_X8664_LOCAL_H
#include <linux/percpu.h>
+#include <asm/atomic.h>
typedef struct
{
- volatile long counter;
+ atomic_long_t a;
} local_t;
-#define LOCAL_INIT(i) { (i) }
+#define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
-#define local_read(v) ((v)->counter)
-#define local_set(v,i) (((v)->counter) = (i))
+#define local_read(l) atomic_long_read(&(l)->a)
+#define local_set(l,i) atomic_long_set(&(l)->a, (i))
-static inline void local_inc(local_t *v)
+static inline void local_inc(local_t *l)
{
__asm__ __volatile__(
"incq %0"
- :"=m" (v->counter)
- :"m" (v->counter));
+ :"=m" (l->a.counter)
+ :"m" (l->a.counter));
}
-static inline void local_dec(local_t *v)
+static inline void local_dec(local_t *l)
{
__asm__ __volatile__(
"decq %0"
- :"=m" (v->counter)
- :"m" (v->counter));
+ :"=m" (l->a.counter)
+ :"m" (l->a.counter));
}
-static inline void local_add(long i, local_t *v)
+static inline void local_add(long i, local_t *l)
{
__asm__ __volatile__(
"addq %1,%0"
- :"=m" (v->counter)
- :"ir" (i), "m" (v->counter));
+ :"=m" (l->a.counter)
+ :"ir" (i), "m" (l->a.counter));
}
-static inline void local_sub(long i, local_t *v)
+static inline void local_sub(long i, local_t *l)
{
__asm__ __volatile__(
"subq %1,%0"
- :"=m" (v->counter)
- :"ir" (i), "m" (v->counter));
+ :"=m" (l->a.counter)
+ :"ir" (i), "m" (l->a.counter));
}
+/**
+ * local_sub_and_test - subtract value from variable and test result
+ * @i: integer value to subtract
+ * @l: pointer to type local_t
+ *
+ * Atomically subtracts @i from @l and returns
+ * true if the result is zero, or false for all
+ * other cases.
+ */
+static __inline__ int local_sub_and_test(long i, local_t *l)
+{
+ unsigned char c;
+
+ __asm__ __volatile__(
+ "subq %2,%0; sete %1"
+ :"=m" (l->a.counter), "=qm" (c)
+ :"ir" (i), "m" (l->a.counter) : "memory");
+ return c;
+}
+
+/**
+ * local_dec_and_test - decrement and test
+ * @l: pointer to type local_t
+ *
+ * Atomically decrements @l by 1 and
+ * returns true if the result is 0, or false for all other
+ * cases.
+ */
+static __inline__ int local_dec_and_test(local_t *l)
+{
+ unsigned char c;
+
+ __asm__ __volatile__(
+ "decq %0; sete %1"
+ :"=m" (l->a.counter), "=qm" (c)
+ :"m" (l->a.counter) : "memory");
+ return c != 0;
+}
+
+/**
+ * local_inc_and_test - increment and test
+ * @l: pointer to type local_t
+ *
+ * Atomically increments @l by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+static __inline__ int local_inc_and_test(local_t *l)
+{
+ unsigned char c;
+
+ __asm__ __volatile__(
+ "incq %0; sete %1"
+ :"=m" (l->a.counter), "=qm" (c)
+ :"m" (l->a.counter) : "memory");
+ return c != 0;
+}
+
+/**
+ * local_add_negative - add and test if negative
+ * @i: integer value to add
+ * @l: pointer to type local_t
+ *
+ * Atomically adds @i to @l and returns true
+ * if the result is negative, or false when
+ * result is greater than or equal to zero.
+ */
+static __inline__ int local_add_negative(long i, local_t *l)
+{
+ unsigned char c;
+
+ __asm__ __volatile__(
+ "addq %2,%0; sets %1"
+ :"=m" (l->a.counter), "=qm" (c)
+ :"ir" (i), "m" (l->a.counter) : "memory");
+ return c;
+}
+
+/**
+ * local_add_return - add and return
+ * @i: integer value to add
+ * @l: pointer to type local_t
+ *
+ * Atomically adds @i to @l and returns @i + @l
+ */
+static __inline__ long local_add_return(long i, local_t *l)
+{
+ long __i = i;
+ __asm__ __volatile__(
+ "xaddq %0, %1;"
+ :"+r" (i), "+m" (l->a.counter)
+ : : "memory");
+ return i + __i;
+}
+
+static __inline__ long local_sub_return(long i, local_t *l)
+{
+ return local_add_return(-i,l);
+}
+
+#define local_inc_return(l) (local_add_return(1,l))
+#define local_dec_return(l) (local_sub_return(1,l))
+
+#define local_cmpxchg(l, o, n) \
+ (cmpxchg_local(&((l)->a.counter), (o), (n)))
+/* Always has a lock prefix */
+#define local_xchg(l, n) (xchg(&((l)->a.counter), (n)))
+
+/**
+ * local_add_unless - add unless the number is a given value
+ * @l: pointer of type local_t
+ * @a: the amount to add to l...
+ * @u: ...unless l is equal to u.
+ *
+ * Atomically adds @a to @l, so long as it was not @u.
+ * Returns non-zero if @l was not @u, and zero otherwise.
+ */
+#define local_add_unless(l, a, u) \
+({ \
+ long c, old; \
+ c = local_read(l); \
+ for (;;) { \
+ if (unlikely(c == (u))) \
+ break; \
+ old = local_cmpxchg((l), c, c + (a)); \
+ if (likely(old == c)) \
+ break; \
+ c = old; \
+ } \
+ c != (u); \
+})
+#define local_inc_not_zero(l) local_add_unless((l), 1, 0)
+
/* On x86-64 these are better than the atomic variants on SMP kernels
because they don't use a lock prefix. */
#define __local_inc(l) local_inc(l)
@@ -62,27 +196,27 @@ static inline void local_sub(long i, local_t *v)
/* Need to disable preemption for the cpu local counters otherwise we could
still access a variable of a previous CPU in a non atomic way. */
-#define cpu_local_wrap_v(v) \
+#define cpu_local_wrap_v(l) \
({ local_t res__; \
preempt_disable(); \
- res__ = (v); \
+ res__ = (l); \
preempt_enable(); \
res__; })
-#define cpu_local_wrap(v) \
+#define cpu_local_wrap(l) \
({ preempt_disable(); \
- v; \
+ l; \
preempt_enable(); }) \
-#define cpu_local_read(v) cpu_local_wrap_v(local_read(&__get_cpu_var(v)))
-#define cpu_local_set(v, i) cpu_local_wrap(local_set(&__get_cpu_var(v), (i)))
-#define cpu_local_inc(v) cpu_local_wrap(local_inc(&__get_cpu_var(v)))
-#define cpu_local_dec(v) cpu_local_wrap(local_dec(&__get_cpu_var(v)))
-#define cpu_local_add(i, v) cpu_local_wrap(local_add((i), &__get_cpu_var(v)))
-#define cpu_local_sub(i, v) cpu_local_wrap(local_sub((i), &__get_cpu_var(v)))
+#define cpu_local_read(l) cpu_local_wrap_v(local_read(&__get_cpu_var(l)))
+#define cpu_local_set(l, i) cpu_local_wrap(local_set(&__get_cpu_var(l), (i)))
+#define cpu_local_inc(l) cpu_local_wrap(local_inc(&__get_cpu_var(l)))
+#define cpu_local_dec(l) cpu_local_wrap(local_dec(&__get_cpu_var(l)))
+#define cpu_local_add(i, l) cpu_local_wrap(local_add((i), &__get_cpu_var(l)))
+#define cpu_local_sub(i, l) cpu_local_wrap(local_sub((i), &__get_cpu_var(l)))
-#define __cpu_local_inc(v) cpu_local_inc(v)
-#define __cpu_local_dec(v) cpu_local_dec(v)
-#define __cpu_local_add(i, v) cpu_local_add((i), (v))
-#define __cpu_local_sub(i, v) cpu_local_sub((i), (v))
+#define __cpu_local_inc(l) cpu_local_inc(l)
+#define __cpu_local_dec(l) cpu_local_dec(l)
+#define __cpu_local_add(i, l) cpu_local_add((i), (l))
+#define __cpu_local_sub(i, l) cpu_local_sub((i), (l))
-#endif /* _ARCH_I386_LOCAL_H */
+#endif /* _ARCH_X8664_LOCAL_H */
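
local_t now wraps atomic_long_t, and the cpu_local_*() wrappers above disable preemption so a counter is never updated through a stale per-CPU reference. A sketch of how a per-CPU event counter might be declared and used with these helpers; the counter name and the summing loop are illustrative, not part of this patch:

#include <linux/percpu.h>
#include <asm/local.h>

/* Hypothetical per-CPU event counter. */
static DEFINE_PER_CPU(local_t, demo_events);

static void demo_event(void)
{
        /* Preemption-safe increment of this CPU's counter (see cpu_local_wrap). */
        cpu_local_inc(demo_events);
}

static unsigned long demo_events_total(void)
{
        unsigned long sum = 0;
        int cpu;

        /* Approximate total across CPUs; each per-CPU read is itself atomic. */
        for_each_possible_cpu(cpu)
                sum += local_read(&per_cpu(demo_events, cpu));
        return sum;
}
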
diff --git a/include/asm-x86_64/page.h b/include/asm-x86_64/page.h
index 4d04e247956..dee632fa457 100644
--- a/include/asm-x86_64/page.h
+++ b/include/asm-x86_64/page.h
@@ -1,7 +1,7 @@
#ifndef _X86_64_PAGE_H
#define _X86_64_PAGE_H
-#include <asm/const.h>
+#include <linux/const.h>
/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT 12
@@ -79,9 +79,10 @@ extern unsigned long phys_base;
#define __PHYSICAL_START CONFIG_PHYSICAL_START
#define __KERNEL_ALIGN 0x200000
+
#define __START_KERNEL (__START_KERNEL_map + __PHYSICAL_START)
-#define __START_KERNEL_map 0xffffffff80000000
-#define __PAGE_OFFSET 0xffff810000000000
+#define __START_KERNEL_map _AC(0xffffffff80000000, UL)
+#define __PAGE_OFFSET _AC(0xffff810000000000, UL)
/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
@@ -93,7 +94,7 @@ extern unsigned long phys_base;
#define __VIRTUAL_MASK ((_AC(1,UL) << __VIRTUAL_MASK_SHIFT) - 1)
#define KERNEL_TEXT_SIZE (40*1024*1024)
-#define KERNEL_TEXT_START 0xffffffff80000000
+#define KERNEL_TEXT_START _AC(0xffffffff80000000, UL)
#define PAGE_OFFSET __PAGE_OFFSET
#ifndef __ASSEMBLY__
diff --git a/include/asm-x86_64/pgtable.h b/include/asm-x86_64/pgtable.h
index da3390faaea..08b9831f2e1 100644
--- a/include/asm-x86_64/pgtable.h
+++ b/include/asm-x86_64/pgtable.h
@@ -1,7 +1,7 @@
#ifndef _X86_64_PGTABLE_H
#define _X86_64_PGTABLE_H
-#include <asm/const.h>
+#include <linux/const.h>
#ifndef __ASSEMBLY__
/*
@@ -134,11 +134,11 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long
#define USER_PTRS_PER_PGD ((TASK_SIZE-1)/PGDIR_SIZE+1)
#define FIRST_USER_ADDRESS 0
-#define MAXMEM 0x3fffffffffff
-#define VMALLOC_START 0xffffc20000000000
-#define VMALLOC_END 0xffffe1ffffffffff
-#define MODULES_VADDR 0xffffffff88000000
-#define MODULES_END 0xfffffffffff00000
+#define MAXMEM _AC(0x3fffffffffff, UL)
+#define VMALLOC_START _AC(0xffffc20000000000, UL)
+#define VMALLOC_END _AC(0xffffe1ffffffffff, UL)
+#define MODULES_VADDR _AC(0xffffffff88000000, UL)
+#define MODULES_END _AC(0xfffffffffff00000, UL)
#define MODULES_LEN (MODULES_END - MODULES_VADDR)
#define _PAGE_BIT_PRESENT 0
@@ -411,17 +411,12 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
extern spinlock_t pgd_lock;
extern struct list_head pgd_list;
-void vmalloc_sync_all(void);
extern int kern_addr_valid(unsigned long addr);
#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
remap_pfn_range(vma, vaddr, pfn, size, prot)
-#define MK_IOSPACE_PFN(space, pfn) (pfn)
-#define GET_IOSPACE(pfn) 0
-#define GET_PFN(pfn) (pfn)
-
#define HAVE_ARCH_UNMAPPED_AREA
#define pgtable_cache_init() do { } while (0)
diff --git a/include/asm-x86_64/serial.h b/include/asm-x86_64/serial.h
index b0496e0d72a..8ebd765c674 100644
--- a/include/asm-x86_64/serial.h
+++ b/include/asm-x86_64/serial.h
@@ -11,19 +11,3 @@
* megabits/second; but this requires the faster clock.
*/
#define BASE_BAUD ( 1843200 / 16 )
-
-/* Standard COM flags (except for COM4, because of the 8514 problem) */
-#ifdef CONFIG_SERIAL_DETECT_IRQ
-#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST | ASYNC_AUTO_IRQ)
-#define STD_COM4_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_AUTO_IRQ)
-#else
-#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST)
-#define STD_COM4_FLAGS ASYNC_BOOT_AUTOCONF
-#endif
-
-#define SERIAL_PORT_DFNS \
- /* UART CLK PORT IRQ FLAGS */ \
- { 0, BASE_BAUD, 0x3F8, 4, STD_COM_FLAGS }, /* ttyS0 */ \
- { 0, BASE_BAUD, 0x2F8, 3, STD_COM_FLAGS }, /* ttyS1 */ \
- { 0, BASE_BAUD, 0x3E8, 4, STD_COM_FLAGS }, /* ttyS2 */ \
- { 0, BASE_BAUD, 0x2E8, 3, STD_COM4_FLAGS }, /* ttyS3 */
diff --git a/include/asm-x86_64/system.h b/include/asm-x86_64/system.h
index 213b7fe5d99..b7b8021e8c4 100644
--- a/include/asm-x86_64/system.h
+++ b/include/asm-x86_64/system.h
@@ -3,7 +3,7 @@
#include <linux/kernel.h>
#include <asm/segment.h>
-#include <asm/alternative.h>
+#include <asm/cmpxchg.h>
#ifdef __KERNEL__
@@ -124,100 +124,6 @@ static inline void sched_cacheflush(void)
#define nop() __asm__ __volatile__ ("nop")
-#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
-
-#define tas(ptr) (xchg((ptr),1))
-
-#define __xg(x) ((volatile long *)(x))
-
-static inline void set_64bit(volatile unsigned long *ptr, unsigned long val)
-{
- *ptr = val;
-}
-
-#define _set_64bit set_64bit
-
-/*
- * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
- * Note 2: xchg has side effect, so that attribute volatile is necessary,
- * but generally the primitive is invalid, *ptr is output argument. --ANK
- */
-static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
-{
- switch (size) {
- case 1:
- __asm__ __volatile__("xchgb %b0,%1"
- :"=q" (x)
- :"m" (*__xg(ptr)), "0" (x)
- :"memory");
- break;
- case 2:
- __asm__ __volatile__("xchgw %w0,%1"
- :"=r" (x)
- :"m" (*__xg(ptr)), "0" (x)
- :"memory");
- break;
- case 4:
- __asm__ __volatile__("xchgl %k0,%1"
- :"=r" (x)
- :"m" (*__xg(ptr)), "0" (x)
- :"memory");
- break;
- case 8:
- __asm__ __volatile__("xchgq %0,%1"
- :"=r" (x)
- :"m" (*__xg(ptr)), "0" (x)
- :"memory");
- break;
- }
- return x;
-}
-
-/*
- * Atomic compare and exchange. Compare OLD with MEM, if identical,
- * store NEW in MEM. Return the initial value in MEM. Success is
- * indicated by comparing RETURN with OLD.
- */
-
-#define __HAVE_ARCH_CMPXCHG 1
-
-static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
- unsigned long new, int size)
-{
- unsigned long prev;
- switch (size) {
- case 1:
- __asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
- : "=a"(prev)
- : "q"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
- return prev;
- case 2:
- __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
- : "=a"(prev)
- : "r"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
- return prev;
- case 4:
- __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %k1,%2"
- : "=a"(prev)
- : "r"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
- return prev;
- case 8:
- __asm__ __volatile__(LOCK_PREFIX "cmpxchgq %1,%2"
- : "=a"(prev)
- : "r"(new), "m"(*__xg(ptr)), "0"(old)
- : "memory");
- return prev;
- }
- return old;
-}
-
-#define cmpxchg(ptr,o,n)\
- ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
- (unsigned long)(n),sizeof(*(ptr))))
-
#ifdef CONFIG_SMP
#define smp_mb() mb()
#define smp_rmb() rmb()
diff --git a/include/asm-x86_64/termbits.h b/include/asm-x86_64/termbits.h
index 6cfc3bb10c1..7405756dd41 100644
--- a/include/asm-x86_64/termbits.h
+++ b/include/asm-x86_64/termbits.h
@@ -160,7 +160,7 @@ struct ktermios {
#define CMSPAR 010000000000 /* mark or space (stick) parity */
#define CRTSCTS 020000000000 /* flow control */
-#define IBSHIFT 8 /* Shift from CBAUD to CIBAUD */
+#define IBSHIFT 16 /* Shift from CBAUD to CIBAUD */
/* c_lflag bits */
#define ISIG 0000001
diff --git a/include/asm-x86_64/unistd.h b/include/asm-x86_64/unistd.h
index 26e23e01c54..595703949df 100644
--- a/include/asm-x86_64/unistd.h
+++ b/include/asm-x86_64/unistd.h
@@ -619,6 +619,8 @@ __SYSCALL(__NR_sync_file_range, sys_sync_file_range)
__SYSCALL(__NR_vmsplice, sys_vmsplice)
#define __NR_move_pages 279
__SYSCALL(__NR_move_pages, sys_move_pages)
+#define __NR_utimensat 280
+__SYSCALL(__NR_utimensat, sys_utimensat)
#ifndef __NO_STUBS
#define __ARCH_WANT_OLD_READDIR
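
The hunk above wires up syscall 280, utimensat. Until a libc wrapper exists, the call can be issued directly through syscall(2); the sketch below assumes the usual utimensat argument order (dirfd, pathname, const struct timespec times[2], flags), that AT_FDCWD comes from <fcntl.h>, and that the target file ("/tmp/demo-file", hypothetical) already exists. Passing NULL for times sets both timestamps to the current time.

#define _GNU_SOURCE
#include <fcntl.h>          /* AT_FDCWD */
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef __NR_utimensat
#define __NR_utimensat 280  /* x86_64 number added above */
#endif

int main(void)
{
        /* times == NULL: set both atime and mtime to the current time. */
        long ret = syscall(__NR_utimensat, AT_FDCWD, "/tmp/demo-file", NULL, 0);

        if (ret != 0)
                perror("utimensat");
        return ret ? 1 : 0;
}
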
diff --git a/include/asm-xtensa/atomic.h b/include/asm-xtensa/atomic.h
index 5c267202106..b3b23540f14 100644
--- a/include/asm-xtensa/atomic.h
+++ b/include/asm-xtensa/atomic.h
@@ -234,14 +234,21 @@ static inline int atomic_sub_return(int i, atomic_t * v)
* Atomically adds @a to @v, so long as it was not @u.
* Returns non-zero if @v was not @u, and zero otherwise.
*/
-#define atomic_add_unless(v, a, u) \
-({ \
- int c, old; \
- c = atomic_read(v); \
- while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
- c = old; \
- c != (u); \
-})
+static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+{
+ int c, old;
+ c = atomic_read(v);
+ for (;;) {
+ if (unlikely(c == (u)))
+ break;
+ old = atomic_cmpxchg((v), c, c + (a));
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+ return c != (u);
+}
+
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
diff --git a/include/asm-xtensa/kdebug.h b/include/asm-xtensa/kdebug.h
new file mode 100644
index 00000000000..6ece1b03766
--- /dev/null
+++ b/include/asm-xtensa/kdebug.h
@@ -0,0 +1 @@
+#include <asm-generic/kdebug.h>
diff --git a/include/asm-xtensa/system.h b/include/asm-xtensa/system.h
index 4aaed7fe6cf..ddc970847ae 100644
--- a/include/asm-xtensa/system.h
+++ b/include/asm-xtensa/system.h
@@ -183,8 +183,6 @@ static inline unsigned long xchg_u32(volatile int * m, unsigned long val)
return tmp;
}
-#define tas(ptr) (xchg((ptr),1))
-
#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
/*
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index 9f05279e7dd..94cc04a143f 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -4,6 +4,7 @@ header-y += hdlc/
header-y += isdn/
header-y += nfsd/
header-y += raid/
+header-y += spi/
header-y += sunrpc/
header-y += tc_act/
header-y += netfilter/
@@ -33,7 +34,6 @@ header-y += atmsvc.h
header-y += atm_zatm.h
header-y += auto_fs4.h
header-y += auxvec.h
-header-y += awe_voice.h
header-y += ax25.h
header-y += b1lli.h
header-y += baycom.h
@@ -46,6 +46,7 @@ header-y += coda_psdev.h
header-y += coff.h
header-y += comstats.h
header-y += consolemap.h
+header-y += const.h
header-y += cycx_cfm.h
header-y += dlm_device.h
header-y += dm-ioctl.h
@@ -91,7 +92,6 @@ header-y += ip_mp_alg.h
header-y += ipsec.h
header-y += ipx.h
header-y += irda.h
-header-y += isdn_divertif.h
header-y += iso_fs.h
header-y += ixjuser.h
header-y += jffs2.h
@@ -121,6 +121,7 @@ header-y += pci_regs.h
header-y += personality.h
header-y += pfkeyv2.h
header-y += pg.h
+header-y += phantom.h
header-y += pkt_cls.h
header-y += pkt_sched.h
header-y += posix_types.h
@@ -140,6 +141,7 @@ header-y += sockios.h
header-y += som.h
header-y += sound.h
header-y += synclink.h
+header-y += taskstats.h
header-y += telephony.h
header-y += termios.h
header-y += ticable.h
@@ -239,6 +241,7 @@ unifdef-y += ipv6.h
unifdef-y += ipv6_route.h
unifdef-y += isdn.h
unifdef-y += isdnif.h
+unifdef-y += isdn_divertif.h
unifdef-y += isdn_ppp.h
unifdef-y += isicom.h
unifdef-y += jbd.h
diff --git a/include/linux/awe_voice.h b/include/linux/awe_voice.h
deleted file mode 100644
index bf33f17bea9..00000000000
--- a/include/linux/awe_voice.h
+++ /dev/null
@@ -1,525 +0,0 @@
-/*
- * include/linux/awe_voice.h
- *
- * Voice information definitions for the low level driver for the
- * AWE32/SB32/AWE64 wave table synth.
- * version 0.4.4; Jan. 4, 2000
- *
- * Copyright (C) 1996-2000 Takashi Iwai
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#ifndef AWE_VOICE_H
-#define AWE_VOICE_H
-
-#ifndef SAMPLE_TYPE_AWE32
-#define SAMPLE_TYPE_AWE32 0x20
-#endif
-
-#define _LINUX_PATCHKEY_H_INDIRECT
-#include <linux/patchkey.h>
-#undef _LINUX_PATCHKEY_H_INDIRECT
-
-/*----------------------------------------------------------------
- * patch information record
- *----------------------------------------------------------------*/
-
-/* patch interface header: 16 bytes */
-typedef struct awe_patch_info {
- short key; /* use AWE_PATCH here */
-#define AWE_PATCH _PATCHKEY(0x07)
-
- short device_no; /* synthesizer number */
- unsigned short sf_id; /* file id (should be zero) */
- short optarg; /* optional argument */
- int len; /* data length (without this header) */
-
- short type; /* patch operation type */
-#define AWE_LOAD_INFO 0 /* awe_voice_rec */
-#define AWE_LOAD_DATA 1 /* awe_sample_info */
-#define AWE_OPEN_PATCH 2 /* awe_open_parm */
-#define AWE_CLOSE_PATCH 3 /* none */
-#define AWE_UNLOAD_PATCH 4 /* none */
-#define AWE_REPLACE_DATA 5 /* awe_sample_info (optarg=#channels)*/
-#define AWE_MAP_PRESET 6 /* awe_voice_map */
-/*#define AWE_PROBE_INFO 7*/ /* awe_voice_map (pat only) */
-#define AWE_PROBE_DATA 8 /* optarg=sample */
-#define AWE_REMOVE_INFO 9 /* optarg=(bank<<8)|instr */
-#define AWE_LOAD_CHORUS_FX 0x10 /* awe_chorus_fx_rec (optarg=mode) */
-#define AWE_LOAD_REVERB_FX 0x11 /* awe_reverb_fx_rec (optarg=mode) */
-
- short reserved; /* word alignment data */
-
- /* the actual patch data begins after this */
-#if defined(AWE_COMPAT_030) && AWE_COMPAT_030
- char data[0];
-#endif
-} awe_patch_info;
-
-/*#define AWE_PATCH_INFO_SIZE 16*/
-#define AWE_PATCH_INFO_SIZE sizeof(awe_patch_info)
-
-
-/*----------------------------------------------------------------
- * open patch
- *----------------------------------------------------------------*/
-
-#define AWE_PATCH_NAME_LEN 32
-
-typedef struct _awe_open_parm {
- unsigned short type; /* sample type */
-#define AWE_PAT_TYPE_MISC 0
-#define AWE_PAT_TYPE_GM 1
-#define AWE_PAT_TYPE_GS 2
-#define AWE_PAT_TYPE_MT32 3
-#define AWE_PAT_TYPE_XG 4
-#define AWE_PAT_TYPE_SFX 5
-#define AWE_PAT_TYPE_GUS 6
-#define AWE_PAT_TYPE_MAP 7
-
-#define AWE_PAT_LOCKED 0x100 /* lock the samples */
-#define AWE_PAT_SHARED 0x200 /* sample is shared */
-
- short reserved;
- char name[AWE_PATCH_NAME_LEN];
-} awe_open_parm;
-
-/*#define AWE_OPEN_PARM_SIZE 28*/
-#define AWE_OPEN_PARM_SIZE sizeof(awe_open_parm)
-
-
-/*----------------------------------------------------------------
- * raw voice information record
- *----------------------------------------------------------------*/
-
-/* wave table envelope & effect parameters to control EMU8000 */
-typedef struct _awe_voice_parm {
- unsigned short moddelay; /* modulation delay (0x8000) */
- unsigned short modatkhld; /* modulation attack & hold time (0x7f7f) */
- unsigned short moddcysus; /* modulation decay & sustain (0x7f7f) */
- unsigned short modrelease; /* modulation release time (0x807f) */
- short modkeyhold, modkeydecay; /* envelope change per key (not used) */
- unsigned short voldelay; /* volume delay (0x8000) */
- unsigned short volatkhld; /* volume attack & hold time (0x7f7f) */
- unsigned short voldcysus; /* volume decay & sustain (0x7f7f) */
- unsigned short volrelease; /* volume release time (0x807f) */
- short volkeyhold, volkeydecay; /* envelope change per key (not used) */
- unsigned short lfo1delay; /* LFO1 delay (0x8000) */
- unsigned short lfo2delay; /* LFO2 delay (0x8000) */
- unsigned short pefe; /* modulation pitch & cutoff (0x0000) */
- unsigned short fmmod; /* LFO1 pitch & cutoff (0x0000) */
- unsigned short tremfrq; /* LFO1 volume & freq (0x0000) */
- unsigned short fm2frq2; /* LFO2 pitch & freq (0x0000) */
- unsigned char cutoff; /* initial cutoff (0xff) */
- unsigned char filterQ; /* initial filter Q [0-15] (0x0) */
- unsigned char chorus; /* chorus send (0x00) */
- unsigned char reverb; /* reverb send (0x00) */
- unsigned short reserved[4]; /* not used */
-} awe_voice_parm;
-
-typedef struct _awe_voice_parm_block {
- unsigned short moddelay; /* modulation delay (0x8000) */
- unsigned char modatk, modhld;
- unsigned char moddcy, modsus;
- unsigned char modrel, moddummy;
- short modkeyhold, modkeydecay; /* envelope change per key (not used) */
- unsigned short voldelay; /* volume delay (0x8000) */
- unsigned char volatk, volhld;
- unsigned char voldcy, volsus;
- unsigned char volrel, voldummy;
- short volkeyhold, volkeydecay; /* envelope change per key (not used) */
- unsigned short lfo1delay; /* LFO1 delay (0x8000) */
- unsigned short lfo2delay; /* LFO2 delay (0x8000) */
- unsigned char env1fc, env1pit;
- unsigned char lfo1fc, lfo1pit;
- unsigned char lfo1freq, lfo1vol;
- unsigned char lfo2freq, lfo2pit;
- unsigned char cutoff; /* initial cutoff (0xff) */
- unsigned char filterQ; /* initial filter Q [0-15] (0x0) */
- unsigned char chorus; /* chorus send (0x00) */
- unsigned char reverb; /* reverb send (0x00) */
- unsigned short reserved[4]; /* not used */
-} awe_voice_parm_block;
-
-#define AWE_VOICE_PARM_SIZE 48
-
-
-/* wave table parameters: 92 bytes */
-typedef struct _awe_voice_info {
- unsigned short sf_id; /* file id (should be zero) */
- unsigned short sample; /* sample id */
- int start, end; /* sample offset correction */
- int loopstart, loopend; /* loop offset correction */
- short rate_offset; /* sample rate pitch offset */
- unsigned short mode; /* sample mode */
-#define AWE_MODE_ROMSOUND 0x8000
-#define AWE_MODE_STEREO 1
-#define AWE_MODE_LOOPING 2
-#define AWE_MODE_NORELEASE 4 /* obsolete */
-#define AWE_MODE_INIT_PARM 8
-
- short root; /* midi root key */
- short tune; /* pitch tuning (in cents) */
- signed char low, high; /* key note range */
- signed char vellow, velhigh; /* velocity range */
- signed char fixkey, fixvel; /* fixed key, velocity */
- signed char pan, fixpan; /* panning, fixed panning */
- short exclusiveClass; /* exclusive class (0 = none) */
- unsigned char amplitude; /* sample volume (127 max) */
- unsigned char attenuation; /* attenuation (0.375dB) */
- short scaleTuning; /* pitch scale tuning(%), normally 100 */
- awe_voice_parm parm; /* voice envelope parameters */
- short index; /* internal index (set by driver) */
-} awe_voice_info;
-
-/*#define AWE_VOICE_INFO_SIZE 92*/
-#define AWE_VOICE_INFO_SIZE sizeof(awe_voice_info)
-
-/*----------------------------------------------------------------*/
-
-/* The info entry of awe_voice_rec is changed from 0 to 1
- * for some compilers refusing zero size array.
- * Due to this change, sizeof(awe_voice_rec) becomes different
- * from older versions.
- * Use AWE_VOICE_REC_SIZE instead.
- */
-
-/* instrument info header: 4 bytes */
-typedef struct _awe_voice_rec_hdr {
- unsigned char bank; /* midi bank number */
- unsigned char instr; /* midi preset number */
- char nvoices; /* number of voices */
- char write_mode; /* write mode; normally 0 */
-#define AWE_WR_APPEND 0 /* append anyway */
-#define AWE_WR_EXCLUSIVE 1 /* skip if already exists */
-#define AWE_WR_REPLACE 2 /* replace if already exists */
-} awe_voice_rec_hdr;
-
-/*#define AWE_VOICE_REC_SIZE 4*/
-#define AWE_VOICE_REC_SIZE sizeof(awe_voice_rec_hdr)
-
-/* the standard patch structure for one sample */
-typedef struct _awe_voice_rec_patch {
- awe_patch_info patch;
- awe_voice_rec_hdr hdr;
- awe_voice_info info;
-} awe_voice_rec_patch;
-
-
-/* obsolete data type */
-#if defined(AWE_COMPAT_030) && AWE_COMPAT_030
-#define AWE_INFOARRAY_SIZE 0
-#else
-#define AWE_INFOARRAY_SIZE 1
-#endif
-
-typedef struct _awe_voice_rec {
- unsigned char bank; /* midi bank number */
- unsigned char instr; /* midi preset number */
- short nvoices; /* number of voices */
- /* voice information follows here */
- awe_voice_info info[AWE_INFOARRAY_SIZE];
-} awe_voice_rec;
-
-
-/*----------------------------------------------------------------
- * sample wave information
- *----------------------------------------------------------------*/
-
-/* wave table sample header: 32 bytes */
-typedef struct awe_sample_info {
- unsigned short sf_id; /* file id (should be zero) */
- unsigned short sample; /* sample id */
- int start, end; /* start & end offset */
- int loopstart, loopend; /* loop start & end offset */
- int size; /* size (0 = ROM) */
- short checksum_flag; /* use check sum = 1 */
- unsigned short mode_flags; /* mode flags */
-#define AWE_SAMPLE_8BITS 1 /* wave data is 8bits */
-#define AWE_SAMPLE_UNSIGNED 2 /* wave data is unsigned */
-#define AWE_SAMPLE_NO_BLANK 4 /* no blank loop is attached */
-#define AWE_SAMPLE_SINGLESHOT 8 /* single-shot w/o loop */
-#define AWE_SAMPLE_BIDIR_LOOP 16 /* bidirectional looping */
-#define AWE_SAMPLE_STEREO_LEFT 32 /* stereo left sound */
-#define AWE_SAMPLE_STEREO_RIGHT 64 /* stereo right sound */
-#define AWE_SAMPLE_REVERSE_LOOP 128 /* reverse looping */
- unsigned int checksum; /* check sum */
-#if defined(AWE_COMPAT_030) && AWE_COMPAT_030
- unsigned short data[0]; /* sample data follows here */
-#endif
-} awe_sample_info;
-
-/*#define AWE_SAMPLE_INFO_SIZE 32*/
-#define AWE_SAMPLE_INFO_SIZE sizeof(awe_sample_info)
-
-
-/*----------------------------------------------------------------
- * voice preset mapping
- *----------------------------------------------------------------*/
-
-typedef struct awe_voice_map {
- int map_bank, map_instr, map_key; /* key = -1 means all keys */
- int src_bank, src_instr, src_key;
-} awe_voice_map;
-
-#define AWE_VOICE_MAP_SIZE sizeof(awe_voice_map)
-
-
-/*----------------------------------------------------------------
- * awe hardware controls
- *----------------------------------------------------------------*/
-
-#define _AWE_DEBUG_MODE 0x00
-#define _AWE_REVERB_MODE 0x01
-#define _AWE_CHORUS_MODE 0x02
-#define _AWE_REMOVE_LAST_SAMPLES 0x03
-#define _AWE_INITIALIZE_CHIP 0x04
-#define _AWE_SEND_EFFECT 0x05
-#define _AWE_TERMINATE_CHANNEL 0x06
-#define _AWE_TERMINATE_ALL 0x07
-#define _AWE_INITIAL_VOLUME 0x08
-#define _AWE_INITIAL_ATTEN _AWE_INITIAL_VOLUME
-#define _AWE_RESET_CHANNEL 0x09
-#define _AWE_CHANNEL_MODE 0x0a
-#define _AWE_DRUM_CHANNELS 0x0b
-#define _AWE_MISC_MODE 0x0c
-#define _AWE_RELEASE_ALL 0x0d
-#define _AWE_NOTEOFF_ALL 0x0e
-#define _AWE_CHN_PRESSURE 0x0f
-/*#define _AWE_GET_CURRENT_MODE 0x10*/
-#define _AWE_EQUALIZER 0x11
-/*#define _AWE_GET_MISC_MODE 0x12*/
-/*#define _AWE_GET_FONTINFO 0x13*/
-
-#define _AWE_MODE_FLAG 0x80
-#define _AWE_COOKED_FLAG 0x40 /* not supported */
-#define _AWE_MODE_VALUE_MASK 0x3F
-
-/*----------------------------------------------------------------*/
-
-#define _AWE_SET_CMD(p,dev,voice,cmd,p1,p2) \
-{((char*)(p))[0] = SEQ_PRIVATE;\
- ((char*)(p))[1] = dev;\
- ((char*)(p))[2] = _AWE_MODE_FLAG|(cmd);\
- ((char*)(p))[3] = voice;\
- ((unsigned short*)(p))[2] = p1;\
- ((unsigned short*)(p))[3] = p2;}
-
-/* buffered access */
-#define _AWE_CMD(dev, voice, cmd, p1, p2) \
-{_SEQ_NEEDBUF(8);\
- _AWE_SET_CMD(_seqbuf + _seqbufptr, dev, voice, cmd, p1, p2);\
- _SEQ_ADVBUF(8);}
-
-/* direct access */
-#define _AWE_CMD_NOW(seqfd,dev,voice,cmd,p1,p2) \
-{struct seq_event_rec tmp;\
- _AWE_SET_CMD(&tmp, dev, voice, cmd, p1, p2);\
- ioctl(seqfd, SNDCTL_SEQ_OUTOFBAND, &tmp);}
-
-/*----------------------------------------------------------------*/
-
-/* set debugging mode */
-#define AWE_DEBUG_MODE(dev,p1) _AWE_CMD(dev, 0, _AWE_DEBUG_MODE, p1, 0)
-/* set reverb mode; from 0 to 7 */
-#define AWE_REVERB_MODE(dev,p1) _AWE_CMD(dev, 0, _AWE_REVERB_MODE, p1, 0)
-/* set chorus mode; from 0 to 7 */
-#define AWE_CHORUS_MODE(dev,p1) _AWE_CMD(dev, 0, _AWE_CHORUS_MODE, p1, 0)
-
-/* reset channel */
-#define AWE_RESET_CHANNEL(dev,ch) _AWE_CMD(dev, ch, _AWE_RESET_CHANNEL, 0, 0)
-#define AWE_RESET_CONTROL(dev,ch) _AWE_CMD(dev, ch, _AWE_RESET_CHANNEL, 1, 0)
-
-/* send an effect to all layers */
-#define AWE_SEND_EFFECT(dev,voice,type,value) _AWE_CMD(dev,voice,_AWE_SEND_EFFECT,type,value)
-#define AWE_ADD_EFFECT(dev,voice,type,value) _AWE_CMD(dev,voice,_AWE_SEND_EFFECT,((type)|0x80),value)
-#define AWE_UNSET_EFFECT(dev,voice,type) _AWE_CMD(dev,voice,_AWE_SEND_EFFECT,((type)|0x40),0)
-/* send an effect to a layer */
-#define AWE_SEND_LAYER_EFFECT(dev,voice,layer,type,value) _AWE_CMD(dev,voice,_AWE_SEND_EFFECT,((layer+1)<<8|(type)),value)
-#define AWE_ADD_LAYER_EFFECT(dev,voice,layer,type,value) _AWE_CMD(dev,voice,_AWE_SEND_EFFECT,((layer+1)<<8|(type)|0x80),value)
-#define AWE_UNSET_LAYER_EFFECT(dev,voice,layer,type) _AWE_CMD(dev,voice,_AWE_SEND_EFFECT,((layer+1)<<8|(type)|0x40),0)
-
-/* terminate sound on the channel/voice */
-#define AWE_TERMINATE_CHANNEL(dev,voice) _AWE_CMD(dev,voice,_AWE_TERMINATE_CHANNEL,0,0)
-/* terminate all sounds */
-#define AWE_TERMINATE_ALL(dev) _AWE_CMD(dev, 0, _AWE_TERMINATE_ALL, 0, 0)
-/* release all sounds (w/o sustain effect) */
-#define AWE_RELEASE_ALL(dev) _AWE_CMD(dev, 0, _AWE_RELEASE_ALL, 0, 0)
-/* note off all sounds (w sustain effect) */
-#define AWE_NOTEOFF_ALL(dev) _AWE_CMD(dev, 0, _AWE_NOTEOFF_ALL, 0, 0)
-
-/* set initial attenuation */
-#define AWE_INITIAL_VOLUME(dev,atten) _AWE_CMD(dev, 0, _AWE_INITIAL_VOLUME, atten, 0)
-#define AWE_INITIAL_ATTEN AWE_INITIAL_VOLUME
-/* relative attenuation */
-#define AWE_SET_ATTEN(dev,atten) _AWE_CMD(dev, 0, _AWE_INITIAL_VOLUME, atten, 1)
-
-/* set channel playing mode; mode=0/1/2 */
-#define AWE_SET_CHANNEL_MODE(dev,mode) _AWE_CMD(dev, 0, _AWE_CHANNEL_MODE, mode, 0)
-#define AWE_PLAY_INDIRECT 0 /* indirect voice mode (default) */
-#define AWE_PLAY_MULTI 1 /* multi note voice mode */
-#define AWE_PLAY_DIRECT 2 /* direct single voice mode */
-#define AWE_PLAY_MULTI2 3 /* sequencer2 mode; used internally */
-
-/* set drum channel mask; channels is 32bit long value */
-#define AWE_DRUM_CHANNELS(dev,channels) _AWE_CMD(dev, 0, _AWE_DRUM_CHANNELS, ((channels) & 0xffff), ((channels) >> 16))
-
-/* set bass and treble control; values are from 0 to 11 */
-#define AWE_EQUALIZER(dev,bass,treble) _AWE_CMD(dev, 0, _AWE_EQUALIZER, bass, treble)
-
-/* remove last loaded samples */
-#define AWE_REMOVE_LAST_SAMPLES(seqfd,dev) _AWE_CMD_NOW(seqfd, dev, 0, _AWE_REMOVE_LAST_SAMPLES, 0, 0)
-/* initialize emu8000 chip */
-#define AWE_INITIALIZE_CHIP(seqfd,dev) _AWE_CMD_NOW(seqfd, dev, 0, _AWE_INITIALIZE_CHIP, 0, 0)
-
-/* set miscellaneous modes; meta command */
-#define AWE_MISC_MODE(dev,mode,value) _AWE_CMD(dev, 0, _AWE_MISC_MODE, mode, value)
-/* exclusive sound off; 1=off */
-#define AWE_EXCLUSIVE_SOUND(dev,mode) AWE_MISC_MODE(dev,AWE_MD_EXCLUSIVE_SOUND,mode)
-/* default GUS bank number */
-#define AWE_SET_GUS_BANK(dev,bank) AWE_MISC_MODE(dev,AWE_MD_GUS_BANK,bank)
-/* change panning position in realtime; 0=don't 1=do */
-#define AWE_REALTIME_PAN(dev,mode) AWE_MISC_MODE(dev,AWE_MD_REALTIME_PAN,mode)
-
-/* extended pressure controls; not portable with other sound drivers */
-#define AWE_KEY_PRESSURE(dev,ch,note,vel) SEQ_START_NOTE(dev,ch,(note)+128,vel)
-#define AWE_CHN_PRESSURE(dev,ch,vel) _AWE_CMD(dev,ch,_AWE_CHN_PRESSURE,vel,0)
-
-/*----------------------------------------------------------------*/
-
-/* reverb mode parameters */
-#define AWE_REVERB_ROOM1 0
-#define AWE_REVERB_ROOM2 1
-#define AWE_REVERB_ROOM3 2
-#define AWE_REVERB_HALL1 3
-#define AWE_REVERB_HALL2 4
-#define AWE_REVERB_PLATE 5
-#define AWE_REVERB_DELAY 6
-#define AWE_REVERB_PANNINGDELAY 7
-#define AWE_REVERB_PREDEFINED 8
-/* user can define reverb modes up to 32 */
-#define AWE_REVERB_NUMBERS 32
-
-typedef struct awe_reverb_fx_rec {
- unsigned short parms[28];
-} awe_reverb_fx_rec;
-
-/*----------------------------------------------------------------*/
-
-/* chorus mode parameters */
-#define AWE_CHORUS_1 0
-#define AWE_CHORUS_2 1
-#define AWE_CHORUS_3 2
-#define AWE_CHORUS_4 3
-#define AWE_CHORUS_FEEDBACK 4
-#define AWE_CHORUS_FLANGER 5
-#define AWE_CHORUS_SHORTDELAY 6
-#define AWE_CHORUS_SHORTDELAY2 7
-#define AWE_CHORUS_PREDEFINED 8
-/* user can define chorus modes up to 32 */
-#define AWE_CHORUS_NUMBERS 32
-
-typedef struct awe_chorus_fx_rec {
- unsigned short feedback; /* feedback level (0xE600-0xE6FF) */
- unsigned short delay_offset; /* delay (0-0x0DA3) [1/44100 sec] */
- unsigned short lfo_depth; /* LFO depth (0xBC00-0xBCFF) */
- unsigned int delay; /* right delay (0-0xFFFFFFFF) [1/256/44100 sec] */
- unsigned int lfo_freq; /* LFO freq LFO freq (0-0xFFFFFFFF) */
-} awe_chorus_fx_rec;
-
-/*----------------------------------------------------------------*/
-
-/* misc mode types */
-enum {
-/* 0*/ AWE_MD_EXCLUSIVE_OFF, /* obsolete */
-/* 1*/ AWE_MD_EXCLUSIVE_ON, /* obsolete */
-/* 2*/ AWE_MD_VERSION, /* read only */
-/* 3*/ AWE_MD_EXCLUSIVE_SOUND, /* 0/1: exclusive note on (default=1) */
-/* 4*/ AWE_MD_REALTIME_PAN, /* 0/1: do realtime pan change (default=1) */
-/* 5*/ AWE_MD_GUS_BANK, /* bank number for GUS patches (default=0) */
-/* 6*/ AWE_MD_KEEP_EFFECT, /* 0/1: keep effect values, (default=0) */
-/* 7*/ AWE_MD_ZERO_ATTEN, /* attenuation of max volume (default=32) */
-/* 8*/ AWE_MD_CHN_PRIOR, /* 0/1: set MIDI channel priority mode (default=1) */
-/* 9*/ AWE_MD_MOD_SENSE, /* integer: modwheel sensitivity (def=18) */
-/*10*/ AWE_MD_DEF_PRESET, /* integer: default preset number (def=0) */
-/*11*/ AWE_MD_DEF_BANK, /* integer: default bank number (def=0) */
-/*12*/ AWE_MD_DEF_DRUM, /* integer: default drumset number (def=0) */
-/*13*/ AWE_MD_TOGGLE_DRUM_BANK, /* 0/1: toggle drum flag with bank# (def=0) */
-/*14*/ AWE_MD_NEW_VOLUME_CALC, /* 0/1: volume calculation mode (def=1) */
-/*15*/ AWE_MD_CHORUS_MODE, /* integer: chorus mode (def=2) */
-/*16*/ AWE_MD_REVERB_MODE, /* integer: chorus mode (def=4) */
-/*17*/ AWE_MD_BASS_LEVEL, /* integer: bass level (def=5) */
-/*18*/ AWE_MD_TREBLE_LEVEL, /* integer: treble level (def=9) */
-/*19*/ AWE_MD_DEBUG_MODE, /* integer: debug level (def=0) */
-/*20*/ AWE_MD_PAN_EXCHANGE, /* 0/1: exchange panning direction (def=0) */
- AWE_MD_END,
-};
-
-/*----------------------------------------------------------------*/
-
-/* effect parameters */
-enum {
-
-/* modulation envelope parameters */
-/* 0*/ AWE_FX_ENV1_DELAY, /* WORD: ENVVAL */
-/* 1*/ AWE_FX_ENV1_ATTACK, /* BYTE: up ATKHLD */
-/* 2*/ AWE_FX_ENV1_HOLD, /* BYTE: lw ATKHLD */
-/* 3*/ AWE_FX_ENV1_DECAY, /* BYTE: lw DCYSUS */
-/* 4*/ AWE_FX_ENV1_RELEASE, /* BYTE: lw DCYSUS */
-/* 5*/ AWE_FX_ENV1_SUSTAIN, /* BYTE: up DCYSUS */
-/* 6*/ AWE_FX_ENV1_PITCH, /* BYTE: up PEFE */
-/* 7*/ AWE_FX_ENV1_CUTOFF, /* BYTE: lw PEFE */
-
-/* volume envelope parameters */
-/* 8*/ AWE_FX_ENV2_DELAY, /* WORD: ENVVOL */
-/* 9*/ AWE_FX_ENV2_ATTACK, /* BYTE: up ATKHLDV */
-/*10*/ AWE_FX_ENV2_HOLD, /* BYTE: lw ATKHLDV */
-/*11*/ AWE_FX_ENV2_DECAY, /* BYTE: lw DCYSUSV */
-/*12*/ AWE_FX_ENV2_RELEASE, /* BYTE: lw DCYSUSV */
-/*13*/ AWE_FX_ENV2_SUSTAIN, /* BYTE: up DCYSUSV */
-
-/* LFO1 (tremolo & vibrato) parameters */
-/*14*/ AWE_FX_LFO1_DELAY, /* WORD: LFO1VAL */
-/*15*/ AWE_FX_LFO1_FREQ, /* BYTE: lo TREMFRQ */
-/*16*/ AWE_FX_LFO1_VOLUME, /* BYTE: up TREMFRQ */
-/*17*/ AWE_FX_LFO1_PITCH, /* BYTE: up FMMOD */
-/*18*/ AWE_FX_LFO1_CUTOFF, /* BYTE: lo FMMOD */
-
-/* LFO2 (vibrato) parameters */
-/*19*/ AWE_FX_LFO2_DELAY, /* WORD: LFO2VAL */
-/*20*/ AWE_FX_LFO2_FREQ, /* BYTE: lo FM2FRQ2 */
-/*21*/ AWE_FX_LFO2_PITCH, /* BYTE: up FM2FRQ2 */
-
-/* Other overall effect parameters */
-/*22*/ AWE_FX_INIT_PITCH, /* SHORT: pitch offset */
-/*23*/ AWE_FX_CHORUS, /* BYTE: chorus effects send (0-255) */
-/*24*/ AWE_FX_REVERB, /* BYTE: reverb effects send (0-255) */
-/*25*/ AWE_FX_CUTOFF, /* BYTE: up IFATN */
-/*26*/ AWE_FX_FILTERQ, /* BYTE: up CCCA */
-
-/* Sample / loop offset changes */
-/*27*/ AWE_FX_SAMPLE_START, /* SHORT: offset */
-/*28*/ AWE_FX_LOOP_START, /* SHORT: offset */
-/*29*/ AWE_FX_LOOP_END, /* SHORT: offset */
-/*30*/ AWE_FX_COARSE_SAMPLE_START, /* SHORT: upper word offset */
-/*31*/ AWE_FX_COARSE_LOOP_START, /* SHORT: upper word offset */
-/*32*/ AWE_FX_COARSE_LOOP_END, /* SHORT: upper word offset */
-/*33*/ AWE_FX_ATTEN, /* BYTE: lo IFATN */
-
- AWE_FX_END,
-};
-
-#endif /* AWE_VOICE_H */
diff --git a/include/linux/byteorder/generic.h b/include/linux/byteorder/generic.h
index e86e4a93837..3dc715b0250 100644
--- a/include/linux/byteorder/generic.h
+++ b/include/linux/byteorder/generic.h
@@ -124,19 +124,8 @@
#define be32_to_cpus __be32_to_cpus
#define cpu_to_be16s __cpu_to_be16s
#define be16_to_cpus __be16_to_cpus
-#endif
-
-#if defined(__KERNEL__)
/*
- * Handle ntohl and suches. These have various compatibility
- * issues - like we want to give the prototype even though we
- * also have a macro for them in case some strange program
- * wants to take the address of the thing or something..
- *
- * Note that these used to return a "long" in libc5, even though
- * long is often 64-bit these days.. Thus the casts.
- *
* They have to be macros in order to do the constant folding
* correctly - if the argument is passed into an inline function
* it is no longer constant according to gcc.
@@ -147,17 +136,6 @@
#undef htonl
#undef htons
-/*
- * Do the prototypes. Somebody might want to take the
- * address or some such sick thing..
- */
-extern __u32 ntohl(__be32);
-extern __be32 htonl(__u32);
-extern __u16 ntohs(__be16);
-extern __be16 htons(__u16);
-
-#if defined(__GNUC__) && defined(__OPTIMIZE__)
-
#define ___htonl(x) __cpu_to_be32(x)
#define ___htons(x) __cpu_to_be16(x)
#define ___ntohl(x) __be32_to_cpu(x)
@@ -168,9 +146,6 @@ extern __be16 htons(__u16);
#define htons(x) ___htons(x)
#define ntohs(x) ___ntohs(x)
-#endif /* OPTIMIZE */
-
#endif /* KERNEL */
-
#endif /* _LINUX_BYTEORDER_GENERIC_H */
diff --git a/include/linux/byteorder/swab.h b/include/linux/byteorder/swab.h
index 25f7f32883e..142134ff164 100644
--- a/include/linux/byteorder/swab.h
+++ b/include/linux/byteorder/swab.h
@@ -10,6 +10,10 @@
* separated swab functions from cpu_to_XX,
* to clean up support for bizarre-endian architectures.
*
+ * Trent Piepho <xyzzy@speakeasy.org> 2007114
+ * make constant-folding work, provide C versions that
+ * gcc can optimize better, explain different versions
+ *
* See asm-i386/byteorder.h and suches for examples of how to provide
* architecture-dependent optimized versions
*
@@ -17,40 +21,66 @@
#include <linux/compiler.h>
+/* Functions/macros defined, there are a lot:
+ *
+ * ___swabXX
+ * Generic C versions of the swab functions.
+ *
+ * ___constant_swabXX
+ * C versions that gcc can fold into a compile-time constant when
+ * the argument is a compile-time constant.
+ *
+ * __arch__swabXX[sp]?
+ * Architecture optimized versions of all the swab functions
+ * (including the s and p versions). These can be defined in
+ * asm-arch/byteorder.h. Any which are not, are defined here.
+ * __arch__swabXXs() is defined in terms of __arch__swabXXp(), which
+ * is defined in terms of __arch__swabXX(), which is in turn defined
+ * in terms of ___swabXX(x).
+ * These must be macros. They may be unsafe for arguments with
+ * side-effects.
+ *
+ * __fswabXX
+ * Inline function versions of the __arch__ macros. These _are_ safe
+ * if the arguments have side-effects. Note there are no s and p
+ * versions of these.
+ *
+ * __swabXX[sp]
+ * These are the ones you should actually use. The __swabXX versions
+ * will be a constant given a constant argument and use the arch
+ * specific code (if any) for non-constant arguments. The s and p
+ * versions always use the arch specific code (constant folding
+ * doesn't apply). They are safe to use with arguments with
+ * side-effects.
+ *
+ * swabXX[sp]
+ * Nicknames for __swabXX[sp] to use in the kernel.
+ */
+
/* casts are necessary for constants, because we never know for sure
* how U/UL/ULL map to __u16, __u32, __u64. At least not in a portable way.
*/
-#define ___swab16(x) \
-({ \
- __u16 __x = (x); \
- ((__u16)( \
- (((__u16)(__x) & (__u16)0x00ffU) << 8) | \
- (((__u16)(__x) & (__u16)0xff00U) >> 8) )); \
-})
-#define ___swab32(x) \
-({ \
- __u32 __x = (x); \
- ((__u32)( \
- (((__u32)(__x) & (__u32)0x000000ffUL) << 24) | \
- (((__u32)(__x) & (__u32)0x0000ff00UL) << 8) | \
- (((__u32)(__x) & (__u32)0x00ff0000UL) >> 8) | \
- (((__u32)(__x) & (__u32)0xff000000UL) >> 24) )); \
-})
-
-#define ___swab64(x) \
-({ \
- __u64 __x = (x); \
- ((__u64)( \
- (__u64)(((__u64)(__x) & (__u64)0x00000000000000ffULL) << 56) | \
- (__u64)(((__u64)(__x) & (__u64)0x000000000000ff00ULL) << 40) | \
- (__u64)(((__u64)(__x) & (__u64)0x0000000000ff0000ULL) << 24) | \
- (__u64)(((__u64)(__x) & (__u64)0x00000000ff000000ULL) << 8) | \
- (__u64)(((__u64)(__x) & (__u64)0x000000ff00000000ULL) >> 8) | \
- (__u64)(((__u64)(__x) & (__u64)0x0000ff0000000000ULL) >> 24) | \
- (__u64)(((__u64)(__x) & (__u64)0x00ff000000000000ULL) >> 40) | \
- (__u64)(((__u64)(__x) & (__u64)0xff00000000000000ULL) >> 56) )); \
-})
+static __inline__ __attribute_const__ __u16 ___swab16(__u16 x)
+{
+ return x<<8 | x>>8;
+}
+static __inline__ __attribute_const__ __u32 ___swab32(__u32 x)
+{
+ return x<<24 | x>>24 |
+ (x & (__u32)0x0000ff00UL)<<8 |
+ (x & (__u32)0x00ff0000UL)>>8;
+}
+static __inline__ __attribute_const__ __u64 ___swab64(__u64 x)
+{
+ return x<<56 | x>>56 |
+ (x & (__u64)0x000000000000ff00ULL)<<40 |
+ (x & (__u64)0x0000000000ff0000ULL)<<24 |
+ (x & (__u64)0x00000000ff000000ULL)<< 8 |
+ (x & (__u64)0x000000ff00000000ULL)>> 8 |
+ (x & (__u64)0x0000ff0000000000ULL)>>24 |
+ (x & (__u64)0x00ff000000000000ULL)>>40;
+}
#define ___constant_swab16(x) \
((__u16)( \
@@ -77,13 +107,13 @@
* provide defaults when no architecture-specific optimization is detected
*/
#ifndef __arch__swab16
-# define __arch__swab16(x) ({ __u16 __tmp = (x) ; ___swab16(__tmp); })
+# define __arch__swab16(x) ___swab16(x)
#endif
#ifndef __arch__swab32
-# define __arch__swab32(x) ({ __u32 __tmp = (x) ; ___swab32(__tmp); })
+# define __arch__swab32(x) ___swab32(x)
#endif
#ifndef __arch__swab64
-# define __arch__swab64(x) ({ __u64 __tmp = (x) ; ___swab64(__tmp); })
+# define __arch__swab64(x) ___swab64(x)
#endif
#ifndef __arch__swab16p
@@ -97,13 +127,13 @@
#endif
#ifndef __arch__swab16s
-# define __arch__swab16s(x) do { *(x) = __arch__swab16p((x)); } while (0)
+# define __arch__swab16s(x) ((void)(*(x) = __arch__swab16p(x)))
#endif
#ifndef __arch__swab32s
-# define __arch__swab32s(x) do { *(x) = __arch__swab32p((x)); } while (0)
+# define __arch__swab32s(x) ((void)(*(x) = __arch__swab32p(x)))
#endif
#ifndef __arch__swab64s
-# define __arch__swab64s(x) do { *(x) = __arch__swab64p((x)); } while (0)
+# define __arch__swab64s(x) ((void)(*(x) = __arch__swab64p(x)))
#endif
@@ -113,15 +143,15 @@
#if defined(__GNUC__) && defined(__OPTIMIZE__)
# define __swab16(x) \
(__builtin_constant_p((__u16)(x)) ? \
- ___swab16((x)) : \
+ ___constant_swab16((x)) : \
__fswab16((x)))
# define __swab32(x) \
(__builtin_constant_p((__u32)(x)) ? \
- ___swab32((x)) : \
+ ___constant_swab32((x)) : \
__fswab32((x)))
# define __swab64(x) \
(__builtin_constant_p((__u64)(x)) ? \
- ___swab64((x)) : \
+ ___constant_swab64((x)) : \
__fswab64((x)))
#else
# define __swab16(x) __fswab16(x)
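The net effect of the hunks above: a constant argument such as __swab32(0x12345678) is folded at compile time through ___constant_swab32(), while a run-time value is routed to __fswab32() and, from there, to any arch-optimized __arch__swab32(). A minimal user-space sketch of the same dispatch pattern (the my_/MY_ names are illustrative, not kernel symbols, and __builtin_constant_p() needs gcc or clang):

#include <stdint.h>
#include <stdio.h>

/* pure C fallback, same shape as ___swab32() above */
static inline uint32_t my_swab32(uint32_t x)
{
        return x << 24 | x >> 24 |
               (x & 0x0000ff00u) << 8 |
               (x & 0x00ff0000u) >> 8;
}

/* expression form that the compiler can fold for literal arguments */
#define MY_CONSTANT_SWAB32(x) ((uint32_t)(                      \
        (((uint32_t)(x) & 0x000000ffu) << 24) |                 \
        (((uint32_t)(x) & 0x0000ff00u) <<  8) |                 \
        (((uint32_t)(x) & 0x00ff0000u) >>  8) |                 \
        (((uint32_t)(x) & 0xff000000u) >> 24)))

/* constant argument -> constant result; anything else -> function call */
#define MY_SWAB32(x)                                            \
        (__builtin_constant_p((uint32_t)(x)) ?                  \
                MY_CONSTANT_SWAB32(x) : my_swab32(x))

int main(void)
{
        uint32_t v = 0xdeadbeef;

        /* prints "78563412 efbeadde" */
        printf("%08x %08x\n", (unsigned)MY_SWAB32(0x12345678u),
               (unsigned)MY_SWAB32(v));
        return 0;
}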
diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h
index 4ea7e7bcfaf..8486e78f733 100644
--- a/include/linux/clockchips.h
+++ b/include/linux/clockchips.h
@@ -54,17 +54,17 @@ enum clock_event_nofitiers {
/**
* struct clock_event_device - clock event device descriptor
* @name: ptr to clock event name
- * @hints: usage hints
+ * @features: features
* @max_delta_ns: maximum delta value in ns
* @min_delta_ns: minimum delta value in ns
* @mult: nanosecond to cycles multiplier
* @shift: nanoseconds to cycles divisor (power of two)
* @rating: variable to rate clock event devices
- * @irq: irq number (only for non cpu local devices)
- * @cpumask: cpumask to indicate for which cpus this device works
- * @set_next_event: set next event
+ * @irq: IRQ number (only for non CPU local devices)
+ * @cpumask: cpumask to indicate for which CPUs this device works
+ * @set_next_event: set next event function
* @set_mode: set mode function
- * @evthandler: Assigned by the framework to be called by the low
+ * @event_handler: Assigned by the framework to be called by the low
* level handler of the event source
* @broadcast: function to broadcast events
* @list: list head for the management code
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index daa4940cc0f..2665ca04cf8 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -12,6 +12,7 @@
#include <linux/timex.h>
#include <linux/time.h>
#include <linux/list.h>
+#include <linux/cache.h>
#include <linux/timer.h>
#include <asm/div64.h>
#include <asm/io.h>
@@ -52,6 +53,9 @@ struct clocksource;
* @xtime_interval: Used internally by timekeeping core, please ignore.
*/
struct clocksource {
+ /*
+ * First part of structure is read mostly
+ */
char *name;
struct list_head list;
int rating;
@@ -63,8 +67,15 @@ struct clocksource {
cycle_t (*vread)(void);
/* timekeeping specific data, ignore */
- cycle_t cycle_last, cycle_interval;
- u64 xtime_nsec, xtime_interval;
+ cycle_t cycle_interval;
+ u64 xtime_interval;
+ /*
+ * Second part is written at each timer interrupt
+ * Keep it in a different cache line to dirty no
+ * more than one cache line.
+ */
+ cycle_t cycle_last ____cacheline_aligned_in_smp;
+ u64 xtime_nsec;
s64 error;
#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
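The clocksource.h hunk is a cache-line split: the fields written on every timer interrupt (cycle_last, xtime_nsec, error) move behind ____cacheline_aligned_in_smp so they dirty a different line than the read-mostly head of the structure. A rough user-space sketch of the same layout idea, assuming a 64-byte line and substituting C11 alignas for the kernel macro:

#include <stdalign.h>
#include <stdint.h>

#define CACHE_LINE 64   /* assumed; the kernel macro picks the real size */

struct sample_clocksource {
        /* read mostly: set at registration, read on every conversion */
        const char *name;
        uint32_t mult;
        uint32_t shift;
        uint64_t cycle_interval;
        uint64_t xtime_interval;

        /* written each tick: kept on its own line so readers of the
         * fields above do not take a coherence miss every interrupt */
        alignas(CACHE_LINE) uint64_t cycle_last;
        uint64_t xtime_nsec;
        int64_t error;
};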
diff --git a/include/linux/compat_ioctl.h b/include/linux/compat_ioctl.h
deleted file mode 100644
index c26c3adcfac..00000000000
--- a/include/linux/compat_ioctl.h
+++ /dev/null
@@ -1,830 +0,0 @@
-/* List here explicitly which ioctl's are known to have
- * compatible types passed or none at all... Please include
- * only stuff that is compatible on *all architectures*.
- */
-
-COMPATIBLE_IOCTL(0x4B50) /* KDGHWCLK - not in the kernel, but don't complain */
-COMPATIBLE_IOCTL(0x4B51) /* KDSHWCLK - not in the kernel, but don't complain */
-
-/* Big T */
-COMPATIBLE_IOCTL(TCGETA)
-COMPATIBLE_IOCTL(TCSETA)
-COMPATIBLE_IOCTL(TCSETAW)
-COMPATIBLE_IOCTL(TCSETAF)
-COMPATIBLE_IOCTL(TCSBRK)
-ULONG_IOCTL(TCSBRKP)
-COMPATIBLE_IOCTL(TCXONC)
-COMPATIBLE_IOCTL(TCFLSH)
-COMPATIBLE_IOCTL(TCGETS)
-COMPATIBLE_IOCTL(TCSETS)
-COMPATIBLE_IOCTL(TCSETSW)
-COMPATIBLE_IOCTL(TCSETSF)
-COMPATIBLE_IOCTL(TIOCLINUX)
-COMPATIBLE_IOCTL(TIOCSBRK)
-COMPATIBLE_IOCTL(TIOCCBRK)
-ULONG_IOCTL(TIOCMIWAIT)
-COMPATIBLE_IOCTL(TIOCGICOUNT)
-/* Little t */
-COMPATIBLE_IOCTL(TIOCGETD)
-COMPATIBLE_IOCTL(TIOCSETD)
-COMPATIBLE_IOCTL(TIOCEXCL)
-COMPATIBLE_IOCTL(TIOCNXCL)
-COMPATIBLE_IOCTL(TIOCCONS)
-COMPATIBLE_IOCTL(TIOCGSOFTCAR)
-COMPATIBLE_IOCTL(TIOCSSOFTCAR)
-COMPATIBLE_IOCTL(TIOCSWINSZ)
-COMPATIBLE_IOCTL(TIOCGWINSZ)
-COMPATIBLE_IOCTL(TIOCMGET)
-COMPATIBLE_IOCTL(TIOCMBIC)
-COMPATIBLE_IOCTL(TIOCMBIS)
-COMPATIBLE_IOCTL(TIOCMSET)
-COMPATIBLE_IOCTL(TIOCPKT)
-COMPATIBLE_IOCTL(TIOCNOTTY)
-COMPATIBLE_IOCTL(TIOCSTI)
-COMPATIBLE_IOCTL(TIOCOUTQ)
-COMPATIBLE_IOCTL(TIOCSPGRP)
-COMPATIBLE_IOCTL(TIOCGPGRP)
-ULONG_IOCTL(TIOCSCTTY)
-COMPATIBLE_IOCTL(TIOCGPTN)
-COMPATIBLE_IOCTL(TIOCSPTLCK)
-COMPATIBLE_IOCTL(TIOCSERGETLSR)
-/* Little f */
-COMPATIBLE_IOCTL(FIOCLEX)
-COMPATIBLE_IOCTL(FIONCLEX)
-COMPATIBLE_IOCTL(FIOASYNC)
-COMPATIBLE_IOCTL(FIONBIO)
-COMPATIBLE_IOCTL(FIONREAD) /* This is also TIOCINQ */
-/* 0x00 */
-COMPATIBLE_IOCTL(FIBMAP)
-COMPATIBLE_IOCTL(FIGETBSZ)
-/* 0x03 -- HD/IDE ioctl's used by hdparm and friends.
- * Some need translations, these do not.
- */
-COMPATIBLE_IOCTL(HDIO_GET_IDENTITY)
-COMPATIBLE_IOCTL(HDIO_DRIVE_TASK)
-COMPATIBLE_IOCTL(HDIO_DRIVE_CMD)
-ULONG_IOCTL(HDIO_SET_MULTCOUNT)
-ULONG_IOCTL(HDIO_SET_UNMASKINTR)
-ULONG_IOCTL(HDIO_SET_KEEPSETTINGS)
-ULONG_IOCTL(HDIO_SET_32BIT)
-ULONG_IOCTL(HDIO_SET_NOWERR)
-ULONG_IOCTL(HDIO_SET_DMA)
-ULONG_IOCTL(HDIO_SET_PIO_MODE)
-ULONG_IOCTL(HDIO_SET_NICE)
-ULONG_IOCTL(HDIO_SET_WCACHE)
-ULONG_IOCTL(HDIO_SET_ACOUSTIC)
-ULONG_IOCTL(HDIO_SET_BUSSTATE)
-ULONG_IOCTL(HDIO_SET_ADDRESS)
-COMPATIBLE_IOCTL(HDIO_SCAN_HWIF)
-/* 0x330 is reserved -- it used to be HDIO_GETGEO_BIG */
-COMPATIBLE_IOCTL(0x330)
-/* 0x02 -- Floppy ioctls */
-COMPATIBLE_IOCTL(FDMSGON)
-COMPATIBLE_IOCTL(FDMSGOFF)
-COMPATIBLE_IOCTL(FDSETEMSGTRESH)
-COMPATIBLE_IOCTL(FDFLUSH)
-COMPATIBLE_IOCTL(FDWERRORCLR)
-COMPATIBLE_IOCTL(FDSETMAXERRS)
-COMPATIBLE_IOCTL(FDGETMAXERRS)
-COMPATIBLE_IOCTL(FDGETDRVTYP)
-COMPATIBLE_IOCTL(FDEJECT)
-COMPATIBLE_IOCTL(FDCLRPRM)
-COMPATIBLE_IOCTL(FDFMTBEG)
-COMPATIBLE_IOCTL(FDFMTEND)
-COMPATIBLE_IOCTL(FDRESET)
-COMPATIBLE_IOCTL(FDTWADDLE)
-COMPATIBLE_IOCTL(FDFMTTRK)
-COMPATIBLE_IOCTL(FDRAWCMD)
-/* 0x12 */
-#ifdef CONFIG_BLOCK
-COMPATIBLE_IOCTL(BLKRASET)
-COMPATIBLE_IOCTL(BLKROSET)
-COMPATIBLE_IOCTL(BLKROGET)
-COMPATIBLE_IOCTL(BLKRRPART)
-COMPATIBLE_IOCTL(BLKFLSBUF)
-COMPATIBLE_IOCTL(BLKSECTSET)
-COMPATIBLE_IOCTL(BLKSSZGET)
-COMPATIBLE_IOCTL(BLKTRACESTART)
-COMPATIBLE_IOCTL(BLKTRACESTOP)
-COMPATIBLE_IOCTL(BLKTRACESETUP)
-COMPATIBLE_IOCTL(BLKTRACETEARDOWN)
-ULONG_IOCTL(BLKRASET)
-ULONG_IOCTL(BLKFRASET)
-#endif
-/* RAID */
-COMPATIBLE_IOCTL(RAID_VERSION)
-COMPATIBLE_IOCTL(GET_ARRAY_INFO)
-COMPATIBLE_IOCTL(GET_DISK_INFO)
-COMPATIBLE_IOCTL(PRINT_RAID_DEBUG)
-COMPATIBLE_IOCTL(RAID_AUTORUN)
-COMPATIBLE_IOCTL(CLEAR_ARRAY)
-COMPATIBLE_IOCTL(ADD_NEW_DISK)
-ULONG_IOCTL(HOT_REMOVE_DISK)
-COMPATIBLE_IOCTL(SET_ARRAY_INFO)
-COMPATIBLE_IOCTL(SET_DISK_INFO)
-COMPATIBLE_IOCTL(WRITE_RAID_INFO)
-COMPATIBLE_IOCTL(UNPROTECT_ARRAY)
-COMPATIBLE_IOCTL(PROTECT_ARRAY)
-ULONG_IOCTL(HOT_ADD_DISK)
-ULONG_IOCTL(SET_DISK_FAULTY)
-COMPATIBLE_IOCTL(RUN_ARRAY)
-COMPATIBLE_IOCTL(STOP_ARRAY)
-COMPATIBLE_IOCTL(STOP_ARRAY_RO)
-COMPATIBLE_IOCTL(RESTART_ARRAY_RW)
-COMPATIBLE_IOCTL(GET_BITMAP_FILE)
-ULONG_IOCTL(SET_BITMAP_FILE)
-/* DM */
-COMPATIBLE_IOCTL(DM_VERSION_32)
-COMPATIBLE_IOCTL(DM_REMOVE_ALL_32)
-COMPATIBLE_IOCTL(DM_LIST_DEVICES_32)
-COMPATIBLE_IOCTL(DM_DEV_CREATE_32)
-COMPATIBLE_IOCTL(DM_DEV_REMOVE_32)
-COMPATIBLE_IOCTL(DM_DEV_RENAME_32)
-COMPATIBLE_IOCTL(DM_DEV_SUSPEND_32)
-COMPATIBLE_IOCTL(DM_DEV_STATUS_32)
-COMPATIBLE_IOCTL(DM_DEV_WAIT_32)
-COMPATIBLE_IOCTL(DM_TABLE_LOAD_32)
-COMPATIBLE_IOCTL(DM_TABLE_CLEAR_32)
-COMPATIBLE_IOCTL(DM_TABLE_DEPS_32)
-COMPATIBLE_IOCTL(DM_TABLE_STATUS_32)
-COMPATIBLE_IOCTL(DM_LIST_VERSIONS_32)
-COMPATIBLE_IOCTL(DM_TARGET_MSG_32)
-COMPATIBLE_IOCTL(DM_DEV_SET_GEOMETRY_32)
-COMPATIBLE_IOCTL(DM_VERSION)
-COMPATIBLE_IOCTL(DM_REMOVE_ALL)
-COMPATIBLE_IOCTL(DM_LIST_DEVICES)
-COMPATIBLE_IOCTL(DM_DEV_CREATE)
-COMPATIBLE_IOCTL(DM_DEV_REMOVE)
-COMPATIBLE_IOCTL(DM_DEV_RENAME)
-COMPATIBLE_IOCTL(DM_DEV_SUSPEND)
-COMPATIBLE_IOCTL(DM_DEV_STATUS)
-COMPATIBLE_IOCTL(DM_DEV_WAIT)
-COMPATIBLE_IOCTL(DM_TABLE_LOAD)
-COMPATIBLE_IOCTL(DM_TABLE_CLEAR)
-COMPATIBLE_IOCTL(DM_TABLE_DEPS)
-COMPATIBLE_IOCTL(DM_TABLE_STATUS)
-COMPATIBLE_IOCTL(DM_LIST_VERSIONS)
-COMPATIBLE_IOCTL(DM_TARGET_MSG)
-COMPATIBLE_IOCTL(DM_DEV_SET_GEOMETRY)
-/* Big K */
-COMPATIBLE_IOCTL(PIO_FONT)
-COMPATIBLE_IOCTL(GIO_FONT)
-ULONG_IOCTL(KDSIGACCEPT)
-COMPATIBLE_IOCTL(KDGETKEYCODE)
-COMPATIBLE_IOCTL(KDSETKEYCODE)
-ULONG_IOCTL(KIOCSOUND)
-ULONG_IOCTL(KDMKTONE)
-COMPATIBLE_IOCTL(KDGKBTYPE)
-ULONG_IOCTL(KDSETMODE)
-COMPATIBLE_IOCTL(KDGETMODE)
-ULONG_IOCTL(KDSKBMODE)
-COMPATIBLE_IOCTL(KDGKBMODE)
-ULONG_IOCTL(KDSKBMETA)
-COMPATIBLE_IOCTL(KDGKBMETA)
-COMPATIBLE_IOCTL(KDGKBENT)
-COMPATIBLE_IOCTL(KDSKBENT)
-COMPATIBLE_IOCTL(KDGKBSENT)
-COMPATIBLE_IOCTL(KDSKBSENT)
-COMPATIBLE_IOCTL(KDGKBDIACR)
-COMPATIBLE_IOCTL(KDSKBDIACR)
-COMPATIBLE_IOCTL(KDKBDREP)
-COMPATIBLE_IOCTL(KDGKBLED)
-ULONG_IOCTL(KDSKBLED)
-COMPATIBLE_IOCTL(KDGETLED)
-ULONG_IOCTL(KDSETLED)
-COMPATIBLE_IOCTL(GIO_SCRNMAP)
-COMPATIBLE_IOCTL(PIO_SCRNMAP)
-COMPATIBLE_IOCTL(GIO_UNISCRNMAP)
-COMPATIBLE_IOCTL(PIO_UNISCRNMAP)
-COMPATIBLE_IOCTL(PIO_FONTRESET)
-COMPATIBLE_IOCTL(PIO_UNIMAPCLR)
-/* Big S */
-COMPATIBLE_IOCTL(SCSI_IOCTL_GET_IDLUN)
-COMPATIBLE_IOCTL(SCSI_IOCTL_DOORLOCK)
-COMPATIBLE_IOCTL(SCSI_IOCTL_DOORUNLOCK)
-COMPATIBLE_IOCTL(SCSI_IOCTL_TEST_UNIT_READY)
-COMPATIBLE_IOCTL(SCSI_IOCTL_GET_BUS_NUMBER)
-COMPATIBLE_IOCTL(SCSI_IOCTL_SEND_COMMAND)
-COMPATIBLE_IOCTL(SCSI_IOCTL_PROBE_HOST)
-COMPATIBLE_IOCTL(SCSI_IOCTL_GET_PCI)
-/* Big T */
-COMPATIBLE_IOCTL(TUNSETNOCSUM)
-COMPATIBLE_IOCTL(TUNSETDEBUG)
-COMPATIBLE_IOCTL(TUNSETPERSIST)
-COMPATIBLE_IOCTL(TUNSETOWNER)
-/* Big V */
-COMPATIBLE_IOCTL(VT_SETMODE)
-COMPATIBLE_IOCTL(VT_GETMODE)
-COMPATIBLE_IOCTL(VT_GETSTATE)
-COMPATIBLE_IOCTL(VT_OPENQRY)
-ULONG_IOCTL(VT_ACTIVATE)
-ULONG_IOCTL(VT_WAITACTIVE)
-ULONG_IOCTL(VT_RELDISP)
-ULONG_IOCTL(VT_DISALLOCATE)
-COMPATIBLE_IOCTL(VT_RESIZE)
-COMPATIBLE_IOCTL(VT_RESIZEX)
-COMPATIBLE_IOCTL(VT_LOCKSWITCH)
-COMPATIBLE_IOCTL(VT_UNLOCKSWITCH)
-COMPATIBLE_IOCTL(VT_GETHIFONTMASK)
-/* Little p (/dev/rtc, /dev/envctrl, etc.) */
-COMPATIBLE_IOCTL(RTC_AIE_ON)
-COMPATIBLE_IOCTL(RTC_AIE_OFF)
-COMPATIBLE_IOCTL(RTC_UIE_ON)
-COMPATIBLE_IOCTL(RTC_UIE_OFF)
-COMPATIBLE_IOCTL(RTC_PIE_ON)
-COMPATIBLE_IOCTL(RTC_PIE_OFF)
-COMPATIBLE_IOCTL(RTC_WIE_ON)
-COMPATIBLE_IOCTL(RTC_WIE_OFF)
-COMPATIBLE_IOCTL(RTC_ALM_SET)
-COMPATIBLE_IOCTL(RTC_ALM_READ)
-COMPATIBLE_IOCTL(RTC_RD_TIME)
-COMPATIBLE_IOCTL(RTC_SET_TIME)
-COMPATIBLE_IOCTL(RTC_WKALM_SET)
-COMPATIBLE_IOCTL(RTC_WKALM_RD)
-/*
- * These two are only for the sbus rtc driver, but
- * hwclock tries them on every rtc device first when
- * running on sparc. On other architectures the entries
- * are useless but harmless.
- */
-COMPATIBLE_IOCTL(_IOR('p', 20, int[7])) /* RTCGET */
-COMPATIBLE_IOCTL(_IOW('p', 21, int[7])) /* RTCSET */
-/* Little m */
-COMPATIBLE_IOCTL(MTIOCTOP)
-/* Socket level stuff */
-COMPATIBLE_IOCTL(FIOQSIZE)
-COMPATIBLE_IOCTL(FIOSETOWN)
-COMPATIBLE_IOCTL(SIOCSPGRP)
-COMPATIBLE_IOCTL(FIOGETOWN)
-COMPATIBLE_IOCTL(SIOCGPGRP)
-COMPATIBLE_IOCTL(SIOCATMARK)
-COMPATIBLE_IOCTL(SIOCSIFLINK)
-COMPATIBLE_IOCTL(SIOCSIFENCAP)
-COMPATIBLE_IOCTL(SIOCGIFENCAP)
-COMPATIBLE_IOCTL(SIOCSIFNAME)
-COMPATIBLE_IOCTL(SIOCSARP)
-COMPATIBLE_IOCTL(SIOCGARP)
-COMPATIBLE_IOCTL(SIOCDARP)
-COMPATIBLE_IOCTL(SIOCSRARP)
-COMPATIBLE_IOCTL(SIOCGRARP)
-COMPATIBLE_IOCTL(SIOCDRARP)
-COMPATIBLE_IOCTL(SIOCADDDLCI)
-COMPATIBLE_IOCTL(SIOCDELDLCI)
-COMPATIBLE_IOCTL(SIOCGMIIPHY)
-COMPATIBLE_IOCTL(SIOCGMIIREG)
-COMPATIBLE_IOCTL(SIOCSMIIREG)
-COMPATIBLE_IOCTL(SIOCGIFVLAN)
-COMPATIBLE_IOCTL(SIOCSIFVLAN)
-COMPATIBLE_IOCTL(SIOCBRADDBR)
-COMPATIBLE_IOCTL(SIOCBRDELBR)
-/* SG stuff */
-COMPATIBLE_IOCTL(SG_SET_TIMEOUT)
-COMPATIBLE_IOCTL(SG_GET_TIMEOUT)
-COMPATIBLE_IOCTL(SG_EMULATED_HOST)
-ULONG_IOCTL(SG_SET_TRANSFORM)
-COMPATIBLE_IOCTL(SG_GET_TRANSFORM)
-COMPATIBLE_IOCTL(SG_SET_RESERVED_SIZE)
-COMPATIBLE_IOCTL(SG_GET_RESERVED_SIZE)
-COMPATIBLE_IOCTL(SG_GET_SCSI_ID)
-COMPATIBLE_IOCTL(SG_SET_FORCE_LOW_DMA)
-COMPATIBLE_IOCTL(SG_GET_LOW_DMA)
-COMPATIBLE_IOCTL(SG_SET_FORCE_PACK_ID)
-COMPATIBLE_IOCTL(SG_GET_PACK_ID)
-COMPATIBLE_IOCTL(SG_GET_NUM_WAITING)
-COMPATIBLE_IOCTL(SG_SET_DEBUG)
-COMPATIBLE_IOCTL(SG_GET_SG_TABLESIZE)
-COMPATIBLE_IOCTL(SG_GET_COMMAND_Q)
-COMPATIBLE_IOCTL(SG_SET_COMMAND_Q)
-COMPATIBLE_IOCTL(SG_GET_VERSION_NUM)
-COMPATIBLE_IOCTL(SG_NEXT_CMD_LEN)
-COMPATIBLE_IOCTL(SG_SCSI_RESET)
-COMPATIBLE_IOCTL(SG_GET_REQUEST_TABLE)
-COMPATIBLE_IOCTL(SG_SET_KEEP_ORPHAN)
-COMPATIBLE_IOCTL(SG_GET_KEEP_ORPHAN)
-/* PPP stuff */
-COMPATIBLE_IOCTL(PPPIOCGFLAGS)
-COMPATIBLE_IOCTL(PPPIOCSFLAGS)
-COMPATIBLE_IOCTL(PPPIOCGASYNCMAP)
-COMPATIBLE_IOCTL(PPPIOCSASYNCMAP)
-COMPATIBLE_IOCTL(PPPIOCGUNIT)
-COMPATIBLE_IOCTL(PPPIOCGRASYNCMAP)
-COMPATIBLE_IOCTL(PPPIOCSRASYNCMAP)
-COMPATIBLE_IOCTL(PPPIOCGMRU)
-COMPATIBLE_IOCTL(PPPIOCSMRU)
-COMPATIBLE_IOCTL(PPPIOCSMAXCID)
-COMPATIBLE_IOCTL(PPPIOCGXASYNCMAP)
-COMPATIBLE_IOCTL(PPPIOCSXASYNCMAP)
-COMPATIBLE_IOCTL(PPPIOCXFERUNIT)
-/* PPPIOCSCOMPRESS is translated */
-COMPATIBLE_IOCTL(PPPIOCGNPMODE)
-COMPATIBLE_IOCTL(PPPIOCSNPMODE)
-COMPATIBLE_IOCTL(PPPIOCGDEBUG)
-COMPATIBLE_IOCTL(PPPIOCSDEBUG)
-/* PPPIOCSPASS is translated */
-/* PPPIOCSACTIVE is translated */
-/* PPPIOCGIDLE is translated */
-COMPATIBLE_IOCTL(PPPIOCNEWUNIT)
-COMPATIBLE_IOCTL(PPPIOCATTACH)
-COMPATIBLE_IOCTL(PPPIOCDETACH)
-COMPATIBLE_IOCTL(PPPIOCSMRRU)
-COMPATIBLE_IOCTL(PPPIOCCONNECT)
-COMPATIBLE_IOCTL(PPPIOCDISCONN)
-COMPATIBLE_IOCTL(PPPIOCATTCHAN)
-COMPATIBLE_IOCTL(PPPIOCGCHAN)
-/* PPPOX */
-COMPATIBLE_IOCTL(PPPOEIOCSFWD)
-COMPATIBLE_IOCTL(PPPOEIOCDFWD)
-/* LP */
-COMPATIBLE_IOCTL(LPGETSTATUS)
-/* ppdev */
-COMPATIBLE_IOCTL(PPSETMODE)
-COMPATIBLE_IOCTL(PPRSTATUS)
-COMPATIBLE_IOCTL(PPRCONTROL)
-COMPATIBLE_IOCTL(PPWCONTROL)
-COMPATIBLE_IOCTL(PPFCONTROL)
-COMPATIBLE_IOCTL(PPRDATA)
-COMPATIBLE_IOCTL(PPWDATA)
-COMPATIBLE_IOCTL(PPCLAIM)
-COMPATIBLE_IOCTL(PPRELEASE)
-COMPATIBLE_IOCTL(PPYIELD)
-COMPATIBLE_IOCTL(PPEXCL)
-COMPATIBLE_IOCTL(PPDATADIR)
-COMPATIBLE_IOCTL(PPNEGOT)
-COMPATIBLE_IOCTL(PPWCTLONIRQ)
-COMPATIBLE_IOCTL(PPCLRIRQ)
-COMPATIBLE_IOCTL(PPSETPHASE)
-COMPATIBLE_IOCTL(PPGETMODES)
-COMPATIBLE_IOCTL(PPGETMODE)
-COMPATIBLE_IOCTL(PPGETPHASE)
-COMPATIBLE_IOCTL(PPGETFLAGS)
-COMPATIBLE_IOCTL(PPSETFLAGS)
-/* CDROM stuff */
-COMPATIBLE_IOCTL(CDROMPAUSE)
-COMPATIBLE_IOCTL(CDROMRESUME)
-COMPATIBLE_IOCTL(CDROMPLAYMSF)
-COMPATIBLE_IOCTL(CDROMPLAYTRKIND)
-COMPATIBLE_IOCTL(CDROMREADTOCHDR)
-COMPATIBLE_IOCTL(CDROMREADTOCENTRY)
-COMPATIBLE_IOCTL(CDROMSTOP)
-COMPATIBLE_IOCTL(CDROMSTART)
-COMPATIBLE_IOCTL(CDROMEJECT)
-COMPATIBLE_IOCTL(CDROMVOLCTRL)
-COMPATIBLE_IOCTL(CDROMSUBCHNL)
-ULONG_IOCTL(CDROMEJECT_SW)
-COMPATIBLE_IOCTL(CDROMMULTISESSION)
-COMPATIBLE_IOCTL(CDROM_GET_MCN)
-COMPATIBLE_IOCTL(CDROMRESET)
-COMPATIBLE_IOCTL(CDROMVOLREAD)
-COMPATIBLE_IOCTL(CDROMSEEK)
-COMPATIBLE_IOCTL(CDROMPLAYBLK)
-COMPATIBLE_IOCTL(CDROMCLOSETRAY)
-ULONG_IOCTL(CDROM_SET_OPTIONS)
-ULONG_IOCTL(CDROM_CLEAR_OPTIONS)
-ULONG_IOCTL(CDROM_SELECT_SPEED)
-ULONG_IOCTL(CDROM_SELECT_DISC)
-ULONG_IOCTL(CDROM_MEDIA_CHANGED)
-ULONG_IOCTL(CDROM_DRIVE_STATUS)
-COMPATIBLE_IOCTL(CDROM_DISC_STATUS)
-COMPATIBLE_IOCTL(CDROM_CHANGER_NSLOTS)
-ULONG_IOCTL(CDROM_LOCKDOOR)
-ULONG_IOCTL(CDROM_DEBUG)
-COMPATIBLE_IOCTL(CDROM_GET_CAPABILITY)
-/* Ignore cdrom.h about these next 5 ioctls, they absolutely do
- * not take a struct cdrom_read, instead they take a struct cdrom_msf
- * which is compatible.
- */
-COMPATIBLE_IOCTL(CDROMREADMODE2)
-COMPATIBLE_IOCTL(CDROMREADMODE1)
-COMPATIBLE_IOCTL(CDROMREADRAW)
-COMPATIBLE_IOCTL(CDROMREADCOOKED)
-COMPATIBLE_IOCTL(CDROMREADALL)
-/* DVD ioctls */
-COMPATIBLE_IOCTL(DVD_READ_STRUCT)
-COMPATIBLE_IOCTL(DVD_WRITE_STRUCT)
-COMPATIBLE_IOCTL(DVD_AUTH)
-/* pktcdvd */
-COMPATIBLE_IOCTL(PACKET_CTRL_CMD)
-/* Big A */
-/* sparc only */
-/* Big Q for sound/OSS */
-COMPATIBLE_IOCTL(SNDCTL_SEQ_RESET)
-COMPATIBLE_IOCTL(SNDCTL_SEQ_SYNC)
-COMPATIBLE_IOCTL(SNDCTL_SYNTH_INFO)
-COMPATIBLE_IOCTL(SNDCTL_SEQ_CTRLRATE)
-COMPATIBLE_IOCTL(SNDCTL_SEQ_GETOUTCOUNT)
-COMPATIBLE_IOCTL(SNDCTL_SEQ_GETINCOUNT)
-COMPATIBLE_IOCTL(SNDCTL_SEQ_PERCMODE)
-COMPATIBLE_IOCTL(SNDCTL_FM_LOAD_INSTR)
-COMPATIBLE_IOCTL(SNDCTL_SEQ_TESTMIDI)
-COMPATIBLE_IOCTL(SNDCTL_SEQ_RESETSAMPLES)
-COMPATIBLE_IOCTL(SNDCTL_SEQ_NRSYNTHS)
-COMPATIBLE_IOCTL(SNDCTL_SEQ_NRMIDIS)
-COMPATIBLE_IOCTL(SNDCTL_MIDI_INFO)
-COMPATIBLE_IOCTL(SNDCTL_SEQ_THRESHOLD)
-COMPATIBLE_IOCTL(SNDCTL_SYNTH_MEMAVL)
-COMPATIBLE_IOCTL(SNDCTL_FM_4OP_ENABLE)
-COMPATIBLE_IOCTL(SNDCTL_SEQ_PANIC)
-COMPATIBLE_IOCTL(SNDCTL_SEQ_OUTOFBAND)
-COMPATIBLE_IOCTL(SNDCTL_SEQ_GETTIME)
-COMPATIBLE_IOCTL(SNDCTL_SYNTH_ID)
-COMPATIBLE_IOCTL(SNDCTL_SYNTH_CONTROL)
-COMPATIBLE_IOCTL(SNDCTL_SYNTH_REMOVESAMPLE)
-/* Big T for sound/OSS */
-COMPATIBLE_IOCTL(SNDCTL_TMR_TIMEBASE)
-COMPATIBLE_IOCTL(SNDCTL_TMR_START)
-COMPATIBLE_IOCTL(SNDCTL_TMR_STOP)
-COMPATIBLE_IOCTL(SNDCTL_TMR_CONTINUE)
-COMPATIBLE_IOCTL(SNDCTL_TMR_TEMPO)
-COMPATIBLE_IOCTL(SNDCTL_TMR_SOURCE)
-COMPATIBLE_IOCTL(SNDCTL_TMR_METRONOME)
-COMPATIBLE_IOCTL(SNDCTL_TMR_SELECT)
-/* Little m for sound/OSS */
-COMPATIBLE_IOCTL(SNDCTL_MIDI_PRETIME)
-COMPATIBLE_IOCTL(SNDCTL_MIDI_MPUMODE)
-COMPATIBLE_IOCTL(SNDCTL_MIDI_MPUCMD)
-/* Big P for sound/OSS */
-COMPATIBLE_IOCTL(SNDCTL_DSP_RESET)
-COMPATIBLE_IOCTL(SNDCTL_DSP_SYNC)
-COMPATIBLE_IOCTL(SNDCTL_DSP_SPEED)
-COMPATIBLE_IOCTL(SNDCTL_DSP_STEREO)
-COMPATIBLE_IOCTL(SNDCTL_DSP_GETBLKSIZE)
-COMPATIBLE_IOCTL(SNDCTL_DSP_CHANNELS)
-COMPATIBLE_IOCTL(SOUND_PCM_WRITE_FILTER)
-COMPATIBLE_IOCTL(SNDCTL_DSP_POST)
-COMPATIBLE_IOCTL(SNDCTL_DSP_SUBDIVIDE)
-COMPATIBLE_IOCTL(SNDCTL_DSP_SETFRAGMENT)
-COMPATIBLE_IOCTL(SNDCTL_DSP_GETFMTS)
-COMPATIBLE_IOCTL(SNDCTL_DSP_SETFMT)
-COMPATIBLE_IOCTL(SNDCTL_DSP_GETOSPACE)
-COMPATIBLE_IOCTL(SNDCTL_DSP_GETISPACE)
-COMPATIBLE_IOCTL(SNDCTL_DSP_NONBLOCK)
-COMPATIBLE_IOCTL(SNDCTL_DSP_GETCAPS)
-COMPATIBLE_IOCTL(SNDCTL_DSP_GETTRIGGER)
-COMPATIBLE_IOCTL(SNDCTL_DSP_SETTRIGGER)
-COMPATIBLE_IOCTL(SNDCTL_DSP_GETIPTR)
-COMPATIBLE_IOCTL(SNDCTL_DSP_GETOPTR)
-/* SNDCTL_DSP_MAPINBUF, XXX needs translation */
-/* SNDCTL_DSP_MAPOUTBUF, XXX needs translation */
-COMPATIBLE_IOCTL(SNDCTL_DSP_SETSYNCRO)
-COMPATIBLE_IOCTL(SNDCTL_DSP_SETDUPLEX)
-COMPATIBLE_IOCTL(SNDCTL_DSP_GETODELAY)
-COMPATIBLE_IOCTL(SNDCTL_DSP_PROFILE)
-COMPATIBLE_IOCTL(SOUND_PCM_READ_RATE)
-COMPATIBLE_IOCTL(SOUND_PCM_READ_CHANNELS)
-COMPATIBLE_IOCTL(SOUND_PCM_READ_BITS)
-COMPATIBLE_IOCTL(SOUND_PCM_READ_FILTER)
-/* Big C for sound/OSS */
-COMPATIBLE_IOCTL(SNDCTL_COPR_RESET)
-COMPATIBLE_IOCTL(SNDCTL_COPR_LOAD)
-COMPATIBLE_IOCTL(SNDCTL_COPR_RDATA)
-COMPATIBLE_IOCTL(SNDCTL_COPR_RCODE)
-COMPATIBLE_IOCTL(SNDCTL_COPR_WDATA)
-COMPATIBLE_IOCTL(SNDCTL_COPR_WCODE)
-COMPATIBLE_IOCTL(SNDCTL_COPR_RUN)
-COMPATIBLE_IOCTL(SNDCTL_COPR_HALT)
-COMPATIBLE_IOCTL(SNDCTL_COPR_SENDMSG)
-COMPATIBLE_IOCTL(SNDCTL_COPR_RCVMSG)
-/* Big M for sound/OSS */
-COMPATIBLE_IOCTL(SOUND_MIXER_READ_VOLUME)
-COMPATIBLE_IOCTL(SOUND_MIXER_READ_BASS)
-COMPATIBLE_IOCTL(SOUND_MIXER_READ_TREBLE)
-COMPATIBLE_IOCTL(SOUND_MIXER_READ_SYNTH)
-COMPATIBLE_IOCTL(SOUND_MIXER_READ_PCM)
-COMPATIBLE_IOCTL(SOUND_MIXER_READ_SPEAKER)
-COMPATIBLE_IOCTL(SOUND_MIXER_READ_LINE)
-COMPATIBLE_IOCTL(SOUND_MIXER_READ_MIC)
-COMPATIBLE_IOCTL(SOUND_MIXER_READ_CD)
-COMPATIBLE_IOCTL(SOUND_MIXER_READ_IMIX)
-COMPATIBLE_IOCTL(SOUND_MIXER_READ_ALTPCM)
-COMPATIBLE_IOCTL(SOUND_MIXER_READ_RECLEV)
-COMPATIBLE_IOCTL(SOUND_MIXER_READ_IGAIN)
-COMPATIBLE_IOCTL(SOUND_MIXER_READ_OGAIN)
-COMPATIBLE_IOCTL(SOUND_MIXER_READ_LINE1)
-COMPATIBLE_IOCTL(SOUND_MIXER_READ_LINE2)
-COMPATIBLE_IOCTL(SOUND_MIXER_READ_LINE3)
-COMPATIBLE_IOCTL(MIXER_READ(SOUND_MIXER_DIGITAL1))
-COMPATIBLE_IOCTL(MIXER_READ(SOUND_MIXER_DIGITAL2))
-COMPATIBLE_IOCTL(MIXER_READ(SOUND_MIXER_DIGITAL3))
-COMPATIBLE_IOCTL(MIXER_READ(SOUND_MIXER_PHONEIN))
-COMPATIBLE_IOCTL(MIXER_READ(SOUND_MIXER_PHONEOUT))
-COMPATIBLE_IOCTL(MIXER_READ(SOUND_MIXER_VIDEO))
-COMPATIBLE_IOCTL(MIXER_READ(SOUND_MIXER_RADIO))
-COMPATIBLE_IOCTL(MIXER_READ(SOUND_MIXER_MONITOR))
-COMPATIBLE_IOCTL(SOUND_MIXER_READ_MUTE)
-/* SOUND_MIXER_READ_ENHANCE, same value as READ_MUTE */
-/* SOUND_MIXER_READ_LOUD, same value as READ_MUTE */
-COMPATIBLE_IOCTL(SOUND_MIXER_READ_RECSRC)
-COMPATIBLE_IOCTL(SOUND_MIXER_READ_DEVMASK)
-COMPATIBLE_IOCTL(SOUND_MIXER_READ_RECMASK)
-COMPATIBLE_IOCTL(SOUND_MIXER_READ_STEREODEVS)
-COMPATIBLE_IOCTL(SOUND_MIXER_READ_CAPS)
-COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_VOLUME)
-COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_BASS)
-COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_TREBLE)
-COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_SYNTH)
-COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_PCM)
-COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_SPEAKER)
-COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_LINE)
-COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_MIC)
-COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_CD)
-COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_IMIX)
-COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_ALTPCM)
-COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_RECLEV)
-COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_IGAIN)
-COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_OGAIN)
-COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_LINE1)
-COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_LINE2)
-COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_LINE3)
-COMPATIBLE_IOCTL(MIXER_WRITE(SOUND_MIXER_DIGITAL1))
-COMPATIBLE_IOCTL(MIXER_WRITE(SOUND_MIXER_DIGITAL2))
-COMPATIBLE_IOCTL(MIXER_WRITE(SOUND_MIXER_DIGITAL3))
-COMPATIBLE_IOCTL(MIXER_WRITE(SOUND_MIXER_PHONEIN))
-COMPATIBLE_IOCTL(MIXER_WRITE(SOUND_MIXER_PHONEOUT))
-COMPATIBLE_IOCTL(MIXER_WRITE(SOUND_MIXER_VIDEO))
-COMPATIBLE_IOCTL(MIXER_WRITE(SOUND_MIXER_RADIO))
-COMPATIBLE_IOCTL(MIXER_WRITE(SOUND_MIXER_MONITOR))
-COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_MUTE)
-/* SOUND_MIXER_WRITE_ENHANCE, same value as WRITE_MUTE */
-/* SOUND_MIXER_WRITE_LOUD, same value as WRITE_MUTE */
-COMPATIBLE_IOCTL(SOUND_MIXER_WRITE_RECSRC)
-COMPATIBLE_IOCTL(SOUND_MIXER_INFO)
-COMPATIBLE_IOCTL(SOUND_OLD_MIXER_INFO)
-COMPATIBLE_IOCTL(SOUND_MIXER_ACCESS)
-COMPATIBLE_IOCTL(SOUND_MIXER_AGC)
-COMPATIBLE_IOCTL(SOUND_MIXER_3DSE)
-COMPATIBLE_IOCTL(SOUND_MIXER_PRIVATE1)
-COMPATIBLE_IOCTL(SOUND_MIXER_PRIVATE2)
-COMPATIBLE_IOCTL(SOUND_MIXER_PRIVATE3)
-COMPATIBLE_IOCTL(SOUND_MIXER_PRIVATE4)
-COMPATIBLE_IOCTL(SOUND_MIXER_PRIVATE5)
-COMPATIBLE_IOCTL(SOUND_MIXER_GETLEVELS)
-COMPATIBLE_IOCTL(SOUND_MIXER_SETLEVELS)
-COMPATIBLE_IOCTL(OSS_GETVERSION)
-/* AUTOFS */
-ULONG_IOCTL(AUTOFS_IOC_READY)
-ULONG_IOCTL(AUTOFS_IOC_FAIL)
-COMPATIBLE_IOCTL(AUTOFS_IOC_CATATONIC)
-COMPATIBLE_IOCTL(AUTOFS_IOC_PROTOVER)
-COMPATIBLE_IOCTL(AUTOFS_IOC_EXPIRE)
-COMPATIBLE_IOCTL(AUTOFS_IOC_EXPIRE_MULTI)
-COMPATIBLE_IOCTL(AUTOFS_IOC_PROTOSUBVER)
-COMPATIBLE_IOCTL(AUTOFS_IOC_ASKREGHOST)
-COMPATIBLE_IOCTL(AUTOFS_IOC_TOGGLEREGHOST)
-COMPATIBLE_IOCTL(AUTOFS_IOC_ASKUMOUNT)
-/* Raw devices */
-COMPATIBLE_IOCTL(RAW_SETBIND)
-COMPATIBLE_IOCTL(RAW_GETBIND)
-/* SMB ioctls which do not need any translations */
-COMPATIBLE_IOCTL(SMB_IOC_NEWCONN)
-/* Little a */
-COMPATIBLE_IOCTL(ATMSIGD_CTRL)
-COMPATIBLE_IOCTL(ATMARPD_CTRL)
-COMPATIBLE_IOCTL(ATMLEC_CTRL)
-COMPATIBLE_IOCTL(ATMLEC_MCAST)
-COMPATIBLE_IOCTL(ATMLEC_DATA)
-COMPATIBLE_IOCTL(ATM_SETSC)
-COMPATIBLE_IOCTL(SIOCSIFATMTCP)
-COMPATIBLE_IOCTL(SIOCMKCLIP)
-COMPATIBLE_IOCTL(ATMARP_MKIP)
-COMPATIBLE_IOCTL(ATMARP_SETENTRY)
-COMPATIBLE_IOCTL(ATMARP_ENCAP)
-COMPATIBLE_IOCTL(ATMTCP_CREATE)
-COMPATIBLE_IOCTL(ATMTCP_REMOVE)
-COMPATIBLE_IOCTL(ATMMPC_CTRL)
-COMPATIBLE_IOCTL(ATMMPC_DATA)
-/* Watchdog */
-COMPATIBLE_IOCTL(WDIOC_GETSUPPORT)
-COMPATIBLE_IOCTL(WDIOC_GETSTATUS)
-COMPATIBLE_IOCTL(WDIOC_GETBOOTSTATUS)
-COMPATIBLE_IOCTL(WDIOC_GETTEMP)
-COMPATIBLE_IOCTL(WDIOC_SETOPTIONS)
-COMPATIBLE_IOCTL(WDIOC_KEEPALIVE)
-COMPATIBLE_IOCTL(WDIOC_SETTIMEOUT)
-COMPATIBLE_IOCTL(WDIOC_GETTIMEOUT)
-/* Big R */
-COMPATIBLE_IOCTL(RNDGETENTCNT)
-COMPATIBLE_IOCTL(RNDADDTOENTCNT)
-COMPATIBLE_IOCTL(RNDGETPOOL)
-COMPATIBLE_IOCTL(RNDADDENTROPY)
-COMPATIBLE_IOCTL(RNDZAPENTCNT)
-COMPATIBLE_IOCTL(RNDCLEARPOOL)
-/* Bluetooth */
-COMPATIBLE_IOCTL(HCIDEVUP)
-COMPATIBLE_IOCTL(HCIDEVDOWN)
-COMPATIBLE_IOCTL(HCIDEVRESET)
-COMPATIBLE_IOCTL(HCIDEVRESTAT)
-COMPATIBLE_IOCTL(HCIGETDEVLIST)
-COMPATIBLE_IOCTL(HCIGETDEVINFO)
-COMPATIBLE_IOCTL(HCIGETCONNLIST)
-COMPATIBLE_IOCTL(HCIGETCONNINFO)
-COMPATIBLE_IOCTL(HCISETRAW)
-COMPATIBLE_IOCTL(HCISETSCAN)
-COMPATIBLE_IOCTL(HCISETAUTH)
-COMPATIBLE_IOCTL(HCISETENCRYPT)
-COMPATIBLE_IOCTL(HCISETPTYPE)
-COMPATIBLE_IOCTL(HCISETLINKPOL)
-COMPATIBLE_IOCTL(HCISETLINKMODE)
-COMPATIBLE_IOCTL(HCISETACLMTU)
-COMPATIBLE_IOCTL(HCISETSCOMTU)
-COMPATIBLE_IOCTL(HCIINQUIRY)
-COMPATIBLE_IOCTL(HCIUARTSETPROTO)
-COMPATIBLE_IOCTL(HCIUARTGETPROTO)
-COMPATIBLE_IOCTL(RFCOMMCREATEDEV)
-COMPATIBLE_IOCTL(RFCOMMRELEASEDEV)
-COMPATIBLE_IOCTL(RFCOMMGETDEVLIST)
-COMPATIBLE_IOCTL(RFCOMMGETDEVINFO)
-COMPATIBLE_IOCTL(RFCOMMSTEALDLC)
-COMPATIBLE_IOCTL(BNEPCONNADD)
-COMPATIBLE_IOCTL(BNEPCONNDEL)
-COMPATIBLE_IOCTL(BNEPGETCONNLIST)
-COMPATIBLE_IOCTL(BNEPGETCONNINFO)
-COMPATIBLE_IOCTL(CMTPCONNADD)
-COMPATIBLE_IOCTL(CMTPCONNDEL)
-COMPATIBLE_IOCTL(CMTPGETCONNLIST)
-COMPATIBLE_IOCTL(CMTPGETCONNINFO)
-COMPATIBLE_IOCTL(HIDPCONNADD)
-COMPATIBLE_IOCTL(HIDPCONNDEL)
-COMPATIBLE_IOCTL(HIDPGETCONNLIST)
-COMPATIBLE_IOCTL(HIDPGETCONNINFO)
-/* CAPI */
-COMPATIBLE_IOCTL(CAPI_REGISTER)
-COMPATIBLE_IOCTL(CAPI_GET_MANUFACTURER)
-COMPATIBLE_IOCTL(CAPI_GET_VERSION)
-COMPATIBLE_IOCTL(CAPI_GET_SERIAL)
-COMPATIBLE_IOCTL(CAPI_GET_PROFILE)
-COMPATIBLE_IOCTL(CAPI_MANUFACTURER_CMD)
-COMPATIBLE_IOCTL(CAPI_GET_ERRCODE)
-COMPATIBLE_IOCTL(CAPI_INSTALLED)
-COMPATIBLE_IOCTL(CAPI_GET_FLAGS)
-COMPATIBLE_IOCTL(CAPI_SET_FLAGS)
-COMPATIBLE_IOCTL(CAPI_CLR_FLAGS)
-COMPATIBLE_IOCTL(CAPI_NCCI_OPENCOUNT)
-COMPATIBLE_IOCTL(CAPI_NCCI_GETUNIT)
-/* Siemens Gigaset */
-COMPATIBLE_IOCTL(GIGASET_REDIR)
-COMPATIBLE_IOCTL(GIGASET_CONFIG)
-COMPATIBLE_IOCTL(GIGASET_BRKCHARS)
-COMPATIBLE_IOCTL(GIGASET_VERSION)
-/* Misc. */
-COMPATIBLE_IOCTL(0x41545900) /* ATYIO_CLKR */
-COMPATIBLE_IOCTL(0x41545901) /* ATYIO_CLKW */
-COMPATIBLE_IOCTL(PCIIOC_CONTROLLER)
-COMPATIBLE_IOCTL(PCIIOC_MMAP_IS_IO)
-COMPATIBLE_IOCTL(PCIIOC_MMAP_IS_MEM)
-COMPATIBLE_IOCTL(PCIIOC_WRITE_COMBINE)
-/* USB */
-COMPATIBLE_IOCTL(USBDEVFS_RESETEP)
-COMPATIBLE_IOCTL(USBDEVFS_SETINTERFACE)
-COMPATIBLE_IOCTL(USBDEVFS_SETCONFIGURATION)
-COMPATIBLE_IOCTL(USBDEVFS_GETDRIVER)
-COMPATIBLE_IOCTL(USBDEVFS_DISCARDURB)
-COMPATIBLE_IOCTL(USBDEVFS_CLAIMINTERFACE)
-COMPATIBLE_IOCTL(USBDEVFS_RELEASEINTERFACE)
-COMPATIBLE_IOCTL(USBDEVFS_CONNECTINFO)
-COMPATIBLE_IOCTL(USBDEVFS_HUB_PORTINFO)
-COMPATIBLE_IOCTL(USBDEVFS_RESET)
-COMPATIBLE_IOCTL(USBDEVFS_SUBMITURB32)
-COMPATIBLE_IOCTL(USBDEVFS_REAPURB32)
-COMPATIBLE_IOCTL(USBDEVFS_REAPURBNDELAY32)
-COMPATIBLE_IOCTL(USBDEVFS_CLEAR_HALT)
-/* MTD */
-COMPATIBLE_IOCTL(MEMGETINFO)
-COMPATIBLE_IOCTL(MEMERASE)
-COMPATIBLE_IOCTL(MEMLOCK)
-COMPATIBLE_IOCTL(MEMUNLOCK)
-COMPATIBLE_IOCTL(MEMGETREGIONCOUNT)
-COMPATIBLE_IOCTL(MEMGETREGIONINFO)
-COMPATIBLE_IOCTL(MEMGETBADBLOCK)
-COMPATIBLE_IOCTL(MEMSETBADBLOCK)
-/* NBD */
-ULONG_IOCTL(NBD_SET_SOCK)
-ULONG_IOCTL(NBD_SET_BLKSIZE)
-ULONG_IOCTL(NBD_SET_SIZE)
-COMPATIBLE_IOCTL(NBD_DO_IT)
-COMPATIBLE_IOCTL(NBD_CLEAR_SOCK)
-COMPATIBLE_IOCTL(NBD_CLEAR_QUE)
-COMPATIBLE_IOCTL(NBD_PRINT_DEBUG)
-ULONG_IOCTL(NBD_SET_SIZE_BLOCKS)
-COMPATIBLE_IOCTL(NBD_DISCONNECT)
-/* i2c */
-COMPATIBLE_IOCTL(I2C_SLAVE)
-COMPATIBLE_IOCTL(I2C_SLAVE_FORCE)
-COMPATIBLE_IOCTL(I2C_TENBIT)
-COMPATIBLE_IOCTL(I2C_PEC)
-COMPATIBLE_IOCTL(I2C_RETRIES)
-COMPATIBLE_IOCTL(I2C_TIMEOUT)
-/* wireless */
-COMPATIBLE_IOCTL(SIOCSIWCOMMIT)
-COMPATIBLE_IOCTL(SIOCGIWNAME)
-COMPATIBLE_IOCTL(SIOCSIWNWID)
-COMPATIBLE_IOCTL(SIOCGIWNWID)
-COMPATIBLE_IOCTL(SIOCSIWFREQ)
-COMPATIBLE_IOCTL(SIOCGIWFREQ)
-COMPATIBLE_IOCTL(SIOCSIWMODE)
-COMPATIBLE_IOCTL(SIOCGIWMODE)
-COMPATIBLE_IOCTL(SIOCSIWSENS)
-COMPATIBLE_IOCTL(SIOCGIWSENS)
-COMPATIBLE_IOCTL(SIOCSIWRANGE)
-COMPATIBLE_IOCTL(SIOCSIWPRIV)
-COMPATIBLE_IOCTL(SIOCGIWPRIV)
-COMPATIBLE_IOCTL(SIOCSIWSTATS)
-COMPATIBLE_IOCTL(SIOCGIWSTATS)
-COMPATIBLE_IOCTL(SIOCSIWAP)
-COMPATIBLE_IOCTL(SIOCGIWAP)
-COMPATIBLE_IOCTL(SIOCSIWSCAN)
-COMPATIBLE_IOCTL(SIOCSIWRATE)
-COMPATIBLE_IOCTL(SIOCGIWRATE)
-COMPATIBLE_IOCTL(SIOCSIWRTS)
-COMPATIBLE_IOCTL(SIOCGIWRTS)
-COMPATIBLE_IOCTL(SIOCSIWFRAG)
-COMPATIBLE_IOCTL(SIOCGIWFRAG)
-COMPATIBLE_IOCTL(SIOCSIWTXPOW)
-COMPATIBLE_IOCTL(SIOCGIWTXPOW)
-COMPATIBLE_IOCTL(SIOCSIWRETRY)
-COMPATIBLE_IOCTL(SIOCGIWRETRY)
-COMPATIBLE_IOCTL(SIOCSIWPOWER)
-COMPATIBLE_IOCTL(SIOCGIWPOWER)
-/* hiddev */
-COMPATIBLE_IOCTL(HIDIOCGVERSION)
-COMPATIBLE_IOCTL(HIDIOCAPPLICATION)
-COMPATIBLE_IOCTL(HIDIOCGDEVINFO)
-COMPATIBLE_IOCTL(HIDIOCGSTRING)
-COMPATIBLE_IOCTL(HIDIOCINITREPORT)
-COMPATIBLE_IOCTL(HIDIOCGREPORT)
-COMPATIBLE_IOCTL(HIDIOCSREPORT)
-COMPATIBLE_IOCTL(HIDIOCGREPORTINFO)
-COMPATIBLE_IOCTL(HIDIOCGFIELDINFO)
-COMPATIBLE_IOCTL(HIDIOCGUSAGE)
-COMPATIBLE_IOCTL(HIDIOCSUSAGE)
-COMPATIBLE_IOCTL(HIDIOCGUCODE)
-COMPATIBLE_IOCTL(HIDIOCGFLAG)
-COMPATIBLE_IOCTL(HIDIOCSFLAG)
-COMPATIBLE_IOCTL(HIDIOCGCOLLECTIONINDEX)
-COMPATIBLE_IOCTL(HIDIOCGCOLLECTIONINFO)
-/* dvb */
-COMPATIBLE_IOCTL(AUDIO_STOP)
-COMPATIBLE_IOCTL(AUDIO_PLAY)
-COMPATIBLE_IOCTL(AUDIO_PAUSE)
-COMPATIBLE_IOCTL(AUDIO_CONTINUE)
-COMPATIBLE_IOCTL(AUDIO_SELECT_SOURCE)
-COMPATIBLE_IOCTL(AUDIO_SET_MUTE)
-COMPATIBLE_IOCTL(AUDIO_SET_AV_SYNC)
-COMPATIBLE_IOCTL(AUDIO_SET_BYPASS_MODE)
-COMPATIBLE_IOCTL(AUDIO_CHANNEL_SELECT)
-COMPATIBLE_IOCTL(AUDIO_GET_STATUS)
-COMPATIBLE_IOCTL(AUDIO_GET_CAPABILITIES)
-COMPATIBLE_IOCTL(AUDIO_CLEAR_BUFFER)
-COMPATIBLE_IOCTL(AUDIO_SET_ID)
-COMPATIBLE_IOCTL(AUDIO_SET_MIXER)
-COMPATIBLE_IOCTL(AUDIO_SET_STREAMTYPE)
-COMPATIBLE_IOCTL(AUDIO_SET_EXT_ID)
-COMPATIBLE_IOCTL(AUDIO_SET_ATTRIBUTES)
-COMPATIBLE_IOCTL(AUDIO_SET_KARAOKE)
-COMPATIBLE_IOCTL(DMX_START)
-COMPATIBLE_IOCTL(DMX_STOP)
-COMPATIBLE_IOCTL(DMX_SET_FILTER)
-COMPATIBLE_IOCTL(DMX_SET_PES_FILTER)
-COMPATIBLE_IOCTL(DMX_SET_BUFFER_SIZE)
-COMPATIBLE_IOCTL(DMX_GET_PES_PIDS)
-COMPATIBLE_IOCTL(DMX_GET_CAPS)
-COMPATIBLE_IOCTL(DMX_SET_SOURCE)
-COMPATIBLE_IOCTL(DMX_GET_STC)
-COMPATIBLE_IOCTL(FE_GET_INFO)
-COMPATIBLE_IOCTL(FE_DISEQC_RESET_OVERLOAD)
-COMPATIBLE_IOCTL(FE_DISEQC_SEND_MASTER_CMD)
-COMPATIBLE_IOCTL(FE_DISEQC_RECV_SLAVE_REPLY)
-COMPATIBLE_IOCTL(FE_DISEQC_SEND_BURST)
-COMPATIBLE_IOCTL(FE_SET_TONE)
-COMPATIBLE_IOCTL(FE_SET_VOLTAGE)
-COMPATIBLE_IOCTL(FE_ENABLE_HIGH_LNB_VOLTAGE)
-COMPATIBLE_IOCTL(FE_READ_STATUS)
-COMPATIBLE_IOCTL(FE_READ_BER)
-COMPATIBLE_IOCTL(FE_READ_SIGNAL_STRENGTH)
-COMPATIBLE_IOCTL(FE_READ_SNR)
-COMPATIBLE_IOCTL(FE_READ_UNCORRECTED_BLOCKS)
-COMPATIBLE_IOCTL(FE_SET_FRONTEND)
-COMPATIBLE_IOCTL(FE_GET_FRONTEND)
-COMPATIBLE_IOCTL(FE_GET_EVENT)
-COMPATIBLE_IOCTL(FE_DISHNETWORK_SEND_LEGACY_CMD)
-COMPATIBLE_IOCTL(VIDEO_STOP)
-COMPATIBLE_IOCTL(VIDEO_PLAY)
-COMPATIBLE_IOCTL(VIDEO_FREEZE)
-COMPATIBLE_IOCTL(VIDEO_CONTINUE)
-COMPATIBLE_IOCTL(VIDEO_SELECT_SOURCE)
-COMPATIBLE_IOCTL(VIDEO_SET_BLANK)
-COMPATIBLE_IOCTL(VIDEO_GET_STATUS)
-COMPATIBLE_IOCTL(VIDEO_SET_DISPLAY_FORMAT)
-COMPATIBLE_IOCTL(VIDEO_FAST_FORWARD)
-COMPATIBLE_IOCTL(VIDEO_SLOWMOTION)
-COMPATIBLE_IOCTL(VIDEO_GET_CAPABILITIES)
-COMPATIBLE_IOCTL(VIDEO_CLEAR_BUFFER)
-COMPATIBLE_IOCTL(VIDEO_SET_ID)
-COMPATIBLE_IOCTL(VIDEO_SET_STREAMTYPE)
-COMPATIBLE_IOCTL(VIDEO_SET_FORMAT)
-COMPATIBLE_IOCTL(VIDEO_SET_SYSTEM)
-COMPATIBLE_IOCTL(VIDEO_SET_HIGHLIGHT)
-COMPATIBLE_IOCTL(VIDEO_SET_SPU)
-COMPATIBLE_IOCTL(VIDEO_GET_NAVI)
-COMPATIBLE_IOCTL(VIDEO_SET_ATTRIBUTES)
-COMPATIBLE_IOCTL(VIDEO_GET_SIZE)
-COMPATIBLE_IOCTL(VIDEO_GET_FRAME_RATE)
diff --git a/include/linux/console.h b/include/linux/console.h
index de25ee3b791..62ef6e11d0d 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -51,7 +51,7 @@ struct consw {
int (*con_scrolldelta)(struct vc_data *, int);
int (*con_set_origin)(struct vc_data *);
void (*con_save_screen)(struct vc_data *);
- u8 (*con_build_attr)(struct vc_data *, u8, u8, u8, u8, u8);
+ u8 (*con_build_attr)(struct vc_data *, u8, u8, u8, u8, u8, u8);
void (*con_invert_region)(struct vc_data *, u16 *, int);
u16 *(*con_screen_pos)(struct vc_data *, int);
unsigned long (*con_getxy)(struct vc_data *, unsigned long, int *, int *);
@@ -92,9 +92,8 @@ void give_up_console(const struct consw *sw);
#define CON_BOOT (8)
#define CON_ANYTIME (16) /* Safe to call when cpu is offline */
-struct console
-{
- char name[8];
+struct console {
+ char name[16];
void (*write)(struct console *, const char *, unsigned);
int (*read)(struct console *, char *, unsigned);
struct tty_driver *(*device)(struct console *, int *);
diff --git a/include/linux/console_struct.h b/include/linux/console_struct.h
index a86162b26c0..a461f76fb00 100644
--- a/include/linux/console_struct.h
+++ b/include/linux/console_struct.h
@@ -37,6 +37,7 @@ struct vc_data {
unsigned char vc_color; /* Foreground & background */
unsigned char vc_s_color; /* Saved foreground & background */
unsigned char vc_ulcolor; /* Color for underline mode */
+ unsigned char vc_itcolor;
unsigned char vc_halfcolor; /* Color for half intensity mode */
/* cursor */
unsigned int vc_cursor_type;
@@ -71,10 +72,12 @@ struct vc_data {
unsigned int vc_deccolm : 1; /* 80/132 Column Mode */
/* attribute flags */
unsigned int vc_intensity : 2; /* 0=half-bright, 1=normal, 2=bold */
+ unsigned int vc_italic:1;
unsigned int vc_underline : 1;
unsigned int vc_blink : 1;
unsigned int vc_reverse : 1;
unsigned int vc_s_intensity : 2; /* saved rendition */
+ unsigned int vc_s_italic:1;
unsigned int vc_s_underline : 1;
unsigned int vc_s_blink : 1;
unsigned int vc_s_reverse : 1;
diff --git a/include/asm-x86_64/const.h b/include/linux/const.h
index 54fb08f3db9..07b300bfe34 100644
--- a/include/asm-x86_64/const.h
+++ b/include/linux/const.h
@@ -1,11 +1,11 @@
/* const.h: Macros for dealing with constants. */
-#ifndef _X86_64_CONST_H
-#define _X86_64_CONST_H
+#ifndef _LINUX_CONST_H
+#define _LINUX_CONST_H
/* Some constant macros are used in both assembler and
* C code. Therefore we cannot annotate them always with
- * 'UL' and other type specificers unilaterally. We
+ * 'UL' and other type specifiers unilaterally. We
* use the following macros to deal with this.
*/
@@ -16,5 +16,4 @@
#define _AC(X,Y) __AC(X,Y)
#endif
-
-#endif /* !(_X86_64_CONST_H) */
+#endif /* !(_LINUX_CONST_H) */
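Only the tail of the _AC() machinery is visible in this hunk; the reason the header exists is that assemblers cannot parse C suffixes such as UL. The sketch below reconstructs the conventional shape of the full file, so treat it as an assumption rather than a quote, and EXAMPLE_PAGE_SIZE is a hypothetical user: _AC(1,UL) << 12 becomes (1UL << 12) in C but plain 1 << 12 when __ASSEMBLY__ is defined.

/* assumed full form of linux/const.h, following the usual kernel pattern */
#ifndef _LINUX_CONST_H
#define _LINUX_CONST_H

#ifdef __ASSEMBLY__
#define _AC(X,Y)        X               /* assembler: drop the suffix */
#else
#define __AC(X,Y)       (X##Y)          /* C: paste the type suffix on */
#define _AC(X,Y)        __AC(X,Y)
#endif

/* hypothetical user, usable from both .c and .S files */
#define EXAMPLE_PAGE_SIZE       (_AC(1,UL) << 12)

#endif /* !(_LINUX_CONST_H) */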
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index c22b0dfcbcd..3b2df2523f1 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -41,6 +41,9 @@ extern void cpu_remove_sysdev_attr(struct sysdev_attribute *attr);
extern int cpu_add_sysdev_attr_group(struct attribute_group *attrs);
extern void cpu_remove_sysdev_attr_group(struct attribute_group *attrs);
+extern struct sysdev_attribute attr_sched_mc_power_savings;
+extern struct sysdev_attribute attr_sched_smt_power_savings;
+extern int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls);
#ifdef CONFIG_HOTPLUG_CPU
extern void unregister_cpu(struct cpu *cpu);
diff --git a/include/linux/cyclades.h b/include/linux/cyclades.h
index 46d8254c1a7..72aa00cc4b2 100644
--- a/include/linux/cyclades.h
+++ b/include/linux/cyclades.h
@@ -67,6 +67,8 @@
#ifndef _LINUX_CYCLADES_H
#define _LINUX_CYCLADES_H
+#include <linux/types.h>
+
struct cyclades_monitor {
unsigned long int_count;
unsigned long char_count;
@@ -108,7 +110,6 @@ struct cyclades_idle_stats {
#define CYZSETPOLLCYCLE 0x43590e
#define CYZGETPOLLCYCLE 0x43590f
#define CYGETCD1400VER 0x435910
-#define CYGETCARDINFO 0x435911
#define CYSETWAIT 0x435912
#define CYGETWAIT 0x435913
@@ -149,14 +150,12 @@ struct CYZ_BOOT_CTRL {
* architectures and compilers.
*/
-#if defined(__alpha__)
-typedef unsigned long ucdouble; /* 64 bits, unsigned */
-typedef unsigned int uclong; /* 32 bits, unsigned */
-#else
-typedef unsigned long uclong; /* 32 bits, unsigned */
-#endif
-typedef unsigned short ucshort; /* 16 bits, unsigned */
-typedef unsigned char ucchar; /* 8 bits, unsigned */
+#include <asm/types.h>
+
+typedef __u64 ucdouble; /* 64 bits, unsigned */
+typedef __u32 uclong; /* 32 bits, unsigned */
+typedef __u16 ucshort; /* 16 bits, unsigned */
+typedef __u8 ucchar; /* 8 bits, unsigned */
/*
* Memory Window Sizes
@@ -174,24 +173,24 @@ typedef unsigned char ucchar; /* 8 bits, unsigned */
*/
struct CUSTOM_REG {
- uclong fpga_id; /* FPGA Identification Register */
- uclong fpga_version; /* FPGA Version Number Register */
- uclong cpu_start; /* CPU start Register (write) */
- uclong cpu_stop; /* CPU stop Register (write) */
- uclong misc_reg; /* Miscelaneous Register */
- uclong idt_mode; /* IDT mode Register */
- uclong uart_irq_status; /* UART IRQ status Register */
- uclong clear_timer0_irq; /* Clear timer interrupt Register */
- uclong clear_timer1_irq; /* Clear timer interrupt Register */
- uclong clear_timer2_irq; /* Clear timer interrupt Register */
- uclong test_register; /* Test Register */
- uclong test_count; /* Test Count Register */
- uclong timer_select; /* Timer select register */
- uclong pr_uart_irq_status; /* Prioritized UART IRQ stat Reg */
- uclong ram_wait_state; /* RAM wait-state Register */
- uclong uart_wait_state; /* UART wait-state Register */
- uclong timer_wait_state; /* timer wait-state Register */
- uclong ack_wait_state; /* ACK wait State Register */
+ __u32 fpga_id; /* FPGA Identification Register */
+ __u32 fpga_version; /* FPGA Version Number Register */
+ __u32 cpu_start; /* CPU start Register (write) */
+ __u32 cpu_stop; /* CPU stop Register (write) */
+ __u32 misc_reg; /* Miscellaneous Register */
+ __u32 idt_mode; /* IDT mode Register */
+ __u32 uart_irq_status; /* UART IRQ status Register */
+ __u32 clear_timer0_irq; /* Clear timer interrupt Register */
+ __u32 clear_timer1_irq; /* Clear timer interrupt Register */
+ __u32 clear_timer2_irq; /* Clear timer interrupt Register */
+ __u32 test_register; /* Test Register */
+ __u32 test_count; /* Test Count Register */
+ __u32 timer_select; /* Timer select register */
+ __u32 pr_uart_irq_status; /* Prioritized UART IRQ stat Reg */
+ __u32 ram_wait_state; /* RAM wait-state Register */
+ __u32 uart_wait_state; /* UART wait-state Register */
+ __u32 timer_wait_state; /* timer wait-state Register */
+ __u32 ack_wait_state; /* ACK wait State Register */
};
/*
@@ -201,34 +200,34 @@ struct CUSTOM_REG {
*/
struct RUNTIME_9060 {
- uclong loc_addr_range; /* 00h - Local Address Range */
- uclong loc_addr_base; /* 04h - Local Address Base */
- uclong loc_arbitr; /* 08h - Local Arbitration */
- uclong endian_descr; /* 0Ch - Big/Little Endian Descriptor */
- uclong loc_rom_range; /* 10h - Local ROM Range */
- uclong loc_rom_base; /* 14h - Local ROM Base */
- uclong loc_bus_descr; /* 18h - Local Bus descriptor */
- uclong loc_range_mst; /* 1Ch - Local Range for Master to PCI */
- uclong loc_base_mst; /* 20h - Local Base for Master PCI */
- uclong loc_range_io; /* 24h - Local Range for Master IO */
- uclong pci_base_mst; /* 28h - PCI Base for Master PCI */
- uclong pci_conf_io; /* 2Ch - PCI configuration for Master IO */
- uclong filler1; /* 30h */
- uclong filler2; /* 34h */
- uclong filler3; /* 38h */
- uclong filler4; /* 3Ch */
- uclong mail_box_0; /* 40h - Mail Box 0 */
- uclong mail_box_1; /* 44h - Mail Box 1 */
- uclong mail_box_2; /* 48h - Mail Box 2 */
- uclong mail_box_3; /* 4Ch - Mail Box 3 */
- uclong filler5; /* 50h */
- uclong filler6; /* 54h */
- uclong filler7; /* 58h */
- uclong filler8; /* 5Ch */
- uclong pci_doorbell; /* 60h - PCI to Local Doorbell */
- uclong loc_doorbell; /* 64h - Local to PCI Doorbell */
- uclong intr_ctrl_stat; /* 68h - Interrupt Control/Status */
- uclong init_ctrl; /* 6Ch - EEPROM control, Init Control, etc */
+ __u32 loc_addr_range; /* 00h - Local Address Range */
+ __u32 loc_addr_base; /* 04h - Local Address Base */
+ __u32 loc_arbitr; /* 08h - Local Arbitration */
+ __u32 endian_descr; /* 0Ch - Big/Little Endian Descriptor */
+ __u32 loc_rom_range; /* 10h - Local ROM Range */
+ __u32 loc_rom_base; /* 14h - Local ROM Base */
+ __u32 loc_bus_descr; /* 18h - Local Bus descriptor */
+ __u32 loc_range_mst; /* 1Ch - Local Range for Master to PCI */
+ __u32 loc_base_mst; /* 20h - Local Base for Master PCI */
+ __u32 loc_range_io; /* 24h - Local Range for Master IO */
+ __u32 pci_base_mst; /* 28h - PCI Base for Master PCI */
+ __u32 pci_conf_io; /* 2Ch - PCI configuration for Master IO */
+ __u32 filler1; /* 30h */
+ __u32 filler2; /* 34h */
+ __u32 filler3; /* 38h */
+ __u32 filler4; /* 3Ch */
+ __u32 mail_box_0; /* 40h - Mail Box 0 */
+ __u32 mail_box_1; /* 44h - Mail Box 1 */
+ __u32 mail_box_2; /* 48h - Mail Box 2 */
+ __u32 mail_box_3; /* 4Ch - Mail Box 3 */
+ __u32 filler5; /* 50h */
+ __u32 filler6; /* 54h */
+ __u32 filler7; /* 58h */
+ __u32 filler8; /* 5Ch */
+ __u32 pci_doorbell; /* 60h - PCI to Local Doorbell */
+ __u32 loc_doorbell; /* 64h - Local to PCI Doorbell */
+ __u32 intr_ctrl_stat; /* 68h - Interrupt Control/Status */
+ __u32 init_ctrl; /* 6Ch - EEPROM control, Init Control, etc */
};
/* Values for the Local Base Address re-map register */
@@ -270,8 +269,8 @@ struct RUNTIME_9060 {
#define ZF_TINACT ZF_TINACT_DEF
struct FIRM_ID {
- uclong signature; /* ZFIRM/U signature */
- uclong zfwctrl_addr; /* pointer to ZFW_CTRL structure */
+ __u32 signature; /* ZFIRM/U signature */
+ __u32 zfwctrl_addr; /* pointer to ZFW_CTRL structure */
};
/* Op. System id */
@@ -408,24 +407,24 @@ struct FIRM_ID {
*/
struct CH_CTRL {
- uclong op_mode; /* operation mode */
- uclong intr_enable; /* interrupt masking */
- uclong sw_flow; /* SW flow control */
- uclong flow_status; /* output flow status */
- uclong comm_baud; /* baud rate - numerically specified */
- uclong comm_parity; /* parity */
- uclong comm_data_l; /* data length/stop */
- uclong comm_flags; /* other flags */
- uclong hw_flow; /* HW flow control */
- uclong rs_control; /* RS-232 outputs */
- uclong rs_status; /* RS-232 inputs */
- uclong flow_xon; /* xon char */
- uclong flow_xoff; /* xoff char */
- uclong hw_overflow; /* hw overflow counter */
- uclong sw_overflow; /* sw overflow counter */
- uclong comm_error; /* frame/parity error counter */
- uclong ichar;
- uclong filler[7];
+ __u32 op_mode; /* operation mode */
+ __u32 intr_enable; /* interrupt masking */
+ __u32 sw_flow; /* SW flow control */
+ __u32 flow_status; /* output flow status */
+ __u32 comm_baud; /* baud rate - numerically specified */
+ __u32 comm_parity; /* parity */
+ __u32 comm_data_l; /* data length/stop */
+ __u32 comm_flags; /* other flags */
+ __u32 hw_flow; /* HW flow control */
+ __u32 rs_control; /* RS-232 outputs */
+ __u32 rs_status; /* RS-232 inputs */
+ __u32 flow_xon; /* xon char */
+ __u32 flow_xoff; /* xoff char */
+ __u32 hw_overflow; /* hw overflow counter */
+ __u32 sw_overflow; /* sw overflow counter */
+ __u32 comm_error; /* frame/parity error counter */
+ __u32 ichar;
+ __u32 filler[7];
};
@@ -435,18 +434,18 @@ struct CH_CTRL {
*/
struct BUF_CTRL {
- uclong flag_dma; /* buffers are in Host memory */
- uclong tx_bufaddr; /* address of the tx buffer */
- uclong tx_bufsize; /* tx buffer size */
- uclong tx_threshold; /* tx low water mark */
- uclong tx_get; /* tail index tx buf */
- uclong tx_put; /* head index tx buf */
- uclong rx_bufaddr; /* address of the rx buffer */
- uclong rx_bufsize; /* rx buffer size */
- uclong rx_threshold; /* rx high water mark */
- uclong rx_get; /* tail index rx buf */
- uclong rx_put; /* head index rx buf */
- uclong filler[5]; /* filler to align structures */
+ __u32 flag_dma; /* buffers are in Host memory */
+ __u32 tx_bufaddr; /* address of the tx buffer */
+ __u32 tx_bufsize; /* tx buffer size */
+ __u32 tx_threshold; /* tx low water mark */
+ __u32 tx_get; /* tail index tx buf */
+ __u32 tx_put; /* head index tx buf */
+ __u32 rx_bufaddr; /* address of the rx buffer */
+ __u32 rx_bufsize; /* rx buffer size */
+ __u32 rx_threshold; /* rx high water mark */
+ __u32 rx_get; /* tail index rx buf */
+ __u32 rx_put; /* head index rx buf */
+ __u32 filler[5]; /* filler to align structures */
};
/*
@@ -457,27 +456,27 @@ struct BUF_CTRL {
struct BOARD_CTRL {
/* static info provided by the on-board CPU */
- uclong n_channel; /* number of channels */
- uclong fw_version; /* firmware version */
+ __u32 n_channel; /* number of channels */
+ __u32 fw_version; /* firmware version */
/* static info provided by the driver */
- uclong op_system; /* op_system id */
- uclong dr_version; /* driver version */
+ __u32 op_system; /* op_system id */
+ __u32 dr_version; /* driver version */
/* board control area */
- uclong inactivity; /* inactivity control */
+ __u32 inactivity; /* inactivity control */
/* host to FW commands */
- uclong hcmd_channel; /* channel number */
- uclong hcmd_param; /* pointer to parameters */
+ __u32 hcmd_channel; /* channel number */
+ __u32 hcmd_param; /* pointer to parameters */
/* FW to Host commands */
- uclong fwcmd_channel; /* channel number */
- uclong fwcmd_param; /* pointer to parameters */
- uclong zf_int_queue_addr; /* offset for INT_QUEUE structure */
+ __u32 fwcmd_channel; /* channel number */
+ __u32 fwcmd_param; /* pointer to parameters */
+ __u32 zf_int_queue_addr; /* offset for INT_QUEUE structure */
/* filler so the structures are aligned */
- uclong filler[6];
+ __u32 filler[6];
};
/* Host Interrupt Queue */
@@ -506,11 +505,10 @@ struct ZFW_CTRL {
/****************** ****************** *******************/
#endif
+#ifdef __KERNEL__
+
/* Per card data structure */
-struct resource;
struct cyclades_card {
- unsigned long base_phys;
- unsigned long ctl_phys;
void __iomem *base_addr;
void __iomem *ctl_addr;
int irq;
@@ -519,33 +517,18 @@ struct cyclades_card {
int nports; /* Number of ports in the card */
int bus_index; /* address shift - 0 for ISA, 1 for PCI */
int intr_enabled; /* FW Interrupt flag - 0 disabled, 1 enabled */
- struct pci_dev *pdev;
-#ifdef __KERNEL__
spinlock_t card_lock;
-#else
- unsigned long filler;
-#endif
+ struct cyclades_port *ports;
};
-struct cyclades_chip {
- int filler;
-};
-
-
-#ifdef __KERNEL__
-
/***************************************
* Memory access functions/macros *
* (required to support Alpha systems) *
***************************************/
-#define cy_writeb(port,val) {writeb((val),(port)); mb();}
-#define cy_writew(port,val) {writew((val),(port)); mb();}
-#define cy_writel(port,val) {writel((val),(port)); mb();}
-
-#define cy_readb(port) readb(port)
-#define cy_readw(port) readw(port)
-#define cy_readl(port) readl(port)
+#define cy_writeb(port,val) do { writeb((val), (port)); mb(); } while (0)
+#define cy_writew(port,val) do { writew((val), (port)); mb(); } while (0)
+#define cy_writel(port,val) do { writel((val), (port)); mb(); } while (0)
/*
* Statistics counters
@@ -567,7 +550,7 @@ struct cyclades_icount {
struct cyclades_port {
int magic;
- int card;
+ struct cyclades_card *card;
int line;
int flags; /* defined in tty.h */
int type; /* UART type */
@@ -587,7 +570,6 @@ struct cyclades_port {
int close_delay;
unsigned short closing_wait;
unsigned long event;
- unsigned long last_active;
int count; /* # of fd on device */
int breakon;
int breakoff;
@@ -598,7 +580,6 @@ struct cyclades_port {
int xmit_cnt;
int default_threshold;
int default_timeout;
- unsigned long jiffies[3];
unsigned long rflush_count;
struct cyclades_monitor mon;
struct cyclades_idle_stats idle_stats;
@@ -606,7 +587,7 @@ struct cyclades_port {
struct work_struct tqueue;
wait_queue_head_t open_wait;
wait_queue_head_t close_wait;
- wait_queue_head_t shutdown_wait;
+ struct completion shutdown_wait;
wait_queue_head_t delta_msr_wait;
int throttle;
};
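The cy_write*() rewrite above is the standard do { ... } while (0) idiom: it turns a two-statement macro into a single statement, so a trailing semicolon and if/else nesting both behave. A small stand-alone illustration (writeb() and mb() here are user-space stand-ins, not the kernel functions):

#include <stdio.h>

static unsigned char fake_reg;
static void writeb(int val, volatile unsigned char *port) { *port = (unsigned char)val; }
static void mb(void) { __sync_synchronize(); }  /* stand-in barrier */

/* old style: the ';' after a call ends the if, so a following "else"
 * has no "if" to attach to and the compiler rejects it */
#define BAD_WRITE(port, val)  { writeb((val), (port)); mb(); }

/* new style: one statement that swallows the trailing ';' */
#define GOOD_WRITE(port, val) do { writeb((val), (port)); mb(); } while (0)

int main(void)
{
        if (fake_reg == 0)
                GOOD_WRITE(&fake_reg, 0x01);    /* composes cleanly */
        else
                GOOD_WRITE(&fake_reg, 0x00);
        /* swapping BAD_WRITE() into the if/else above breaks the build */
        printf("reg = %u\n", (unsigned)fake_reg);
        return 0;
}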
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 63f64a9a5bf..aab53df4faf 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -133,6 +133,7 @@ struct dentry_operations {
int (*d_delete)(struct dentry *);
void (*d_release)(struct dentry *);
void (*d_iput)(struct dentry *, struct inode *);
+ char *(*d_dname)(struct dentry *, char *, int);
};
/* the dentry parameter passed to d_hash and d_compare is the parent
@@ -293,6 +294,11 @@ extern struct dentry * d_hash_and_lookup(struct dentry *, struct qstr *);
/* validate "insecure" dentry pointer */
extern int d_validate(struct dentry *, struct dentry *);
+/*
+ * helper function for dentry_operations.d_dname() members
+ */
+extern char *dynamic_dname(struct dentry *, char *, int, const char *, ...);
+
extern char * d_path(struct dentry *, struct vfsmount *, char *, int);
/* Allocation counts.. */
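dynamic_dname() is the printf-style helper meant to back the new d_dname() hook, so a pseudo filesystem can synthesize a dentry name on demand instead of storing one. A sketch of a typical caller, modelled on the way pipefs names its dentries; treat the exact format string as illustrative:

#include <linux/fs.h>
#include <linux/dcache.h>

/* build "pipe:[<inode number>]" into the caller-supplied buffer */
static char *pipefs_dname(struct dentry *dentry, char *buffer, int buflen)
{
        return dynamic_dname(dentry, buffer, buflen, "pipe:[%lu]",
                             dentry->d_inode->i_ino);
}

static struct dentry_operations pipefs_dentry_operations = {
        .d_dname        = pipefs_dname,
};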
diff --git a/include/linux/device.h b/include/linux/device.h
index 6579068134d..2e1a2988b7e 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -412,12 +412,13 @@ struct device {
struct klist_node knode_parent; /* node in sibling list */
struct klist_node knode_driver;
struct klist_node knode_bus;
- struct device * parent;
+ struct device *parent;
struct kobject kobj;
char bus_id[BUS_ID_SIZE]; /* position on parent bus */
struct device_type *type;
unsigned is_registered:1;
+ unsigned uevent_suppress:1;
struct device_attribute uevent_attr;
struct device_attribute *devt_attr;
@@ -458,7 +459,6 @@ struct device {
struct class *class;
dev_t devt; /* dev_t, creates the sysfs "dev" */
struct attribute_group **groups; /* optional groups */
- int uevent_suppress;
void (*release)(struct device * dev);
};
diff --git a/include/linux/display.h b/include/linux/display.h
new file mode 100644
index 00000000000..3bf70d63972
--- /dev/null
+++ b/include/linux/display.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2006 James Simmons <jsimmons@infradead.org>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+#ifndef _LINUX_DISPLAY_H
+#define _LINUX_DISPLAY_H
+
+#include <linux/device.h>
+
+struct display_device;
+
+/* This structure defines all the properties of a Display. */
+struct display_driver {
+ int (*set_contrast)(struct display_device *, unsigned int);
+ int (*get_contrast)(struct display_device *);
+ void (*suspend)(struct display_device *, pm_message_t state);
+ void (*resume)(struct display_device *);
+ int (*probe)(struct display_device *, void *);
+ int (*remove)(struct display_device *);
+ int max_contrast;
+};
+
+struct display_device {
+ struct module *owner; /* Owner module */
+ struct display_driver *driver;
+ struct device *parent; /* This is the parent */
+ struct device *dev; /* This is this display device */
+ struct mutex lock;
+ void *priv_data;
+ char type[16];
+ char *name;
+ int idx;
+};
+
+extern struct display_device *display_device_register(struct display_driver *driver,
+ struct device *dev, void *devdata);
+extern void display_device_unregister(struct display_device *dev);
+
+extern int probe_edid(struct display_device *dev, void *devdata);
+
+#define to_display_device(obj) container_of(obj, struct display_device, class_dev)
+
+#endif
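Using the new display class comes down to filling in a struct display_driver and registering it against a parent device at probe time. A hedged sketch follows: the demo_* names are invented, the error check is a guess because the header does not show the failure convention, and it assumes the devdata pointer handed to display_device_register() is what later appears in priv_data.

#include <linux/display.h>
#include <linux/platform_device.h>
#include <linux/errno.h>

struct demo_panel {                     /* hypothetical driver state */
        unsigned int contrast;
};

static int demo_set_contrast(struct display_device *ddev, unsigned int level)
{
        struct demo_panel *panel = ddev->priv_data;

        panel->contrast = level;        /* a real driver programs hardware here */
        return 0;
}

static int demo_get_contrast(struct display_device *ddev)
{
        struct demo_panel *panel = ddev->priv_data;

        return panel->contrast;
}

static struct display_driver demo_display_driver = {
        .set_contrast   = demo_set_contrast,
        .get_contrast   = demo_get_contrast,
        .max_contrast   = 255,
};

static struct demo_panel demo_panel_data;

static int demo_probe(struct platform_device *pdev)
{
        struct display_device *ddev;

        ddev = display_device_register(&demo_display_driver, &pdev->dev,
                                       &demo_panel_data);
        if (ddev == NULL)       /* failure convention not visible in the header */
                return -ENODEV;
        platform_set_drvdata(pdev, ddev);
        return 0;
}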
diff --git a/include/linux/ds1wm.h b/include/linux/ds1wm.h
new file mode 100644
index 00000000000..31f6e3c427f
--- /dev/null
+++ b/include/linux/ds1wm.h
@@ -0,0 +1,11 @@
+/* platform data for the DS1WM driver */
+
+struct ds1wm_platform_data {
+ int bus_shift; /* number of shifts needed to calculate the
+ * offset between DS1WM registers;
+ * e.g. on h5xxx and h2200 this is 2
+ * (registers aligned to 4-byte boundaries),
+ * while on hx4700 this is 1 */
+ void (*enable)(struct platform_device *pdev);
+ void (*disable)(struct platform_device *pdev);
+};
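bus_shift describes how far apart the DS1WM registers sit on the host bus, so an accessor scales the register index by 1 << bus_shift. A hypothetical accessor showing the arithmetic (the function and register numbering are illustrative, not part of the driver):

#include <linux/types.h>
#include <linux/platform_device.h>
#include <linux/ds1wm.h>
#include <asm/io.h>             /* readb() */

static inline u8 ds1wm_demo_read(void __iomem *base,
                                 const struct ds1wm_platform_data *pdata,
                                 unsigned int reg)
{
        return readb(base + (reg << pdata->bus_shift));
}
/* bus_shift == 2 (h5xxx, h2200): register 1 is at byte offset 4;
 * bus_shift == 1 (hx4700): the same register is at offset 2 */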
diff --git a/include/linux/efi.h b/include/linux/efi.h
index f8ebd7c1ddb..0b9579a4cd4 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -213,7 +213,6 @@ typedef struct {
} efi_config_table_t;
#define EFI_SYSTEM_TABLE_SIGNATURE ((u64)0x5453595320494249ULL)
-#define EFI_SYSTEM_TABLE_REVISION ((1 << 16) | 00)
typedef struct {
efi_table_hdr_t hdr;
diff --git a/include/linux/ext3_fs.h b/include/linux/ext3_fs.h
index 4eb18ac510a..ece49a804fe 100644
--- a/include/linux/ext3_fs.h
+++ b/include/linux/ext3_fs.h
@@ -824,6 +824,7 @@ extern int ext3_change_inode_journal_flag(struct inode *, int);
extern int ext3_get_inode_loc(struct inode *, struct ext3_iloc *);
extern void ext3_truncate (struct inode *);
extern void ext3_set_inode_flags(struct inode *);
+extern void ext3_get_inode_flags(struct ext3_inode_info *);
extern void ext3_set_aops(struct inode *inode);
/* ioctl.c */
diff --git a/include/linux/fb.h b/include/linux/fb.h
index be913ec8716..dff7a728948 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -4,6 +4,8 @@
#include <asm/types.h>
#include <linux/i2c.h>
+struct dentry;
+
/* Definitions of frame buffers */
#define FB_MAJOR 29
@@ -525,12 +527,20 @@ struct fb_cursor_user {
#define FB_EVENT_MODE_CHANGE_ALL 0x0B
/* A software display blank change occured */
#define FB_EVENT_CONBLANK 0x0C
+/* Get drawing requirements */
+#define FB_EVENT_GET_REQ 0x0D
struct fb_event {
struct fb_info *info;
void *data;
};
+struct fb_blit_caps {
+ u32 x;
+ u32 y;
+ u32 len;
+ u32 flags;
+};
extern int fb_register_client(struct notifier_block *nb);
extern int fb_unregister_client(struct notifier_block *nb);
@@ -556,11 +566,25 @@ struct fb_pixmap {
u32 scan_align; /* alignment per scanline */
u32 access_align; /* alignment per read/write (bits) */
u32 flags; /* see FB_PIXMAP_* */
+ u32 blit_x; /* supported bit block dimensions (1-32)*/
+ u32 blit_y; /* Format: blit_x = 1 << (width - 1) */
+ /* blit_y = 1 << (height - 1) */
+ /* if 0, will be set to 0xffffffff (all)*/
/* access methods */
void (*writeio)(struct fb_info *info, void __iomem *dst, void *src, unsigned int size);
void (*readio) (struct fb_info *info, void *dst, void __iomem *src, unsigned int size);
};
+#ifdef CONFIG_FB_DEFERRED_IO
+struct fb_deferred_io {
+ /* delay between mkwrite and deferred handler */
+ unsigned long delay;
+ struct mutex lock; /* mutex that protects the page list */
+ struct list_head pagelist; /* list of touched pages */
+ /* callback */
+ void (*deferred_io)(struct fb_info *info, struct list_head *pagelist);
+};
+#endif
/*
* Frame buffer operations
@@ -579,8 +603,10 @@ struct fb_ops {
/* For framebuffers with strange non linear layouts or that do not
* work with normal memory mapped access
*/
- ssize_t (*fb_read)(struct file *file, char __user *buf, size_t count, loff_t *ppos);
- ssize_t (*fb_write)(struct file *file, const char __user *buf, size_t count, loff_t *ppos);
+ ssize_t (*fb_read)(struct fb_info *info, char __user *buf,
+ size_t count, loff_t *ppos);
+ ssize_t (*fb_write)(struct fb_info *info, const char __user *buf,
+ size_t count, loff_t *ppos);
/* checks var and eventually tweaks it to something supported,
* DO NOT MODIFY PAR */
@@ -634,10 +660,13 @@ struct fb_ops {
/* restore saved state */
void (*fb_restore_state)(struct fb_info *info);
+
+ /* get capability given var */
+ void (*fb_get_caps)(struct fb_info *info, struct fb_blit_caps *caps,
+ struct fb_var_screeninfo *var);
};
#ifdef CONFIG_FB_TILEBLITTING
-
#define FB_TILE_CURSOR_NONE 0
#define FB_TILE_CURSOR_UNDERLINE 1
#define FB_TILE_CURSOR_LOWER_THIRD 2
@@ -709,6 +738,8 @@ struct fb_tile_ops {
/* cursor */
void (*fb_tilecursor)(struct fb_info *info,
struct fb_tilecursor *cursor);
+ /* get maximum length of the tile map */
+ int (*fb_get_tilemax)(struct fb_info *info);
};
#endif /* CONFIG_FB_TILEBLITTING */
@@ -778,6 +809,10 @@ struct fb_info {
struct mutex bl_curve_mutex;
u8 bl_curve[FB_BACKLIGHT_LEVELS];
#endif
+#ifdef CONFIG_FB_DEFERRED_IO
+ struct delayed_work deferred_work;
+ struct fb_deferred_io *fbdefio;
+#endif
struct fb_ops *fbops;
struct device *device; /* This is the parent */
@@ -879,6 +914,16 @@ extern int fb_blank(struct fb_info *info, int blank);
extern void cfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
extern void cfb_copyarea(struct fb_info *info, const struct fb_copyarea *area);
extern void cfb_imageblit(struct fb_info *info, const struct fb_image *image);
+/*
+ * Drawing operations where framebuffer is in system RAM
+ */
+extern void sys_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
+extern void sys_copyarea(struct fb_info *info, const struct fb_copyarea *area);
+extern void sys_imageblit(struct fb_info *info, const struct fb_image *image);
+extern ssize_t fb_sys_read(struct fb_info *info, char __user *buf,
+ size_t count, loff_t *ppos);
+extern ssize_t fb_sys_write(struct fb_info *info, const char __user *buf,
+ size_t count, loff_t *ppos);
/* drivers/video/fbmem.c */
extern int register_framebuffer(struct fb_info *fb_info);
@@ -913,6 +958,12 @@ static inline void __fb_pad_aligned_buffer(u8 *dst, u32 d_pitch,
}
}
+/* drivers/video/fb_defio.c */
+extern void fb_deferred_io_init(struct fb_info *info);
+extern void fb_deferred_io_cleanup(struct fb_info *info);
+extern int fb_deferred_io_fsync(struct file *file, struct dentry *dentry,
+ int datasync);
+
/* drivers/video/fbsysfs.c */
extern struct fb_info *framebuffer_alloc(size_t size, struct device *dev);
extern void framebuffer_release(struct fb_info *info);
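The deferred-I/O pieces added above (struct fb_deferred_io, info->fbdefio and the fb_deferred_io_* helpers) are meant to be wired up by a driver at probe time. A minimal sketch, with a made-up flush callback and delay; lock and pagelist are left for the deferred-I/O core to initialize:

#include <linux/fb.h>
#include <linux/jiffies.h>

/* hypothetical flush callback: push the touched pages out to the device */
static void my_defio_flush(struct fb_info *info, struct list_head *pagelist)
{
        /* walk pagelist and copy each dirty page to the hardware */
}

static struct fb_deferred_io my_defio = {
        .delay          = HZ / 4,       /* batch writes for roughly 250 ms */
        .deferred_io    = my_defio_flush,
};

static void my_enable_defio(struct fb_info *info)
{
        info->fbdefio = &my_defio;
        fb_deferred_io_init(info);      /* undone later by fb_deferred_io_cleanup() */
}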
diff --git a/include/linux/font.h b/include/linux/font.h
index 53b129f07f6..40a24ab41b3 100644
--- a/include/linux/font.h
+++ b/include/linux/font.h
@@ -49,7 +49,8 @@ extern const struct font_desc *find_font(const char *name);
/* Get the default font for a specific screen size */
-extern const struct font_desc *get_default_font(int xres, int yres);
+extern const struct font_desc *get_default_font(int xres, int yres,
+ u32 font_w, u32 font_h);
/* Max. length for the name of a predefined font */
#define MAX_FONT_NAME 32
diff --git a/include/linux/fs.h b/include/linux/fs.h
index bc6d27cecaa..7cf0c54a46a 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -30,6 +30,7 @@
#define SEEK_SET 0 /* seek relative to beginning of file */
#define SEEK_CUR 1 /* seek relative to current file position */
#define SEEK_END 2 /* seek relative to end of file */
+#define SEEK_MAX SEEK_END
/* And dynamically-tunable limits and defaults: */
struct files_stat_struct {
@@ -91,6 +92,7 @@ extern int dir_notify_enable;
/* public flags for file_system_type */
#define FS_REQUIRES_DEV 1
#define FS_BINARY_MOUNTDATA 2
+#define FS_HAS_SUBTYPE 4
#define FS_REVAL_DOT 16384 /* Check the paths ".", ".." for staleness */
#define FS_RENAME_DOES_D_MOVE 32768 /* FS will handle d_move()
* during rename() internally.
@@ -847,11 +849,6 @@ extern int fcntl_getlease(struct file *filp);
/* fs/sync.c */
extern int do_sync_mapping_range(struct address_space *mapping, loff_t offset,
loff_t endbyte, unsigned int flags);
-static inline int do_sync_file_range(struct file *file, loff_t offset,
- loff_t endbyte, unsigned int flags)
-{
- return do_sync_mapping_range(file->f_mapping, offset, endbyte, flags);
-}
/* fs/locks.c */
extern void locks_init_lock(struct file_lock *);
@@ -960,6 +957,12 @@ struct super_block {
/* Granularity of c/m/atime in ns.
Cannot be worse than a second */
u32 s_time_gran;
+
+ /*
+ * Filesystem subtype. If non-empty the filesystem type field
+ * in /proc/mounts will be "type.subtype"
+ */
+ char *s_subtype;
};
extern struct timespec current_fs_time(struct super_block *sb);
@@ -1735,6 +1738,8 @@ extern ssize_t generic_file_sendfile(struct file *, loff_t *, size_t, read_actor
extern void do_generic_mapping_read(struct address_space *mapping,
struct file_ra_state *, struct file *,
loff_t *, read_descriptor_t *, read_actor_t);
+extern int generic_segment_checks(const struct iovec *iov,
+ unsigned long *nr_segs, size_t *count, int access_flags);
/* fs/splice.c */
extern ssize_t generic_file_splice_read(struct file *, loff_t *,
diff --git a/include/linux/futex.h b/include/linux/futex.h
index 3f153b4e156..820125c628c 100644
--- a/include/linux/futex.h
+++ b/include/linux/futex.h
@@ -100,6 +100,35 @@ long do_futex(u32 __user *uaddr, int op, u32 val, unsigned long timeout,
extern int
handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi);
+/*
+ * Futexes are matched on equal values of this key.
+ * The key type depends on whether it's a shared or private mapping.
+ * Don't rearrange members without looking at hash_futex().
+ *
+ * offset is aligned to a multiple of sizeof(u32) (== 4) by definition.
+ * We set bit 0 to indicate if it's an inode-based key.
+ */
+union futex_key {
+ struct {
+ unsigned long pgoff;
+ struct inode *inode;
+ int offset;
+ } shared;
+ struct {
+ unsigned long address;
+ struct mm_struct *mm;
+ int offset;
+ } private;
+ struct {
+ unsigned long word;
+ void *ptr;
+ int offset;
+ } both;
+};
+int get_futex_key(u32 __user *uaddr, union futex_key *key);
+void get_futex_key_refs(union futex_key *key);
+void drop_futex_key_refs(union futex_key *key);
+
#ifdef CONFIG_FUTEX
extern void exit_robust_list(struct task_struct *curr);
extern void exit_pi_state_list(struct task_struct *curr);
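The union futex_key layout and the three helpers are now visible outside kernel/futex.c. A hedged sketch of the intended get/ref/drop pairing; the wrapper name is made up and locking around get_futex_key() is omitted:

#include <linux/futex.h>
#include <linux/types.h>

/* hypothetical wrapper: resolve a user address to a pinned futex key */
static int my_pin_futex_key(u32 __user *uaddr, union futex_key *key)
{
        int ret = get_futex_key(uaddr, key);

        if (ret)
                return ret;
        get_futex_key_refs(key);        /* must be balanced by drop_futex_key_refs() */
        return 0;
}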
diff --git a/include/linux/init.h b/include/linux/init.h
index dbbdbd1bec7..8bc32bb2fce 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -77,7 +77,8 @@ extern char *saved_command_line;
extern unsigned int reset_devices;
/* used by init/main.c */
-extern void setup_arch(char **);
+void setup_arch(char **);
+void prepare_namespace(void);
#endif
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index a2d95ff50e9..795102309bf 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -138,7 +138,7 @@ extern struct group_info init_groups;
.journal_info = NULL, \
.cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \
.fs_excl = ATOMIC_INIT(0), \
- .pi_lock = SPIN_LOCK_UNLOCKED, \
+ .pi_lock = __SPIN_LOCK_UNLOCKED(tsk.pi_lock), \
INIT_TRACE_IRQFLAGS \
INIT_LOCKDEP \
}
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 0319f665dd3..f7b01b9a35b 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -44,6 +44,9 @@
* IRQF_TIMER - Flag to mark this interrupt as timer interrupt
* IRQF_PERCPU - Interrupt is per cpu
* IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing
+ * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is
+ * registered first in a shared interrupt is considered for
+ * performance reasons)
*/
#define IRQF_DISABLED 0x00000020
#define IRQF_SAMPLE_RANDOM 0x00000040
@@ -52,22 +55,29 @@
#define IRQF_TIMER 0x00000200
#define IRQF_PERCPU 0x00000400
#define IRQF_NOBALANCING 0x00000800
+#define IRQF_IRQPOLL 0x00001000
/*
- * Migration helpers. Scheduled for removal in 1/2007
+ * Migration helpers. Scheduled for removal in 9/2007
* Do not use for new code !
*/
-#define SA_INTERRUPT IRQF_DISABLED
-#define SA_SAMPLE_RANDOM IRQF_SAMPLE_RANDOM
-#define SA_SHIRQ IRQF_SHARED
-#define SA_PROBEIRQ IRQF_PROBE_SHARED
-#define SA_PERCPU IRQF_PERCPU
-
-#define SA_TRIGGER_LOW IRQF_TRIGGER_LOW
-#define SA_TRIGGER_HIGH IRQF_TRIGGER_HIGH
-#define SA_TRIGGER_FALLING IRQF_TRIGGER_FALLING
-#define SA_TRIGGER_RISING IRQF_TRIGGER_RISING
-#define SA_TRIGGER_MASK IRQF_TRIGGER_MASK
+static inline
+unsigned long __deprecated deprecated_irq_flag(unsigned long flag)
+{
+ return flag;
+}
+
+#define SA_INTERRUPT deprecated_irq_flag(IRQF_DISABLED)
+#define SA_SAMPLE_RANDOM deprecated_irq_flag(IRQF_SAMPLE_RANDOM)
+#define SA_SHIRQ deprecated_irq_flag(IRQF_SHARED)
+#define SA_PROBEIRQ deprecated_irq_flag(IRQF_PROBE_SHARED)
+#define SA_PERCPU deprecated_irq_flag(IRQF_PERCPU)
+
+#define SA_TRIGGER_LOW deprecated_irq_flag(IRQF_TRIGGER_LOW)
+#define SA_TRIGGER_HIGH deprecated_irq_flag(IRQF_TRIGGER_HIGH)
+#define SA_TRIGGER_FALLING deprecated_irq_flag(IRQF_TRIGGER_FALLING)
+#define SA_TRIGGER_RISING deprecated_irq_flag(IRQF_TRIGGER_RISING)
+#define SA_TRIGGER_MASK deprecated_irq_flag(IRQF_TRIGGER_MASK)
typedef irqreturn_t (*irq_handler_t)(int, void *);
@@ -83,11 +93,11 @@ struct irqaction {
};
extern irqreturn_t no_action(int cpl, void *dev_id);
-extern int request_irq(unsigned int, irq_handler_t handler,
+extern int __must_check request_irq(unsigned int, irq_handler_t handler,
unsigned long, const char *, void *);
extern void free_irq(unsigned int, void *);
-extern int devm_request_irq(struct device *dev, unsigned int irq,
+extern int __must_check devm_request_irq(struct device *dev, unsigned int irq,
irq_handler_t handler, unsigned long irqflags,
const char *devname, void *dev_id);
extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);
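With request_irq() and devm_request_irq() now tagged __must_check, callers have to propagate failures instead of ignoring them. A minimal sketch; the handler, device name and flags are illustrative:

#include <linux/interrupt.h>

static irqreturn_t my_irq_handler(int irq, void *dev_id)
{
        /* acknowledge the device interrupt here */
        return IRQ_HANDLED;
}

static int my_setup_irq(unsigned int irq, void *dev)
{
        int err;

        err = request_irq(irq, my_irq_handler, IRQF_SHARED, "mydev", dev);
        if (err)                /* ignoring this now triggers a compiler warning */
                return err;
        return 0;
}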
diff --git a/include/linux/ioctl32.h b/include/linux/ioctl32.h
deleted file mode 100644
index 948809d9991..00000000000
--- a/include/linux/ioctl32.h
+++ /dev/null
@@ -1,17 +0,0 @@
-#ifndef IOCTL32_H
-#define IOCTL32_H 1
-
-#include <linux/compiler.h> /* for __deprecated */
-
-struct file;
-
-typedef int (*ioctl_trans_handler_t)(unsigned int, unsigned int,
- unsigned long, struct file *);
-
-struct ioctl_trans {
- unsigned long cmd;
- ioctl_trans_handler_t handler;
- struct ioctl_trans *next;
-};
-
-#endif
diff --git a/include/linux/ipc.h b/include/linux/ipc.h
index 6da6772c19f..1980867a64a 100644
--- a/include/linux/ipc.h
+++ b/include/linux/ipc.h
@@ -92,16 +92,19 @@ extern struct ipc_namespace init_ipc_ns;
#ifdef CONFIG_SYSVIPC
#define INIT_IPC_NS(ns) .ns = &init_ipc_ns,
-extern int copy_ipcs(unsigned long flags, struct task_struct *tsk);
+extern struct ipc_namespace *copy_ipcs(unsigned long flags,
+ struct ipc_namespace *ns);
#else
#define INIT_IPC_NS(ns)
-static inline int copy_ipcs(unsigned long flags, struct task_struct *tsk)
-{ return 0; }
+static inline struct ipc_namespace *copy_ipcs(unsigned long flags,
+ struct ipc_namespace *ns)
+{
+ return ns;
+}
#endif
#ifdef CONFIG_IPC_NS
extern void free_ipc_ns(struct kref *kref);
-extern int unshare_ipcs(unsigned long flags, struct ipc_namespace **ns);
#endif
static inline struct ipc_namespace *get_ipc_ns(struct ipc_namespace *ns)
diff --git a/include/linux/irq.h b/include/linux/irq.h
index a6899402b52..1695054e8c6 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -147,8 +147,6 @@ struct irq_chip {
* @dir: /proc/irq/ procfs entry
* @affinity_entry: /proc/irq/smp_affinity procfs entry on SMP
* @name: flow handler name for /proc/interrupts output
- *
- * Pad this out to 32 bytes for cache and indexing reasons.
*/
struct irq_desc {
irq_flow_handler_t handle_irq;
@@ -175,7 +173,7 @@ struct irq_desc {
struct proc_dir_entry *dir;
#endif
const char *name;
-} ____cacheline_aligned;
+} ____cacheline_internodealigned_in_smp;
extern struct irq_desc irq_desc[NR_IRQS];
diff --git a/include/linux/isdn/capiutil.h b/include/linux/isdn/capiutil.h
index 63bd9cf821a..5a52f2c94f3 100644
--- a/include/linux/isdn/capiutil.h
+++ b/include/linux/isdn/capiutil.h
@@ -187,7 +187,6 @@ typedef struct {
#define CDEBUG_SIZE 1024
#define CDEBUG_GSIZE 4096
-_cdebbuf *cdebbuf_alloc(void);
void cdebbuf_free(_cdebbuf *cdb);
int cdebug_init(void);
void cdebug_exit(void);
diff --git a/include/linux/isdn_divertif.h b/include/linux/isdn_divertif.h
index 0e7e44ce830..07821ca5955 100644
--- a/include/linux/isdn_divertif.h
+++ b/include/linux/isdn_divertif.h
@@ -24,6 +24,10 @@
#define DIVERT_REL_ERR 0x04 /* module not registered */
#define DIVERT_REG_NAME isdn_register_divert
+#ifdef __KERNEL__
+#include <linux/isdnif.h>
+#include <linux/types.h>
+
/***************************************************************/
/* structure exchanging data between isdn hl and divert module */
/***************************************************************/
@@ -40,3 +44,4 @@ typedef struct
/* function register */
/*********************/
extern int DIVERT_REG_NAME(isdn_divert_if *);
+#endif
diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
index 3e3b92dabe3..12178d2c882 100644
--- a/include/linux/kallsyms.h
+++ b/include/linux/kallsyms.h
@@ -30,6 +30,9 @@ extern int sprint_symbol(char *buffer, unsigned long address);
/* Look up a kernel symbol and print it to the kernel messages. */
extern void __print_symbol(const char *fmt, unsigned long address);
+int lookup_symbol_name(unsigned long addr, char *symname);
+int lookup_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name);
+
#else /* !CONFIG_KALLSYMS */
static inline unsigned long kallsyms_lookup_name(const char *name)
@@ -58,6 +61,16 @@ static inline int sprint_symbol(char *buffer, unsigned long addr)
return 0;
}
+static inline int lookup_symbol_name(unsigned long addr, char *symname)
+{
+ return -ERANGE;
+}
+
+static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name)
+{
+ return -ERANGE;
+}
+
/* Stupid that this does nothing, but I didn't create this mess. */
#define __print_symbol(fmt, addr)
#endif /*CONFIG_KALLSYMS*/
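A short illustration of the new lookup_symbol_name() helper; the caller and buffer sizing are illustrative, with KSYM_NAME_LEN coming from kallsyms.h:

#include <linux/kallsyms.h>
#include <linux/kernel.h>

static void my_print_symname(unsigned long addr)
{
        char symname[KSYM_NAME_LEN + 1];

        if (lookup_symbol_name(addr, symname) == 0)
                printk(KERN_INFO "%lx is %s\n", addr, symname);
}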
diff --git a/include/linux/kdebug.h b/include/linux/kdebug.h
new file mode 100644
index 00000000000..5db38d6d8b9
--- /dev/null
+++ b/include/linux/kdebug.h
@@ -0,0 +1,20 @@
+#ifndef _LINUX_KDEBUG_H
+#define _LINUX_KDEBUG_H
+
+#include <asm/kdebug.h>
+
+struct die_args {
+ struct pt_regs *regs;
+ const char *str;
+ long err;
+ int trapnr;
+ int signr;
+};
+
+int register_die_notifier(struct notifier_block *nb);
+int unregister_die_notifier(struct notifier_block *nb);
+
+int notify_die(enum die_val val, const char *str,
+ struct pt_regs *regs, long err, int trap, int sig);
+
+#endif /* _LINUX_KDEBUG_H */
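The new header gives every architecture the same die-notifier entry points. A sketch of a client; the handler body and message are illustrative:

#include <linux/kdebug.h>
#include <linux/notifier.h>
#include <linux/kernel.h>
#include <linux/init.h>

static int my_die_handler(struct notifier_block *nb, unsigned long val, void *data)
{
        struct die_args *args = data;

        printk(KERN_INFO "die event %lu: %s (trap %d, signal %d)\n",
               val, args->str, args->trapnr, args->signr);
        return NOTIFY_DONE;
}

static struct notifier_block my_die_nb = {
        .notifier_call = my_die_handler,
};

static int __init my_init(void)
{
        return register_die_notifier(&my_die_nb);
}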
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index 696e5ec63f7..8c2c7fcd58c 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -7,6 +7,8 @@
#include <linux/linkage.h>
#include <linux/compat.h>
#include <linux/ioport.h>
+#include <linux/elfcore.h>
+#include <linux/elf.h>
#include <asm/kexec.h>
/* Verify architecture specific macros are defined */
@@ -31,6 +33,19 @@
#error KEXEC_ARCH not defined
#endif
+#define KEXEC_NOTE_HEAD_BYTES ALIGN(sizeof(struct elf_note), 4)
+#define KEXEC_CORE_NOTE_NAME "CORE"
+#define KEXEC_CORE_NOTE_NAME_BYTES ALIGN(sizeof(KEXEC_CORE_NOTE_NAME), 4)
+#define KEXEC_CORE_NOTE_DESC_BYTES ALIGN(sizeof(struct elf_prstatus), 4)
+/*
+ * The per-cpu notes area is a list of notes terminated by a "NULL"
+ * note header. For kdump, the code in vmcore.c runs in the context
+ * of the second kernel to combine them into one note.
+ */
+#define KEXEC_NOTE_BYTES ( (KEXEC_NOTE_HEAD_BYTES * 2) + \
+ KEXEC_CORE_NOTE_NAME_BYTES + \
+ KEXEC_CORE_NOTE_DESC_BYTES )
+
/*
* This structure is used to hold the arguments that are used when loading
* kernel binaries.
@@ -136,7 +151,7 @@ extern struct kimage *kexec_crash_image;
/* Location of a reserved region to hold the crash kernel.
*/
extern struct resource crashk_res;
-typedef u32 note_buf_t[MAX_NOTE_BYTES/4];
+typedef u32 note_buf_t[KEXEC_NOTE_BYTES/4];
extern note_buf_t *crash_notes;
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 769be39b968..23adf6075ae 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -78,7 +78,7 @@ struct kprobe {
kprobe_opcode_t *addr;
/* Allow user to indicate symbol name of the probe point */
- char *symbol_name;
+ const char *symbol_name;
/* Offset into the symbol */
unsigned int offset;
@@ -123,12 +123,18 @@ DECLARE_PER_CPU(struct kprobe *, current_kprobe);
DECLARE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
#ifdef ARCH_SUPPORTS_KRETPROBES
-extern void arch_prepare_kretprobe(struct kretprobe *rp, struct pt_regs *regs);
+extern void arch_prepare_kretprobe(struct kretprobe_instance *ri,
+ struct pt_regs *regs);
+extern int arch_trampoline_kprobe(struct kprobe *p);
#else /* ARCH_SUPPORTS_KRETPROBES */
static inline void arch_prepare_kretprobe(struct kretprobe *rp,
struct pt_regs *regs)
{
}
+static inline int arch_trampoline_kprobe(struct kprobe *p)
+{
+ return 0;
+}
#endif /* ARCH_SUPPORTS_KRETPROBES */
/*
* Function-return probe -
@@ -157,6 +163,16 @@ struct kretprobe_instance {
struct task_struct *task;
};
+static inline void kretprobe_assert(struct kretprobe_instance *ri,
+ unsigned long orig_ret_address, unsigned long trampoline_address)
+{
+ if (!orig_ret_address || (orig_ret_address == trampoline_address)) {
+ printk("kretprobe BUG!: Processing kretprobe %p @ %p\n",
+ ri->rp, ri->rp->kp.addr);
+ BUG();
+ }
+}
+
extern spinlock_t kretprobe_lock;
extern struct mutex kprobe_mutex;
extern int arch_prepare_kprobe(struct kprobe *p);
@@ -199,8 +215,6 @@ void jprobe_return(void);
int register_kretprobe(struct kretprobe *rp);
void unregister_kretprobe(struct kretprobe *rp);
-struct kretprobe_instance *get_free_rp_inst(struct kretprobe *rp);
-void add_rp_inst(struct kretprobe_instance *ri);
void kprobe_flush_task(struct task_struct *tk);
void recycle_rp_inst(struct kretprobe_instance *ri, struct hlist_head *head);
#else /* CONFIG_KPROBES */
diff --git a/include/linux/list.h b/include/linux/list.h
index f9d71eab05e..9202703be2a 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -426,6 +426,17 @@ static inline void list_splice_init_rcu(struct list_head *list,
container_of(ptr, type, member)
/**
+ * list_first_entry - get the first element from a list
+ * @ptr: the list head to take the element from.
+ * @type: the type of the struct this is embedded in.
+ * @member: the name of the list_struct within the struct.
+ *
+ * Note that the list is expected to be non-empty.
+ */
+#define list_first_entry(ptr, type, member) \
+ list_entry((ptr)->next, type, member)
+
+/**
* list_for_each - iterate over a list
* @pos: the &struct list_head to use as a loop cursor.
* @head: the head for your list.
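A short usage example for the new list_first_entry() macro, with a made-up item type; note the explicit emptiness check, since the macro itself assumes a non-empty list:

#include <linux/list.h>

struct my_item {
        int                     value;
        struct list_head        node;
};

static int my_first_value(struct list_head *head)
{
        struct my_item *first;

        if (list_empty(head))
                return -1;      /* list_first_entry() must not see an empty list */
        first = list_first_entry(head, struct my_item, node);
        return first->value;
}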
diff --git a/include/linux/loop.h b/include/linux/loop.h
index 191a595055f..0b99b31f017 100644
--- a/include/linux/loop.h
+++ b/include/linux/loop.h
@@ -64,6 +64,8 @@ struct loop_device {
wait_queue_head_t lo_event;
request_queue_t *lo_queue;
+ struct gendisk *lo_disk;
+ struct list_head lo_list;
};
#endif /* __KERNEL__ */
diff --git a/include/linux/mc146818rtc.h b/include/linux/mc146818rtc.h
index bdc01127dce..580b3f4956e 100644
--- a/include/linux/mc146818rtc.h
+++ b/include/linux/mc146818rtc.h
@@ -22,8 +22,15 @@ extern spinlock_t rtc_lock; /* serialize CMOS RAM access */
/* Some RTCs extend the mc146818 register set to support alarms of more
* than 24 hours in the future; or dates that include a century code.
* This platform_data structure can pass this information to the driver.
+ *
+ * Also, some platforms need suspend()/resume() hooks to kick in special
+ * handling of wake alarms, e.g. activating ACPI BIOS hooks or setting up
+ * a separate wakeup alarm used by some almost-clone chips.
*/
struct cmos_rtc_board_info {
+ void (*wake_on)(struct device *dev);
+ void (*wake_off)(struct device *dev);
+
u8 rtc_day_alarm; /* zero, or register index */
u8 rtc_mon_alarm; /* zero, or register index */
u8 rtc_century; /* zero, or register index */
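A hedged sketch of board code filling in the new wake hooks; the hook bodies and names are placeholders:

#include <linux/mc146818rtc.h>
#include <linux/device.h>

/* hypothetical platform hooks that arm/disarm an auxiliary wake alarm */
static void my_rtc_wake_on(struct device *dev)
{
        /* e.g. poke an ACPI or board-specific wake enable */
}

static void my_rtc_wake_off(struct device *dev)
{
        /* disable it again on resume */
}

static struct cmos_rtc_board_info my_rtc_info = {
        .wake_on        = my_rtc_wake_on,
        .wake_off       = my_rtc_wake_off,
};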
diff --git a/include/linux/mnt_namespace.h b/include/linux/mnt_namespace.h
index 4af0b1fc282..1fa4d9813b3 100644
--- a/include/linux/mnt_namespace.h
+++ b/include/linux/mnt_namespace.h
@@ -14,10 +14,9 @@ struct mnt_namespace {
int event;
};
-extern int copy_mnt_ns(int, struct task_struct *);
-extern void __put_mnt_ns(struct mnt_namespace *ns);
-extern struct mnt_namespace *dup_mnt_ns(struct task_struct *,
+extern struct mnt_namespace *copy_mnt_ns(int, struct mnt_namespace *,
struct fs_struct *);
+extern void __put_mnt_ns(struct mnt_namespace *ns);
static inline void put_mnt_ns(struct mnt_namespace *ns)
{
diff --git a/include/linux/module.h b/include/linux/module.h
index f0b0faf42d5..6d3dc9c4ff9 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -370,16 +370,14 @@ struct module *module_text_address(unsigned long addr);
struct module *__module_text_address(unsigned long addr);
int is_module_address(unsigned long addr);
-/* Returns module and fills in value, defined and namebuf, or NULL if
+/* Returns 0 and fills in value, type, name, module_name and exported, or -ERANGE if
symnum out of range. */
-struct module *module_get_kallsym(unsigned int symnum, unsigned long *value,
- char *type, char *name, size_t namelen);
+int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
+ char *name, char *module_name, int *exported);
/* Look for this name: can be of form module:name. */
unsigned long module_kallsyms_lookup_name(const char *name);
-int is_exported(const char *name, const struct module *mod);
-
extern void __module_put_and_exit(struct module *mod, long code)
__attribute__((noreturn));
#define module_put_and_exit(code) __module_put_and_exit(THIS_MODULE, code);
@@ -456,6 +454,8 @@ const char *module_address_lookup(unsigned long addr,
unsigned long *symbolsize,
unsigned long *offset,
char **modname);
+int lookup_module_symbol_name(unsigned long addr, char *symname);
+int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name);
/* For extable.c to search modules' exception tables. */
const struct exception_table_entry *search_module_extables(unsigned long addr);
@@ -527,20 +527,24 @@ static inline const char *module_address_lookup(unsigned long addr,
return NULL;
}
-static inline struct module *module_get_kallsym(unsigned int symnum,
- unsigned long *value,
- char *type, char *name,
- size_t namelen)
+static inline int lookup_module_symbol_name(unsigned long addr, char *symname)
{
- return NULL;
+ return -ERANGE;
}
-static inline unsigned long module_kallsyms_lookup_name(const char *name)
+static inline int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name)
{
- return 0;
+ return -ERANGE;
}
-static inline int is_exported(const char *name, const struct module *mod)
+static inline int module_get_kallsym(unsigned int symnum, unsigned long *value,
+ char *type, char *name,
+ char *module_name, int *exported)
+{
+ return -ERANGE;
+}
+
+static inline unsigned long module_kallsyms_lookup_name(const char *name)
{
return 0;
}
diff --git a/include/linux/msdos_fs.h b/include/linux/msdos_fs.h
index fa253fa73aa..0e09c005dda 100644
--- a/include/linux/msdos_fs.h
+++ b/include/linux/msdos_fs.h
@@ -205,7 +205,8 @@ struct fat_mount_options {
numtail:1, /* Does first alias have a numeric '~1' type tail? */
atari:1, /* Use Atari GEMDOS variation of MS-DOS fs */
flush:1, /* write things quickly */
- nocase:1; /* Does this need case conversion? 0=need case conversion*/
+ nocase:1, /* Does this need case conversion? 0=need case conversion*/
+ usefree:1; /* Use free_clusters for FAT32 */
};
#define FAT_HASH_BITS 8
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index c95d5e64254..52b4378311c 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -82,7 +82,7 @@ struct nfs_server {
struct rpc_clnt * client_acl; /* ACL RPC client handle */
struct nfs_iostats * io_stats; /* I/O statistics */
struct backing_dev_info backing_dev_info;
- atomic_t writeback; /* number of writeback pages */
+ atomic_long_t writeback; /* number of writeback pages */
int flags; /* various flags */
unsigned int caps; /* server capabilities */
unsigned int rsize; /* read size */
diff --git a/include/linux/nsproxy.h b/include/linux/nsproxy.h
index 0b9f0dc30d6..189e0dc993a 100644
--- a/include/linux/nsproxy.h
+++ b/include/linux/nsproxy.h
@@ -31,10 +31,11 @@ struct nsproxy {
};
extern struct nsproxy init_nsproxy;
-struct nsproxy *dup_namespaces(struct nsproxy *orig);
int copy_namespaces(int flags, struct task_struct *tsk);
void get_task_namespaces(struct task_struct *tsk);
void free_nsproxy(struct nsproxy *ns);
+int unshare_nsproxy_namespaces(unsigned long, struct nsproxy **,
+ struct fs_struct *);
static inline void put_nsproxy(struct nsproxy *ns)
{
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index b4def5e083e..8a83537d697 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -11,6 +11,7 @@
#include <linux/compiler.h>
#include <asm/uaccess.h>
#include <linux/gfp.h>
+#include <linux/bitops.h>
/*
* Bits in mapping->flags. The lower __GFP_BITS_SHIFT bits are the page
@@ -19,6 +20,16 @@
#define AS_EIO (__GFP_BITS_SHIFT + 0) /* IO error on async write */
#define AS_ENOSPC (__GFP_BITS_SHIFT + 1) /* ENOSPC on async write */
+static inline void mapping_set_error(struct address_space *mapping, int error)
+{
+ if (error) {
+ if (error == -ENOSPC)
+ set_bit(AS_ENOSPC, &mapping->flags);
+ else
+ set_bit(AS_EIO, &mapping->flags);
+ }
+}
+
static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
{
return (__force gfp_t)mapping->flags & __GFP_BITS_MASK;
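mapping_set_error() centralizes the AS_EIO/AS_ENOSPC bookkeeping that individual writeback paths used to open-code. A minimal sketch of a caller; the helper name is made up:

#include <linux/pagemap.h>

/* hypothetical end-of-writeout hook */
static void my_end_writeback(struct address_space *mapping, int error)
{
        /* no-op for error == 0; -ENOSPC and other errors set different bits */
        mapping_set_error(mapping, error);
}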
diff --git a/include/linux/parport.h b/include/linux/parport.h
index 80682aaa8f1..9cdd6943e01 100644
--- a/include/linux/parport.h
+++ b/include/linux/parport.h
@@ -279,6 +279,10 @@ struct parport {
int dma;
int muxport; /* which muxport (if any) this is */
int portnum; /* which physical parallel port (not mux) */
+ struct device *dev; /* Physical device associated with IO/DMA.
+ * This may unfortunately be null if the
+ * port has a legacy driver.
+ */
struct parport *physport;
/* If this is a non-default mux
@@ -289,7 +293,7 @@ struct parport {
following structure members are
meaningless: devices, cad, muxsel,
waithead, waittail, flags, pdir,
- ieee1284, *_lock.
+ dev, ieee1284, *_lock.
If this is a default mux parport, or
there is no mux involved, this points to
@@ -302,7 +306,7 @@ struct parport {
struct pardevice *waithead;
struct pardevice *waittail;
-
+
struct list_head list;
unsigned int flags;
diff --git a/include/linux/parport_pc.h b/include/linux/parport_pc.h
index 1cc0f6b1a49..ea8c6d84996 100644
--- a/include/linux/parport_pc.h
+++ b/include/linux/parport_pc.h
@@ -38,7 +38,6 @@ struct parport_pc_private {
/* buffer suitable for DMA, if DMA enabled */
char *dma_buf;
dma_addr_t dma_handle;
- struct pci_dev *dev;
struct list_head list;
struct parport *port;
};
@@ -232,7 +231,7 @@ extern int parport_pc_claim_resources(struct parport *p);
extern struct parport *parport_pc_probe_port (unsigned long base,
unsigned long base_hi,
int irq, int dma,
- struct pci_dev *dev);
+ struct device *dev);
extern void parport_pc_unregister_port (struct parport *p);
#endif
diff --git a/include/linux/phantom.h b/include/linux/phantom.h
new file mode 100644
index 00000000000..d3ebbfae690
--- /dev/null
+++ b/include/linux/phantom.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2005-2007 Jiri Slaby <jirislaby@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __PHANTOM_H
+#define __PHANTOM_H
+
+#include <asm/types.h>
+
+/* PHN_(G/S)ET_REG param */
+struct phm_reg {
+ __u32 reg;
+ __u32 value;
+};
+
+/* PHN_(G/S)ET_REGS param */
+struct phm_regs {
+ __u32 count;
+ __u32 mask;
+ __u32 values[8];
+};
+
+#define PH_IOC_MAGIC 'p'
+#define PHN_GET_REG _IOWR(PH_IOC_MAGIC, 0, struct phm_reg *)
+#define PHN_SET_REG _IOW (PH_IOC_MAGIC, 1, struct phm_reg *)
+#define PHN_GET_REGS _IOWR(PH_IOC_MAGIC, 2, struct phm_regs *)
+#define PHN_SET_REGS _IOW (PH_IOC_MAGIC, 3, struct phm_regs *)
+#define PH_IOC_MAXNR 3
+
+#define PHN_CONTROL 0x6 /* control byte in iaddr space */
+#define PHN_CTL_AMP 0x1 /* switch after torques change */
+#define PHN_CTL_BUT 0x2 /* is button switched */
+#define PHN_CTL_IRQ 0x10 /* is irq enabled */
+
+#define PHN_ZERO_FORCE 2048 /* zero torque on motor */
+
+#endif
diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h
index 2833806d42c..169c6c24209 100644
--- a/include/linux/pid_namespace.h
+++ b/include/linux/pid_namespace.h
@@ -29,7 +29,7 @@ static inline void get_pid_ns(struct pid_namespace *ns)
kref_get(&ns->kref);
}
-extern int copy_pid_ns(int flags, struct task_struct *tsk);
+extern struct pid_namespace *copy_pid_ns(int flags, struct pid_namespace *ns);
extern void free_pid_ns(struct kref *kref);
static inline void put_pid_ns(struct pid_namespace *ns)
diff --git a/include/linux/pmu.h b/include/linux/pmu.h
index b0952e532ed..37ca57392ad 100644
--- a/include/linux/pmu.h
+++ b/include/linux/pmu.h
@@ -225,4 +225,12 @@ extern unsigned int pmu_power_flags;
/* Backlight */
extern void pmu_backlight_init(void);
+/* some code needs to know if the PMU was suspended for hibernation */
+#ifdef CONFIG_PM
+extern int pmu_sys_suspended;
+#else
+/* if power management is not configured it can't be suspended */
+#define pmu_sys_suspended 0
+#endif
+
#endif /* __KERNEL__ */
diff --git a/include/linux/pnp.h b/include/linux/pnp.h
index 9a5226f0f16..2a1897e6f93 100644
--- a/include/linux/pnp.h
+++ b/include/linux/pnp.h
@@ -177,6 +177,7 @@ static inline void pnp_set_card_drvdata (struct pnp_card_link *pcard, void *data
struct pnp_dev {
struct device dev; /* Driver Model device interface */
+ u64 dma_mask;
unsigned char number; /* used as an index, must be unique */
int status;
@@ -363,6 +364,7 @@ int pnp_add_device(struct pnp_dev *dev);
int pnp_device_attach(struct pnp_dev *pnp_dev);
void pnp_device_detach(struct pnp_dev *pnp_dev);
extern struct list_head pnp_global;
+extern int pnp_platform_devices;
/* multidevice card support */
int pnp_add_card(struct pnp_card *card);
@@ -410,6 +412,7 @@ static inline int pnp_init_device(struct pnp_dev *dev) { return -ENODEV; }
static inline int pnp_add_device(struct pnp_dev *dev) { return -ENODEV; }
static inline int pnp_device_attach(struct pnp_dev *pnp_dev) { return -ENODEV; }
static inline void pnp_device_detach(struct pnp_dev *pnp_dev) { ; }
+#define pnp_platform_devices 0
/* multidevice card support */
static inline int pnp_add_card(struct pnp_card *card) { return -ENODEV; }
diff --git a/include/linux/poison.h b/include/linux/poison.h
index 95f518b1768..d93c300a344 100644
--- a/include/linux/poison.h
+++ b/include/linux/poison.h
@@ -15,8 +15,8 @@
* Magic nums for obj red zoning.
* Placed in the first word before and the first word after an obj.
*/
-#define RED_INACTIVE 0x5A2CF071UL /* when obj is inactive */
-#define RED_ACTIVE 0x170FC2A5UL /* when obj is active */
+#define RED_INACTIVE 0x09F911029D74E35BULL /* when obj is inactive */
+#define RED_ACTIVE 0xD84156C5635688C0ULL /* when obj is active */
#define SLUB_RED_INACTIVE 0xbb
#define SLUB_RED_ACTIVE 0xcc
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
index f4f7a63cae1..3469f96bc8b 100644
--- a/include/linux/proc_fs.h
+++ b/include/linux/proc_fs.h
@@ -106,6 +106,9 @@ int task_statm(struct mm_struct *, int *, int *, int *, int *);
char *task_mem(struct mm_struct *, char *);
void clear_refs_smap(struct mm_struct *mm);
+struct proc_dir_entry *de_get(struct proc_dir_entry *de);
+void de_put(struct proc_dir_entry *de);
+
extern struct proc_dir_entry *create_proc_entry(const char *name, mode_t mode,
struct proc_dir_entry *parent);
extern void remove_proc_entry(const char *name, struct proc_dir_entry *parent);
diff --git a/include/linux/quota.h b/include/linux/quota.h
index 77db80a953d..62439828395 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -44,8 +44,6 @@
typedef __kernel_uid32_t qid_t; /* Type in which we store ids in memory */
typedef __u64 qsize_t; /* Type in which we store sizes */
-extern spinlock_t dq_data_lock;
-
/* Size of blocks in which are counted size limits */
#define QUOTABLOCK_BITS 10
#define QUOTABLOCK_SIZE (1 << QUOTABLOCK_BITS)
@@ -139,6 +137,8 @@ struct if_dqinfo {
#include <linux/dqblk_v1.h>
#include <linux/dqblk_v2.h>
+extern spinlock_t dq_data_lock;
+
/* Maximal numbers of writes for quota operation (insert/delete/update)
* (over VFS all formats) */
#define DQUOT_INIT_ALLOC max(V1_INIT_ALLOC, V2_INIT_ALLOC)
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index 90c23f690c0..5110201a415 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -37,9 +37,6 @@ extern int dquot_release(struct dquot *dquot);
extern int dquot_commit_info(struct super_block *sb, int type);
extern int dquot_mark_dquot_dirty(struct dquot *dquot);
-int remove_inode_dquot_ref(struct inode *inode, int type,
- struct list_head *tofree_head);
-
extern int vfs_quota_on(struct super_block *sb, int type, int format_id, char *path);
extern int vfs_quota_on_mount(struct super_block *sb, char *qf_name,
int format_id, int type);
diff --git a/include/linux/reiserfs_fs_sb.h b/include/linux/reiserfs_fs_sb.h
index 3a28742d86f..1e5488ede03 100644
--- a/include/linux/reiserfs_fs_sb.h
+++ b/include/linux/reiserfs_fs_sb.h
@@ -401,9 +401,10 @@ struct reiserfs_sb_info {
int reserved_blocks; /* amount of blocks reserved for further allocations */
spinlock_t bitmap_lock; /* this lock on now only used to protect reserved_blocks variable */
struct dentry *priv_root; /* root of /.reiserfs_priv */
+#ifdef CONFIG_REISERFS_FS_XATTR
struct dentry *xattr_root; /* root of /.reiserfs_priv/.xa */
struct rw_semaphore xattr_dir_sem;
-
+#endif
int j_errno;
#ifdef CONFIG_QUOTA
char *s_qf_names[MAXQUOTAS];
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index 5e22d4510d1..6d5e4a46781 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -4,7 +4,7 @@
* service. It is used with both the legacy mc146818 and also EFI
* Struct rtc_time and first 12 ioctl by Paul Gortmaker, 1996 - separated out
* from <linux/mc146818rtc.h> to this file for 2.4 kernels.
- *
+ *
* Copyright (C) 1999 Hewlett-Packard Co.
* Copyright (C) 1999 Stephane Eranian <eranian@hpl.hp.com>
*/
@@ -13,7 +13,7 @@
/*
* The struct used to pass data via the following ioctl. Similar to the
- * struct tm in <time.h>, but it needs to be here so that the kernel
+ * struct tm in <time.h>, but it needs to be here so that the kernel
* source is self contained, allowing cross-compiles, etc. etc.
*/
@@ -50,7 +50,7 @@ struct rtc_wkalrm {
* pll_value*pll_posmult/pll_clock
* -ve pll_value means clock will run slower by
* pll_value*pll_negmult/pll_clock
- */
+ */
struct rtc_pll_info {
int pll_ctrl; /* placeholder for fancier control */
@@ -106,7 +106,6 @@ extern int rtc_year_days(unsigned int day, unsigned int month, unsigned int year
extern int rtc_valid_tm(struct rtc_time *tm);
extern int rtc_tm_to_time(struct rtc_time *tm, unsigned long *time);
extern void rtc_time_to_tm(unsigned long time, struct rtc_time *tm);
-extern void rtc_merge_alarm(struct rtc_time *now, struct rtc_time *alarm);
#include <linux/device.h>
#include <linux/seq_file.h>
@@ -136,7 +135,7 @@ struct rtc_task;
struct rtc_device
{
- struct class_device class_dev;
+ struct device dev;
struct module *owner;
int id;
@@ -145,7 +144,6 @@ struct rtc_device
const struct rtc_class_ops *ops;
struct mutex ops_lock;
- struct class_device *rtc_dev;
struct cdev char_dev;
struct mutex char_lock;
@@ -169,35 +167,34 @@ struct rtc_device
unsigned int uie_timer_active:1;
#endif
};
-#define to_rtc_device(d) container_of(d, struct rtc_device, class_dev)
+#define to_rtc_device(d) container_of(d, struct rtc_device, dev)
extern struct rtc_device *rtc_device_register(const char *name,
struct device *dev,
const struct rtc_class_ops *ops,
struct module *owner);
-extern void rtc_device_unregister(struct rtc_device *rdev);
-extern int rtc_interface_register(struct class_interface *intf);
+extern void rtc_device_unregister(struct rtc_device *rtc);
-extern int rtc_read_time(struct class_device *class_dev, struct rtc_time *tm);
-extern int rtc_set_time(struct class_device *class_dev, struct rtc_time *tm);
-extern int rtc_set_mmss(struct class_device *class_dev, unsigned long secs);
-extern int rtc_read_alarm(struct class_device *class_dev,
+extern int rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm);
+extern int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm);
+extern int rtc_set_mmss(struct rtc_device *rtc, unsigned long secs);
+extern int rtc_read_alarm(struct rtc_device *rtc,
struct rtc_wkalrm *alrm);
-extern int rtc_set_alarm(struct class_device *class_dev,
+extern int rtc_set_alarm(struct rtc_device *rtc,
struct rtc_wkalrm *alrm);
-extern void rtc_update_irq(struct class_device *class_dev,
+extern void rtc_update_irq(struct rtc_device *rtc,
unsigned long num, unsigned long events);
-extern struct class_device *rtc_class_open(char *name);
-extern void rtc_class_close(struct class_device *class_dev);
+extern struct rtc_device *rtc_class_open(char *name);
+extern void rtc_class_close(struct rtc_device *rtc);
-extern int rtc_irq_register(struct class_device *class_dev,
+extern int rtc_irq_register(struct rtc_device *rtc,
struct rtc_task *task);
-extern void rtc_irq_unregister(struct class_device *class_dev,
+extern void rtc_irq_unregister(struct rtc_device *rtc,
struct rtc_task *task);
-extern int rtc_irq_set_state(struct class_device *class_dev,
+extern int rtc_irq_set_state(struct rtc_device *rtc,
struct rtc_task *task, int enabled);
-extern int rtc_irq_set_freq(struct class_device *class_dev,
+extern int rtc_irq_set_freq(struct rtc_device *rtc,
struct rtc_task *task, int freq);
typedef struct rtc_task {
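With the class_device plumbing gone, in-kernel RTC users operate on struct rtc_device directly. A minimal sketch of reading the time through the reworked API; the device name is illustrative:

#include <linux/rtc.h>
#include <linux/kernel.h>

static void my_show_rtc_time(void)
{
        struct rtc_device *rtc = rtc_class_open("rtc0");
        struct rtc_time tm;

        if (!rtc)
                return;
        if (rtc_read_time(rtc, &tm) == 0)
                printk(KERN_INFO "rtc0 time %02d:%02d:%02d\n",
                       tm.tm_hour, tm.tm_min, tm.tm_sec);
        rtc_class_close(rtc);
}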
diff --git a/include/linux/sched.h b/include/linux/sched.h
index a1707583de4..3d95c480f58 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -194,6 +194,14 @@ extern void sched_init_smp(void);
extern void init_idle(struct task_struct *idle, int cpu);
extern cpumask_t nohz_cpu_mask;
+#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
+extern int select_nohz_load_balancer(int cpu);
+#else
+static inline int select_nohz_load_balancer(int cpu)
+{
+ return 0;
+}
+#endif
/*
* Only dump TASK_* tasks. (0 for all tasks)
@@ -226,6 +234,7 @@ extern void scheduler_tick(void);
extern void softlockup_tick(void);
extern void spawn_softlockup_task(void);
extern void touch_softlockup_watchdog(void);
+extern void touch_all_softlockup_watchdogs(void);
#else
static inline void softlockup_tick(void)
{
@@ -236,6 +245,9 @@ static inline void spawn_softlockup_task(void)
static inline void touch_softlockup_watchdog(void)
{
}
+static inline void touch_all_softlockup_watchdogs(void)
+{
+}
#endif
@@ -668,8 +680,14 @@ struct sched_group {
/*
* CPU power of this group, SCHED_LOAD_SCALE being max power for a
* single CPU. This is read only (except for setup, hotplug CPU).
+ * Note: Never change cpu_power without recomputing its reciprocal
+ */
+ unsigned int __cpu_power;
+ /*
+ * reciprocal value of cpu_power to avoid expensive divides
+ * (see include/linux/reciprocal_div.h)
*/
- unsigned long cpu_power;
+ u32 reciprocal_cpu_power;
};
struct sched_domain {
@@ -801,8 +819,8 @@ struct task_struct {
volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
struct thread_info *thread_info;
atomic_t usage;
- unsigned long flags; /* per process flags, defined below */
- unsigned long ptrace;
+ unsigned int flags; /* per process flags, defined below */
+ unsigned int ptrace;
int lock_depth; /* BKL lock depth */
@@ -825,7 +843,7 @@ struct task_struct {
unsigned long long sched_time; /* sched_clock time spent running */
enum sleep_type sleep_type;
- unsigned long policy;
+ unsigned int policy;
cpumask_t cpus_allowed;
unsigned int time_slice, first_time_slice;
@@ -845,11 +863,11 @@ struct task_struct {
/* task state */
struct linux_binfmt *binfmt;
- long exit_state;
+ int exit_state;
int exit_code, exit_signal;
int pdeath_signal; /* The signal sent when the parent dies */
/* ??? */
- unsigned long personality;
+ unsigned int personality;
unsigned did_exec:1;
pid_t pid;
pid_t tgid;
@@ -881,7 +899,7 @@ struct task_struct {
int __user *set_child_tid; /* CLONE_CHILD_SETTID */
int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
- unsigned long rt_priority;
+ unsigned int rt_priority;
cputime_t utime, stime;
unsigned long nvcsw, nivcsw; /* context switch counts */
struct timespec start_time;
@@ -1641,10 +1659,7 @@ static inline void arch_pick_mmap_layout(struct mm_struct *mm)
extern long sched_setaffinity(pid_t pid, cpumask_t new_mask);
extern long sched_getaffinity(pid_t pid, cpumask_t *mask);
-#include <linux/sysdev.h>
extern int sched_mc_power_savings, sched_smt_power_savings;
-extern struct sysdev_attribute attr_sched_mc_power_savings, attr_sched_smt_power_savings;
-extern int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls);
extern void normalize_rt_tasks(void);
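The sched_group change above replaces cpu_power with __cpu_power plus a cached reciprocal so hot paths can avoid divides. A hedged sketch of keeping the two fields in sync, assuming the reciprocal_value()/reciprocal_divide() helpers from include/linux/reciprocal_div.h; the wrapper names are made up:

#include <linux/sched.h>
#include <linux/reciprocal_div.h>

static void my_set_group_power(struct sched_group *sg, unsigned int power)
{
        sg->__cpu_power = power;
        sg->reciprocal_cpu_power = reciprocal_value(power);
}

/* the expensive divide becomes a multiply and shift */
static u32 my_scale_load(u32 load, struct sched_group *sg)
{
        return reciprocal_divide(load, sg->reciprocal_cpu_power);
}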
diff --git a/include/linux/spi/Kbuild b/include/linux/spi/Kbuild
new file mode 100644
index 00000000000..d375a082986
--- /dev/null
+++ b/include/linux/spi/Kbuild
@@ -0,0 +1 @@
+header-y += spidev.h
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 4f0f8c2e58a..b6bedc3ee95 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -32,11 +32,12 @@ extern struct bus_type spi_bus_type;
* @max_speed_hz: Maximum clock rate to be used with this chip
* (on this board); may be changed by the device's driver.
* The spi_transfer.speed_hz can override this for each transfer.
- * @chip-select: Chipselect, distinguishing chips handled by "master".
+ * @chip_select: Chipselect, distinguishing chips handled by @master.
* @mode: The spi mode defines how data is clocked out and in.
* This may be changed by the device's driver.
- * The "active low" default for chipselect mode can be overridden,
- * as can the "MSB first" default for each word in a transfer.
+ * The "active low" default for chipselect mode can be overridden
+ * (by specifying SPI_CS_HIGH) as can the "MSB first" default for
+ * each word in a transfer (by specifying SPI_LSB_FIRST).
* @bits_per_word: Data transfers involve one or more words; word sizes
* like eight or 12 bits are common. In-memory wordsizes are
* powers of two bytes (e.g. 20 bit samples use 32 bits).
@@ -48,14 +49,18 @@ extern struct bus_type spi_bus_type;
* @controller_state: Controller's runtime state
* @controller_data: Board-specific definitions for controller, such as
* FIFO initialization parameters; from board_info.controller_data
+ * @modalias: Name of the driver to use with this device, or an alias
+ * for that name. This appears in the sysfs "modalias" attribute
+ * for driver coldplugging, and in uevents used for hotplugging
*
- * An spi_device is used to interchange data between an SPI slave
+ * A @spi_device is used to interchange data between an SPI slave
* (usually a discrete chip) and CPU memory.
*
- * In "dev", the platform_data is used to hold information about this
+ * In @dev, the platform_data is used to hold information about this
* device that's meaningful to the device's protocol driver, but not
* to its controller. One example might be an identifier for a chip
- * variant with slightly different functionality.
+ * variant with slightly different functionality; another might be
+ * information about how this particular board wires the chip's pins.
*/
struct spi_device {
struct device dev;
@@ -77,13 +82,15 @@ struct spi_device {
void *controller_data;
const char *modalias;
- // likely need more hooks for more protocol options affecting how
- // the controller talks to each chip, like:
- // - memory packing (12 bit samples into low bits, others zeroed)
- // - priority
- // - drop chipselect after each word
- // - chipselect delays
- // - ...
+ /*
+ * likely need more hooks for more protocol options affecting how
+ * the controller talks to each chip, like:
+ * - memory packing (12 bit samples into low bits, others zeroed)
+ * - priority
+ * - drop chipselect after each word
+ * - chipselect delays
+ * - ...
+ */
};
static inline struct spi_device *to_spi_device(struct device *dev)
@@ -146,6 +153,11 @@ static inline struct spi_driver *to_spi_driver(struct device_driver *drv)
extern int spi_register_driver(struct spi_driver *sdrv);
+/**
+ * spi_unregister_driver - reverse effect of spi_register_driver
+ * @sdrv: the driver to unregister
+ * Context: can sleep
+ */
static inline void spi_unregister_driver(struct spi_driver *sdrv)
{
if (sdrv)
@@ -165,18 +177,20 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
* @setup: updates the device mode and clocking records used by a
* device's SPI controller; protocol code may call this. This
* must fail if an unrecognized or unsupported mode is requested.
+ * It's always safe to call this unless transfers are pending on
+ * the device whose settings are being modified.
* @transfer: adds a message to the controller's transfer queue.
* @cleanup: frees controller-specific state
*
- * Each SPI master controller can communicate with one or more spi_device
+ * Each SPI master controller can communicate with one or more @spi_device
* children. These make a small bus, sharing MOSI, MISO and SCK signals
* but not chip select signals. Each device may be configured to use a
* different clock rate, since those shared signals are ignored unless
* the chip is selected.
*
* The driver for an SPI controller manages access to those devices through
- * a queue of spi_message transactions, copyin data between CPU memory and
- * an SPI slave device). For each such message it queues, it calls the
+ * a queue of spi_message transactions, copying data between CPU memory and
+ * an SPI slave device. For each such message it queues, it calls the
* message's completion function when the transaction completes.
*/
struct spi_master {
@@ -280,27 +294,27 @@ extern struct spi_master *spi_busnum_to_master(u16 busnum);
* struct spi_transfer - a read/write buffer pair
* @tx_buf: data to be written (dma-safe memory), or NULL
* @rx_buf: data to be read (dma-safe memory), or NULL
- * @tx_dma: DMA address of tx_buf, if spi_message.is_dma_mapped
- * @rx_dma: DMA address of rx_buf, if spi_message.is_dma_mapped
+ * @tx_dma: DMA address of tx_buf, if @spi_message.is_dma_mapped
+ * @rx_dma: DMA address of rx_buf, if @spi_message.is_dma_mapped
* @len: size of rx and tx buffers (in bytes)
* @speed_hz: Select a speed other than the device default for this
- * transfer. If 0 the default (from spi_device) is used.
+ * transfer. If 0 the default (from @spi_device) is used.
* @bits_per_word: select a bits_per_word other than the device default
- * for this transfer. If 0 the default (from spi_device) is used.
+ * for this transfer. If 0 the default (from @spi_device) is used.
* @cs_change: affects chipselect after this transfer completes
* @delay_usecs: microseconds to delay after this transfer before
* (optionally) changing the chipselect status, then starting
- * the next transfer or completing this spi_message.
- * @transfer_list: transfers are sequenced through spi_message.transfers
+ * the next transfer or completing this @spi_message.
+ * @transfer_list: transfers are sequenced through @spi_message.transfers
*
* SPI transfers always write the same number of bytes as they read.
- * Protocol drivers should always provide rx_buf and/or tx_buf.
+ * Protocol drivers should always provide @rx_buf and/or @tx_buf.
* In some cases, they may also want to provide DMA addresses for
* the data being transferred; that may reduce overhead, when the
* underlying driver uses dma.
*
* If the transmit buffer is null, zeroes will be shifted out
- * while filling rx_buf. If the receive buffer is null, the data
+ * while filling @rx_buf. If the receive buffer is null, the data
* shifted in will be discarded. Only "len" bytes shift out (or in).
* It's an error to try to shift out a partial word. (For example, by
* shifting out three bytes with word size of sixteen or twenty bits;
@@ -309,7 +323,7 @@ extern struct spi_master *spi_busnum_to_master(u16 busnum);
* In-memory data values are always in native CPU byte order, translated
* from the wire byte order (big-endian except with SPI_LSB_FIRST). So
* for example when bits_per_word is sixteen, buffers are 2N bytes long
- * and hold N sixteen bit words in CPU byte order.
+ * (@len = 2N) and hold N sixteen bit words in CPU byte order.
*
* When the word size of the SPI transfer is not a power-of-two multiple
* of eight bits, those in-memory words include extra bits. In-memory
@@ -318,7 +332,7 @@ extern struct spi_master *spi_busnum_to_master(u16 busnum);
*
* All SPI transfers start with the relevant chipselect active. Normally
* it stays selected until after the last transfer in a message. Drivers
- * can affect the chipselect signal using cs_change:
+ * can affect the chipselect signal using cs_change.
*
* (i) If the transfer isn't the last one in the message, this flag is
* used to make the chipselect briefly go inactive in the middle of the
@@ -372,7 +386,7 @@ struct spi_transfer {
* @queue: for use by whichever driver currently owns the message
* @state: for use by whichever driver currently owns the message
*
- * An spi_message is used to execute an atomic sequence of data transfers,
+ * A @spi_message is used to execute an atomic sequence of data transfers,
* each represented by a struct spi_transfer. The sequence is "atomic"
* in the sense that no other spi_message may use that SPI bus until that
* sequence completes. On some systems, many such sequences can execute as
@@ -464,8 +478,9 @@ static inline void spi_message_free(struct spi_message *m)
}
/**
- * spi_setup -- setup SPI mode and clock rate
+ * spi_setup - setup SPI mode and clock rate
* @spi: the device whose settings are being modified
+ * Context: can sleep
*
* SPI protocol drivers may need to update the transfer mode if the
* device doesn't work with the mode 0 default. They may likewise need
@@ -474,7 +489,7 @@ static inline void spi_message_free(struct spi_message *m)
* The changes take effect the next time the device is selected and data
* is transferred to or from it.
*
- * Note that this call wil fail if the protocol driver specifies an option
+ * Note that this call will fail if the protocol driver specifies an option
* that the underlying controller or its driver does not support. For
* example, not all hardware supports wire transfers using nine bit words,
* LSB-first wire encoding, or active-high chipselects.
@@ -487,9 +502,10 @@ spi_setup(struct spi_device *spi)
/**
- * spi_async -- asynchronous SPI transfer
+ * spi_async - asynchronous SPI transfer
* @spi: device with which data will be exchanged
* @message: describes the data transfers, including completion callback
+ * Context: any (irqs may be blocked, etc)
*
* This call may be used in_irq and other contexts which can't sleep,
* as well as from task contexts which can sleep.
@@ -535,6 +551,7 @@ extern int spi_sync(struct spi_device *spi, struct spi_message *message);
* @spi: device to which data will be written
* @buf: data buffer
* @len: data buffer size
+ * Context: can sleep
*
* This writes the buffer and returns zero or a negative error code.
* Callable only from contexts that can sleep.
@@ -558,8 +575,9 @@ spi_write(struct spi_device *spi, const u8 *buf, size_t len)
* @spi: device from which data will be read
* @buf: data buffer
* @len: data buffer size
+ * Context: can sleep
*
- * This writes the buffer and returns zero or a negative error code.
+ * This reads the buffer and returns zero or a negative error code.
* Callable only from contexts that can sleep.
*/
static inline int
@@ -585,6 +603,7 @@ extern int spi_write_then_read(struct spi_device *spi,
* spi_w8r8 - SPI synchronous 8 bit write followed by 8 bit read
* @spi: device with which data will be exchanged
* @cmd: command to be written before data is read back
+ * Context: can sleep
*
* This returns the (unsigned) eight bit number returned by the
* device, or else a negative error code. Callable only from
@@ -605,6 +624,7 @@ static inline ssize_t spi_w8r8(struct spi_device *spi, u8 cmd)
* spi_w8r16 - SPI synchronous 8 bit write followed by 16 bit read
* @spi: device with which data will be exchanged
* @cmd: command to be written before data is read back
+ * Context: can sleep
*
* This returns the (unsigned) sixteen bit number returned by the
* device, or else a negative error code. Callable only from
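As a usage note for the kerneldoc touched above, a protocol driver would typically use the spi_w8r8()/spi_w8r16() convenience calls from sleepable context. A hedged sketch; the command byte is arbitrary:

#include <linux/spi/spi.h>

static int my_read_chip_id(struct spi_device *spi)
{
        ssize_t id = spi_w8r8(spi, 0x9f);       /* hypothetical "read ID" command */

        if (id < 0)
                return id;
        dev_info(&spi->dev, "chip id 0x%02x\n", (unsigned int)id);
        return 0;
}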
diff --git a/include/linux/spi/spidev.h b/include/linux/spi/spidev.h
new file mode 100644
index 00000000000..7d700be5749
--- /dev/null
+++ b/include/linux/spi/spidev.h
@@ -0,0 +1,124 @@
+/*
+ * include/linux/spi/spidev.h
+ *
+ * Copyright (C) 2006 SWAPP
+ * Andrea Paterniani <a.paterniani@swapp-eng.it>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef SPIDEV_H
+#define SPIDEV_H
+
+
+/* User space versions of kernel symbols for SPI clocking modes,
+ * matching <linux/spi/spi.h>
+ */
+
+#define SPI_CPHA 0x01
+#define SPI_CPOL 0x02
+
+#define SPI_MODE_0 (0|0)
+#define SPI_MODE_1 (0|SPI_CPHA)
+#define SPI_MODE_2 (SPI_CPOL|0)
+#define SPI_MODE_3 (SPI_CPOL|SPI_CPHA)
+
+
+/*---------------------------------------------------------------------------*/
+
+/* IOCTL commands */
+
+#define SPI_IOC_MAGIC 'k'
+
+/**
+ * struct spi_ioc_transfer - describes a single SPI transfer
+ * @tx_buf: Holds pointer to userspace buffer with transmit data, or null.
+ * If no data is provided, zeroes are shifted out.
+ * @rx_buf: Holds pointer to userspace buffer for receive data, or null.
+ * @len: Length of tx and rx buffers, in bytes.
+ * @speed_hz: Temporary override of the device's bitrate.
+ * @bits_per_word: Temporary override of the device's wordsize.
+ * @delay_usecs: If nonzero, how long to delay after the last bit transfer
+ * before optionally deselecting the device before the next transfer.
+ * @cs_change: True to deselect device before starting the next transfer.
+ *
+ * This structure is mapped directly to the kernel spi_transfer structure;
+ * the fields have the same meanings, except of course that the pointers
+ * are in a different address space (and may be of different sizes in some
+ * cases, such as 32-bit i386 userspace over a 64-bit x86_64 kernel).
+ * Zero-initialize the structure, including currently unused fields, to
+ * accommodate potential future updates.
+ *
+ * SPI_IOC_MESSAGE gives userspace the equivalent of kernel spi_sync().
+ * Pass it an array of related transfers, they'll execute together.
+ * Each transfer may be half duplex (either direction) or full duplex.
+ *
+ * struct spi_ioc_transfer mesg[4];
+ * ...
+ * status = ioctl(fd, SPI_IOC_MESSAGE(4), mesg);
+ *
+ * So for example one transfer might send a nine bit command (right aligned
+ * in a 16-bit word), the next could read a block of 8-bit data before
+ * terminating that command by temporarily deselecting the chip; the next
+ * could send a different nine bit command (re-selecting the chip), and the
+ * last transfer might write some register values.
+ */
+struct spi_ioc_transfer {
+ __u64 tx_buf;
+ __u64 rx_buf;
+
+ __u32 len;
+ __u32 speed_hz;
+
+ __u16 delay_usecs;
+ __u8 bits_per_word;
+ __u8 cs_change;
+ __u32 pad;
+
+ /* If the contents of 'struct spi_ioc_transfer' ever change
+ * incompatibly, then the ioctl number (currently 0) must change;
+ * ioctls with constant size fields get a bit more in the way of
+ * error checking than ones (like this) where that field varies.
+ *
+ * NOTE: struct layout is the same in 64bit and 32bit userspace.
+ */
+};
+
+/* not all platforms use <asm-generic/ioctl.h> or _IOC_TYPECHECK() ... */
+#define SPI_MSGSIZE(N) \
+ ((((N)*(sizeof (struct spi_ioc_transfer))) < (1 << _IOC_SIZEBITS)) \
+ ? ((N)*(sizeof (struct spi_ioc_transfer))) : 0)
+#define SPI_IOC_MESSAGE(N) _IOW(SPI_IOC_MAGIC, 0, char[SPI_MSGSIZE(N)])
+
+
+/* Read / Write of SPI mode (SPI_MODE_0..SPI_MODE_3) */
+#define SPI_IOC_RD_MODE _IOR(SPI_IOC_MAGIC, 1, __u8)
+#define SPI_IOC_WR_MODE _IOW(SPI_IOC_MAGIC, 1, __u8)
+
+/* Read / Write SPI bit justification */
+#define SPI_IOC_RD_LSB_FIRST _IOR(SPI_IOC_MAGIC, 2, __u8)
+#define SPI_IOC_WR_LSB_FIRST _IOW(SPI_IOC_MAGIC, 2, __u8)
+
+/* Read / Write SPI device word length (1..N) */
+#define SPI_IOC_RD_BITS_PER_WORD _IOR(SPI_IOC_MAGIC, 3, __u8)
+#define SPI_IOC_WR_BITS_PER_WORD _IOW(SPI_IOC_MAGIC, 3, __u8)
+
+/* Read / Write SPI device default max speed hz */
+#define SPI_IOC_RD_MAX_SPEED_HZ _IOR(SPI_IOC_MAGIC, 4, __u32)
+#define SPI_IOC_WR_MAX_SPEED_HZ _IOW(SPI_IOC_MAGIC, 4, __u32)
+
+
+
+#endif /* SPIDEV_H */
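A hedged userspace sketch of the new spidev ABI documented above; the device path and data bytes are placeholders, and only the zero-initialization and the full-duplex buffer pairing really matter:

#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/spi/spidev.h>

static int spidev_exchange(const char *path)
{
        uint8_t tx[2] = { 0x90, 0x00 }, rx[2] = { 0, 0 };
        struct spi_ioc_transfer xfer;
        int fd, ret;

        fd = open(path, O_RDWR);
        if (fd < 0)
                return -1;

        memset(&xfer, 0, sizeof(xfer)); /* zero the pad and unused fields */
        xfer.tx_buf = (unsigned long)tx;
        xfer.rx_buf = (unsigned long)rx;
        xfer.len = sizeof(tx);          /* speed/bits/delay left 0 = device defaults */

        ret = ioctl(fd, SPI_IOC_MESSAGE(1), &xfer);
        close(fd);
        return ret;
}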
diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h
index dc5fb69e4de..210549ba4ef 100644
--- a/include/linux/spinlock_types.h
+++ b/include/linux/spinlock_types.h
@@ -85,6 +85,12 @@ typedef struct {
RW_DEP_MAP_INIT(lockname) }
#endif
+/*
+ * SPIN_LOCK_UNLOCKED and RW_LOCK_UNLOCKED defeat lockdep state tracking and
+ * are hence deprecated.
+ * Please use DEFINE_SPINLOCK()/DEFINE_RWLOCK() or
+ * __SPIN_LOCK_UNLOCKED()/__RW_LOCK_UNLOCKED() as appropriate.
+ */
#define SPIN_LOCK_UNLOCKED __SPIN_LOCK_UNLOCKED(old_style_spin_init)
#define RW_LOCK_UNLOCKED __RW_LOCK_UNLOCKED(old_style_rw_init)
diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h
index 50e2b01e517..1d2b084c018 100644
--- a/include/linux/stacktrace.h
+++ b/include/linux/stacktrace.h
@@ -6,15 +6,13 @@ struct stack_trace {
unsigned int nr_entries, max_entries;
unsigned long *entries;
int skip; /* input argument: How many entries to skip */
- int all_contexts; /* input argument: if true do than one stack */
};
-extern void save_stack_trace(struct stack_trace *trace,
- struct task_struct *task);
+extern void save_stack_trace(struct stack_trace *trace);
extern void print_stack_trace(struct stack_trace *trace, int spaces);
#else
-# define save_stack_trace(trace, task) do { } while (0)
+# define save_stack_trace(trace) do { } while (0)
# define print_stack_trace(trace) do { } while (0)
#endif
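
A minimal in-kernel sketch of the simplified interface (current-task traces only); the depth and skip values are arbitrary choices for illustration.

#include <linux/stacktrace.h>

#define TRACE_DEPTH 16

static void dump_current_stack(void)
{
	unsigned long entries[TRACE_DEPTH];
	struct stack_trace trace = {
		.max_entries	= TRACE_DEPTH,
		.entries	= entries,
		.skip		= 1,	/* skip this helper's own frame */
	};

	save_stack_trace(&trace);	/* no task argument any more */
	print_stack_trace(&trace, 0);
}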
diff --git a/include/linux/stat.h b/include/linux/stat.h
index 679ef0d70b6..611c398dab7 100644
--- a/include/linux/stat.h
+++ b/include/linux/stat.h
@@ -53,6 +53,9 @@
#define S_IWUGO (S_IWUSR|S_IWGRP|S_IWOTH)
#define S_IXUGO (S_IXUSR|S_IXGRP|S_IXOTH)
+#define UTIME_NOW ((1l << 30) - 1l)
+#define UTIME_OMIT ((1l << 30) - 2l)
+
#include <linux/types.h>
#include <linux/time.h>
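
These constants are intended for the timespec-based file time interface; as a hedged userspace sketch (assuming a libc that exposes utimensat(), which is not part of this patch), one might write:

#include <fcntl.h>
#include <time.h>
#include <sys/stat.h>

/* Bump mtime to "now" while leaving atime untouched. */
int touch_mtime(const char *path)
{
	struct timespec times[2];

	times[0].tv_sec  = 0;
	times[0].tv_nsec = UTIME_OMIT;	/* atime: leave unchanged */
	times[1].tv_sec  = 0;
	times[1].tv_nsec = UTIME_NOW;	/* mtime: current time */

	return utimensat(AT_FDCWD, path, times, 0);
}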
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 96868be9c21..9d2aa1a12aa 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -1,7 +1,7 @@
#ifndef _LINUX_SWSUSP_H
#define _LINUX_SWSUSP_H
-#if defined(CONFIG_X86) || defined(CONFIG_FRV) || defined(CONFIG_PPC32)
+#if defined(CONFIG_X86) || defined(CONFIG_FRV) || defined(CONFIG_PPC32) || defined(CONFIG_PPC64)
#include <asm/suspend.h>
#endif
#include <linux/swap.h>
diff --git a/include/linux/svga.h b/include/linux/svga.h
index eadb981bb37..e1cc552e04f 100644
--- a/include/linux/svga.h
+++ b/include/linux/svga.h
@@ -112,6 +112,7 @@ void svga_tilecopy(struct fb_info *info, struct fb_tilearea *area);
void svga_tilefill(struct fb_info *info, struct fb_tilerect *rect);
void svga_tileblit(struct fb_info *info, struct fb_tileblit *blit);
void svga_tilecursor(struct fb_info *info, struct fb_tilecursor *cursor);
+int svga_get_tilemax(struct fb_info *info);
int svga_compute_pll(const struct svga_pll *pll, u32 f_wanted, u16 *m, u16 *n, u16 *r, int node);
int svga_check_timings(const struct svga_timing_regs *tm, struct fb_var_screeninfo *var, int node);
diff --git a/include/linux/sysdev.h b/include/linux/sysdev.h
index 389ccf858d3..e699ab279c2 100644
--- a/include/linux/sysdev.h
+++ b/include/linux/sysdev.h
@@ -22,6 +22,7 @@
#define _SYSDEV_H_
#include <linux/kobject.h>
+#include <linux/module.h>
#include <linux/pm.h>
diff --git a/include/linux/time.h b/include/linux/time.h
index 8ea8dea713c..dda9be685ab 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -109,7 +109,7 @@ extern void do_gettimeofday(struct timeval *tv);
extern int do_settimeofday(struct timespec *tv);
extern int do_sys_settimeofday(struct timespec *tv, struct timezone *tz);
#define do_posix_clock_monotonic_gettime(ts) ktime_get_ts(ts)
-extern long do_utimes(int dfd, char __user *filename, struct timeval *times);
+extern long do_utimes(int dfd, char __user *filename, struct timespec *times, int flags);
struct itimerval;
extern int do_setitimer(int which, struct itimerval *value,
struct itimerval *ovalue);
@@ -119,6 +119,7 @@ extern void getnstimeofday(struct timespec *tv);
extern struct timespec timespec_trunc(struct timespec t, unsigned gran);
extern int timekeeping_is_continuous(void);
+extern void update_wall_time(void);
/**
* timespec_to_ns - Convert timespec to nanoseconds
diff --git a/include/linux/timer.h b/include/linux/timer.h
index 719113b652d..e0c5c16c992 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -37,6 +37,7 @@ extern struct tvec_t_base_s boot_tvec_bases;
TIMER_INITIALIZER(_function, _expires, _data)
void fastcall init_timer(struct timer_list * timer);
+void fastcall init_timer_deferrable(struct timer_list *timer);
static inline void setup_timer(struct timer_list * timer,
void (*function)(unsigned long),
diff --git a/include/linux/tty.h b/include/linux/tty.h
index dee72b9a20f..bb457608520 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -313,6 +313,7 @@ extern int tty_hung_up_p(struct file * filp);
extern void do_SAK(struct tty_struct *tty);
extern void __do_SAK(struct tty_struct *tty);
extern void disassociate_ctty(int priv);
+extern void no_tty(void);
extern void tty_flip_buffer_push(struct tty_struct *tty);
extern speed_t tty_get_baud_rate(struct tty_struct *tty);
extern speed_t tty_termios_baud_rate(struct ktermios *termios);
@@ -333,7 +334,6 @@ extern int tty_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
extern dev_t tty_devnum(struct tty_struct *tty);
extern void proc_clear_tty(struct task_struct *p);
-extern void proc_set_tty(struct task_struct *tsk, struct tty_struct *tty);
extern struct tty_struct *get_current_tty(void);
extern struct mutex tty_mutex;
diff --git a/include/linux/uinput.h b/include/linux/uinput.h
index 1fd61eeed66..a6c1e8eed22 100644
--- a/include/linux/uinput.h
+++ b/include/linux/uinput.h
@@ -32,6 +32,8 @@
* - first public version
*/
+#include <linux/input.h>
+
#define UINPUT_VERSION 3
#ifdef __KERNEL__
diff --git a/include/linux/utsname.h b/include/linux/utsname.h
index e10267d402c..f8d3b326e93 100644
--- a/include/linux/utsname.h
+++ b/include/linux/utsname.h
@@ -49,9 +49,7 @@ static inline void get_uts_ns(struct uts_namespace *ns)
}
#ifdef CONFIG_UTS_NS
-extern int unshare_utsname(unsigned long unshare_flags,
- struct uts_namespace **new_uts);
-extern int copy_utsname(int flags, struct task_struct *tsk);
+extern struct uts_namespace *copy_utsname(int flags, struct uts_namespace *ns);
extern void free_uts_ns(struct kref *kref);
static inline void put_uts_ns(struct uts_namespace *ns)
@@ -59,21 +57,12 @@ static inline void put_uts_ns(struct uts_namespace *ns)
kref_put(&ns->kref, free_uts_ns);
}
#else
-static inline int unshare_utsname(unsigned long unshare_flags,
- struct uts_namespace **new_uts)
+static inline struct uts_namespace *copy_utsname(int flags,
+ struct uts_namespace *ns)
{
- if (unshare_flags & CLONE_NEWUTS)
- return -EINVAL;
-
- return 0;
+ return ns;
}
-static inline int copy_utsname(int flags, struct task_struct *tsk)
-{
- if (flags & CLONE_NEWUTS)
- return -EINVAL;
- return 0;
-}
static inline void put_uts_ns(struct uts_namespace *ns)
{
}
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 924e502905d..4b7ee83787c 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -53,6 +53,7 @@ extern void vunmap(void *addr);
extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
unsigned long pgoff);
+void vmalloc_sync_all(void);
/*
* Lowlevel-APIs (not for driver use!)
diff --git a/include/linux/vt_kern.h b/include/linux/vt_kern.h
index e0db669998f..d961635d0e6 100644
--- a/include/linux/vt_kern.h
+++ b/include/linux/vt_kern.h
@@ -9,6 +9,7 @@
#include <linux/vt.h>
#include <linux/kd.h>
#include <linux/tty.h>
+#include <linux/mutex.h>
#include <linux/console_struct.h>
#include <linux/mm.h>
@@ -82,7 +83,7 @@ void reset_vc(struct vc_data *vc);
#define CON_BUF_SIZE (CONFIG_BASE_SMALL ? 256 : PAGE_SIZE)
extern char con_buf[CON_BUF_SIZE];
-extern struct semaphore con_buf_sem;
+extern struct mutex con_buf_mtx;
extern char vt_dont_switch;
struct vt_spawn_console {
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index b8abfc74d03..f16ba1e0687 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -121,6 +121,12 @@ struct execute_work {
init_timer(&(_work)->timer); \
} while (0)
+#define INIT_DELAYED_WORK_DEFERRABLE(_work, _func) \
+ do { \
+ INIT_WORK(&(_work)->work, (_func)); \
+ init_timer_deferrable(&(_work)->timer); \
+ } while (0)
+
/**
* work_pending - Find out whether a work item is currently pending
* @work: The work item in question
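
As a usage sketch for the deferrable variants added here and in the timer.h hunk above (the driver context, function names, and one-second period are illustrative assumptions):

#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

static struct delayed_work housekeeping_work;

static void housekeeping_fn(struct work_struct *work)
{
	/* ... periodic, non-urgent housekeeping ... */

	/* Re-arm; a deferrable timer will not wake an otherwise idle CPU. */
	schedule_delayed_work(&housekeeping_work, HZ);
}

static int __init housekeeping_init(void)
{
	INIT_DELAYED_WORK_DEFERRABLE(&housekeeping_work, housekeeping_fn);
	schedule_delayed_work(&housekeeping_work, HZ);
	return 0;
}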
diff --git a/include/math-emu/extended.h b/include/math-emu/extended.h
deleted file mode 100644
index 84770fceb53..00000000000
--- a/include/math-emu/extended.h
+++ /dev/null
@@ -1,396 +0,0 @@
-/* Software floating-point emulation.
- Definitions for IEEE Extended Precision.
- Copyright (C) 1999 Free Software Foundation, Inc.
- This file is part of the GNU C Library.
- Contributed by Jakub Jelinek (jj@ultra.linux.cz).
-
- The GNU C Library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Library General Public License as
- published by the Free Software Foundation; either version 2 of the
- License, or (at your option) any later version.
-
- The GNU C Library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Library General Public License for more details.
-
- You should have received a copy of the GNU Library General Public
- License along with the GNU C Library; see the file COPYING.LIB. If
- not, write to the Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
-
-
-#ifndef __MATH_EMU_EXTENDED_H__
-#define __MATH_EMU_EXTENDED_H__
-
-#if _FP_W_TYPE_SIZE < 32
-#error "Here's a nickel, kid. Go buy yourself a real computer."
-#endif
-
-#if _FP_W_TYPE_SIZE < 64
-#define _FP_FRACTBITS_E (4*_FP_W_TYPE_SIZE)
-#else
-#define _FP_FRACTBITS_E (2*_FP_W_TYPE_SIZE)
-#endif
-
-#define _FP_FRACBITS_E 64
-#define _FP_FRACXBITS_E (_FP_FRACTBITS_E - _FP_FRACBITS_E)
-#define _FP_WFRACBITS_E (_FP_WORKBITS + _FP_FRACBITS_E)
-#define _FP_WFRACXBITS_E (_FP_FRACTBITS_E - _FP_WFRACBITS_E)
-#define _FP_EXPBITS_E 15
-#define _FP_EXPBIAS_E 16383
-#define _FP_EXPMAX_E 32767
-
-#define _FP_QNANBIT_E \
- ((_FP_W_TYPE)1 << (_FP_FRACBITS_E-2) % _FP_W_TYPE_SIZE)
-#define _FP_IMPLBIT_E \
- ((_FP_W_TYPE)1 << (_FP_FRACBITS_E-1) % _FP_W_TYPE_SIZE)
-#define _FP_OVERFLOW_E \
- ((_FP_W_TYPE)1 << (_FP_WFRACBITS_E % _FP_W_TYPE_SIZE))
-
-#if _FP_W_TYPE_SIZE < 64
-
-union _FP_UNION_E
-{
- long double flt;
- struct
- {
-#if __BYTE_ORDER == __BIG_ENDIAN
- unsigned long pad1 : _FP_W_TYPE_SIZE;
- unsigned long pad2 : (_FP_W_TYPE_SIZE - 1 - _FP_EXPBITS_E);
- unsigned long sign : 1;
- unsigned long exp : _FP_EXPBITS_E;
- unsigned long frac1 : _FP_W_TYPE_SIZE;
- unsigned long frac0 : _FP_W_TYPE_SIZE;
-#else
- unsigned long frac0 : _FP_W_TYPE_SIZE;
- unsigned long frac1 : _FP_W_TYPE_SIZE;
- unsigned exp : _FP_EXPBITS_E;
- unsigned sign : 1;
-#endif /* not bigendian */
- } bits __attribute__((packed));
-};
-
-
-#define FP_DECL_E(X) _FP_DECL(4,X)
-
-#define FP_UNPACK_RAW_E(X, val) \
- do { \
- union _FP_UNION_E _flo; _flo.flt = (val); \
- \
- X##_f[2] = 0; X##_f[3] = 0; \
- X##_f[0] = _flo.bits.frac0; \
- X##_f[1] = _flo.bits.frac1; \
- X##_e = _flo.bits.exp; \
- X##_s = _flo.bits.sign; \
- if (!X##_e && (X##_f[1] || X##_f[0]) \
- && !(X##_f[1] & _FP_IMPLBIT_E)) \
- { \
- X##_e++; \
- FP_SET_EXCEPTION(FP_EX_DENORM); \
- } \
- } while (0)
-
-#define FP_UNPACK_RAW_EP(X, val) \
- do { \
- union _FP_UNION_E *_flo = \
- (union _FP_UNION_E *)(val); \
- \
- X##_f[2] = 0; X##_f[3] = 0; \
- X##_f[0] = _flo->bits.frac0; \
- X##_f[1] = _flo->bits.frac1; \
- X##_e = _flo->bits.exp; \
- X##_s = _flo->bits.sign; \
- if (!X##_e && (X##_f[1] || X##_f[0]) \
- && !(X##_f[1] & _FP_IMPLBIT_E)) \
- { \
- X##_e++; \
- FP_SET_EXCEPTION(FP_EX_DENORM); \
- } \
- } while (0)
-
-#define FP_PACK_RAW_E(val, X) \
- do { \
- union _FP_UNION_E _flo; \
- \
- if (X##_e) X##_f[1] |= _FP_IMPLBIT_E; \
- else X##_f[1] &= ~(_FP_IMPLBIT_E); \
- _flo.bits.frac0 = X##_f[0]; \
- _flo.bits.frac1 = X##_f[1]; \
- _flo.bits.exp = X##_e; \
- _flo.bits.sign = X##_s; \
- \
- (val) = _flo.flt; \
- } while (0)
-
-#define FP_PACK_RAW_EP(val, X) \
- do { \
- if (!FP_INHIBIT_RESULTS) \
- { \
- union _FP_UNION_E *_flo = \
- (union _FP_UNION_E *)(val); \
- \
- if (X##_e) X##_f[1] |= _FP_IMPLBIT_E; \
- else X##_f[1] &= ~(_FP_IMPLBIT_E); \
- _flo->bits.frac0 = X##_f[0]; \
- _flo->bits.frac1 = X##_f[1]; \
- _flo->bits.exp = X##_e; \
- _flo->bits.sign = X##_s; \
- } \
- } while (0)
-
-#define FP_UNPACK_E(X,val) \
- do { \
- FP_UNPACK_RAW_E(X,val); \
- _FP_UNPACK_CANONICAL(E,4,X); \
- } while (0)
-
-#define FP_UNPACK_EP(X,val) \
- do { \
- FP_UNPACK_RAW_2_P(X,val); \
- _FP_UNPACK_CANONICAL(E,4,X); \
- } while (0)
-
-#define FP_PACK_E(val,X) \
- do { \
- _FP_PACK_CANONICAL(E,4,X); \
- FP_PACK_RAW_E(val,X); \
- } while (0)
-
-#define FP_PACK_EP(val,X) \
- do { \
- _FP_PACK_CANONICAL(E,4,X); \
- FP_PACK_RAW_EP(val,X); \
- } while (0)
-
-#define FP_ISSIGNAN_E(X) _FP_ISSIGNAN(E,4,X)
-#define FP_NEG_E(R,X) _FP_NEG(E,4,R,X)
-#define FP_ADD_E(R,X,Y) _FP_ADD(E,4,R,X,Y)
-#define FP_SUB_E(R,X,Y) _FP_SUB(E,4,R,X,Y)
-#define FP_MUL_E(R,X,Y) _FP_MUL(E,4,R,X,Y)
-#define FP_DIV_E(R,X,Y) _FP_DIV(E,4,R,X,Y)
-#define FP_SQRT_E(R,X) _FP_SQRT(E,4,R,X)
-
-/*
- * Square root algorithms:
- * We have just one right now, maybe Newton approximation
- * should be added for those machines where division is fast.
- * This has special _E version because standard _4 square
- * root would not work (it has to start normally with the
- * second word and not the first), but as we have to do it
- * anyway, we optimize it by doing most of the calculations
- * in two UWtype registers instead of four.
- */
-
-#define _FP_SQRT_MEAT_E(R, S, T, X, q) \
- do { \
- q = (_FP_W_TYPE)1 << (_FP_W_TYPE_SIZE - 1); \
- _FP_FRAC_SRL_4(X, (_FP_WORKBITS)); \
- while (q) \
- { \
- T##_f[1] = S##_f[1] + q; \
- if (T##_f[1] <= X##_f[1]) \
- { \
- S##_f[1] = T##_f[1] + q; \
- X##_f[1] -= T##_f[1]; \
- R##_f[1] += q; \
- } \
- _FP_FRAC_SLL_2(X, 1); \
- q >>= 1; \
- } \
- q = (_FP_W_TYPE)1 << (_FP_W_TYPE_SIZE - 1); \
- while (q) \
- { \
- T##_f[0] = S##_f[0] + q; \
- T##_f[1] = S##_f[1]; \
- if (T##_f[1] < X##_f[1] || \
- (T##_f[1] == X##_f[1] && \
- T##_f[0] <= X##_f[0])) \
- { \
- S##_f[0] = T##_f[0] + q; \
- S##_f[1] += (T##_f[0] > S##_f[0]); \
- _FP_FRAC_DEC_2(X, T); \
- R##_f[0] += q; \
- } \
- _FP_FRAC_SLL_2(X, 1); \
- q >>= 1; \
- } \
- _FP_FRAC_SLL_4(R, (_FP_WORKBITS)); \
- if (X##_f[0] | X##_f[1]) \
- { \
- if (S##_f[1] < X##_f[1] || \
- (S##_f[1] == X##_f[1] && \
- S##_f[0] < X##_f[0])) \
- R##_f[0] |= _FP_WORK_ROUND; \
- R##_f[0] |= _FP_WORK_STICKY; \
- } \
- } while (0)
-
-#define FP_CMP_E(r,X,Y,un) _FP_CMP(E,4,r,X,Y,un)
-#define FP_CMP_EQ_E(r,X,Y) _FP_CMP_EQ(E,4,r,X,Y)
-
-#define FP_TO_INT_E(r,X,rsz,rsg) _FP_TO_INT(E,4,r,X,rsz,rsg)
-#define FP_TO_INT_ROUND_E(r,X,rsz,rsg) _FP_TO_INT_ROUND(E,4,r,X,rsz,rsg)
-#define FP_FROM_INT_E(X,r,rs,rt) _FP_FROM_INT(E,4,X,r,rs,rt)
-
-#define _FP_FRAC_HIGH_E(X) (X##_f[2])
-#define _FP_FRAC_HIGH_RAW_E(X) (X##_f[1])
-
-#else /* not _FP_W_TYPE_SIZE < 64 */
-union _FP_UNION_E
-{
- long double flt /* __attribute__((mode(TF))) */ ;
- struct {
-#if __BYTE_ORDER == __BIG_ENDIAN
- unsigned long pad : (_FP_W_TYPE_SIZE - 1 - _FP_EXPBITS_E);
- unsigned sign : 1;
- unsigned exp : _FP_EXPBITS_E;
- unsigned long frac : _FP_W_TYPE_SIZE;
-#else
- unsigned long frac : _FP_W_TYPE_SIZE;
- unsigned exp : _FP_EXPBITS_E;
- unsigned sign : 1;
-#endif
- } bits;
-};
-
-#define FP_DECL_E(X) _FP_DECL(2,X)
-
-#define FP_UNPACK_RAW_E(X, val) \
- do { \
- union _FP_UNION_E _flo; _flo.flt = (val); \
- \
- X##_f0 = _flo.bits.frac; \
- X##_f1 = 0; \
- X##_e = _flo.bits.exp; \
- X##_s = _flo.bits.sign; \
- if (!X##_e && X##_f0 && !(X##_f0 & _FP_IMPLBIT_E)) \
- { \
- X##_e++; \
- FP_SET_EXCEPTION(FP_EX_DENORM); \
- } \
- } while (0)
-
-#define FP_UNPACK_RAW_EP(X, val) \
- do { \
- union _FP_UNION_E *_flo = \
- (union _FP_UNION_E *)(val); \
- \
- X##_f0 = _flo->bits.frac; \
- X##_f1 = 0; \
- X##_e = _flo->bits.exp; \
- X##_s = _flo->bits.sign; \
- if (!X##_e && X##_f0 && !(X##_f0 & _FP_IMPLBIT_E)) \
- { \
- X##_e++; \
- FP_SET_EXCEPTION(FP_EX_DENORM); \
- } \
- } while (0)
-
-#define FP_PACK_RAW_E(val, X) \
- do { \
- union _FP_UNION_E _flo; \
- \
- if (X##_e) X##_f0 |= _FP_IMPLBIT_E; \
- else X##_f0 &= ~(_FP_IMPLBIT_E); \
- _flo.bits.frac = X##_f0; \
- _flo.bits.exp = X##_e; \
- _flo.bits.sign = X##_s; \
- \
- (val) = _flo.flt; \
- } while (0)
-
-#define FP_PACK_RAW_EP(fs, val, X) \
- do { \
- if (!FP_INHIBIT_RESULTS) \
- { \
- union _FP_UNION_E *_flo = \
- (union _FP_UNION_E *)(val); \
- \
- if (X##_e) X##_f0 |= _FP_IMPLBIT_E; \
- else X##_f0 &= ~(_FP_IMPLBIT_E); \
- _flo->bits.frac = X##_f0; \
- _flo->bits.exp = X##_e; \
- _flo->bits.sign = X##_s; \
- } \
- } while (0)
-
-
-#define FP_UNPACK_E(X,val) \
- do { \
- FP_UNPACK_RAW_E(X,val); \
- _FP_UNPACK_CANONICAL(E,2,X); \
- } while (0)
-
-#define FP_UNPACK_EP(X,val) \
- do { \
- FP_UNPACK_RAW_EP(X,val); \
- _FP_UNPACK_CANONICAL(E,2,X); \
- } while (0)
-
-#define FP_PACK_E(val,X) \
- do { \
- _FP_PACK_CANONICAL(E,2,X); \
- FP_PACK_RAW_E(val,X); \
- } while (0)
-
-#define FP_PACK_EP(val,X) \
- do { \
- _FP_PACK_CANONICAL(E,2,X); \
- FP_PACK_RAW_EP(val,X); \
- } while (0)
-
-#define FP_ISSIGNAN_E(X) _FP_ISSIGNAN(E,2,X)
-#define FP_NEG_E(R,X) _FP_NEG(E,2,R,X)
-#define FP_ADD_E(R,X,Y) _FP_ADD(E,2,R,X,Y)
-#define FP_SUB_E(R,X,Y) _FP_SUB(E,2,R,X,Y)
-#define FP_MUL_E(R,X,Y) _FP_MUL(E,2,R,X,Y)
-#define FP_DIV_E(R,X,Y) _FP_DIV(E,2,R,X,Y)
-#define FP_SQRT_E(R,X) _FP_SQRT(E,2,R,X)
-
-/*
- * Square root algorithms:
- * We have just one right now, maybe Newton approximation
- * should be added for those machines where division is fast.
- * We optimize it by doing most of the calculations
- * in one UWtype registers instead of two, although we don't
- * have to.
- */
-#define _FP_SQRT_MEAT_E(R, S, T, X, q) \
- do { \
- q = (_FP_W_TYPE)1 << (_FP_W_TYPE_SIZE - 1); \
- _FP_FRAC_SRL_2(X, (_FP_WORKBITS)); \
- while (q) \
- { \
- T##_f0 = S##_f0 + q; \
- if (T##_f0 <= X##_f0) \
- { \
- S##_f0 = T##_f0 + q; \
- X##_f0 -= T##_f0; \
- R##_f0 += q; \
- } \
- _FP_FRAC_SLL_1(X, 1); \
- q >>= 1; \
- } \
- _FP_FRAC_SLL_2(R, (_FP_WORKBITS)); \
- if (X##_f0) \
- { \
- if (S##_f0 < X##_f0) \
- R##_f0 |= _FP_WORK_ROUND; \
- R##_f0 |= _FP_WORK_STICKY; \
- } \
- } while (0)
-
-#define FP_CMP_E(r,X,Y,un) _FP_CMP(E,2,r,X,Y,un)
-#define FP_CMP_EQ_E(r,X,Y) _FP_CMP_EQ(E,2,r,X,Y)
-
-#define FP_TO_INT_E(r,X,rsz,rsg) _FP_TO_INT(E,2,r,X,rsz,rsg)
-#define FP_TO_INT_ROUND_E(r,X,rsz,rsg) _FP_TO_INT_ROUND(E,2,r,X,rsz,rsg)
-#define FP_FROM_INT_E(X,r,rs,rt) _FP_FROM_INT(E,2,X,r,rs,rt)
-
-#define _FP_FRAC_HIGH_E(X) (X##_f1)
-#define _FP_FRAC_HIGH_RAW_E(X) (X##_f0)
-
-#endif /* not _FP_W_TYPE_SIZE < 64 */
-
-#endif /* __MATH_EMU_EXTENDED_H__ */
diff --git a/include/net/sock.h b/include/net/sock.h
index 25c37e34bfd..689b886038d 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1361,15 +1361,6 @@ static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
extern __u32 sysctl_wmem_max;
extern __u32 sysctl_rmem_max;
-#ifdef CONFIG_NET
-int siocdevprivate_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg);
-#else
-static inline int siocdevprivate_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg)
-{
- return -ENODEV;
-}
-#endif
-
extern void sk_init(void);
#ifdef CONFIG_SYSCTL
diff --git a/include/video/mach64.h b/include/video/mach64.h
index 09a7f4a7289..a8332e528ec 100644
--- a/include/video/mach64.h
+++ b/include/video/mach64.h
@@ -885,6 +885,7 @@
#define SDRAM 4
#define SGRAM 5
#define WRAM 6
+#define SDRAM32 6
#define DAC_INTERNAL 0x00
#define DAC_IBMRGB514 0x01
diff --git a/include/video/permedia2.h b/include/video/permedia2.h
index b95d3628933..9e49c9571ec 100644
--- a/include/video/permedia2.h
+++ b/include/video/permedia2.h
@@ -154,6 +154,10 @@
#define PM2VI_RD_CLK1_PRESCALE 0x204
#define PM2VI_RD_CLK1_FEEDBACK 0x205
#define PM2VI_RD_CLK1_POSTSCALE 0x206
+#define PM2VI_RD_MCLK_CONTROL 0x20D
+#define PM2VI_RD_MCLK_PRESCALE 0x20E
+#define PM2VI_RD_MCLK_FEEDBACK 0x20F
+#define PM2VI_RD_MCLK_POSTSCALE 0x210
#define PM2VI_RD_CURSOR_PALETTE 0x303
#define PM2VI_RD_CURSOR_PATTERN 0x400
diff --git a/include/video/tgafb.h b/include/video/tgafb.h
index be2b3e94e25..03d0dbe293a 100644
--- a/include/video/tgafb.h
+++ b/include/video/tgafb.h
@@ -39,6 +39,7 @@
#define TGA_RASTEROP_REG 0x0034
#define TGA_PIXELSHIFT_REG 0x0038
#define TGA_DEEP_REG 0x0050
+#define TGA_START_REG 0x0054
#define TGA_PIXELMASK_REG 0x005c
#define TGA_CURSOR_BASE_REG 0x0060
#define TGA_HORIZ_REG 0x0064
@@ -140,7 +141,7 @@
/*
- * Useful defines for managing the BT463 on the 24-plane TGAs
+ * Useful defines for managing the BT463 on the 24-plane TGAs/SFB+s
*/
#define BT463_ADDR_LO 0x0
@@ -168,12 +169,35 @@
#define BT463_WINDOW_TYPE_BASE 0x0300
/*
+ * Useful defines for managing the BT459 on the 8-plane SFB+s
+ */
+
+#define BT459_ADDR_LO 0x0
+#define BT459_ADDR_HI 0x1
+#define BT459_REG_ACC 0x2
+#define BT459_PALETTE 0x3
+
+#define BT459_CUR_CLR_1 0x0181
+#define BT459_CUR_CLR_2 0x0182
+#define BT459_CUR_CLR_3 0x0183
+
+#define BT459_CMD_REG_0 0x0201
+#define BT459_CMD_REG_1 0x0202
+#define BT459_CMD_REG_2 0x0203
+
+#define BT459_READ_MASK 0x0204
+
+#define BT459_BLINK_MASK 0x0206
+
+#define BT459_CUR_CMD_REG 0x0300
+
+/*
* The framebuffer driver private data.
*/
struct tga_par {
- /* PCI device. */
- struct pci_dev *pdev;
+ /* PCI/TC device. */
+ struct device *dev;
/* Device dependent information. */
void __iomem *tga_mem_base;
@@ -235,4 +259,21 @@ BT463_WRITE(struct tga_par *par, u32 m, u16 a, u8 v)
TGA_WRITE_REG(par, m << 10 | v, TGA_RAMDAC_REG);
}
+static inline void
+BT459_LOAD_ADDR(struct tga_par *par, u16 a)
+{
+ TGA_WRITE_REG(par, BT459_ADDR_LO << 2, TGA_RAMDAC_SETUP_REG);
+ TGA_WRITE_REG(par, a & 0xff, TGA_RAMDAC_REG);
+ TGA_WRITE_REG(par, BT459_ADDR_HI << 2, TGA_RAMDAC_SETUP_REG);
+ TGA_WRITE_REG(par, a >> 8, TGA_RAMDAC_REG);
+}
+
+static inline void
+BT459_WRITE(struct tga_par *par, u32 m, u16 a, u8 v)
+{
+ BT459_LOAD_ADDR(par, a);
+ TGA_WRITE_REG(par, m << 2, TGA_RAMDAC_SETUP_REG);
+ TGA_WRITE_REG(par, v, TGA_RAMDAC_REG);
+}
+
#endif /* TGAFB_H */
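
A usage illustration of the new BT459 helpers (a sketch only; the choice of register and the 0xff mask are hypothetical, not taken from the driver):

static inline void
bt459_enable_all_planes(struct tga_par *par)
{
	/* Program the read mask through the indirect register interface. */
	BT459_WRITE(par, BT459_REG_ACC, BT459_READ_MASK, 0xff);
}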