author     Jeff Garzik <jeff@garzik.org>   2006-12-07 04:57:19 -0500
committer  Jeff Garzik <jeff@garzik.org>   2006-12-07 04:57:19 -0500
commit     8d1413b28033c49c7f1a4d320e815d7a5531acee
tree       b37281abef014cd60803b81c100388d7a475d49e   /include/asm-mips
parent     ed25ffa16434724f5ed825aa48734c7f3aefa203
parent     620034c84d1d939717bdfbe02c51a3fee43541c3
Merge branch 'master' into upstream
Conflicts:
drivers/net/netxen/netxen_nic.h
drivers/net/netxen/netxen_nic_main.c
Diffstat (limited to 'include/asm-mips')
-rw-r--r--  include/asm-mips/atomic.h       |  39
-rw-r--r--  include/asm-mips/barrier.h      | 132
-rw-r--r--  include/asm-mips/bitops.h       |  27
-rw-r--r--  include/asm-mips/compat.h       |  68
-rw-r--r--  include/asm-mips/futex.h        |  22
-rw-r--r--  include/asm-mips/i8259.h        |  37
-rw-r--r--  include/asm-mips/pgtable-32.h   |   6
-rw-r--r--  include/asm-mips/pgtable-64.h   |   4
-rw-r--r--  include/asm-mips/sn/klconfig.h  |   2
-rw-r--r--  include/asm-mips/spinlock.h     |  53
-rw-r--r--  include/asm-mips/system.h       | 156
-rw-r--r--  include/asm-mips/types.h        |  10
12 files changed, 322 insertions(+), 234 deletions(-)
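
The bulk of the asm-mips changes in this merge is a memory-barrier cleanup for weakly ordered systems: the "sync" instructions that were open-coded inside the LL/SC loops of the atomics, bitops, spinlocks, xchg()/cmpxchg() and futex paths are dropped, and smp_mb() calls from the new <asm/barrier.h> are placed around the loops instead. smp_mb() only emits a real MIPS "sync" when CONFIG_WEAK_ORDERING and CONFIG_SMP are both enabled, so configurations that do not need the barrier no longer pay for it. A minimal sketch of the resulting pattern, condensed from the atomic_add_return() hunks below (the function name is illustrative, and the R10000_LLSC_WAR and !cpu_has_llsc branches shown in the diff are omitted):

/*
 * Sketch of the pattern this series applies to the MIPS atomics:
 * the barrier moves out of the inline asm and becomes smp_mb(),
 * which <asm/barrier.h> turns into "sync" only on CONFIG_WEAK_ORDERING
 * SMP kernels.  Illustrative only -- the real atomic_add_return()
 * also covers the R10000_LLSC_WAR and !cpu_has_llsc cases.
 */
static __inline__ int atomic_add_return_sketch(int i, atomic_t *v)
{
	unsigned long result, temp;

	smp_mb();		/* new: full barrier before the operation */

	__asm__ __volatile__(
	"	.set	mips3					\n"
	"1:	ll	%1, %2		# atomic_add_return	\n"
	"	addu	%0, %1, %3				\n"
	"	sc	%0, %2					\n"
	"	beqz	%0, 1b					\n"
	"	addu	%0, %1, %3				\n"
	"	.set	mips0					\n"
	: "=&r" (result), "=&r" (temp), "=m" (v->counter)
	: "Ir" (i), "m" (v->counter)
	: "memory");

	smp_mb();		/* new: replaces the "sync" removed from the asm */

	return result;
}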
diff --git a/include/asm-mips/atomic.h b/include/asm-mips/atomic.h index 7978d8e1164..c1a2409bb52 100644 --- a/include/asm-mips/atomic.h +++ b/include/asm-mips/atomic.h @@ -15,6 +15,7 @@ #define _ASM_ATOMIC_H #include <linux/irqflags.h> +#include <asm/barrier.h> #include <asm/cpu-features.h> #include <asm/war.h> @@ -130,6 +131,8 @@ static __inline__ int atomic_add_return(int i, atomic_t * v) { unsigned long result; + smp_mb(); + if (cpu_has_llsc && R10000_LLSC_WAR) { unsigned long temp; @@ -140,7 +143,6 @@ static __inline__ int atomic_add_return(int i, atomic_t * v) " sc %0, %2 \n" " beqzl %0, 1b \n" " addu %0, %1, %3 \n" - " sync \n" " .set mips0 \n" : "=&r" (result), "=&r" (temp), "=m" (v->counter) : "Ir" (i), "m" (v->counter) @@ -155,7 +157,6 @@ static __inline__ int atomic_add_return(int i, atomic_t * v) " sc %0, %2 \n" " beqz %0, 1b \n" " addu %0, %1, %3 \n" - " sync \n" " .set mips0 \n" : "=&r" (result), "=&r" (temp), "=m" (v->counter) : "Ir" (i), "m" (v->counter) @@ -170,6 +171,8 @@ static __inline__ int atomic_add_return(int i, atomic_t * v) local_irq_restore(flags); } + smp_mb(); + return result; } @@ -177,6 +180,8 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v) { unsigned long result; + smp_mb(); + if (cpu_has_llsc && R10000_LLSC_WAR) { unsigned long temp; @@ -187,7 +192,6 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v) " sc %0, %2 \n" " beqzl %0, 1b \n" " subu %0, %1, %3 \n" - " sync \n" " .set mips0 \n" : "=&r" (result), "=&r" (temp), "=m" (v->counter) : "Ir" (i), "m" (v->counter) @@ -202,7 +206,6 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v) " sc %0, %2 \n" " beqz %0, 1b \n" " subu %0, %1, %3 \n" - " sync \n" " .set mips0 \n" : "=&r" (result), "=&r" (temp), "=m" (v->counter) : "Ir" (i), "m" (v->counter) @@ -217,6 +220,8 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v) local_irq_restore(flags); } + smp_mb(); + return result; } @@ -232,6 +237,8 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v) { unsigned long result; + smp_mb(); + if (cpu_has_llsc && R10000_LLSC_WAR) { unsigned long temp; @@ -245,7 +252,6 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v) " beqzl %0, 1b \n" " subu %0, %1, %3 \n" " .set reorder \n" - " sync \n" "1: \n" " .set mips0 \n" : "=&r" (result), "=&r" (temp), "=m" (v->counter) @@ -264,7 +270,6 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v) " beqz %0, 1b \n" " subu %0, %1, %3 \n" " .set reorder \n" - " sync \n" "1: \n" " .set mips0 \n" : "=&r" (result), "=&r" (temp), "=m" (v->counter) @@ -281,6 +286,8 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v) local_irq_restore(flags); } + smp_mb(); + return result; } @@ -375,7 +382,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v) #ifdef CONFIG_64BIT -typedef struct { volatile __s64 counter; } atomic64_t; +typedef struct { volatile long counter; } atomic64_t; #define ATOMIC64_INIT(i) { (i) } @@ -484,6 +491,8 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v) { unsigned long result; + smp_mb(); + if (cpu_has_llsc && R10000_LLSC_WAR) { unsigned long temp; @@ -494,7 +503,6 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v) " scd %0, %2 \n" " beqzl %0, 1b \n" " addu %0, %1, %3 \n" - " sync \n" " .set mips0 \n" : "=&r" (result), "=&r" (temp), "=m" (v->counter) : "Ir" (i), "m" (v->counter) @@ -509,7 +517,6 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v) " scd %0, %2 \n" " beqz %0, 1b \n" " addu %0, 
%1, %3 \n" - " sync \n" " .set mips0 \n" : "=&r" (result), "=&r" (temp), "=m" (v->counter) : "Ir" (i), "m" (v->counter) @@ -524,6 +531,8 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v) local_irq_restore(flags); } + smp_mb(); + return result; } @@ -531,6 +540,8 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v) { unsigned long result; + smp_mb(); + if (cpu_has_llsc && R10000_LLSC_WAR) { unsigned long temp; @@ -541,7 +552,6 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v) " scd %0, %2 \n" " beqzl %0, 1b \n" " subu %0, %1, %3 \n" - " sync \n" " .set mips0 \n" : "=&r" (result), "=&r" (temp), "=m" (v->counter) : "Ir" (i), "m" (v->counter) @@ -556,7 +566,6 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v) " scd %0, %2 \n" " beqz %0, 1b \n" " subu %0, %1, %3 \n" - " sync \n" " .set mips0 \n" : "=&r" (result), "=&r" (temp), "=m" (v->counter) : "Ir" (i), "m" (v->counter) @@ -571,6 +580,8 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v) local_irq_restore(flags); } + smp_mb(); + return result; } @@ -586,6 +597,8 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v) { unsigned long result; + smp_mb(); + if (cpu_has_llsc && R10000_LLSC_WAR) { unsigned long temp; @@ -599,7 +612,6 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v) " beqzl %0, 1b \n" " dsubu %0, %1, %3 \n" " .set reorder \n" - " sync \n" "1: \n" " .set mips0 \n" : "=&r" (result), "=&r" (temp), "=m" (v->counter) @@ -618,7 +630,6 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v) " beqz %0, 1b \n" " dsubu %0, %1, %3 \n" " .set reorder \n" - " sync \n" "1: \n" " .set mips0 \n" : "=&r" (result), "=&r" (temp), "=m" (v->counter) @@ -635,6 +646,8 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v) local_irq_restore(flags); } + smp_mb(); + return result; } diff --git a/include/asm-mips/barrier.h b/include/asm-mips/barrier.h new file mode 100644 index 00000000000..ed82631b001 --- /dev/null +++ b/include/asm-mips/barrier.h @@ -0,0 +1,132 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2006 by Ralf Baechle (ralf@linux-mips.org) + */ +#ifndef __ASM_BARRIER_H +#define __ASM_BARRIER_H + +/* + * read_barrier_depends - Flush all pending reads that subsequents reads + * depend on. + * + * No data-dependent reads from memory-like regions are ever reordered + * over this barrier. All reads preceding this primitive are guaranteed + * to access memory (but not necessarily other CPUs' caches) before any + * reads following this primitive that depend on the data return by + * any of the preceding reads. This primitive is much lighter weight than + * rmb() on most CPUs, and is never heavier weight than is + * rmb(). + * + * These ordering constraints are respected by both the local CPU + * and the compiler. + * + * Ordering is not guaranteed by anything other than these primitives, + * not even by data dependencies. See the documentation for + * memory_barrier() for examples and URLs to more information. 
+ * + * For example, the following code would force ordering (the initial + * value of "a" is zero, "b" is one, and "p" is "&a"): + * + * <programlisting> + * CPU 0 CPU 1 + * + * b = 2; + * memory_barrier(); + * p = &b; q = p; + * read_barrier_depends(); + * d = *q; + * </programlisting> + * + * because the read of "*q" depends on the read of "p" and these + * two reads are separated by a read_barrier_depends(). However, + * the following code, with the same initial values for "a" and "b": + * + * <programlisting> + * CPU 0 CPU 1 + * + * a = 2; + * memory_barrier(); + * b = 3; y = b; + * read_barrier_depends(); + * x = a; + * </programlisting> + * + * does not enforce ordering, since there is no data dependency between + * the read of "a" and the read of "b". Therefore, on some CPUs, such + * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb() + * in cases like this where there are no data dependencies. + */ + +#define read_barrier_depends() do { } while(0) +#define smp_read_barrier_depends() do { } while(0) + +#ifdef CONFIG_CPU_HAS_SYNC +#define __sync() \ + __asm__ __volatile__( \ + ".set push\n\t" \ + ".set noreorder\n\t" \ + ".set mips2\n\t" \ + "sync\n\t" \ + ".set pop" \ + : /* no output */ \ + : /* no input */ \ + : "memory") +#else +#define __sync() do { } while(0) +#endif + +#define __fast_iob() \ + __asm__ __volatile__( \ + ".set push\n\t" \ + ".set noreorder\n\t" \ + "lw $0,%0\n\t" \ + "nop\n\t" \ + ".set pop" \ + : /* no output */ \ + : "m" (*(int *)CKSEG1) \ + : "memory") + +#define fast_wmb() __sync() +#define fast_rmb() __sync() +#define fast_mb() __sync() +#define fast_iob() \ + do { \ + __sync(); \ + __fast_iob(); \ + } while (0) + +#ifdef CONFIG_CPU_HAS_WB + +#include <asm/wbflush.h> + +#define wmb() fast_wmb() +#define rmb() fast_rmb() +#define mb() wbflush() +#define iob() wbflush() + +#else /* !CONFIG_CPU_HAS_WB */ + +#define wmb() fast_wmb() +#define rmb() fast_rmb() +#define mb() fast_mb() +#define iob() fast_iob() + +#endif /* !CONFIG_CPU_HAS_WB */ + +#if defined(CONFIG_WEAK_ORDERING) && defined(CONFIG_SMP) +#define __WEAK_ORDERING_MB " sync \n" +#else +#define __WEAK_ORDERING_MB " \n" +#endif + +#define smp_mb() __asm__ __volatile__(__WEAK_ORDERING_MB : : :"memory") +#define smp_rmb() __asm__ __volatile__(__WEAK_ORDERING_MB : : :"memory") +#define smp_wmb() __asm__ __volatile__(__WEAK_ORDERING_MB : : :"memory") + +#define set_mb(var, value) \ + do { var = value; smp_mb(); } while (0) + +#endif /* __ASM_BARRIER_H */ diff --git a/include/asm-mips/bitops.h b/include/asm-mips/bitops.h index b9007411b60..06445de1324 100644 --- a/include/asm-mips/bitops.h +++ b/include/asm-mips/bitops.h @@ -3,7 +3,7 @@ * License. See the file "COPYING" in the main directory of this archive * for more details. * - * Copyright (c) 1994 - 1997, 1999, 2000 Ralf Baechle (ralf@gnu.org) + * Copyright (c) 1994 - 1997, 1999, 2000, 06 Ralf Baechle (ralf@linux-mips.org) * Copyright (c) 1999, 2000 Silicon Graphics, Inc. */ #ifndef _ASM_BITOPS_H @@ -12,6 +12,7 @@ #include <linux/compiler.h> #include <linux/irqflags.h> #include <linux/types.h> +#include <asm/barrier.h> #include <asm/bug.h> #include <asm/byteorder.h> /* sigh ... 
*/ #include <asm/cpu-features.h> @@ -204,9 +205,6 @@ static inline int test_and_set_bit(unsigned long nr, " " __SC "%2, %1 \n" " beqzl %2, 1b \n" " and %2, %0, %3 \n" -#ifdef CONFIG_SMP - " sync \n" -#endif " .set mips0 \n" : "=&r" (temp), "=m" (*m), "=&r" (res) : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m) @@ -226,9 +224,6 @@ static inline int test_and_set_bit(unsigned long nr, " " __SC "%2, %1 \n" " beqz %2, 1b \n" " and %2, %0, %3 \n" -#ifdef CONFIG_SMP - " sync \n" -#endif " .set pop \n" : "=&r" (temp), "=m" (*m), "=&r" (res) : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m) @@ -250,6 +245,8 @@ static inline int test_and_set_bit(unsigned long nr, return retval; } + + smp_mb(); } /* @@ -275,9 +272,6 @@ static inline int test_and_clear_bit(unsigned long nr, " " __SC "%2, %1 \n" " beqzl %2, 1b \n" " and %2, %0, %3 \n" -#ifdef CONFIG_SMP - " sync \n" -#endif " .set mips0 \n" : "=&r" (temp), "=m" (*m), "=&r" (res) : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m) @@ -298,9 +292,6 @@ static inline int test_and_clear_bit(unsigned long nr, " " __SC "%2, %1 \n" " beqz %2, 1b \n" " and %2, %0, %3 \n" -#ifdef CONFIG_SMP - " sync \n" -#endif " .set pop \n" : "=&r" (temp), "=m" (*m), "=&r" (res) : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m) @@ -322,6 +313,8 @@ static inline int test_and_clear_bit(unsigned long nr, return retval; } + + smp_mb(); } /* @@ -346,9 +339,6 @@ static inline int test_and_change_bit(unsigned long nr, " " __SC "%2, %1 \n" " beqzl %2, 1b \n" " and %2, %0, %3 \n" -#ifdef CONFIG_SMP - " sync \n" -#endif " .set mips0 \n" : "=&r" (temp), "=m" (*m), "=&r" (res) : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m) @@ -368,9 +358,6 @@ static inline int test_and_change_bit(unsigned long nr, " " __SC "\t%2, %1 \n" " beqz %2, 1b \n" " and %2, %0, %3 \n" -#ifdef CONFIG_SMP - " sync \n" -#endif " .set pop \n" : "=&r" (temp), "=m" (*m), "=&r" (res) : "r" (1UL << (nr & SZLONG_MASK)), "m" (*m) @@ -391,6 +378,8 @@ static inline int test_and_change_bit(unsigned long nr, return retval; } + + smp_mb(); } #include <asm-generic/bitops/non-atomic.h> diff --git a/include/asm-mips/compat.h b/include/asm-mips/compat.h index 900f472fdd2..55a0152feb0 100644 --- a/include/asm-mips/compat.h +++ b/include/asm-mips/compat.h @@ -32,6 +32,7 @@ typedef struct { s32 val[2]; } compat_fsid_t; typedef s32 compat_timer_t; +typedef s32 compat_key_t; typedef s32 compat_int_t; typedef s32 compat_long_t; @@ -146,4 +147,71 @@ static inline void __user *compat_alloc_user_space(long len) return (void __user *) (regs->regs[29] - len); } +struct compat_ipc64_perm { + compat_key_t key; + __compat_uid32_t uid; + __compat_gid32_t gid; + __compat_uid32_t cuid; + __compat_gid32_t cgid; + compat_mode_t mode; + unsigned short seq; + unsigned short __pad2; + compat_ulong_t __unused1; + compat_ulong_t __unused2; +}; + +struct compat_semid64_ds { + struct compat_ipc64_perm sem_perm; + compat_time_t sem_otime; + compat_time_t sem_ctime; + compat_ulong_t sem_nsems; + compat_ulong_t __unused1; + compat_ulong_t __unused2; +}; + +struct compat_msqid64_ds { + struct compat_ipc64_perm msg_perm; +#ifndef CONFIG_CPU_LITTLE_ENDIAN + compat_ulong_t __unused1; +#endif + compat_time_t msg_stime; +#ifdef CONFIG_CPU_LITTLE_ENDIAN + compat_ulong_t __unused1; +#endif +#ifndef CONFIG_CPU_LITTLE_ENDIAN + compat_ulong_t __unused2; +#endif + compat_time_t msg_rtime; +#ifdef CONFIG_CPU_LITTLE_ENDIAN + compat_ulong_t __unused2; +#endif +#ifndef CONFIG_CPU_LITTLE_ENDIAN + compat_ulong_t __unused3; +#endif + compat_time_t msg_ctime; +#ifdef CONFIG_CPU_LITTLE_ENDIAN + compat_ulong_t 
__unused3; +#endif + compat_ulong_t msg_cbytes; + compat_ulong_t msg_qnum; + compat_ulong_t msg_qbytes; + compat_pid_t msg_lspid; + compat_pid_t msg_lrpid; + compat_ulong_t __unused4; + compat_ulong_t __unused5; +}; + +struct compat_shmid64_ds { + struct compat_ipc64_perm shm_perm; + compat_size_t shm_segsz; + compat_time_t shm_atime; + compat_time_t shm_dtime; + compat_time_t shm_ctime; + compat_pid_t shm_cpid; + compat_pid_t shm_lpid; + compat_ulong_t shm_nattch; + compat_ulong_t __unused1; + compat_ulong_t __unused2; +}; + #endif /* _ASM_COMPAT_H */ diff --git a/include/asm-mips/futex.h b/include/asm-mips/futex.h index ed023eae067..927a216bd53 100644 --- a/include/asm-mips/futex.h +++ b/include/asm-mips/futex.h @@ -1,19 +1,21 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (c) 2006 Ralf Baechle (ralf@linux-mips.org) + */ #ifndef _ASM_FUTEX_H #define _ASM_FUTEX_H #ifdef __KERNEL__ #include <linux/futex.h> +#include <asm/barrier.h> #include <asm/errno.h> #include <asm/uaccess.h> #include <asm/war.h> -#ifdef CONFIG_SMP -#define __FUTEX_SMP_SYNC " sync \n" -#else -#define __FUTEX_SMP_SYNC -#endif - #define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \ { \ if (cpu_has_llsc && R10000_LLSC_WAR) { \ @@ -27,7 +29,7 @@ " .set mips3 \n" \ "2: sc $1, %2 \n" \ " beqzl $1, 1b \n" \ - __FUTEX_SMP_SYNC \ + __WEAK_ORDERING_MB \ "3: \n" \ " .set pop \n" \ " .set mips0 \n" \ @@ -53,7 +55,7 @@ " .set mips3 \n" \ "2: sc $1, %2 \n" \ " beqz $1, 1b \n" \ - __FUTEX_SMP_SYNC \ + __WEAK_ORDERING_MB \ "3: \n" \ " .set pop \n" \ " .set mips0 \n" \ @@ -150,7 +152,7 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) " .set mips3 \n" "2: sc $1, %1 \n" " beqzl $1, 1b \n" - __FUTEX_SMP_SYNC + __WEAK_ORDERING_MB "3: \n" " .set pop \n" " .section .fixup,\"ax\" \n" @@ -177,7 +179,7 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) " .set mips3 \n" "2: sc $1, %1 \n" " beqz $1, 1b \n" - __FUTEX_SMP_SYNC + __WEAK_ORDERING_MB "3: \n" " .set pop \n" " .section .fixup,\"ax\" \n" diff --git a/include/asm-mips/i8259.h b/include/asm-mips/i8259.h index 0214abe3f0a..4df8d8b118c 100644 --- a/include/asm-mips/i8259.h +++ b/include/asm-mips/i8259.h @@ -19,10 +19,31 @@ #include <asm/io.h> +/* i8259A PIC registers */ +#define PIC_MASTER_CMD 0x20 +#define PIC_MASTER_IMR 0x21 +#define PIC_MASTER_ISR PIC_MASTER_CMD +#define PIC_MASTER_POLL PIC_MASTER_ISR +#define PIC_MASTER_OCW3 PIC_MASTER_ISR +#define PIC_SLAVE_CMD 0xa0 +#define PIC_SLAVE_IMR 0xa1 + +/* i8259A PIC related value */ +#define PIC_CASCADE_IR 2 +#define MASTER_ICW4_DEFAULT 0x01 +#define SLAVE_ICW4_DEFAULT 0x01 +#define PIC_ICW4_AEOI 2 + extern spinlock_t i8259A_lock; +extern void init_8259A(int auto_eoi); +extern void enable_8259A_irq(unsigned int irq); +extern void disable_8259A_irq(unsigned int irq); + extern void init_i8259_irqs(void); +#define I8259A_IRQ_BASE 0 + /* * Do the traditional i8259 interrupt polling thing. This is for the few * cases where no better interrupt acknowledge method is available and we @@ -35,15 +56,15 @@ static inline int i8259_irq(void) spin_lock(&i8259A_lock); /* Perform an interrupt acknowledge cycle on controller 1. 
*/ - outb(0x0C, 0x20); /* prepare for poll */ - irq = inb(0x20) & 7; - if (irq == 2) { + outb(0x0C, PIC_MASTER_CMD); /* prepare for poll */ + irq = inb(PIC_MASTER_CMD) & 7; + if (irq == PIC_CASCADE_IR) { /* * Interrupt is cascaded so perform interrupt * acknowledge on controller 2. */ - outb(0x0C, 0xA0); /* prepare for poll */ - irq = (inb(0xA0) & 7) + 8; + outb(0x0C, PIC_SLAVE_CMD); /* prepare for poll */ + irq = (inb(PIC_SLAVE_CMD) & 7) + 8; } if (unlikely(irq == 7)) { @@ -54,14 +75,14 @@ static inline int i8259_irq(void) * significant bit is not set then there is no valid * interrupt. */ - outb(0x0B, 0x20); /* ISR register */ - if(~inb(0x20) & 0x80) + outb(0x0B, PIC_MASTER_ISR); /* ISR register */ + if(~inb(PIC_MASTER_ISR) & 0x80) irq = -1; } spin_unlock(&i8259A_lock); - return irq; + return likely(irq >= 0) ? irq + I8259A_IRQ_BASE : irq; } #endif /* _ASM_I8259_H */ diff --git a/include/asm-mips/pgtable-32.h b/include/asm-mips/pgtable-32.h index d20f2e9b28b..2fbd47eba32 100644 --- a/include/asm-mips/pgtable-32.h +++ b/include/asm-mips/pgtable-32.h @@ -156,9 +156,9 @@ pfn_pte(unsigned long pfn, pgprot_t prot) #define __pte_offset(address) \ (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) #define pte_offset(dir, address) \ - ((pte_t *) (pmd_page_vaddr(*dir)) + __pte_offset(address)) -#define pte_offset_kernel(dir, address) \ - ((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address)) + ((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address)) +#define pte_offset_kernel(dir, address) \ + ((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address)) #define pte_offset_map(dir, address) \ ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address)) diff --git a/include/asm-mips/pgtable-64.h b/include/asm-mips/pgtable-64.h index b9b1e86493e..a5b18710b6a 100644 --- a/include/asm-mips/pgtable-64.h +++ b/include/asm-mips/pgtable-64.h @@ -212,9 +212,9 @@ static inline pmd_t *pmd_offset(pud_t * pud, unsigned long address) #define __pte_offset(address) \ (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) #define pte_offset(dir, address) \ - ((pte_t *) (pmd_page_vaddr(*dir)) + __pte_offset(address)) + ((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address)) #define pte_offset_kernel(dir, address) \ - ((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address)) + ((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address)) #define pte_offset_map(dir, address) \ ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address)) #define pte_offset_map_nested(dir, address) \ diff --git a/include/asm-mips/sn/klconfig.h b/include/asm-mips/sn/klconfig.h index b63cd0655b3..15d70ca5618 100644 --- a/include/asm-mips/sn/klconfig.h +++ b/include/asm-mips/sn/klconfig.h @@ -176,7 +176,7 @@ typedef struct kl_config_hdr { /* --- New Macros for the changed kl_config_hdr_t structure --- */ #define PTR_CH_MALLOC_HDR(_k) ((klc_malloc_hdr_t *)\ - (unsigned long)_k + (_k->ch_malloc_hdr_off))) + ((unsigned long)_k + (_k->ch_malloc_hdr_off))) #define KL_CONFIG_CH_MALLOC_HDR(_n) PTR_CH_MALLOC_HDR(KL_CONFIG_HDR(_n)) diff --git a/include/asm-mips/spinlock.h b/include/asm-mips/spinlock.h index c8d5587467b..fc3217fc111 100644 --- a/include/asm-mips/spinlock.h +++ b/include/asm-mips/spinlock.h @@ -3,12 +3,13 @@ * License. See the file "COPYING" in the main directory of this archive * for more details. * - * Copyright (C) 1999, 2000 by Ralf Baechle + * Copyright (C) 1999, 2000, 06 by Ralf Baechle * Copyright (C) 1999, 2000 Silicon Graphics, Inc. 
*/ #ifndef _ASM_SPINLOCK_H #define _ASM_SPINLOCK_H +#include <asm/barrier.h> #include <asm/war.h> /* @@ -40,7 +41,6 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock) " sc %1, %0 \n" " beqzl %1, 1b \n" " nop \n" - " sync \n" " .set reorder \n" : "=m" (lock->lock), "=&r" (tmp) : "m" (lock->lock) @@ -53,19 +53,22 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock) " li %1, 1 \n" " sc %1, %0 \n" " beqz %1, 1b \n" - " sync \n" + " nop \n" " .set reorder \n" : "=m" (lock->lock), "=&r" (tmp) : "m" (lock->lock) : "memory"); } + + smp_mb(); } static inline void __raw_spin_unlock(raw_spinlock_t *lock) { + smp_mb(); + __asm__ __volatile__( " .set noreorder # __raw_spin_unlock \n" - " sync \n" " sw $0, %0 \n" " .set\treorder \n" : "=m" (lock->lock) @@ -86,7 +89,6 @@ static inline unsigned int __raw_spin_trylock(raw_spinlock_t *lock) " beqzl %2, 1b \n" " nop \n" " andi %2, %0, 1 \n" - " sync \n" " .set reorder" : "=&r" (temp), "=m" (lock->lock), "=&r" (res) : "m" (lock->lock) @@ -99,13 +101,14 @@ static inline unsigned int __raw_spin_trylock(raw_spinlock_t *lock) " sc %2, %1 \n" " beqz %2, 1b \n" " andi %2, %0, 1 \n" - " sync \n" " .set reorder" : "=&r" (temp), "=m" (lock->lock), "=&r" (res) : "m" (lock->lock) : "memory"); } + smp_mb(); + return res == 0; } @@ -143,7 +146,6 @@ static inline void __raw_read_lock(raw_rwlock_t *rw) " sc %1, %0 \n" " beqzl %1, 1b \n" " nop \n" - " sync \n" " .set reorder \n" : "=m" (rw->lock), "=&r" (tmp) : "m" (rw->lock) @@ -156,12 +158,14 @@ static inline void __raw_read_lock(raw_rwlock_t *rw) " addu %1, 1 \n" " sc %1, %0 \n" " beqz %1, 1b \n" - " sync \n" + " nop \n" " .set reorder \n" : "=m" (rw->lock), "=&r" (tmp) : "m" (rw->lock) : "memory"); } + + smp_mb(); } /* Note the use of sub, not subu which will make the kernel die with an @@ -171,13 +175,14 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw) { unsigned int tmp; + smp_mb(); + if (R10000_LLSC_WAR) { __asm__ __volatile__( "1: ll %1, %2 # __raw_read_unlock \n" " sub %1, 1 \n" " sc %1, %0 \n" " beqzl %1, 1b \n" - " sync \n" : "=m" (rw->lock), "=&r" (tmp) : "m" (rw->lock) : "memory"); @@ -188,7 +193,7 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw) " sub %1, 1 \n" " sc %1, %0 \n" " beqz %1, 1b \n" - " sync \n" + " nop \n" " .set reorder \n" : "=m" (rw->lock), "=&r" (tmp) : "m" (rw->lock) @@ -208,7 +213,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw) " lui %1, 0x8000 \n" " sc %1, %0 \n" " beqzl %1, 1b \n" - " sync \n" + " nop \n" " .set reorder \n" : "=m" (rw->lock), "=&r" (tmp) : "m" (rw->lock) @@ -221,18 +226,22 @@ static inline void __raw_write_lock(raw_rwlock_t *rw) " lui %1, 0x8000 \n" " sc %1, %0 \n" " beqz %1, 1b \n" - " sync \n" + " nop \n" " .set reorder \n" : "=m" (rw->lock), "=&r" (tmp) : "m" (rw->lock) : "memory"); } + + smp_mb(); } static inline void __raw_write_unlock(raw_rwlock_t *rw) { + smp_mb(); + __asm__ __volatile__( - " sync # __raw_write_unlock \n" + " # __raw_write_unlock \n" " sw $0, %0 \n" : "=m" (rw->lock) : "m" (rw->lock) @@ -252,11 +261,10 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw) " bnez %1, 2f \n" " addu %1, 1 \n" " sc %1, %0 \n" - " beqzl %1, 1b \n" " .set reorder \n" -#ifdef CONFIG_SMP - " sync \n" -#endif + " beqzl %1, 1b \n" + " nop \n" + __WEAK_ORDERING_MB " li %2, 1 \n" "2: \n" : "=m" (rw->lock), "=&r" (tmp), "=&r" (ret) @@ -271,10 +279,9 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw) " addu %1, 1 \n" " sc %1, %0 \n" " beqz %1, 1b \n" + " nop \n" " .set reorder \n" -#ifdef CONFIG_SMP - " sync \n" -#endif + 
__WEAK_ORDERING_MB " li %2, 1 \n" "2: \n" : "=m" (rw->lock), "=&r" (tmp), "=&r" (ret) @@ -299,7 +306,8 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw) " lui %1, 0x8000 \n" " sc %1, %0 \n" " beqzl %1, 1b \n" - " sync \n" + " nop \n" + __WEAK_ORDERING_MB " li %2, 1 \n" " .set reorder \n" "2: \n" @@ -315,7 +323,8 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw) " lui %1, 0x8000 \n" " sc %1, %0 \n" " beqz %1, 1b \n" - " sync \n" + " nop \n" + __WEAK_ORDERING_MB " li %2, 1 \n" " .set reorder \n" "2: \n" diff --git a/include/asm-mips/system.h b/include/asm-mips/system.h index 3056feed5a3..9428057a50c 100644 --- a/include/asm-mips/system.h +++ b/include/asm-mips/system.h @@ -3,7 +3,7 @@ * License. See the file "COPYING" in the main directory of this archive * for more details. * - * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003 by Ralf Baechle + * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003, 06 by Ralf Baechle * Copyright (C) 1996 by Paul M. Antoine * Copyright (C) 1999 Silicon Graphics * Kevin D. Kissell, kevink@mips.org and Carsten Langgaard, carstenl@mips.com @@ -16,132 +16,12 @@ #include <linux/irqflags.h> #include <asm/addrspace.h> +#include <asm/barrier.h> #include <asm/cpu-features.h> #include <asm/dsp.h> #include <asm/ptrace.h> #include <asm/war.h> -/* - * read_barrier_depends - Flush all pending reads that subsequents reads - * depend on. - * - * No data-dependent reads from memory-like regions are ever reordered - * over this barrier. All reads preceding this primitive are guaranteed - * to access memory (but not necessarily other CPUs' caches) before any - * reads following this primitive that depend on the data return by - * any of the preceding reads. This primitive is much lighter weight than - * rmb() on most CPUs, and is never heavier weight than is - * rmb(). - * - * These ordering constraints are respected by both the local CPU - * and the compiler. - * - * Ordering is not guaranteed by anything other than these primitives, - * not even by data dependencies. See the documentation for - * memory_barrier() for examples and URLs to more information. - * - * For example, the following code would force ordering (the initial - * value of "a" is zero, "b" is one, and "p" is "&a"): - * - * <programlisting> - * CPU 0 CPU 1 - * - * b = 2; - * memory_barrier(); - * p = &b; q = p; - * read_barrier_depends(); - * d = *q; - * </programlisting> - * - * because the read of "*q" depends on the read of "p" and these - * two reads are separated by a read_barrier_depends(). However, - * the following code, with the same initial values for "a" and "b": - * - * <programlisting> - * CPU 0 CPU 1 - * - * a = 2; - * memory_barrier(); - * b = 3; y = b; - * read_barrier_depends(); - * x = a; - * </programlisting> - * - * does not enforce ordering, since there is no data dependency between - * the read of "a" and the read of "b". Therefore, on some CPUs, such - * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb() - * in cases like this where there are no data dependencies. 
- */ - -#define read_barrier_depends() do { } while(0) - -#ifdef CONFIG_CPU_HAS_SYNC -#define __sync() \ - __asm__ __volatile__( \ - ".set push\n\t" \ - ".set noreorder\n\t" \ - ".set mips2\n\t" \ - "sync\n\t" \ - ".set pop" \ - : /* no output */ \ - : /* no input */ \ - : "memory") -#else -#define __sync() do { } while(0) -#endif - -#define __fast_iob() \ - __asm__ __volatile__( \ - ".set push\n\t" \ - ".set noreorder\n\t" \ - "lw $0,%0\n\t" \ - "nop\n\t" \ - ".set pop" \ - : /* no output */ \ - : "m" (*(int *)CKSEG1) \ - : "memory") - -#define fast_wmb() __sync() -#define fast_rmb() __sync() -#define fast_mb() __sync() -#define fast_iob() \ - do { \ - __sync(); \ - __fast_iob(); \ - } while (0) - -#ifdef CONFIG_CPU_HAS_WB - -#include <asm/wbflush.h> - -#define wmb() fast_wmb() -#define rmb() fast_rmb() -#define mb() wbflush() -#define iob() wbflush() - -#else /* !CONFIG_CPU_HAS_WB */ - -#define wmb() fast_wmb() -#define rmb() fast_rmb() -#define mb() fast_mb() -#define iob() fast_iob() - -#endif /* !CONFIG_CPU_HAS_WB */ - -#ifdef CONFIG_SMP -#define smp_mb() mb() -#define smp_rmb() rmb() -#define smp_wmb() wmb() -#define smp_read_barrier_depends() read_barrier_depends() -#else -#define smp_mb() barrier() -#define smp_rmb() barrier() -#define smp_wmb() barrier() -#define smp_read_barrier_depends() do { } while(0) -#endif - -#define set_mb(var, value) \ -do { var = value; mb(); } while (0) /* * switch_to(n) should switch tasks to task nr n, first @@ -217,9 +97,6 @@ static inline unsigned long __xchg_u32(volatile int * m, unsigned int val) " .set mips3 \n" " sc %2, %1 \n" " beqzl %2, 1b \n" -#ifdef CONFIG_SMP - " sync \n" -#endif " .set mips0 \n" : "=&r" (retval), "=m" (*m), "=&r" (dummy) : "R" (*m), "Jr" (val) @@ -235,9 +112,6 @@ static inline unsigned long __xchg_u32(volatile int * m, unsigned int val) " .set mips3 \n" " sc %2, %1 \n" " beqz %2, 1b \n" -#ifdef CONFIG_SMP - " sync \n" -#endif " .set mips0 \n" : "=&r" (retval), "=m" (*m), "=&r" (dummy) : "R" (*m), "Jr" (val) @@ -251,6 +125,8 @@ static inline unsigned long __xchg_u32(volatile int * m, unsigned int val) local_irq_restore(flags); /* implies memory barrier */ } + smp_mb(); + return retval; } @@ -268,9 +144,6 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val) " move %2, %z4 \n" " scd %2, %1 \n" " beqzl %2, 1b \n" -#ifdef CONFIG_SMP - " sync \n" -#endif " .set mips0 \n" : "=&r" (retval), "=m" (*m), "=&r" (dummy) : "R" (*m), "Jr" (val) @@ -284,9 +157,6 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val) " move %2, %z4 \n" " scd %2, %1 \n" " beqz %2, 1b \n" -#ifdef CONFIG_SMP - " sync \n" -#endif " .set mips0 \n" : "=&r" (retval), "=m" (*m), "=&r" (dummy) : "R" (*m), "Jr" (val) @@ -300,6 +170,8 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val) local_irq_restore(flags); /* implies memory barrier */ } + smp_mb(); + return retval; } #else @@ -345,9 +217,6 @@ static inline unsigned long __cmpxchg_u32(volatile int * m, unsigned long old, " .set mips3 \n" " sc $1, %1 \n" " beqzl $1, 1b \n" -#ifdef CONFIG_SMP - " sync \n" -#endif "2: \n" " .set pop \n" : "=&r" (retval), "=R" (*m) @@ -365,9 +234,6 @@ static inline unsigned long __cmpxchg_u32(volatile int * m, unsigned long old, " .set mips3 \n" " sc $1, %1 \n" " beqz $1, 1b \n" -#ifdef CONFIG_SMP - " sync \n" -#endif "2: \n" " .set pop \n" : "=&r" (retval), "=R" (*m) @@ -383,6 +249,8 @@ static inline unsigned long __cmpxchg_u32(volatile int * m, unsigned long old, local_irq_restore(flags); /* implies memory barrier */ } + smp_mb(); + return retval; 
} @@ -402,9 +270,6 @@ static inline unsigned long __cmpxchg_u64(volatile int * m, unsigned long old, " move $1, %z4 \n" " scd $1, %1 \n" " beqzl $1, 1b \n" -#ifdef CONFIG_SMP - " sync \n" -#endif "2: \n" " .set pop \n" : "=&r" (retval), "=R" (*m) @@ -420,9 +285,6 @@ static inline unsigned long __cmpxchg_u64(volatile int * m, unsigned long old, " move $1, %z4 \n" " scd $1, %1 \n" " beqz $1, 1b \n" -#ifdef CONFIG_SMP - " sync \n" -#endif "2: \n" " .set pop \n" : "=&r" (retval), "=R" (*m) @@ -438,6 +300,8 @@ static inline unsigned long __cmpxchg_u64(volatile int * m, unsigned long old, local_irq_restore(flags); /* implies memory barrier */ } + smp_mb(); + return retval; } #else diff --git a/include/asm-mips/types.h b/include/asm-mips/types.h index 2b52e180c6f..63a13c5bd83 100644 --- a/include/asm-mips/types.h +++ b/include/asm-mips/types.h @@ -93,16 +93,6 @@ typedef unsigned long long phys_t; typedef unsigned long phys_t; #endif -#ifdef CONFIG_LBD -typedef u64 sector_t; -#define HAVE_SECTOR_T -#endif - -#ifdef CONFIG_LSF -typedef u64 blkcnt_t; -#define HAVE_BLKCNT_T -#endif - #endif /* __ASSEMBLY__ */ #endif /* __KERNEL__ */ |
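
For context on how the new smp_* primitives from barrier.h are meant to be paired, here is a hypothetical producer/consumer example (not part of this commit; the names are made up). On CONFIG_WEAK_ORDERING SMP kernels both barriers expand to a MIPS "sync"; on other configurations they are compiler barriers only.

/* Hypothetical illustration, not from this commit. */
static int shared_data;
static volatile int data_ready;	/* volatile only to keep the sketch self-contained */

static void publish(void)
{
	shared_data = 42;
	smp_wmb();		/* order the data store before the flag store */
	data_ready = 1;
}

static int consume(void)
{
	while (!data_ready)
		cpu_relax();
	smp_rmb();		/* order the flag load before the data load */
	return shared_data;
}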