Diffstat (limited to 'include/asm-powerpc')
-rw-r--r--  include/asm-powerpc/atomic.h    | 46
-rw-r--r--  include/asm-powerpc/bitops.h    |  6
-rw-r--r--  include/asm-powerpc/cputable.h  | 14
-rw-r--r--  include/asm-powerpc/elf.h       | 16
-rw-r--r--  include/asm-powerpc/futex.h     |  2
-rw-r--r--  include/asm-powerpc/hvcall.h    |  5
-rw-r--r--  include/asm-powerpc/kexec.h     | 85
-rw-r--r--  include/asm-powerpc/lppaca.h    |  6
-rw-r--r--  include/asm-powerpc/paca.h      | 14
-rw-r--r--  include/asm-powerpc/ppc_asm.h   | 76
-rw-r--r--  include/asm-powerpc/prom.h      |  8
-rw-r--r--  include/asm-powerpc/spinlock.h  | 21
-rw-r--r--  include/asm-powerpc/synch.h     | 23
-rw-r--r--  include/asm-powerpc/system.h    |  8
-rw-r--r--  include/asm-powerpc/time.h      |  5
15 files changed, 196 insertions(+), 139 deletions(-)
diff --git a/include/asm-powerpc/atomic.h b/include/asm-powerpc/atomic.h
index 248f9aec959..147a38dcc76 100644
--- a/include/asm-powerpc/atomic.h
+++ b/include/asm-powerpc/atomic.h
@@ -36,7 +36,7 @@ static __inline__ int atomic_add_return(int a, atomic_t *v)
int t;
__asm__ __volatile__(
- EIEIO_ON_SMP
+ LWSYNC_ON_SMP
"1: lwarx %0,0,%2 # atomic_add_return\n\
add %0,%1,%0\n"
PPC405_ERR77(0,%2)
@@ -72,7 +72,7 @@ static __inline__ int atomic_sub_return(int a, atomic_t *v)
int t;
__asm__ __volatile__(
- EIEIO_ON_SMP
+ LWSYNC_ON_SMP
"1: lwarx %0,0,%2 # atomic_sub_return\n\
subf %0,%1,%0\n"
PPC405_ERR77(0,%2)
@@ -106,7 +106,7 @@ static __inline__ int atomic_inc_return(atomic_t *v)
int t;
__asm__ __volatile__(
- EIEIO_ON_SMP
+ LWSYNC_ON_SMP
"1: lwarx %0,0,%1 # atomic_inc_return\n\
addic %0,%0,1\n"
PPC405_ERR77(0,%1)
@@ -150,7 +150,7 @@ static __inline__ int atomic_dec_return(atomic_t *v)
int t;
__asm__ __volatile__(
- EIEIO_ON_SMP
+ LWSYNC_ON_SMP
"1: lwarx %0,0,%1 # atomic_dec_return\n\
addic %0,%0,-1\n"
PPC405_ERR77(0,%1)
@@ -176,19 +176,19 @@ static __inline__ int atomic_dec_return(atomic_t *v)
* Atomically adds @a to @v, so long as it was not @u.
* Returns non-zero if @v was not @u, and zero otherwise.
*/
-#define atomic_add_unless(v, a, u) \
-({ \
- int c, old; \
- c = atomic_read(v); \
- for (;;) { \
- if (unlikely(c == (u))) \
- break; \
- old = atomic_cmpxchg((v), c, c + (a)); \
- if (likely(old == c)) \
- break; \
- c = old; \
- } \
- c != (u); \
+#define atomic_add_unless(v, a, u) \
+({ \
+ int c, old; \
+ c = atomic_read(v); \
+ for (;;) { \
+ if (unlikely(c == (u))) \
+ break; \
+ old = atomic_cmpxchg((v), c, c + (a)); \
+ if (likely(old == c)) \
+ break; \
+ c = old; \
+ } \
+ c != (u); \
})
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
@@ -204,7 +204,7 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)
int t;
__asm__ __volatile__(
- EIEIO_ON_SMP
+ LWSYNC_ON_SMP
"1: lwarx %0,0,%1 # atomic_dec_if_positive\n\
addic. %0,%0,-1\n\
blt- 2f\n"
@@ -253,7 +253,7 @@ static __inline__ long atomic64_add_return(long a, atomic64_t *v)
long t;
__asm__ __volatile__(
- EIEIO_ON_SMP
+ LWSYNC_ON_SMP
"1: ldarx %0,0,%2 # atomic64_add_return\n\
add %0,%1,%0\n\
stdcx. %0,0,%2 \n\
@@ -287,7 +287,7 @@ static __inline__ long atomic64_sub_return(long a, atomic64_t *v)
long t;
__asm__ __volatile__(
- EIEIO_ON_SMP
+ LWSYNC_ON_SMP
"1: ldarx %0,0,%2 # atomic64_sub_return\n\
subf %0,%1,%0\n\
stdcx. %0,0,%2 \n\
@@ -319,7 +319,7 @@ static __inline__ long atomic64_inc_return(atomic64_t *v)
long t;
__asm__ __volatile__(
- EIEIO_ON_SMP
+ LWSYNC_ON_SMP
"1: ldarx %0,0,%1 # atomic64_inc_return\n\
addic %0,%0,1\n\
stdcx. %0,0,%1 \n\
@@ -361,7 +361,7 @@ static __inline__ long atomic64_dec_return(atomic64_t *v)
long t;
__asm__ __volatile__(
- EIEIO_ON_SMP
+ LWSYNC_ON_SMP
"1: ldarx %0,0,%1 # atomic64_dec_return\n\
addic %0,%0,-1\n\
stdcx. %0,0,%1\n\
@@ -386,7 +386,7 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
long t;
__asm__ __volatile__(
- EIEIO_ON_SMP
+ LWSYNC_ON_SMP
"1: ldarx %0,0,%1 # atomic64_dec_if_positive\n\
addic. %0,%0,-1\n\
blt- 2f\n\
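
The hunks above replace the eieio import barrier with lwsync ahead of each lwarx/ldarx sequence; the matching acquire isync on the exit path is unchanged and sits outside the shown context. As a usage sketch of the rewritten atomic_add_unless()/atomic_inc_not_zero() pair, here is the classic refcount idiom; struct obj, obj_get() and obj_put() are invented for illustration, only the atomic_* calls come from this header:

    #include <asm/atomic.h>
    #include <linux/slab.h>

    struct obj {
            atomic_t refcount;      /* hypothetical refcounted object */
    };

    static int obj_get(struct obj *o)
    {
            /* Succeeds only while refcount != 0, so a dying object
             * (count already dropped to zero) cannot be revived. */
            return atomic_inc_not_zero(&o->refcount);
    }

    static void obj_put(struct obj *o)
    {
            if (atomic_dec_and_test(&o->refcount))
                    kfree(o);
    }
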
diff --git a/include/asm-powerpc/bitops.h b/include/asm-powerpc/bitops.h
index 1996eaa8aea..bf6941a810b 100644
--- a/include/asm-powerpc/bitops.h
+++ b/include/asm-powerpc/bitops.h
@@ -112,7 +112,7 @@ static __inline__ int test_and_set_bit(unsigned long nr,
unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
__asm__ __volatile__(
- EIEIO_ON_SMP
+ LWSYNC_ON_SMP
"1:" PPC_LLARX "%0,0,%3 # test_and_set_bit\n"
"or %1,%0,%2 \n"
PPC405_ERR77(0,%3)
@@ -134,7 +134,7 @@ static __inline__ int test_and_clear_bit(unsigned long nr,
unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
__asm__ __volatile__(
- EIEIO_ON_SMP
+ LWSYNC_ON_SMP
"1:" PPC_LLARX "%0,0,%3 # test_and_clear_bit\n"
"andc %1,%0,%2 \n"
PPC405_ERR77(0,%3)
@@ -156,7 +156,7 @@ static __inline__ int test_and_change_bit(unsigned long nr,
unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
__asm__ __volatile__(
- EIEIO_ON_SMP
+ LWSYNC_ON_SMP
"1:" PPC_LLARX "%0,0,%3 # test_and_change_bit\n"
"xor %1,%0,%2 \n"
PPC405_ERR77(0,%3)
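
The same substitution is applied to the three read-modify-write bitops, which keep the ordering drivers rely on (lwsync before the reservation, isync after the store, the latter outside the shown context). A minimal sketch of the bit-lock idiom this supports; dev_flags, DEV_XMIT_BUSY and the function name are invented:

    static unsigned long dev_flags;         /* hypothetical flag word */
    #define DEV_XMIT_BUSY   0               /* hypothetical bit number */

    static int dev_try_start_xmit(void)
    {
            if (test_and_set_bit(DEV_XMIT_BUSY, &dev_flags))
                    return -EBUSY;          /* someone else holds it */
            /* ... work here is ordered after the successful set ... */
            smp_mb__before_clear_bit();     /* release ordering for the unlock */
            clear_bit(DEV_XMIT_BUSY, &dev_flags);
            return 0;
    }
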
diff --git a/include/asm-powerpc/cputable.h b/include/asm-powerpc/cputable.h
index ef6ead34a77..64210549f56 100644
--- a/include/asm-powerpc/cputable.h
+++ b/include/asm-powerpc/cputable.h
@@ -19,6 +19,7 @@
#define PPC_FEATURE_POWER5 0x00040000
#define PPC_FEATURE_POWER5_PLUS 0x00020000
#define PPC_FEATURE_CELL 0x00010000
+#define PPC_FEATURE_BOOKE 0x00008000
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
@@ -31,11 +32,11 @@ struct cpu_spec;
typedef void (*cpu_setup_t)(unsigned long offset, struct cpu_spec* spec);
enum powerpc_oprofile_type {
- INVALID = 0,
- RS64 = 1,
- POWER4 = 2,
- G4 = 3,
- BOOKE = 4,
+ PPC_OPROFILE_INVALID = 0,
+ PPC_OPROFILE_RS64 = 1,
+ PPC_OPROFILE_POWER4 = 2,
+ PPC_OPROFILE_G4 = 3,
+ PPC_OPROFILE_BOOKE = 4,
};
struct cpu_spec {
@@ -64,6 +65,9 @@ struct cpu_spec {
/* Processor specific oprofile operations */
enum powerpc_oprofile_type oprofile_type;
+
+ /* Name of processor class, for the ELF AT_PLATFORM entry */
+ char *platform;
};
extern struct cpu_spec *cur_cpu_spec;
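
Both new cpu_spec fields get filled in per processor in the cpu_specs[] table (arch/powerpc/kernel/cputable.c); a sketch of how one entry might look after this change, with the values illustrative rather than copied from the real table:

    {       /* hypothetical POWER5 entry, abbreviated */
            .cpu_name       = "POWER5",
            .oprofile_type  = PPC_OPROFILE_POWER4,  /* POWER5 reuses the POWER4 driver */
            .platform       = "power5",             /* becomes ELF AT_PLATFORM */
    },
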
diff --git a/include/asm-powerpc/elf.h b/include/asm-powerpc/elf.h
index 45f2af6f89c..94d228f9c6a 100644
--- a/include/asm-powerpc/elf.h
+++ b/include/asm-powerpc/elf.h
@@ -221,20 +221,18 @@ extern int dump_task_fpu(struct task_struct *, elf_fpregset_t *);
instruction set this cpu supports. This could be done in userspace,
but it's not easy, and we've already done it here. */
# define ELF_HWCAP (cur_cpu_spec->cpu_user_features)
-#ifdef __powerpc64__
-# define ELF_PLAT_INIT(_r, load_addr) do { \
- _r->gpr[2] = load_addr; \
-} while (0)
-#endif /* __powerpc64__ */
/* This yields a string that ld.so will use to load implementation
specific libraries for optimization. This is more specific in
- intent than poking at uname or /proc/cpuinfo.
+ intent than poking at uname or /proc/cpuinfo. */
- For the moment, we have only optimizations for the Intel generations,
- but that could change... */
+#define ELF_PLATFORM (cur_cpu_spec->platform)
-#define ELF_PLATFORM (NULL)
+#ifdef __powerpc64__
+# define ELF_PLAT_INIT(_r, load_addr) do { \
+ _r->gpr[2] = load_addr; \
+} while (0)
+#endif /* __powerpc64__ */
#ifdef __KERNEL__
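
With ELF_PLATFORM now returning cur_cpu_spec->platform instead of NULL, the string reaches userspace through the AT_PLATFORM auxv entry, which ld.so uses to pick optimized library directories. A small illustrative reader; getauxval() is the much later glibc interface and is shown only to make the plumbing concrete:

    #include <stdio.h>
    #include <sys/auxv.h>

    int main(void)
    {
            /* AT_PLATFORM carries whatever the kernel put in ELF_PLATFORM. */
            const char *plat = (const char *)getauxval(AT_PLATFORM);
            printf("AT_PLATFORM = %s\n", plat ? plat : "(unset)");
            return 0;
    }
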
diff --git a/include/asm-powerpc/futex.h b/include/asm-powerpc/futex.h
index f0319d50b12..39e85f320a7 100644
--- a/include/asm-powerpc/futex.h
+++ b/include/asm-powerpc/futex.h
@@ -11,7 +11,7 @@
#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
__asm__ __volatile ( \
- SYNC_ON_SMP \
+ LWSYNC_ON_SMP \
"1: lwarx %0,0,%2\n" \
insn \
PPC405_ERR77(0, %2) \
diff --git a/include/asm-powerpc/hvcall.h b/include/asm-powerpc/hvcall.h
index da7af5a720e..38ca9ad6110 100644
--- a/include/asm-powerpc/hvcall.h
+++ b/include/asm-powerpc/hvcall.h
@@ -6,7 +6,10 @@
#define H_Success 0
#define H_Busy 1 /* Hardware busy -- retry later */
+#define H_Closed 2 /* Resource closed */
#define H_Constrained 4 /* Resource request constrained to max allowed */
+#define H_InProgress 14 /* Kind of like busy */
+#define H_Continue 18 /* Returned from H_Join on success */
#define H_LongBusyStartRange 9900 /* Start of long busy range */
#define H_LongBusyOrder1msec 9900 /* Long busy, hint that 1msec is a good time to retry */
#define H_LongBusyOrder10msec 9901 /* Long busy, hint that 10msec is a good time to retry */
@@ -114,6 +117,8 @@
#define H_REGISTER_VTERM 0x154
#define H_FREE_VTERM 0x158
#define H_POLL_PENDING 0x1D8
+#define H_JOIN 0x298
+#define H_ENABLE_CRQ 0x2B0
#ifndef __ASSEMBLY__
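
The new status values slot into the usual hypervisor retry pattern; a sketch under the assumption of a do_hcall() wrapper, which, along with crq_enable() and unit_address, is invented here, the real callers go through the plpar_hcall family:

    static int crq_enable(unsigned long unit_address)
    {
            long rc;

            for (;;) {
                    rc = do_hcall(H_ENABLE_CRQ, unit_address);  /* hypothetical wrapper */
                    if (rc == H_Busy || rc == H_InProgress) {
                            cpu_relax();            /* transient: retry immediately */
                            continue;
                    }
                    if (rc >= H_LongBusyStartRange) {   /* 9900+ long-busy hints */
                            msleep(10);             /* real code scales to the hint */
                            continue;
                    }
                    return rc == H_Success ? 0 : -EIO;
            }
    }
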
diff --git a/include/asm-powerpc/kexec.h b/include/asm-powerpc/kexec.h
index fffdf690b84..640a6459f2f 100644
--- a/include/asm-powerpc/kexec.h
+++ b/include/asm-powerpc/kexec.h
@@ -31,12 +31,80 @@
#define KEXEC_ARCH KEXEC_ARCH_PPC
#endif
-#define HAVE_ARCH_COPY_OLDMEM_PAGE
-
-#ifndef __ASSEMBLY__
-
#ifdef CONFIG_KEXEC
+#ifdef __powerpc64__
+/*
+ * This function is responsible for capturing register states if coming
+ * via panic or invoking dump using sysrq-trigger.
+ */
+static inline void crash_setup_regs(struct pt_regs *newregs,
+ struct pt_regs *oldregs)
+{
+ if (oldregs)
+ memcpy(newregs, oldregs, sizeof(*newregs));
+ else {
+ /* FIXME Merge this with xmon_save_regs ?? */
+ unsigned long tmp1, tmp2;
+ __asm__ __volatile__ (
+ "std 0,0(%2)\n"
+ "std 1,8(%2)\n"
+ "std 2,16(%2)\n"
+ "std 3,24(%2)\n"
+ "std 4,32(%2)\n"
+ "std 5,40(%2)\n"
+ "std 6,48(%2)\n"
+ "std 7,56(%2)\n"
+ "std 8,64(%2)\n"
+ "std 9,72(%2)\n"
+ "std 10,80(%2)\n"
+ "std 11,88(%2)\n"
+ "std 12,96(%2)\n"
+ "std 13,104(%2)\n"
+ "std 14,112(%2)\n"
+ "std 15,120(%2)\n"
+ "std 16,128(%2)\n"
+ "std 17,136(%2)\n"
+ "std 18,144(%2)\n"
+ "std 19,152(%2)\n"
+ "std 20,160(%2)\n"
+ "std 21,168(%2)\n"
+ "std 22,176(%2)\n"
+ "std 23,184(%2)\n"
+ "std 24,192(%2)\n"
+ "std 25,200(%2)\n"
+ "std 26,208(%2)\n"
+ "std 27,216(%2)\n"
+ "std 28,224(%2)\n"
+ "std 29,232(%2)\n"
+ "std 30,240(%2)\n"
+ "std 31,248(%2)\n"
+ "mfmsr %0\n"
+ "std %0, 264(%2)\n"
+ "mfctr %0\n"
+ "std %0, 280(%2)\n"
+ "mflr %0\n"
+ "std %0, 288(%2)\n"
+ "bl 1f\n"
+ "1: mflr %1\n"
+ "std %1, 256(%2)\n"
+ "mtlr %0\n"
+ "mfxer %0\n"
+ "std %0, 296(%2)\n"
+ : "=&r" (tmp1), "=&r" (tmp2)
+ : "b" (newregs));
+ }
+}
+#else
+/*
+ * Provide a dummy definition to avoid build failures. Will remain
+ * empty till crash dump support is enabled.
+ */
+static inline void crash_setup_regs(struct pt_regs *newregs,
+ struct pt_regs *oldregs) { }
+#endif /* !__powerpc64__ */
+
+#ifndef __ASSEMBLY__
#define MAX_NOTE_BYTES 1024
#ifdef __powerpc64__
@@ -53,14 +121,7 @@ extern void default_machine_kexec(struct kimage *image);
extern int default_machine_kexec_prepare(struct kimage *image);
extern void default_machine_crash_shutdown(struct pt_regs *regs);
-#endif /* !CONFIG_KEXEC */
-
-/*
- * Provide a dummy definition to avoid build failures. Will remain
- * empty till crash dump support is enabled.
- */
-static inline void crash_setup_regs(struct pt_regs *newregs,
- struct pt_regs *oldregs) { }
#endif /* ! __ASSEMBLY__ */
+#endif /* CONFIG_KEXEC */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_KEXEC_H */
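
The store offsets in crash_setup_regs() are not magic: they follow the ppc64 struct pt_regs layout, where 32 GPRs of 8 bytes occupy offsets 0 through 248 and the special registers follow. The bl 1f; mflr pair records the address of the instruction at label 1, the closest available stand-in for the interrupted NIP, and the real LR is saved beforehand and restored with mtlr. Abbreviated for reference (the real definition lives in asm/ptrace.h):

    struct pt_regs {                /* ppc64 layout, abbreviated */
            unsigned long gpr[32];  /* 0 .. 248: the "std 0..31" stores */
            unsigned long nip;      /* 256: captured via the bl 1f; mflr trick */
            unsigned long msr;      /* 264: mfmsr */
            unsigned long orig_gpr3;/* 272: not captured here */
            unsigned long ctr;      /* 280: mfctr */
            unsigned long link;     /* 288: mflr, saved before the bl */
            unsigned long xer;      /* 296: mfxer */
            /* ccr, softe, trap, dar, dsisr, result follow */
    };
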
diff --git a/include/asm-powerpc/lppaca.h b/include/asm-powerpc/lppaca.h
index ff82ea7c482..4dc514aabfe 100644
--- a/include/asm-powerpc/lppaca.h
+++ b/include/asm-powerpc/lppaca.h
@@ -29,6 +29,8 @@
//----------------------------------------------------------------------------
#include <asm/types.h>
+/* The Hypervisor barfs if the lppaca crosses a page boundary. A 1k
+ * alignment is sufficient to prevent this */
struct lppaca {
//=============================================================================
// CACHE_LINE_1 0x0000 - 0x007F Contains read-only data
@@ -127,7 +129,9 @@ struct lppaca {
// CACHE_LINE_4-5 0x0100 - 0x01FF Contains PMC interrupt data
//=============================================================================
u8 pmc_save_area[256]; // PMC interrupt Area x00-xFF
-};
+} __attribute__((__aligned__(0x400)));
+
+extern struct lppaca lppaca[];
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_LPPACA_H */
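
The 1 KB alignment is sufficient because the structure itself is no larger than 1 KB, and an N-byte-aligned object of size at most N can never straddle a page boundary. A compile-time guard along these lines would pin the assumption down; the check itself is not part of the patch:

    /* Not in the patch: documents the size/alignment assumption above. */
    static inline void lppaca_layout_check(void)
    {
            BUILD_BUG_ON(sizeof(struct lppaca) > 0x400);
    }
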
diff --git a/include/asm-powerpc/paca.h b/include/asm-powerpc/paca.h
index a64b4d425da..c9add8f1ad9 100644
--- a/include/asm-powerpc/paca.h
+++ b/include/asm-powerpc/paca.h
@@ -23,6 +23,7 @@
register struct paca_struct *local_paca asm("r13");
#define get_paca() local_paca
+#define get_lppaca() (get_paca()->lppaca_ptr)
struct task_struct;
@@ -95,19 +96,6 @@ struct paca_struct {
u64 saved_r1; /* r1 save for RTAS calls */
u64 saved_msr; /* MSR saved here by enter_rtas */
u8 proc_enabled; /* irq soft-enable flag */
-
- /*
- * iSeries structure which the hypervisor knows about -
- * this structure should not cross a page boundary.
- * The vpa_init/register_vpa call is now known to fail if the
- * lppaca structure crosses a page boundary.
- * The lppaca is also used on POWER5 pSeries boxes.
- * The lppaca is 640 bytes long, and cannot readily change
- * since the hypervisor knows its layout, so a 1kB
- * alignment will suffice to ensure that it doesn't
- * cross a page boundary.
- */
- struct lppaca lppaca __attribute__((__aligned__(0x400)));
};
extern struct paca_struct paca[];
diff --git a/include/asm-powerpc/ppc_asm.h b/include/asm-powerpc/ppc_asm.h
index 0dc798d46ea..ab8688d3902 100644
--- a/include/asm-powerpc/ppc_asm.h
+++ b/include/asm-powerpc/ppc_asm.h
@@ -156,52 +156,56 @@ n:
#endif
/*
- * LOADADDR( rn, name )
- * loads the address of 'name' into 'rn'
+ * LOAD_REG_IMMEDIATE(rn, expr)
+ * Loads the value of the constant expression 'expr' into register 'rn'
+ * using immediate instructions only. Use this when it's important not
+ * to reference other data (i.e. on ppc64 when the TOC pointer is not
+ * valid).
*
- * LOADBASE( rn, name )
- * loads the address (possibly without the low 16 bits) of 'name' into 'rn'
- * suitable for base+disp addressing
+ * LOAD_REG_ADDR(rn, name)
+ * Loads the address of label 'name' into register 'rn'. Use this when
+ * you don't particularly need immediate instructions only, but you need
+ * the whole address in one register (e.g. it's a structure address and
+ * you want to access various offsets within it). On ppc32 this is
+ * identical to LOAD_REG_IMMEDIATE.
+ *
+ * LOAD_REG_ADDRBASE(rn, name)
+ * ADDROFF(name)
+ * LOAD_REG_ADDRBASE loads part of the address of label 'name' into
+ * register 'rn'. ADDROFF(name) returns the remainder of the address as
+ * a constant expression. ADDROFF(name) is a signed expression < 16 bits
+ * in size, so is suitable for use directly as an offset in load and store
+ * instructions. Use this when loading/storing a single word or less as:
+ * LOAD_REG_ADDRBASE(rX, name)
+ * ld rY,ADDROFF(name)(rX)
*/
#ifdef __powerpc64__
-#define LOADADDR(rn,name) \
- lis rn,name##@highest; \
- ori rn,rn,name##@higher; \
- rldicr rn,rn,32,31; \
- oris rn,rn,name##@h; \
- ori rn,rn,name##@l
-
-#define LOADBASE(rn,name) \
- ld rn,name@got(r2)
-
-#define OFF(name) 0
-
-#define SET_REG_TO_CONST(reg, value) \
- lis reg,(((value)>>48)&0xFFFF); \
- ori reg,reg,(((value)>>32)&0xFFFF); \
- rldicr reg,reg,32,31; \
- oris reg,reg,(((value)>>16)&0xFFFF); \
- ori reg,reg,((value)&0xFFFF);
-
-#define SET_REG_TO_LABEL(reg, label) \
- lis reg,(label)@highest; \
- ori reg,reg,(label)@higher; \
- rldicr reg,reg,32,31; \
- oris reg,reg,(label)@h; \
- ori reg,reg,(label)@l;
+#define LOAD_REG_IMMEDIATE(reg,expr) \
+ lis (reg),(expr)@highest; \
+ ori (reg),(reg),(expr)@higher; \
+ rldicr (reg),(reg),32,31; \
+ oris (reg),(reg),(expr)@h; \
+ ori (reg),(reg),(expr)@l;
+
+#define LOAD_REG_ADDR(reg,name) \
+ ld (reg),name@got(r2)
+
+#define LOAD_REG_ADDRBASE(reg,name) LOAD_REG_ADDR(reg,name)
+#define ADDROFF(name) 0
/* offsets for stack frame layout */
#define LRSAVE 16
#else /* 32-bit */
-#define LOADADDR(rn,name) \
- lis rn,name@ha; \
- addi rn,rn,name@l
-#define LOADBASE(rn,name) \
- lis rn,name@ha
+#define LOAD_REG_IMMEDIATE(reg,expr) \
+ lis (reg),(expr)@ha; \
+ addi (reg),(reg),(expr)@l;
+
+#define LOAD_REG_ADDR(reg,name) LOAD_REG_IMMEDIATE(reg, name)
-#define OFF(name) name@l
+#define LOAD_REG_ADDRBASE(reg, name) lis (reg),name@ha
+#define ADDROFF(name) name@l
/* offsets for stack frame layout */
#define LRSAVE 4
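
In a .S file the renamed macros read as below; the symbols loaded are illustrative, and on ppc32 LOAD_REG_ADDR degenerates to the same immediate sequence as LOAD_REG_IMMEDIATE:

            /* constant: safe even before the ppc64 TOC pointer is set up */
            LOAD_REG_IMMEDIATE(r3, MSR_KERNEL)

            /* full address of a label (goes via the TOC on ppc64) */
            LOAD_REG_ADDR(r4, init_task)

            /* base + 16-bit offset form, for a single load or store */
            LOAD_REG_ADDRBASE(r5, boot_cpuid)
            lwz     r6, ADDROFF(boot_cpuid)(r5)
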
diff --git a/include/asm-powerpc/prom.h b/include/asm-powerpc/prom.h
index 329e9bf6226..5b2bd4eefb0 100644
--- a/include/asm-powerpc/prom.h
+++ b/include/asm-powerpc/prom.h
@@ -87,6 +87,7 @@ struct device_node {
char *full_name;
struct property *properties;
+ struct property *deadprops; /* removed properties */
struct device_node *parent;
struct device_node *child;
struct device_node *sibling;
@@ -135,6 +136,9 @@ extern struct device_node *of_find_all_nodes(struct device_node *prev);
extern struct device_node *of_get_parent(const struct device_node *node);
extern struct device_node *of_get_next_child(const struct device_node *node,
struct device_node *prev);
+extern struct property *of_find_property(struct device_node *np,
+ const char *name,
+ int *lenp);
extern struct device_node *of_node_get(struct device_node *node);
extern void of_node_put(struct device_node *node);
@@ -164,6 +168,10 @@ extern int prom_n_size_cells(struct device_node* np);
extern int prom_n_intr_cells(struct device_node* np);
extern void prom_get_irq_senses(unsigned char *senses, int off, int max);
extern int prom_add_property(struct device_node* np, struct property* prop);
+extern int prom_remove_property(struct device_node *np, struct property *prop);
+extern int prom_update_property(struct device_node *np,
+ struct property *newprop,
+ struct property *oldprop);
#ifdef CONFIG_PPC32
/*
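
Together, deadprops and the three new routines support replacing a property without freeing storage that live references may still point at. A sketch of the intended pattern; set_prop() and its caller-supplied arguments are invented for illustration:

    static int set_prop(struct device_node *np, struct property *newprop)
    {
            struct property *oldprop;
            int len;

            oldprop = of_find_property(np, newprop->name, &len);
            if (!oldprop)
                    return prom_add_property(np, newprop);

            /* The old property moves to np->deadprops rather than being
             * freed, since references to its value may still be live. */
            return prom_update_property(np, newprop, oldprop);
    }
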
diff --git a/include/asm-powerpc/spinlock.h b/include/asm-powerpc/spinlock.h
index 754900901cd..895cb6d3a42 100644
--- a/include/asm-powerpc/spinlock.h
+++ b/include/asm-powerpc/spinlock.h
@@ -46,7 +46,7 @@ static __inline__ unsigned long __spin_trylock(raw_spinlock_t *lock)
token = LOCK_TOKEN;
__asm__ __volatile__(
-"1: lwarx %0,0,%2 # __spin_trylock\n\
+"1: lwarx %0,0,%2\n\
cmpwi 0,%0,0\n\
bne- 2f\n\
stwcx. %1,0,%2\n\
@@ -80,7 +80,7 @@ static int __inline__ __raw_spin_trylock(raw_spinlock_t *lock)
#if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES)
/* We only yield to the hypervisor if we are in shared processor mode */
-#define SHARED_PROCESSOR (get_paca()->lppaca.shared_proc)
+#define SHARED_PROCESSOR (get_lppaca()->shared_proc)
extern void __spin_yield(raw_spinlock_t *lock);
extern void __rw_yield(raw_rwlock_t *lock);
#else /* SPLPAR || ISERIES */
@@ -124,8 +124,8 @@ static void __inline__ __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long
static __inline__ void __raw_spin_unlock(raw_spinlock_t *lock)
{
- __asm__ __volatile__(SYNC_ON_SMP" # __raw_spin_unlock"
- : : :"memory");
+ __asm__ __volatile__("# __raw_spin_unlock\n\t"
+ LWSYNC_ON_SMP: : :"memory");
lock->slock = 0;
}
@@ -167,7 +167,7 @@ static long __inline__ __read_trylock(raw_rwlock_t *rw)
long tmp;
__asm__ __volatile__(
-"1: lwarx %0,0,%1 # read_trylock\n"
+"1: lwarx %0,0,%1\n"
__DO_SIGN_EXTEND
" addic. %0,%0,1\n\
ble- 2f\n"
@@ -192,7 +192,7 @@ static __inline__ long __write_trylock(raw_rwlock_t *rw)
token = WRLOCK_TOKEN;
__asm__ __volatile__(
-"1: lwarx %0,0,%2 # write_trylock\n\
+"1: lwarx %0,0,%2\n\
cmpwi 0,%0,0\n\
bne- 2f\n"
PPC405_ERR77(0,%1)
@@ -249,8 +249,9 @@ static void __inline__ __raw_read_unlock(raw_rwlock_t *rw)
long tmp;
__asm__ __volatile__(
- "eieio # read_unlock\n\
-1: lwarx %0,0,%1\n\
+ "# read_unlock\n\t"
+ LWSYNC_ON_SMP
+"1: lwarx %0,0,%1\n\
addic %0,%0,-1\n"
PPC405_ERR77(0,%1)
" stwcx. %0,0,%1\n\
@@ -262,8 +263,8 @@ static void __inline__ __raw_read_unlock(raw_rwlock_t *rw)
static __inline__ void __raw_write_unlock(raw_rwlock_t *rw)
{
- __asm__ __volatile__(SYNC_ON_SMP" # write_unlock"
- : : :"memory");
+ __asm__ __volatile__("# write_unlock\n\t"
+ LWSYNC_ON_SMP: : :"memory");
rw->lock = 0;
}
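
The unlock side can drop the heavier sync because it only needs release semantics: lwsync orders every critical-section access before the clearing store, and the acquire isync taken at lock time (unchanged, in the elided context) completes the pairing. The resulting contract, seen from a caller; lk, shared_data and writer() are illustrative:

    static DEFINE_SPINLOCK(lk);
    static int shared_data;

    void writer(void)
    {
            spin_lock(&lk);         /* acquire: isync after winning stwcx. */
            shared_data = 42;       /* cannot leak out of the section */
            spin_unlock(&lk);       /* release: lwsync before the store of 0 */
    }
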
diff --git a/include/asm-powerpc/synch.h b/include/asm-powerpc/synch.h
index 794870ab8fd..c90d9d9aae7 100644
--- a/include/asm-powerpc/synch.h
+++ b/include/asm-powerpc/synch.h
@@ -2,6 +2,8 @@
#define _ASM_POWERPC_SYNCH_H
#ifdef __KERNEL__
+#include <linux/stringify.h>
+
#ifdef __powerpc64__
#define __SUBARCH_HAS_LWSYNC
#endif
@@ -12,20 +14,12 @@
# define LWSYNC sync
#endif
-
-/*
- * Arguably the bitops and *xchg operations don't imply any memory barrier
- * or SMP ordering, but in fact a lot of drivers expect them to imply
- * both, since they do on x86 cpus.
- */
#ifdef CONFIG_SMP
-#define EIEIO_ON_SMP "eieio\n"
#define ISYNC_ON_SMP "\n\tisync"
-#define SYNC_ON_SMP __stringify(LWSYNC) "\n"
+#define LWSYNC_ON_SMP __stringify(LWSYNC) "\n"
#else
-#define EIEIO_ON_SMP
#define ISYNC_ON_SMP
-#define SYNC_ON_SMP
+#define LWSYNC_ON_SMP
#endif
static inline void eieio(void)
@@ -38,14 +32,5 @@ static inline void isync(void)
__asm__ __volatile__ ("isync" : : : "memory");
}
-#ifdef CONFIG_SMP
-#define eieio_on_smp() eieio()
-#define isync_on_smp() isync()
-#else
-#define eieio_on_smp() __asm__ __volatile__("": : :"memory")
-#define isync_on_smp() __asm__ __volatile__("": : :"memory")
-#endif
-
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_SYNCH_H */
-
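
Spelled out, the surviving barrier macros expand as follows, derived directly from the LWSYNC definition above:

    CONFIG_SMP && __powerpc64__ :  LWSYNC_ON_SMP -> "lwsync\n"
    CONFIG_SMP && 32-bit        :  LWSYNC_ON_SMP -> "sync\n"
    !CONFIG_SMP                 :  LWSYNC_ON_SMP -> ""  (no barrier needed)
    ISYNC_ON_SMP                :  "\n\tisync" on SMP, empty otherwise
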
diff --git a/include/asm-powerpc/system.h b/include/asm-powerpc/system.h
index 9b822afa7d0..d9bf53653b1 100644
--- a/include/asm-powerpc/system.h
+++ b/include/asm-powerpc/system.h
@@ -212,7 +212,7 @@ __xchg_u32(volatile void *p, unsigned long val)
unsigned long prev;
__asm__ __volatile__(
- EIEIO_ON_SMP
+ LWSYNC_ON_SMP
"1: lwarx %0,0,%2 \n"
PPC405_ERR77(0,%2)
" stwcx. %3,0,%2 \n\
@@ -232,7 +232,7 @@ __xchg_u64(volatile void *p, unsigned long val)
unsigned long prev;
__asm__ __volatile__(
- EIEIO_ON_SMP
+ LWSYNC_ON_SMP
"1: ldarx %0,0,%2 \n"
PPC405_ERR77(0,%2)
" stdcx. %3,0,%2 \n\
@@ -287,7 +287,7 @@ __cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
unsigned int prev;
__asm__ __volatile__ (
- EIEIO_ON_SMP
+ LWSYNC_ON_SMP
"1: lwarx %0,0,%2 # __cmpxchg_u32\n\
cmpw 0,%0,%3\n\
bne- 2f\n"
@@ -311,7 +311,7 @@ __cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
unsigned long prev;
__asm__ __volatile__ (
- EIEIO_ON_SMP
+ LWSYNC_ON_SMP
"1: ldarx %0,0,%2 # __cmpxchg_u64\n\
cmpd 0,%0,%3\n\
bne- 2f\n\
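
These are the primitives behind the generic cmpxchg()/xchg() macros; the canonical retry loop built on top looks like this (the saturating counter and all names are invented for illustration):

    static int inc_below(atomic_t *v, int limit)
    {
            int old, cur = atomic_read(v);

            for (;;) {
                    if (cur >= limit)
                            return -EBUSY;  /* already saturated */
                    old = atomic_cmpxchg(v, cur, cur + 1);
                    if (old == cur)
                            return 0;       /* we won the race */
                    cur = old;              /* lost: retry with the fresh value */
            }
    }
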
diff --git a/include/asm-powerpc/time.h b/include/asm-powerpc/time.h
index d9b86a17271..baddc9ab57a 100644
--- a/include/asm-powerpc/time.h
+++ b/include/asm-powerpc/time.h
@@ -175,11 +175,10 @@ static inline void set_dec(int val)
set_dec_cpu6(val);
#else
#ifdef CONFIG_PPC_ISERIES
- struct paca_struct *lpaca = get_paca();
int cur_dec;
- if (lpaca->lppaca.shared_proc) {
- lpaca->lppaca.virtual_decr = val;
+ if (get_lppaca()->shared_proc) {
+ get_lppaca()->virtual_decr = val;
cur_dec = get_dec();
if (cur_dec > val)
HvCall_setVirtualDecr();