Diffstat (limited to 'include/asm-sparc64')
-rw-r--r--   include/asm-sparc64/atomic.h       |  1
-rw-r--r--   include/asm-sparc64/elf.h          |  2
-rw-r--r--   include/asm-sparc64/kprobes.h      | 10
-rw-r--r--   include/asm-sparc64/mmu_context.h  |  2
-rw-r--r--   include/asm-sparc64/mutex.h        |  9
-rw-r--r--   include/asm-sparc64/processor.h    |  5
-rw-r--r--   include/asm-sparc64/system.h       | 14
7 files changed, 28 insertions, 15 deletions
diff --git a/include/asm-sparc64/atomic.h b/include/asm-sparc64/atomic.h
index 11f5aa5d108..25256bdc8aa 100644
--- a/include/asm-sparc64/atomic.h
+++ b/include/asm-sparc64/atomic.h
@@ -72,6 +72,7 @@ extern int atomic64_sub_ret(int, atomic64_t *);
 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
 
 #define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
 #define atomic_add_unless(v, a, u) \
 ({ \
diff --git a/include/asm-sparc64/elf.h b/include/asm-sparc64/elf.h
index 91458118277..69539a8ab83 100644
--- a/include/asm-sparc64/elf.h
+++ b/include/asm-sparc64/elf.h
@@ -119,7 +119,7 @@ typedef struct {
 #endif
 
 #define ELF_CORE_COPY_TASK_REGS(__tsk, __elf_regs)	\
-	({ ELF_CORE_COPY_REGS((*(__elf_regs)), (__tsk)->thread_info->kregs); 1; })
+	({ ELF_CORE_COPY_REGS((*(__elf_regs)), task_pt_regs(__tsk)); 1; })
 
 /*
  * This is used to ensure we don't load something for the wrong architecture.
diff --git a/include/asm-sparc64/kprobes.h b/include/asm-sparc64/kprobes.h
index 7ba845320f5..e4efe652b54 100644
--- a/include/asm-sparc64/kprobes.h
+++ b/include/asm-sparc64/kprobes.h
@@ -12,6 +12,7 @@ typedef u32 kprobe_opcode_t;
 #define MAX_INSN_SIZE 2
 
 #define JPROBE_ENTRY(pentry)	(kprobe_opcode_t *)pentry
+#define arch_remove_kprobe(p)	do {} while (0)
 
 /* Architecture specific copy of original instruction*/
 struct arch_specific_insn {
@@ -38,15 +39,6 @@ struct kprobe_ctlblk {
 	struct prev_kprobe prev_kprobe;
 };
 
-#ifdef CONFIG_KPROBES
 extern int kprobe_exceptions_notify(struct notifier_block *self,
 				    unsigned long val, void *data);
-#else	/* !CONFIG_KPROBES */
-static inline int kprobe_exceptions_notify(struct notifier_block *self,
-					   unsigned long val, void *data)
-{
-	return 0;
-}
-#endif
-
 #endif /* _SPARC64_KPROBES_H */
diff --git a/include/asm-sparc64/mmu_context.h b/include/asm-sparc64/mmu_context.h
index 08ba72d7722..57ee7b30618 100644
--- a/include/asm-sparc64/mmu_context.h
+++ b/include/asm-sparc64/mmu_context.h
@@ -60,7 +60,7 @@ do { \
 	register unsigned long pgd_cache asm("o4"); \
 	paddr = __pa((__mm)->pgd); \
 	pgd_cache = 0UL; \
-	if ((__tsk)->thread_info->flags & _TIF_32BIT) \
+	if (task_thread_info(__tsk)->flags & _TIF_32BIT) \
 		pgd_cache = get_pgd_cache((__mm)->pgd); \
 	__asm__ __volatile__("wrpr %%g0, 0x494, %%pstate\n\t" \
 			     "mov %3, %%g4\n\t" \
diff --git a/include/asm-sparc64/mutex.h b/include/asm-sparc64/mutex.h
new file mode 100644
index 00000000000..458c1f7fbc1
--- /dev/null
+++ b/include/asm-sparc64/mutex.h
@@ -0,0 +1,9 @@
+/*
+ * Pull in the generic implementation for the mutex fastpath.
+ *
+ * TODO: implement optimized primitives instead, or leave the generic
+ * implementation in place, or pick the atomic_xchg() based generic
+ * implementation. (see asm-generic/mutex-xchg.h for details)
+ */
+
+#include <asm-generic/mutex-dec.h>
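A note on the two mutex-related hunks above: the new asm-sparc64/mutex.h pulls in asm-generic/mutex-dec.h for now, but its comment leaves open switching to the atomic_xchg()-based generic fastpath, which is presumably why atomic.h gains atomic_xchg(). The sketch below illustrates the shape of such an xchg-based lock fastpath in plain C11 atomics; the demo_* names, the spinning slowpath stub, and the simplified count protocol are inventions for this example, not the kernel's implementation.

/*
 * Illustrative sketch only -- not kernel code.  Count protocol:
 * 1 means unlocked, 0 means locked (a real implementation also
 * uses negative values to track waiters).
 */
#include <stdatomic.h>

struct demo_mutex {
	atomic_int count;		/* 1: unlocked, 0: locked */
};

static void demo_mutex_init(struct demo_mutex *m)
{
	atomic_store(&m->count, 1);
}

static void demo_mutex_lock(struct demo_mutex *m)
{
	/* Fastpath: one atomic exchange -- exactly the operation
	 * the atomic_xchg() macro added above provides. */
	while (atomic_exchange(&m->count, 0) != 1)
		;			/* slowpath stub: spin */
}

static void demo_mutex_unlock(struct demo_mutex *m)
{
	/* A real unlock would inspect the old count and wake any
	 * waiters; this sketch only marks the mutex unlocked. */
	atomic_exchange(&m->count, 1);
}
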
diff --git a/include/asm-sparc64/processor.h b/include/asm-sparc64/processor.h
index 3169f3e2237..cd8d9b4c865 100644
--- a/include/asm-sparc64/processor.h
+++ b/include/asm-sparc64/processor.h
@@ -186,8 +186,9 @@ extern pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
 
 extern unsigned long get_wchan(struct task_struct *task);
 
-#define KSTK_EIP(tsk)  ((tsk)->thread_info->kregs->tpc)
-#define KSTK_ESP(tsk)  ((tsk)->thread_info->kregs->u_regs[UREG_FP])
+#define task_pt_regs(tsk) (task_thread_info(tsk)->kregs)
+#define KSTK_EIP(tsk)  (task_pt_regs(tsk)->tpc)
+#define KSTK_ESP(tsk)  (task_pt_regs(tsk)->u_regs[UREG_FP])
 
 #define cpu_relax()	barrier()
 
diff --git a/include/asm-sparc64/system.h b/include/asm-sparc64/system.h
index 309f1466b6f..af254e58183 100644
--- a/include/asm-sparc64/system.h
+++ b/include/asm-sparc64/system.h
@@ -208,7 +208,7 @@ do {	if (test_thread_flag(TIF_PERFCTR)) { \
 	/* If you are tempted to conditionalize the following */ \
 	/* so that ASI is only written if it changes, think again. */ \
 	__asm__ __volatile__("wr %%g0, %0, %%asi" \
-	: : "r" (__thread_flag_byte_ptr(next->thread_info)[TI_FLAG_BYTE_CURRENT_DS]));\
+	: : "r" (__thread_flag_byte_ptr(task_thread_info(next))[TI_FLAG_BYTE_CURRENT_DS]));\
 	__asm__ __volatile__( \
 	"mov	%%g4, %%g7\n\t" \
 	"wrpr	%%g0, 0x95, %%pstate\n\t" \
@@ -238,7 +238,7 @@ do {	if (test_thread_flag(TIF_PERFCTR)) { \
 	"b,a ret_from_syscall\n\t" \
 	"1:\n\t" \
 	: "=&r" (last) \
-	: "0" (next->thread_info), \
+	: "0" (task_thread_info(next)), \
 	  "i" (TI_WSTATE), "i" (TI_KSP), "i" (TI_NEW_CHILD), \
 	  "i" (TI_CWP), "i" (TI_TASK) \
 	: "cc", \
@@ -253,6 +253,16 @@ do {	if (test_thread_flag(TIF_PERFCTR)) { \
 	} \
 } while(0)
 
+/*
+ * On SMP systems, when the scheduler does migration-cost
+ * autodetection, it needs a way to flush as much of the CPU's
+ * caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
 static inline unsigned long xchg32(__volatile__ unsigned int *m, unsigned int val)
 {
 	unsigned long tmp1, tmp2;
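The recurring change across elf.h, mmu_context.h, processor.h and system.h is mechanical: direct (tsk)->thread_info dereferences become task_thread_info(tsk), and processor.h layers task_pt_regs() on top of that. As a rough illustration of why the indirection helps, here is a self-contained sketch with simplified stand-in types; the struct layouts and the demo_kstk_eip() helper are invented for the example, and only the accessor pattern mirrors the diff.

/* Simplified stand-ins, not the real sparc64 definitions. */
struct pt_regs {
	unsigned long tpc;		/* program counter at trap time */
};

struct thread_info {
	unsigned long flags;
	struct pt_regs *kregs;
};

struct task_struct {
	struct thread_info *thread_info;
};

/*
 * One accessor hides where thread_info lives, so its storage can
 * later change without touching every caller ...
 */
#define task_thread_info(tsk)	((tsk)->thread_info)

/* ... and derived accessors stack on it, as processor.h now does. */
#define task_pt_regs(tsk)	(task_thread_info(tsk)->kregs)

/* Equivalent of the new KSTK_EIP(): no raw ->thread_info access. */
static unsigned long demo_kstk_eip(struct task_struct *tsk)
{
	return task_pt_regs(tsk)->tpc;
}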