Diffstat (limited to 'include/asm-sparc64/system.h')
-rw-r--r--  include/asm-sparc64/system.h |  18
1 file changed, 12 insertions(+), 6 deletions(-)
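Note on the new hook: the patch below adds an empty sched_cacheflush() stub whose comment says the scheduler's SMP migration-cost autodetection needs a way to flush the CPU caches before timing work. The sketch that follows is illustrative only and is not part of this commit; sched_cacheflush() and sched_clock() are real kernel interfaces, while touch_working_set() and measure_one_migration_cost() are hypothetical names used purely to show the intended call pattern.

/* Illustrative sketch, not from this commit: roughly how the scheduler's
 * migration-cost autodetection could use the new hook.  Only
 * sched_cacheflush() and sched_clock() are real interfaces here;
 * touch_working_set() is a hypothetical stand-in for the work being timed. */
#include <linux/sched.h>	/* sched_clock()                        */
#include <asm/system.h>		/* sched_cacheflush(), added below      */

static void touch_working_set(void)
{
	/* hypothetical: pull some per-CPU working set back into cache */
}

static unsigned long long measure_one_migration_cost(void)
{
	unsigned long long t0, t1;

	sched_cacheflush();	/* start the timing from (mostly) cold caches */
	t0 = sched_clock();
	touch_working_set();
	t1 = sched_clock();

	return t1 - t0;
}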
diff --git a/include/asm-sparc64/system.h b/include/asm-sparc64/system.h
index b5417529f6f..af254e58183 100644
--- a/include/asm-sparc64/system.h
+++ b/include/asm-sparc64/system.h
@@ -193,11 +193,7 @@ do { \
* not preserve it's value. Hairy, but it lets us remove 2 loads
* and 2 stores in this critical code path. -DaveM
*/
-#if __GNUC__ >= 3
#define EXTRA_CLOBBER ,"%l1"
-#else
-#define EXTRA_CLOBBER
-#endif
#define switch_to(prev, next, last) \
do { if (test_thread_flag(TIF_PERFCTR)) { \
unsigned long __tmp; \
@@ -212,7 +208,7 @@ do { if (test_thread_flag(TIF_PERFCTR)) { \
/* If you are tempted to conditionalize the following */ \
/* so that ASI is only written if it changes, think again. */ \
__asm__ __volatile__("wr %%g0, %0, %%asi" \
- : : "r" (__thread_flag_byte_ptr(next->thread_info)[TI_FLAG_BYTE_CURRENT_DS]));\
+ : : "r" (__thread_flag_byte_ptr(task_thread_info(next))[TI_FLAG_BYTE_CURRENT_DS]));\
__asm__ __volatile__( \
"mov %%g4, %%g7\n\t" \
"wrpr %%g0, 0x95, %%pstate\n\t" \
@@ -242,7 +238,7 @@ do { if (test_thread_flag(TIF_PERFCTR)) { \
"b,a ret_from_syscall\n\t" \
"1:\n\t" \
: "=&r" (last) \
- : "0" (next->thread_info), \
+ : "0" (task_thread_info(next)), \
"i" (TI_WSTATE), "i" (TI_KSP), "i" (TI_NEW_CHILD), \
"i" (TI_CWP), "i" (TI_TASK) \
: "cc", \
@@ -257,6 +253,16 @@ do { if (test_thread_flag(TIF_PERFCTR)) { \
} \
} while(0)

+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
static inline unsigned long xchg32(__volatile__ unsigned int *m, unsigned int val)
{
unsigned long tmp1, tmp2;