Diffstat (limited to 'arch/s390/kernel/process.c')
-rw-r--r--   arch/s390/kernel/process.c   64
1 file changed, 3 insertions, 61 deletions
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 1e06436f07c..b6110bdf8dc 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -46,7 +46,6 @@
#include <asm/processor.h>
#include <asm/irq.h>
#include <asm/timer.h>
-#include <asm/cpu.h>
#include "entry.h"

asmlinkage void ret_from_fork(void) asm ("ret_from_fork");
@@ -76,35 +75,12 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
return sf->gprs[8];
}

-DEFINE_PER_CPU(struct s390_idle_data, s390_idle) = {
- .lock = __SPIN_LOCK_UNLOCKED(s390_idle.lock)
-};
-
-void s390_idle_leave(void)
-{
- struct s390_idle_data *idle;
- unsigned long long idle_time;
-
- idle = &__get_cpu_var(s390_idle);
- idle_time = S390_lowcore.int_clock - idle->idle_enter;
- spin_lock(&idle->lock);
- idle->idle_time += idle_time;
- idle->idle_enter = 0ULL;
- idle->idle_count++;
- spin_unlock(&idle->lock);
- vtime_start_cpu_timer();
-}
-
extern void s390_handle_mcck(void);
/*
* The idle loop on a S390...
*/
static void default_idle(void)
{
- struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
- unsigned long addr;
- psw_t psw;
-
/* CPU is going idle. */
local_irq_disable();
if (need_resched()) {
@@ -120,7 +96,6 @@ static void default_idle(void)
local_mcck_disable();
if (test_thread_flag(TIF_MCCK_PENDING)) {
local_mcck_enable();
- s390_idle_leave();
local_irq_enable();
s390_handle_mcck();
return;
@@ -128,42 +103,9 @@ static void default_idle(void)
trace_hardirqs_on();
/* Don't trace preempt off for idle. */
stop_critical_timings();
- vtime_stop_cpu_timer();
-
- /*
- * The inline assembly is equivalent to
- * idle->idle_enter = get_clock();
- * __load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT |
- * PSW_MASK_IO | PSW_MASK_EXT);
- * The difference is that the inline assembly makes sure that
- * the stck instruction is right before the lpsw instruction.
- * This is done to increase the precision.
- */
-
- /* Wait for external, I/O or machine check interrupt. */
- psw.mask = psw_kernel_bits|PSW_MASK_WAIT|PSW_MASK_IO|PSW_MASK_EXT;
-#ifndef __s390x__
- asm volatile(
- " basr %0,0\n"
- "0: ahi %0,1f-0b\n"
- " st %0,4(%2)\n"
- " stck 0(%3)\n"
- " lpsw 0(%2)\n"
- "1:"
- : "=&d" (addr), "=m" (idle->idle_enter)
- : "a" (&psw), "a" (&idle->idle_enter), "m" (psw)
- : "memory", "cc");
-#else /* __s390x__ */
- asm volatile(
- " larl %0,1f\n"
- " stg %0,8(%2)\n"
- " stck 0(%3)\n"
- " lpswe 0(%2)\n"
- "1:"
- : "=&d" (addr), "=m" (idle->idle_enter)
- : "a" (&psw), "a" (&idle->idle_enter), "m" (psw)
- : "memory", "cc");
-#endif /* __s390x__ */
+ /* Stop virtual timer and halt the cpu. */
+ vtime_stop_cpu();
+ /* Reenable preemption tracer. */
start_critical_timings();
}
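
Note: as the deleted comment states, the removed stck/lpsw(e) inline assembly was equivalent to the C sequence below; the assembly was only used so that the stck (store clock) instruction sits immediately before the lpsw/lpswe that loads the wait PSW, making the recorded idle-enter timestamp as precise as possible. This is a minimal sketch for reference, assuming the s390 helpers get_clock() and __load_psw_mask() and the psw_kernel_bits/PSW_MASK_* constants available from <asm/processor.h> at the time; the patch itself replaces the whole sequence with a call to vtime_stop_cpu() rather than this code.

	/* Record the idle-enter timestamp, then wait for an external,
	 * I/O or machine check interrupt (C equivalent of the removed
	 * stck + lpsw/lpswe sequence; sketch, not the new code path). */
	idle->idle_enter = get_clock();
	__load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT |
			PSW_MASK_IO | PSW_MASK_EXT);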