author     John Stultz <johnstul@us.ibm.com>  2009-09-21 17:04:00 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2009-09-22 07:17:43 -0700
commit     4f543fa41e78bd366123424a3378f2f4918c0f33 (patch)
tree       bf6e9fdb30c41de9833dcf2a363177ea0dc28a5d /arch/alpha/kernel
parent     6e0c9e77771e08b171c4abeb073285d8fb03f528 (diff)
alpha: convert to use arch_gettimeoffset()
Converts alpha to use GENERIC_TIME via the arch_gettimeoffset() infrastructure, reducing the amount of arch-specific code we need to maintain.

I suspect the alpha arch could even be further improved to provide an rpcc()-based clocksource, but not having the hardware, I don't feel comfortable attempting the more complicated conversion (but I'd be glad to help if anyone else is interested).

[akpm@linux-foundation.org: fix build]
Signed-off-by: John Stultz <johnstul@us.ibm.com>
Cc: Richard Henderson <rth@twiddle.net>
Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
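For context, here is a minimal sketch (stand-in names and values, not the kernel's actual timekeeping code) of how a generic gettimeofday path consumes an arch_gettimeoffset() hook: the generic core keeps a tick-granular wall clock and asks the architecture only for the nanoseconds elapsed since the last tick.

#include <stdio.h>
#include <stdint.h>

struct timespec_sketch {
	long tv_sec;
	long tv_nsec;
};

/* Tick-resolution wall clock, updated by the (simulated) timer tick. */
static struct timespec_sketch xtime_sketch = { 1253577840, 250000000 };

/* Stand-in for the arch hook: nanoseconds elapsed since the last tick. */
static uint32_t arch_gettimeoffset(void)
{
	return 730000;	/* pretend 730 usec have passed since the tick */
}

/* Outline of what the generic gettimeofday path does with the hook. */
static void generic_getnstimeofday(struct timespec_sketch *ts)
{
	*ts = xtime_sketch;			/* coarse, tick-granular base */
	ts->tv_nsec += arch_gettimeoffset();	/* add the sub-tick offset    */
	while (ts->tv_nsec >= 1000000000L) {	/* normalize nanoseconds      */
		ts->tv_nsec -= 1000000000L;
		ts->tv_sec++;
	}
}

int main(void)
{
	struct timespec_sketch now;

	generic_getnstimeofday(&now);
	printf("%ld.%09ld\n", now.tv_sec, now.tv_nsec);
	return 0;
}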
Diffstat (limited to 'arch/alpha/kernel')
-rw-r--r--  arch/alpha/kernel/time.c  79
1 file changed, 7 insertions, 72 deletions
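The fixed-point arithmetic in the hunk below can be sanity-checked with a small standalone program. This is only a sketch: the 600 MHz cycle frequency and HZ = 1024 are the example values from the comment in the patch, the scaled_ticks_per_cycle initialisation, (HZ << FIX_SHIFT) / cycle_freq, is an assumption about how alpha's time_init() sets it up, and 64-bit unsigned longs are assumed, as on Alpha.

#include <stdio.h>

#define FIX_SHIFT 48
#define HZ        1024UL

int main(void)
{
	unsigned long cycle_freq = 600000000UL;		/* 600 MHz rpcc */
	unsigned long scaled_ticks_per_cycle =
		(HZ << FIX_SHIFT) / cycle_freq;		/* HZ * 2^48 / freq (assumed init) */
	unsigned long delta_cycles = 300000UL;		/* 0.5 ms worth of cycles */
	unsigned long partial_tick = 0;
	unsigned long delta_usec;

	/*
	 * usec = cycles * s_t_p_c * 1e6 / (2^48 * HZ)
	 *      = cycles * s_t_p_c * 15625 / (2^42 * HZ)   (1e6 / 2^6 = 15625)
	 * Dividing by 2^41 * HZ and then doing "+1, /2" rounds to nearest.
	 */
	delta_usec = (delta_cycles * scaled_ticks_per_cycle + partial_tick) * 15625;
	delta_usec = ((delta_usec / ((1UL << (FIX_SHIFT - 6 - 1)) * HZ)) + 1) / 2;

	printf("delta_usec = %lu (expect 500)\n", delta_usec);
	return 0;
}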
diff --git a/arch/alpha/kernel/time.c b/arch/alpha/kernel/time.c
index b04e2cbf23a..5d0826654c6 100644
--- a/arch/alpha/kernel/time.c
+++ b/arch/alpha/kernel/time.c
@@ -408,28 +408,17 @@ time_init(void)
* part. So we can't do the "find absolute time in terms of cycles" thing
* that the other ports do.
*/
-void
-do_gettimeofday(struct timeval *tv)
+u32 arch_gettimeoffset(void)
{
- unsigned long flags;
- unsigned long sec, usec, seq;
- unsigned long delta_cycles, delta_usec, partial_tick;
-
- do {
- seq = read_seqbegin_irqsave(&xtime_lock, flags);
-
- delta_cycles = rpcc() - state.last_time;
- sec = xtime.tv_sec;
- usec = (xtime.tv_nsec / 1000);
- partial_tick = state.partial_tick;
-
- } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
-
#ifdef CONFIG_SMP
/* Until and unless we figure out how to get cpu cycle counters
in sync and keep them there, we can't use the rpcc tricks. */
- delta_usec = 0;
+ return 0;
#else
+ unsigned long delta_cycles, delta_usec, partial_tick;
+
+ delta_cycles = rpcc() - state.last_time;
+ partial_tick = state.partial_tick;
/*
* usec = cycles * ticks_per_cycle * 2**48 * 1e6 / (2**48 * ticks)
* = cycles * (s_t_p_c) * 1e6 / (2**48 * ticks)
@@ -446,64 +435,10 @@ do_gettimeofday(struct timeval *tv)
delta_usec = (delta_cycles * state.scaled_ticks_per_cycle
+ partial_tick) * 15625;
delta_usec = ((delta_usec / ((1UL << (FIX_SHIFT-6-1)) * HZ)) + 1) / 2;
+ return delta_usec * 1000;
#endif
-
- usec += delta_usec;
- if (usec >= 1000000) {
- sec += 1;
- usec -= 1000000;
- }
-
- tv->tv_sec = sec;
- tv->tv_usec = usec;
}
-EXPORT_SYMBOL(do_gettimeofday);
-
-int
-do_settimeofday(struct timespec *tv)
-{
- time_t wtm_sec, sec = tv->tv_sec;
- long wtm_nsec, nsec = tv->tv_nsec;
- unsigned long delta_nsec;
-
- if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
- return -EINVAL;
-
- write_seqlock_irq(&xtime_lock);
-
- /* The offset that is added into time in do_gettimeofday above
- must be subtracted out here to keep a coherent view of the
- time. Without this, a full-tick error is possible. */
-
-#ifdef CONFIG_SMP
- delta_nsec = 0;
-#else
- delta_nsec = rpcc() - state.last_time;
- delta_nsec = (delta_nsec * state.scaled_ticks_per_cycle
- + state.partial_tick) * 15625;
- delta_nsec = ((delta_nsec / ((1UL << (FIX_SHIFT-6-1)) * HZ)) + 1) / 2;
- delta_nsec *= 1000;
-#endif
-
- nsec -= delta_nsec;
-
- wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
- wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
-
- set_normalized_timespec(&xtime, sec, nsec);
- set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
-
- ntp_clear();
-
- write_sequnlock_irq(&xtime_lock);
- clock_was_set();
- return 0;
-}
-
-EXPORT_SYMBOL(do_settimeofday);
-
-
/*
* In order to set the CMOS clock precisely, set_rtc_mmss has to be
* called 500 ms after the second nowtime has started, because when