/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */

#include "linux/clockchips.h"
#include "linux/interrupt.h"
#include "linux/jiffies.h"
#include "linux/threads.h"
#include "asm/irq.h"
#include "asm/param.h"
#include "kern_util.h"
#include "os.h"

/*
 * Scheduler clock - returns current time in nanosec units.
 */
unsigned long long sched_clock(void)
{
	return (unsigned long long)jiffies_64 * (1000000000 / HZ);
}

#ifdef CONFIG_UML_REAL_TIME_CLOCK
static unsigned long long prev_nsecs[NR_CPUS];
static long long delta[NR_CPUS];	/* Deviation per interval */
#endif
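
/*
 * Runs off the host's timer signal.  With CONFIG_UML_REAL_TIME_CLOCK the
 * host clock (os_nsecs()) is used to work out how many ticks have really
 * elapsed since the last signal, so missed signals are made up for;
 * otherwise every signal counts as exactly one tick.  Each tick is
 * delivered to the generic timer code as a TIMER_IRQ.
 */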
void timer_handler(int sig, struct uml_pt_regs *regs)
{
	unsigned long long ticks = 0;
	unsigned long flags;
#ifdef CONFIG_UML_REAL_TIME_CLOCK
	int c = cpu();
	if (prev_nsecs[c]) {
		/* We've had 1 tick */
		unsigned long long nsecs = os_nsecs();

		delta[c] += nsecs - prev_nsecs[c];
		prev_nsecs[c] = nsecs;

		/* Protect against the host clock being set backwards */
		if (delta[c] < 0)
			delta[c] = 0;

		ticks += (delta[c] * HZ) / BILLION;
		delta[c] -= (ticks * BILLION) / HZ;
	}
	else prev_nsecs[c] = os_nsecs();
#else
	ticks = 1;
#endif

	local_irq_save(flags);
	while (ticks > 0) {
		do_IRQ(TIMER_IRQ, regs);
		ticks--;
	}
	local_irq_restore(flags);
}
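
/*
 * clock_event_device mode hook: periodic mode (re)arms the host interval
 * timer, shutdown/unused disables it.  One-shot mode is not implemented,
 * hence the BUG().
 */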
static void itimer_set_mode(enum clock_event_mode mode,
			    struct clock_event_device *evt)
{
	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		set_interval();
		break;

	case CLOCK_EVT_MODE_SHUTDOWN:
	case CLOCK_EVT_MODE_UNUSED:
		disable_timer();
		break;

	case CLOCK_EVT_MODE_ONESHOT:
		BUG();
		break;

	case CLOCK_EVT_MODE_RESUME:
		break;
	}
}
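
/* Periodic-only clock event device backed by the host interval timer. */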
static struct clock_event_device itimer_clockevent = {
	.name		= "itimer",
	.rating		= 250,
	.cpumask	= CPU_MASK_ALL,
	.features	= CLOCK_EVT_FEAT_PERIODIC,
	.set_mode	= itimer_set_mode,
	.set_next_event = NULL,
	.shift		= 32,
	.irq		= 0,
};
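
/*
 * TIMER_IRQ handler: hand each timer interrupt to whatever event handler
 * the clockevents core installed for itimer_clockevent.
 */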
static irqreturn_t um_timer(int irq, void *dev)
{
	(*itimer_clockevent.event_handler)(&itimer_clockevent);

	return IRQ_HANDLED;
}
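
/*
 * The clocksource reads the host's nanosecond clock directly; with
 * mult = 1 and shift = 0, one "cycle" is one nanosecond.
 */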
static cycle_t itimer_read(void)
{
	return os_nsecs();
}

static struct clocksource itimer_clocksource = {
	.name		= "itimer",
	.rating		= 300,
	.read		= itimer_read,
	.mask		= CLOCKSOURCE_MASK(64),
	.mult		= 1,
	.shift		= 0,
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};
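
/*
 * Hook up TIMER_IRQ, scale the clockevent (HZ ticks per NSEC_PER_SEC with a
 * 32-bit shift) and register both the clocksource and the clock event
 * device.  Runs from late_time_init, i.e. after time_init() below.
 */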
static void __init setup_itimer(void)
{
	int err;

	err = request_irq(TIMER_IRQ, um_timer, IRQF_DISABLED, "timer", NULL);
	if (err != 0)
		printk(KERN_ERR "register_timer : request_irq failed - "
		       "errno = %d\n", -err);

	itimer_clockevent.mult = div_sc(HZ, NSEC_PER_SEC, 32);
	itimer_clockevent.max_delta_ns =
		clockevent_delta2ns(60 * HZ, &itimer_clockevent);
	itimer_clockevent.min_delta_ns =
		clockevent_delta2ns(1, &itimer_clockevent);
	err = clocksource_register(&itimer_clocksource);
	if (err) {
		printk(KERN_ERR "clocksource_register returned %d\n", err);
		return;
	}
	clockevents_register_device(&itimer_clockevent);
}
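
/*
 * Early time initialization: run the low-level timer setup (timer_init()),
 * seed xtime and wall_to_monotonic from the host clock, and defer the
 * itimer/clockevents setup above until late_time_init.
 */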
extern void (*late_time_init)(void);
void __init time_init(void)
{
	long long nsecs;

	timer_init();

	nsecs = os_nsecs();
	set_normalized_timespec(&wall_to_monotonic, -nsecs / BILLION,
				-nsecs % BILLION);
	set_normalized_timespec(&xtime, nsecs / BILLION, nsecs % BILLION);
	late_time_init = setup_itimer;
}