author     Thomas Gleixner <tglx@linutronix.de>  2009-08-20 17:06:25 +0200
committer  Thomas Gleixner <tglx@linutronix.de>  2009-08-31 09:35:47 +0200
commit     2d826404f0bdcac2a4dd7e3c446b70d6a3b63b78 (patch)
tree       7db2dc0fbde3a25a89f1fc1514152567f612ccde /arch/x86/kernel
parent     47926214d8b2bef13b2be57c500194a804f16198 (diff)
x86: Move tsc_calibration to x86_init_ops
TSC calibration is modified by the vmware hypervisor and paravirt by
separate means. Moorestown wants to add its own calibration routine as
well. So make calibrate_tsc a proper x86_init_ops function and override
it by paravirt or by the early setup of the vmware hypervisor.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
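For context, the hook this patch introduces amounts to the following shape.
This is a minimal sketch inferred from the x86_init.c and tsc.c hunks below;
the real declaration lives in <asm/x86_init.h> and may carry additional
members:

/* Sketch only: inferred from the hunks in this patch. */
struct x86_platform_ops {
	unsigned long (*calibrate_tsc)(void);	/* TSC frequency in kHz */
};

extern struct x86_platform_ops x86_platform;

/* The default is set in arch/x86/kernel/x86_init.c to native_calibrate_tsc;
 * tsc.c now calls x86_platform.calibrate_tsc() instead of calibrate_tsc()
 * (formerly routed through pv_time_ops.get_tsc_khz). */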
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--  arch/x86/kernel/cpu/hypervisor.c  | 14
-rw-r--r--  arch/x86/kernel/cpu/vmware.c      | 21
-rw-r--r--  arch/x86/kernel/kvmclock.c        |  2
-rw-r--r--  arch/x86/kernel/paravirt.c        |  1
-rw-r--r--  arch/x86/kernel/setup.c           |  2
-rw-r--r--  arch/x86/kernel/tsc.c             | 13
-rw-r--r--  arch/x86/kernel/vmi_32.c          |  2
-rw-r--r--  arch/x86/kernel/vmiclock_32.c     |  2
-rw-r--r--  arch/x86/kernel/x86_init.c        |  5
9 files changed, 32 insertions(+), 30 deletions(-)
diff --git a/arch/x86/kernel/cpu/hypervisor.c b/arch/x86/kernel/cpu/hypervisor.c
index 93ba8eeb100..08be922de33 100644
--- a/arch/x86/kernel/cpu/hypervisor.c
+++ b/arch/x86/kernel/cpu/hypervisor.c
@@ -34,13 +34,6 @@ detect_hypervisor_vendor(struct cpuinfo_x86 *c)
c->x86_hyper_vendor = X86_HYPER_VENDOR_NONE;
}
-unsigned long get_hypervisor_tsc_freq(void)
-{
- if (boot_cpu_data.x86_hyper_vendor == X86_HYPER_VENDOR_VMWARE)
- return vmware_get_tsc_khz();
- return 0;
-}
-
static inline void __cpuinit
hypervisor_set_feature_bits(struct cpuinfo_x86 *c)
{
@@ -55,3 +48,10 @@ void __cpuinit init_hypervisor(struct cpuinfo_x86 *c)
detect_hypervisor_vendor(c);
hypervisor_set_feature_bits(c);
}
+
+void __init init_hypervisor_platform(void)
+{
+ init_hypervisor(&boot_cpu_data);
+ if (boot_cpu_data.x86_hyper_vendor == X86_HYPER_VENDOR_VMWARE)
+ vmware_platform_setup();
+}
diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c
index bc24f514ec9..0a46b4df5d8 100644
--- a/arch/x86/kernel/cpu/vmware.c
+++ b/arch/x86/kernel/cpu/vmware.c
@@ -24,6 +24,7 @@
#include <linux/dmi.h>
#include <asm/div64.h>
#include <asm/vmware.h>
+#include <asm/x86_init.h>
#define CPUID_VMWARE_INFO_LEAF 0x40000000
#define VMWARE_HYPERVISOR_MAGIC 0x564D5868
@@ -47,21 +48,29 @@ static inline int __vmware_platform(void)
return eax != (uint32_t)-1 && ebx == VMWARE_HYPERVISOR_MAGIC;
}
-static unsigned long __vmware_get_tsc_khz(void)
+static unsigned long vmware_get_tsc_khz(void)
{
uint64_t tsc_hz;
uint32_t eax, ebx, ecx, edx;
VMWARE_PORT(GETHZ, eax, ebx, ecx, edx);
- if (ebx == UINT_MAX)
- return 0;
tsc_hz = eax | (((uint64_t)ebx) << 32);
do_div(tsc_hz, 1000);
BUG_ON(tsc_hz >> 32);
return tsc_hz;
}
+void __init vmware_platform_setup(void)
+{
+ uint32_t eax, ebx, ecx, edx;
+
+ VMWARE_PORT(GETHZ, eax, ebx, ecx, edx);
+
+ if (ebx != UINT_MAX)
+ x86_platform.calibrate_tsc = vmware_get_tsc_khz;
+}
+
/*
* While checking the dmi string information, just checking the product
* serial key should be enough, as this will always have a VMware
@@ -87,12 +96,6 @@ int vmware_platform(void)
return 0;
}
-unsigned long vmware_get_tsc_khz(void)
-{
- BUG_ON(!vmware_platform());
- return __vmware_get_tsc_khz();
-}
-
/*
* VMware hypervisor takes care of exporting a reliable TSC to the guest.
* Still, due to timing difference when running on virtual cpus, the TSC can
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index 64e9b5f59d2..75a21b61b86 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -187,7 +187,7 @@ void __init kvmclock_init(void)
pv_time_ops.get_wallclock = kvm_get_wallclock;
pv_time_ops.set_wallclock = kvm_set_wallclock;
pv_time_ops.sched_clock = kvm_clock_read;
- pv_time_ops.get_tsc_khz = kvm_get_tsc_khz;
+ x86_platform.calibrate_tsc = kvm_get_tsc_khz;
#ifdef CONFIG_X86_LOCAL_APIC
x86_cpuinit.setup_percpu_clockev =
kvm_setup_secondary_clock;
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 9c0e644a76d..7cbf898d839 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -309,7 +309,6 @@ struct pv_time_ops pv_time_ops = {
.get_wallclock = native_get_wallclock,
.set_wallclock = native_set_wallclock,
.sched_clock = native_sched_clock,
- .get_tsc_khz = native_calibrate_tsc,
};
struct pv_irq_ops pv_irq_ops = {
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index bb207a47c63..2d93026af7c 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -818,7 +818,7 @@ void __init setup_arch(char **cmdline_p)
* VMware detection requires dmi to be available, so this
* needs to be done after dmi_scan_machine, for the BP.
*/
- init_hypervisor(&boot_cpu_data);
+ init_hypervisor_platform();
x86_init.resources.probe_roms();
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 97a0bcbad10..9917632a8b4 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -18,6 +18,7 @@
#include <asm/delay.h>
#include <asm/hypervisor.h>
#include <asm/nmi.h>
+#include <asm/x86_init.h>
unsigned int __read_mostly cpu_khz; /* TSC clocks / usec, not used here */
EXPORT_SYMBOL(cpu_khz);
@@ -401,15 +402,9 @@ unsigned long native_calibrate_tsc(void)
{
u64 tsc1, tsc2, delta, ref1, ref2;
unsigned long tsc_pit_min = ULONG_MAX, tsc_ref_min = ULONG_MAX;
- unsigned long flags, latch, ms, fast_calibrate, hv_tsc_khz;
+ unsigned long flags, latch, ms, fast_calibrate;
int hpet = is_hpet_enabled(), i, loopmin;
- hv_tsc_khz = get_hypervisor_tsc_freq();
- if (hv_tsc_khz) {
- printk(KERN_INFO "TSC: Frequency read from the hypervisor\n");
- return hv_tsc_khz;
- }
-
local_irq_save(flags);
fast_calibrate = quick_pit_calibrate();
local_irq_restore(flags);
@@ -567,7 +562,7 @@ int recalibrate_cpu_khz(void)
unsigned long cpu_khz_old = cpu_khz;
if (cpu_has_tsc) {
- tsc_khz = calibrate_tsc();
+ tsc_khz = x86_platform.calibrate_tsc();
cpu_khz = tsc_khz;
cpu_data(0).loops_per_jiffy =
cpufreq_scale(cpu_data(0).loops_per_jiffy,
@@ -917,7 +912,7 @@ void __init tsc_init(void)
if (!cpu_has_tsc)
return;
- tsc_khz = calibrate_tsc();
+ tsc_khz = x86_platform.calibrate_tsc();
cpu_khz = tsc_khz;
if (!tsc_khz) {
diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c
index cd7d0fbbf66..052ae81ee08 100644
--- a/arch/x86/kernel/vmi_32.c
+++ b/arch/x86/kernel/vmi_32.c
@@ -825,7 +825,7 @@ static inline int __init activate_vmi(void)
x86_cpuinit.setup_percpu_clockev = vmi_time_ap_init;
#endif
pv_time_ops.sched_clock = vmi_sched_clock;
- pv_time_ops.get_tsc_khz = vmi_tsc_khz;
+ x86_platform.calibrate_tsc = vmi_tsc_khz;
/* We have true wallclock functions; disable CMOS clock sync */
no_sync_cmos_clock = 1;
diff --git a/arch/x86/kernel/vmiclock_32.c b/arch/x86/kernel/vmiclock_32.c
index 2b3eb82efee..611b9e2360d 100644
--- a/arch/x86/kernel/vmiclock_32.c
+++ b/arch/x86/kernel/vmiclock_32.c
@@ -68,7 +68,7 @@ unsigned long long vmi_sched_clock(void)
return cycles_2_ns(vmi_timer_ops.get_cycle_counter(VMI_CYCLES_AVAILABLE));
}
-/* paravirt_ops.get_tsc_khz = vmi_tsc_khz */
+/* x86_platform.calibrate_tsc = vmi_tsc_khz */
unsigned long vmi_tsc_khz(void)
{
unsigned long long khz;
diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
index 4790b92714a..13081b92191 100644
--- a/arch/x86/kernel/x86_init.c
+++ b/arch/x86/kernel/x86_init.c
@@ -13,6 +13,7 @@
#include <asm/e820.h>
#include <asm/time.h>
#include <asm/irq.h>
+#include <asm/tsc.h>
void __cpuinit x86_init_noop(void) { }
void __init x86_init_uint_noop(unsigned int unused) { }
@@ -67,3 +68,7 @@ struct __initdata x86_init_ops x86_init = {
__cpuinitdata struct x86_cpuinit_ops x86_cpuinit = {
.setup_percpu_clockev = setup_secondary_APIC_clock,
};
+
+struct x86_platform_ops x86_platform = {
+ .calibrate_tsc = native_calibrate_tsc,
+};