Diffstat (limited to 'arch/x86'): 46 files changed, 213 insertions, 110 deletions
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index c9086e6307a..df9e885eee1 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -277,6 +277,7 @@ config SPARSE_IRQ
 config NUMA_MIGRATE_IRQ_DESC
         bool "Move irq desc when changing irq smp_affinity"
         depends on SPARSE_IRQ && NUMA
+        depends on BROKEN
         default n
         ---help---
           This enables moving irq_desc to cpu/node that irq will use handled.
@@ -664,6 +665,7 @@ config MAXSMP
 
 config NR_CPUS
         int "Maximum number of CPUs" if SMP && !MAXSMP
+        range 2 8 if SMP && X86_32 && !X86_BIGSMP
         range 2 512 if SMP && !MAXSMP
         default "1" if !SMP
         default "4096" if MAXSMP
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index f05d8c91d9e..8c86b72afdc 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -153,7 +153,7 @@ endif
 
 boot := arch/x86/boot
 
-BOOT_TARGETS = bzlilo bzdisk fdimage fdimage144 fdimage288 isoimage install
+BOOT_TARGETS = bzlilo bzdisk fdimage fdimage144 fdimage288 isoimage
 
 PHONY += bzImage $(BOOT_TARGETS)
 
@@ -171,6 +171,10 @@ bzImage: vmlinux
 $(BOOT_TARGETS): vmlinux
         $(Q)$(MAKE) $(build)=$(boot) $@
 
+PHONY += install
+install:
+        $(Q)$(MAKE) $(build)=$(boot) $@
+
 PHONY += vdso_install
 vdso_install:
         $(Q)$(MAKE) $(build)=arch/x86/vdso $@
diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
index efac92fd1ef..085a8c35f14 100644
--- a/arch/x86/ia32/sys_ia32.c
+++ b/arch/x86/ia32/sys_ia32.c
@@ -129,21 +129,12 @@ asmlinkage long sys32_fstatat(unsigned int dfd, char __user *filename,
                               struct stat64 __user *statbuf, int flag)
 {
         struct kstat stat;
-        int error = -EINVAL;
+        int error;
 
-        if ((flag & ~AT_SYMLINK_NOFOLLOW) != 0)
-                goto out;
-
-        if (flag & AT_SYMLINK_NOFOLLOW)
-                error = vfs_lstat_fd(dfd, filename, &stat);
-        else
-                error = vfs_stat_fd(dfd, filename, &stat);
-
-        if (!error)
-                error = cp_stat64(statbuf, &stat);
-
-out:
-        return error;
+        error = vfs_fstatat(dfd, filename, &stat, flag);
+        if (error)
+                return error;
+        return cp_stat64(statbuf, &stat);
 }
 
 /*
diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
index 5623c50d67b..c45f415ce31 100644
--- a/arch/x86/include/asm/desc.h
+++ b/arch/x86/include/asm/desc.h
@@ -37,7 +37,7 @@ extern gate_desc idt_table[];
 struct gdt_page {
         struct desc_struct gdt[GDT_ENTRIES];
 } __attribute__((aligned(PAGE_SIZE)));
-DECLARE_PER_CPU(struct gdt_page, gdt_page);
+DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
 
 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
 {
diff --git a/arch/x86/include/asm/hardirq.h b/arch/x86/include/asm/hardirq.h
index 039db6aa8e0..37555e52f98 100644
--- a/arch/x86/include/asm/hardirq.h
+++ b/arch/x86/include/asm/hardirq.h
@@ -26,7 +26,7 @@ typedef struct {
 #endif
 } ____cacheline_aligned irq_cpustat_t;
 
-DECLARE_PER_CPU(irq_cpustat_t, irq_stat);
+DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
 
 /* We can have at most NR_VECTORS irqs routed to a cpu at a time */
 #define MAX_HARDIRQS_PER_CPU NR_VECTORS
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index 563933e06a3..4f8c199584e 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -137,6 +137,7 @@ DECLARE_PER_CPU(mce_banks_t, mce_poll_banks);
 enum mcp_flags {
         MCP_TIMESTAMP = (1 << 0),       /* log time stamp */
         MCP_UC = (1 << 1),              /* log uncorrected errors */
+        MCP_DONTLOG = (1 << 2),         /* only clear, don't log */
 };
 extern void machine_check_poll(enum mcp_flags flags, mce_banks_t *b);
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index fcf4d92e7e0..c2cceae709c 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -138,7 +138,7 @@ extern struct tss_struct doublefault_tss;
 extern __u32 cleared_cpu_caps[NCAPINTS];
 
 #ifdef CONFIG_SMP
-DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info);
+DECLARE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
 #define cpu_data(cpu)           per_cpu(cpu_info, cpu)
 #define current_cpu_data        __get_cpu_var(cpu_info)
 #else
@@ -270,7 +270,7 @@ struct tss_struct {
 
 } ____cacheline_aligned;
 
-DECLARE_PER_CPU(struct tss_struct, init_tss);
+DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
 
 /*
  * Save the original ist values for checking stack pointers during debugging
@@ -393,7 +393,7 @@ union irq_stack_union {
         };
 };
 
-DECLARE_PER_CPU(union irq_stack_union, irq_stack_union);
+DECLARE_PER_CPU_FIRST(union irq_stack_union, irq_stack_union);
 DECLARE_INIT_PER_CPU(irq_stack_union);
 
 DECLARE_PER_CPU(char *, irq_stack_ptr);
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index d3539f998f8..16a5c84b032 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -152,7 +152,7 @@ struct tlb_state {
         struct mm_struct *active_mm;
         int state;
 };
-DECLARE_PER_CPU(struct tlb_state, cpu_tlbstate);
+DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
 
 static inline void reset_lazy_tlbstate(void)
 {
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index 892b119dba6..f44b49abca4 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -200,7 +200,7 @@ static inline void arch_fix_phys_package_id(int num, u32 slot)
 }
 
 struct pci_bus;
-void set_pci_bus_resources_arch_default(struct pci_bus *b);
+void x86_pci_root_bus_res_quirks(struct pci_bus *b);
 
 #ifdef CONFIG_SMP
 #define mc_capable()    (cpumask_weight(cpu_core_mask(0)) != nr_cpu_ids)
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
index 42c33cebf00..8c0be0902da 100644
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -49,10 +49,10 @@
 #define IVHD_DEV_EXT_SELECT             0x46
 #define IVHD_DEV_EXT_SELECT_RANGE       0x47
 
-#define IVHD_FLAG_HT_TUN_EN             0x00
-#define IVHD_FLAG_PASSPW_EN             0x01
-#define IVHD_FLAG_RESPASSPW_EN          0x02
-#define IVHD_FLAG_ISOC_EN               0x03
+#define IVHD_FLAG_HT_TUN_EN_MASK        0x01
+#define IVHD_FLAG_PASSPW_EN_MASK        0x02
+#define IVHD_FLAG_RESPASSPW_EN_MASK     0x04
+#define IVHD_FLAG_ISOC_EN_MASK          0x08
 
 #define IVMD_FLAG_EXCL_RANGE            0x08
 #define IVMD_FLAG_UNITY_MAP             0x01
@@ -569,19 +569,19 @@ static void __init init_iommu_from_acpi(struct amd_iommu *iommu,
          * First set the recommended feature enable bits from ACPI
          * into the IOMMU control registers
          */
-        h->flags & IVHD_FLAG_HT_TUN_EN ?
+        h->flags & IVHD_FLAG_HT_TUN_EN_MASK ?
                 iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
                 iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);
 
-        h->flags & IVHD_FLAG_PASSPW_EN ?
+        h->flags & IVHD_FLAG_PASSPW_EN_MASK ?
                 iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
                 iommu_feature_disable(iommu, CONTROL_PASSPW_EN);
 
-        h->flags & IVHD_FLAG_RESPASSPW_EN ?
+        h->flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
                 iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
                 iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);
 
-        h->flags & IVHD_FLAG_ISOC_EN ?
+        h->flags & IVHD_FLAG_ISOC_EN_MASK ?
                 iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
                 iommu_feature_disable(iommu, CONTROL_ISOC_EN);
 
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index a2789e42e16..30da617d18e 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -3670,12 +3670,14 @@ int arch_setup_hpet_msi(unsigned int irq)
 {
         int ret;
         struct msi_msg msg;
+        struct irq_desc *desc = irq_to_desc(irq);
 
         ret = msi_compose_msg(NULL, irq, &msg);
         if (ret < 0)
                 return ret;
 
         hpet_msi_write(irq, &msg);
+        desc->status |= IRQ_MOVE_PCNTXT;
         set_irq_chip_and_handler_name(irq, &hpet_msi_type, handle_edge_irq,
                 "edge");
 
diff --git a/arch/x86/kernel/apic/nmi.c b/arch/x86/kernel/apic/nmi.c
index d6bd6240715..ce4fbfa315a 100644
--- a/arch/x86/kernel/apic/nmi.c
+++ b/arch/x86/kernel/apic/nmi.c
@@ -138,7 +138,7 @@ int __init check_nmi_watchdog(void)
         if (!prev_nmi_count)
                 goto error;
 
-        alloc_cpumask_var(&backtrace_mask, GFP_KERNEL);
+        alloc_cpumask_var(&backtrace_mask, GFP_KERNEL|__GFP_ZERO);
         printk(KERN_INFO "Testing NMI watchdog ... ");
 
 #ifdef CONFIG_SMP
@@ -414,7 +414,8 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
                 touched = 1;
         }
 
-        if (cpumask_test_cpu(cpu, backtrace_mask)) {
+        /* We can be called before check_nmi_watchdog, hence NULL check. */
+        if (backtrace_mask != NULL && cpumask_test_cpu(cpu, backtrace_mask)) {
                 static DEFINE_SPINLOCK(lock);   /* Serialise the printks */
 
                 spin_lock(&lock);
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index de1a50af807..2bda6935297 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -19,6 +19,7 @@
 #include <linux/timer.h>
 #include <linux/cpu.h>
 #include <linux/init.h>
+#include <linux/io.h>
 
 #include <asm/uv/uv_mmrs.h>
 #include <asm/uv/uv_hub.h>
@@ -34,6 +35,17 @@ DEFINE_PER_CPU(int, x2apic_extra_bits);
 
 static enum uv_system_type uv_system_type;
 
+static int early_get_nodeid(void)
+{
+        union uvh_node_id_u node_id;
+        unsigned long *mmr;
+
+        mmr = early_ioremap(UV_LOCAL_MMR_BASE | UVH_NODE_ID, sizeof(*mmr));
+        node_id.v = *mmr;
+        early_iounmap(mmr, sizeof(*mmr));
+        return node_id.s.node_id;
+}
+
 static int uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 {
         if (!strcmp(oem_id, "SGI")) {
@@ -42,6 +54,8 @@ static int uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
                 else if (!strcmp(oem_table_id, "UVX"))
                         uv_system_type = UV_X2APIC;
                 else if (!strcmp(oem_table_id, "UVH")) {
+                        __get_cpu_var(x2apic_extra_bits) =
+                                early_get_nodeid() << (UV_APIC_PNODE_SHIFT - 1);
                         uv_system_type = UV_NON_UNIQUE_APIC;
                         return 1;
                 }
@@ -638,6 +652,7 @@ void __init uv_system_init(void)
                 if (uv_node_to_blade[nid] >= 0)
                         continue;
                 paddr = node_start_pfn(nid) << PAGE_SHIFT;
+                paddr = uv_soc_phys_ram_to_gpa(paddr);
                 pnode = (paddr >> m_val) & pnode_mask;
                 blade = boot_pnode_to_blade(pnode);
                 uv_node_to_blade[nid] = blade;
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index c4f667896c2..c1caefc82e6 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1203,6 +1203,8 @@ void __cpuinit cpu_init(void)
         load_TR_desc();
         load_LDT(&init_mm.context);
 
+        t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
+
 #ifdef CONFIG_DOUBLEFAULT
         /* Set up doublefault TSS pointer in the GDT */
         __set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index ecdb682ab51..208ecf6643d 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -65,14 +65,18 @@ enum {
 struct acpi_cpufreq_data {
         struct acpi_processor_performance *acpi_data;
         struct cpufreq_frequency_table *freq_table;
-        unsigned int max_freq;
         unsigned int resume;
         unsigned int cpu_feature;
-        u64 saved_aperf, saved_mperf;
 };
 
 static DEFINE_PER_CPU(struct acpi_cpufreq_data *, drv_data);
 
+struct acpi_msr_data {
+        u64 saved_aperf, saved_mperf;
+};
+
+static DEFINE_PER_CPU(struct acpi_msr_data, msr_data);
+
 DEFINE_TRACE(power_mark);
 
 /* acpi_perf_data is a pointer to percpu data. */
@@ -287,11 +291,11 @@ static unsigned int get_measured_perf(struct cpufreq_policy *policy,
                 return 0;
 
         cur.aperf.whole = readin.aperf.whole -
-                                per_cpu(drv_data, cpu)->saved_aperf;
+                                per_cpu(msr_data, cpu).saved_aperf;
         cur.mperf.whole = readin.mperf.whole -
-                                per_cpu(drv_data, cpu)->saved_mperf;
-        per_cpu(drv_data, cpu)->saved_aperf = readin.aperf.whole;
-        per_cpu(drv_data, cpu)->saved_mperf = readin.mperf.whole;
+                                per_cpu(msr_data, cpu).saved_mperf;
+        per_cpu(msr_data, cpu).saved_aperf = readin.aperf.whole;
+        per_cpu(msr_data, cpu).saved_mperf = readin.mperf.whole;
 
 #ifdef __i386__
         /*
@@ -335,7 +339,7 @@ static unsigned int get_measured_perf(struct cpufreq_policy *policy,
 
 #endif
 
-        retval = per_cpu(drv_data, policy->cpu)->max_freq * perf_percent / 100;
+        retval = (policy->cpuinfo.max_freq * perf_percent) / 100;
 
         return retval;
 }
@@ -688,16 +692,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
         /* Check for high latency (>20uS) from buggy BIOSes, like on T42 */
         if (perf->control_register.space_id == ACPI_ADR_SPACE_FIXED_HARDWARE &&
             policy->cpuinfo.transition_latency > 20 * 1000) {
-                static int print_once;
                 policy->cpuinfo.transition_latency = 20 * 1000;
-                if (!print_once) {
-                        print_once = 1;
-                        printk(KERN_INFO "Capping off P-state tranision latency"
-                               " at 20 uS\n");
-                }
+                printk_once(KERN_INFO "Capping off P-state tranision"
+                            " latency at 20 uS\n");
         }
 
-        data->max_freq = perf->states[0].core_frequency * 1000;
         /* table init */
         for (i = 0; i < perf->state_count; i++) {
                 if (i > 0 && perf->states[i].core_frequency >=
@@ -716,6 +715,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
         if (result)
                 goto err_freqfree;
 
+        if (perf->states[0].core_frequency * 1000 != policy->cpuinfo.max_freq)
+                printk(KERN_WARNING FW_WARN "P-state 0 is not max freq\n");
+
         switch (perf->control_register.space_id) {
         case ACPI_ADR_SPACE_SYSTEM_IO:
                 /* Current speed is unknown and not detectable by IO port */
diff --git a/arch/x86/kernel/cpu/mcheck/mce_64.c b/arch/x86/kernel/cpu/mcheck/mce_64.c
index 863f89568b1..6fb0b359d2a 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_64.c
@@ -239,9 +239,10 @@ void machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
                  * Don't get the IP here because it's unlikely to
                  * have anything to do with the actual error location.
                  */
-
-                mce_log(&m);
-                add_taint(TAINT_MACHINE_CHECK);
+                if (!(flags & MCP_DONTLOG)) {
+                        mce_log(&m);
+                        add_taint(TAINT_MACHINE_CHECK);
+                }
 
                 /*
                  * Clear state for this bank.
@@ -452,13 +453,14 @@ void mce_log_therm_throt_event(__u64 status)
  */
 
 static int check_interval = 5 * 60; /* 5 minutes */
-static int next_interval; /* in jiffies */
+static DEFINE_PER_CPU(int, next_interval); /* in jiffies */
 static void mcheck_timer(unsigned long);
 static DEFINE_PER_CPU(struct timer_list, mce_timer);
 
 static void mcheck_timer(unsigned long data)
 {
         struct timer_list *t = &per_cpu(mce_timer, data);
+        int *n;
 
         WARN_ON(smp_processor_id() != data);
 
@@ -470,14 +472,14 @@ static void mcheck_timer(unsigned long data)
          * Alert userspace if needed. If we logged an MCE, reduce the
          * polling interval, otherwise increase the polling interval.
          */
+        n = &__get_cpu_var(next_interval);
         if (mce_notify_user()) {
-                next_interval = max(next_interval/2, HZ/100);
+                *n = max(*n/2, HZ/100);
         } else {
-                next_interval = min(next_interval * 2,
-                                (int)round_jiffies_relative(check_interval*HZ));
+                *n = min(*n*2, (int)round_jiffies_relative(check_interval*HZ));
         }
 
-        t->expires = jiffies + next_interval;
+        t->expires = jiffies + *n;
         add_timer(t);
 }
 
@@ -584,7 +586,7 @@ static void mce_init(void *dummy)
          * Log the machine checks left over from the previous reset.
          */
         bitmap_fill(all_banks, MAX_NR_BANKS);
-        machine_check_poll(MCP_UC, &all_banks);
+        machine_check_poll(MCP_UC|(!mce_bootlog ? MCP_DONTLOG : 0), &all_banks);
 
         set_in_cr4(X86_CR4_MCE);
 
@@ -632,14 +634,13 @@ static void mce_cpu_features(struct cpuinfo_x86 *c)
 static void mce_init_timer(void)
 {
         struct timer_list *t = &__get_cpu_var(mce_timer);
+        int *n = &__get_cpu_var(next_interval);
 
-        /* data race harmless because everyone sets to the same value */
-        if (!next_interval)
-                next_interval = check_interval * HZ;
-        if (!next_interval)
+        *n = check_interval * HZ;
+        if (!*n)
                 return;
         setup_timer(t, mcheck_timer, smp_processor_id());
-        t->expires = round_jiffies(jiffies + next_interval);
+        t->expires = round_jiffies(jiffies + *n);
         add_timer(t);
 }
 
@@ -907,7 +908,6 @@ static void mce_cpu_restart(void *data)
 /* Reinit MCEs after user configuration changes */
 static void mce_restart(void)
 {
-        next_interval = check_interval * HZ;
         on_each_cpu(mce_cpu_restart, NULL, 1);
 }
 
@@ -1110,7 +1110,8 @@ static int __cpuinit mce_cpu_callback(struct notifier_block *nfb,
                 break;
         case CPU_DOWN_FAILED:
         case CPU_DOWN_FAILED_FROZEN:
-                t->expires = round_jiffies(jiffies + next_interval);
+                t->expires = round_jiffies(jiffies +
+                                           __get_cpu_var(next_interval));
                 add_timer_on(t, cpu);
                 smp_call_function_single(cpu, mce_reenable_cpu, &action, 1);
                 break;
diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel_64.c b/arch/x86/kernel/cpu/mcheck/mce_intel_64.c
index d6b72df89d6..cef3ee30744 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_intel_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_intel_64.c
@@ -151,10 +151,11 @@ static void print_update(char *type, int *hdr, int num)
 static void cmci_discover(int banks, int boot)
 {
         unsigned long *owned = (void *)&__get_cpu_var(mce_banks_owned);
+        unsigned long flags;
         int hdr = 0;
         int i;
 
-        spin_lock(&cmci_discover_lock);
+        spin_lock_irqsave(&cmci_discover_lock, flags);
         for (i = 0; i < banks; i++) {
                 u64 val;
 
@@ -184,7 +185,7 @@ static void cmci_discover(int banks, int boot)
                         WARN_ON(!test_bit(i, __get_cpu_var(mce_poll_banks)));
                 }
         }
-        spin_unlock(&cmci_discover_lock);
+        spin_unlock_irqrestore(&cmci_discover_lock, flags);
         if (hdr)
                 printk(KERN_CONT "\n");
 }
@@ -211,13 +212,14 @@ void cmci_recheck(void)
  */
 void cmci_clear(void)
 {
+        unsigned long flags;
         int i;
         int banks;
         u64 val;
 
         if (!cmci_supported(&banks))
                 return;
-        spin_lock(&cmci_discover_lock);
+        spin_lock_irqsave(&cmci_discover_lock, flags);
         for (i = 0; i < banks; i++) {
                 if (!test_bit(i, __get_cpu_var(mce_banks_owned)))
                         continue;
@@ -227,7 +229,7 @@ void cmci_clear(void)
                 wrmsrl(MSR_IA32_MC0_CTL2 + i, val);
                 __clear_bit(i, __get_cpu_var(mce_banks_owned));
         }
-        spin_unlock(&cmci_discover_lock);
+        spin_unlock_irqrestore(&cmci_discover_lock, flags);
 }
 
 /*
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
index f93047fed79..d5e30397246 100644
--- a/arch/x86/kernel/cpu/proc.c
+++ b/arch/x86/kernel/cpu/proc.c
@@ -14,7 +14,7 @@ static void show_cpuinfo_core(struct seq_file *m, struct cpuinfo_x86 *c,
         if (c->x86_max_cores * smp_num_siblings > 1) {
                 seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
                 seq_printf(m, "siblings\t: %d\n",
-                           cpumask_weight(cpu_sibling_mask(cpu)));
+                           cpumask_weight(cpu_core_mask(cpu)));
                 seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
                 seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
                 seq_printf(m, "apicid\t\t: %d\n", c->apicid);
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index ef2c3563357..00628130292 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -1074,12 +1074,13 @@ u64 __init early_reserve_e820(u64 startt, u64 sizet, u64 align)
         u64 addr;
         u64 start;
 
-        start = startt;
-        while (size < sizet && (start + 1))
+        for (start = startt; ; start += size) {
                 start = find_e820_area_size(start, &size, align);
-
-        if (size < sizet)
-                return 0;
+                if (!(start + 1))
+                        return 0;
+                if (size >= sizet)
+                        break;
+        }
 
 #ifdef CONFIG_X86_32
         if (start >= MAXMEM)
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index a331ec38af9..38946c6e843 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -1410,7 +1410,10 @@ ENTRY(paranoid_exit)
 paranoid_swapgs:
         TRACE_IRQS_IRETQ 0
         SWAPGS_UNSAFE_STACK
+        RESTORE_ALL 8
+        jmp irq_return
 paranoid_restore:
+        TRACE_IRQS_IRETQ 0
         RESTORE_ALL 8
         jmp irq_return
 paranoid_userspace:
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 648b3a2a3a4..81408b93f88 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -236,6 +236,10 @@ static void hpet_stop_counter(void)
         unsigned long cfg = hpet_readl(HPET_CFG);
         cfg &= ~HPET_CFG_ENABLE;
         hpet_writel(cfg, HPET_CFG);
+}
+
+static void hpet_reset_counter(void)
+{
         hpet_writel(0, HPET_COUNTER);
         hpet_writel(0, HPET_COUNTER + 4);
 }
@@ -250,6 +254,7 @@ static void hpet_start_counter(void)
 
 static void hpet_restart_counter(void)
 {
         hpet_stop_counter();
+        hpet_reset_counter();
         hpet_start_counter();
 }
@@ -309,7 +314,7 @@ static int hpet_setup_msi_irq(unsigned int irq);
 static void hpet_set_mode(enum clock_event_mode mode,
                           struct clock_event_device *evt, int timer)
 {
-        unsigned long cfg;
+        unsigned long cfg, cmp, now;
         uint64_t delta;
 
         switch (mode) {
@@ -317,12 +322,23 @@ static void hpet_set_mode(enum clock_event_mode mode,
                 hpet_stop_counter();
                 delta = ((uint64_t)(NSEC_PER_SEC/HZ)) * evt->mult;
                 delta >>= evt->shift;
+                now = hpet_readl(HPET_COUNTER);
+                cmp = now + (unsigned long) delta;
                 cfg = hpet_readl(HPET_Tn_CFG(timer));
                 /* Make sure we use edge triggered interrupts */
                 cfg &= ~HPET_TN_LEVEL;
                 cfg |= HPET_TN_ENABLE | HPET_TN_PERIODIC |
                        HPET_TN_SETVAL | HPET_TN_32BIT;
                 hpet_writel(cfg, HPET_Tn_CFG(timer));
+                hpet_writel(cmp, HPET_Tn_CMP(timer));
+                udelay(1);
+                /*
+                 * HPET on AMD 81xx needs a second write (with HPET_TN_SETVAL
+                 * cleared) to T0_CMP to set the period. The HPET_TN_SETVAL
+                 * bit is automatically cleared after the first write.
+                 * (See AMD-8111 HyperTransport I/O Hub Data Sheet,
+                 * Publication # 24674)
+                 */
                 hpet_writel((unsigned long) delta, HPET_Tn_CMP(timer));
                 hpet_start_counter();
                 hpet_print_config();
@@ -722,7 +738,7 @@ static int hpet_cpuhp_notify(struct notifier_block *n,
 /*
  * Clock source related code
  */
-static cycle_t read_hpet(void)
+static cycle_t read_hpet(struct clocksource *cs)
 {
         return (cycle_t)hpet_readl(HPET_COUNTER);
 }
@@ -756,7 +772,7 @@ static int hpet_clocksource_register(void)
         hpet_restart_counter();
 
         /* Verify whether hpet counter works */
-        t1 = read_hpet();
+        t1 = hpet_readl(HPET_COUNTER);
         rdtscll(start);
 
         /*
@@ -770,7 +786,7 @@ static int hpet_clocksource_register(void)
                 rdtscll(now);
         } while ((now - start) < 200000UL);
 
-        if (t1 == read_hpet()) {
+        if (t1 == hpet_readl(HPET_COUNTER)) {
                 printk(KERN_WARNING
                        "HPET counter not counting. HPET disabled\n");
                 return -ENODEV;
diff --git a/arch/x86/kernel/i8253.c b/arch/x86/kernel/i8253.c
index 3475440baa5..c2e0bb0890d 100644
--- a/arch/x86/kernel/i8253.c
+++ b/arch/x86/kernel/i8253.c
@@ -129,7 +129,7 @@ void __init setup_pit_timer(void)
  * to just read by itself. So use jiffies to emulate a free
  * running counter:
  */
-static cycle_t pit_read(void)
+static cycle_t pit_read(struct clocksource *cs)
 {
         static int old_count;
         static u32 old_jifs;
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index 137f2e8132d..223af43f152 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -77,6 +77,11 @@ static cycle_t kvm_clock_read(void)
         return ret;
 }
 
+static cycle_t kvm_clock_get_cycles(struct clocksource *cs)
+{
+        return kvm_clock_read();
+}
+
 /*
  * If we don't do that, there is the possibility that the guest
  * will calibrate under heavy load - thus, getting a lower lpj -
@@ -107,7 +112,7 @@ static void kvm_get_preset_lpj(void)
 
 static struct clocksource kvm_clock = {
         .name = "kvm-clock",
-        .read = kvm_clock_read,
+        .read = kvm_clock_get_cycles,
         .rating = 400,
         .mask = CLOCKSOURCE_MASK(64),
         .mult = 1 << KVM_SCALE,
diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
index e7368c1da01..c1c429d0013 100644
--- a/arch/x86/kernel/machine_kexec_32.c
+++ b/arch/x86/kernel/machine_kexec_32.c
@@ -194,7 +194,7 @@ void machine_kexec(struct kimage *image)
                                 unsigned int preserve_context);
 
 #ifdef CONFIG_KEXEC_JUMP
-        if (kexec_image->preserve_context)
+        if (image->preserve_context)
                 save_processor_state();
 #endif
 
@@ -253,7 +253,7 @@ void machine_kexec(struct kimage *image)
                                            image->preserve_context);
 
 #ifdef CONFIG_KEXEC_JUMP
-        if (kexec_image->preserve_context)
+        if (image->preserve_context)
                 restore_processor_state();
 #endif
 
diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
index 89cea4d4467..84c3bf209e9 100644
--- a/arch/x86/kernel/machine_kexec_64.c
+++ b/arch/x86/kernel/machine_kexec_64.c
@@ -274,7 +274,7 @@ void machine_kexec(struct kimage *image)
         int save_ftrace_enabled;
 
 #ifdef CONFIG_KEXEC_JUMP
-        if (kexec_image->preserve_context)
+        if (image->preserve_context)
                 save_processor_state();
 #endif
 
@@ -333,7 +333,7 @@ void machine_kexec(struct kimage *image)
                                            image->preserve_context);
 
 #ifdef CONFIG_KEXEC_JUMP
-        if (kexec_image->preserve_context)
+        if (image->preserve_context)
                 restore_processor_state();
 #endif
 
diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
index e95022e4f5d..7563b31b4f0 100644
--- a/arch/x86/kernel/quirks.c
+++ b/arch/x86/kernel/quirks.c
@@ -261,8 +261,6 @@ static void old_ich_force_enable_hpet_user(struct pci_dev *dev)
 {
         if (hpet_force_user)
                 old_ich_force_enable_hpet(dev);
-        else
-                hpet_print_force_info();
 }
 
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_1,
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 7a567ebe636..d57de05dc43 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -699,7 +699,7 @@ static struct clocksource clocksource_tsc;
  * code, which is necessary to support wrapping clocksources like pm
  * timer.
  */
-static cycle_t read_tsc(void)
+static cycle_t read_tsc(struct clocksource *cs)
 {
         cycle_t ret = (cycle_t)get_cycles();
 
diff --git a/arch/x86/kernel/uv_time.c b/arch/x86/kernel/uv_time.c
index 2ffb6c53326..583f11d5c48 100644
--- a/arch/x86/kernel/uv_time.c
+++ b/arch/x86/kernel/uv_time.c
@@ -29,7 +29,7 @@
 
 #define RTC_NAME        "sgi_rtc"
 
-static cycle_t uv_read_rtc(void);
+static cycle_t uv_read_rtc(struct clocksource *cs);
 static int uv_rtc_next_event(unsigned long, struct clock_event_device *);
 static void uv_rtc_timer_setup(enum clock_event_mode,
                                struct clock_event_device *);
@@ -123,7 +123,7 @@ static int uv_setup_intr(int cpu, u64 expires)
         /* Initialize comparator value */
         uv_write_global_mmr64(pnode, UVH_INT_CMPB, expires);
 
-        return (expires < uv_read_rtc() && !uv_intr_pending(pnode));
+        return (expires < uv_read_rtc(NULL) && !uv_intr_pending(pnode));
 }
 
 /*
@@ -256,7 +256,7 @@ static int uv_rtc_unset_timer(int cpu)
 
         spin_lock_irqsave(&head->lock, flags);
 
-        if (head->next_cpu == bcpu && uv_read_rtc() >= *t)
+        if (head->next_cpu == bcpu && uv_read_rtc(NULL) >= *t)
                 rc = 1;
 
         *t = ULLONG_MAX;
@@ -278,7 +278,7 @@ static int uv_rtc_unset_timer(int cpu)
 /*
  * Read the RTC.
  */
-static cycle_t uv_read_rtc(void)
+static cycle_t uv_read_rtc(struct clocksource *cs)
 {
         return (cycle_t)uv_read_local_mmr(UVH_RTC);
 }
@@ -291,7 +291,7 @@ static int uv_rtc_next_event(unsigned long delta,
 {
         int ced_cpu = cpumask_first(ced->cpumask);
 
-        return uv_rtc_set_timer(ced_cpu, delta + uv_read_rtc());
+        return uv_rtc_set_timer(ced_cpu, delta + uv_read_rtc(NULL));
 }
 
 /*
diff --git a/arch/x86/kernel/vmiclock_32.c b/arch/x86/kernel/vmiclock_32.c
index d303369a7ba..2b3eb82efee 100644
--- a/arch/x86/kernel/vmiclock_32.c
+++ b/arch/x86/kernel/vmiclock_32.c
@@ -283,7 +283,7 @@ void __devinit vmi_time_ap_init(void)
 /** vmi clocksource */
 static struct clocksource clocksource_vmi;
 
-static cycle_t read_real_cycles(void)
+static cycle_t read_real_cycles(struct clocksource *cs)
 {
         cycle_t ret = (cycle_t)vmi_timer_ops.get_cycle_counter(VMI_CYCLES_REAL);
         return max(ret, clocksource_vmi.cycle_last);
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
index 0a5b04aa98f..c5ee17e8c6d 100644
--- a/arch/x86/kernel/xsave.c
+++ b/arch/x86/kernel/xsave.c
@@ -89,7 +89,7 @@ int save_i387_xstate(void __user *buf)
 
         if (!used_math())
                 return 0;
-        clear_used_math(); /* trigger finit */
+
         if (task_thread_info(tsk)->status & TS_USEDFPU) {
                 /*
                  * Start with clearing the user buffer. This will present a
@@ -114,6 +114,8 @@ int save_i387_xstate(void __user *buf)
                         return -1;
         }
 
+        clear_used_math(); /* trigger finit */
+
         if (task_thread_info(tsk)->status & TS_XSAVE) {
                 struct _fpstate __user *fx = buf;
                 struct _xstate __user *x = buf;
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 2a36f7f7c4c..b6caf1329b1 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1248,7 +1248,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
         pgprintk("%s: adding gfn %lx role %x\n", __func__, gfn, role.word);
         sp->gfn = gfn;
         sp->role = role;
-        sp->global = role.cr4_pge;
+        sp->global = 0;
         hlist_add_head(&sp->hash_link, bucket);
         if (!direct) {
                 if (rmap_write_protect(vcpu->kvm, gfn))
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 8ca100a9eca..7c1ce5ac613 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2775,6 +2775,9 @@ out:
 
 void kvm_arch_exit(void)
 {
+        if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
+                cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block,
+                                            CPUFREQ_TRANSITION_NOTIFIER);
         kvm_x86_ops = NULL;
         kvm_mmu_module_exit();
 }
@@ -4159,6 +4162,11 @@ EXPORT_SYMBOL_GPL(kvm_put_guest_fpu);
 
 void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
 {
+        if (vcpu->arch.time_page) {
+                kvm_release_page_dirty(vcpu->arch.time_page);
+                vcpu->arch.time_page = NULL;
+        }
+
         kvm_x86_ops->vcpu_free(vcpu);
 }
 
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index a2085368a3d..ca7ec44bafc 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -663,7 +663,7 @@ static unsigned long lguest_tsc_khz(void)
 
 /* If we can't use the TSC, the kernel falls back to our lower-priority
  * "lguest_clock", where we read the time value given to us by the Host. */
-static cycle_t lguest_clock_read(void)
+static cycle_t lguest_clock_read(struct clocksource *cs)
 {
         unsigned long sec, nsec;
 
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 4d67c33a2e1..95f5ecf2be5 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -7,6 +7,7 @@
 #include <asm/page.h>
 #include <asm/page_types.h>
 #include <asm/sections.h>
+#include <asm/setup.h>
 #include <asm/system.h>
 #include <asm/tlbflush.h>
 #include <asm/tlb.h>
@@ -366,8 +367,23 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 #endif
 
 #ifdef CONFIG_X86_64
-        if (!after_bootmem)
+        if (!after_bootmem && !start) {
+                pud_t *pud;
+                pmd_t *pmd;
+
                 mmu_cr4_features = read_cr4();
+
+                /*
+                 * _brk_end cannot change anymore, but it and _end may be
+                 * located on different 2M pages. cleanup_highmap(), however,
+                 * can only consider _end when it runs, so destroy any
+                 * mappings beyond _brk_end here.
+                 */
+                pud = pud_offset(pgd_offset_k(_brk_end), _brk_end);
+                pmd = pmd_offset(pud, _brk_end - 1);
+                while (++pmd <= pmd_offset(pud, (unsigned long)_end - 1))
+                        pmd_clear(pmd);
+        }
 #endif
 
         __flush_tlb_all();
diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c
index 4f115e00486..50dc802a1c4 100644
--- a/arch/x86/mm/kmmio.c
+++ b/arch/x86/mm/kmmio.c
@@ -87,7 +87,7 @@ static struct kmmio_probe *get_kmmio_probe(unsigned long addr)
 {
         struct kmmio_probe *p;
         list_for_each_entry_rcu(p, &kmmio_probes, list) {
-                if (addr >= p->addr && addr <= (p->addr + p->len))
+                if (addr >= p->addr && addr < (p->addr + p->len))
                         return p;
         }
         return NULL;
diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
index 3daefa04ace..d2530062fe0 100644
--- a/arch/x86/mm/numa_32.c
+++ b/arch/x86/mm/numa_32.c
@@ -257,7 +257,7 @@ void resume_map_numa_kva(pgd_t *pgd_base)
 }
 #endif
 
-static unsigned long calculate_numa_remap_pages(void)
+static __init unsigned long calculate_numa_remap_pages(void)
 {
         int nid;
         unsigned long size, reserve_pages = 0;
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index d73aaa89237..2d05a12029d 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -188,6 +188,9 @@ void __init setup_node_bootmem(int nodeid, unsigned long start,
         const int pgdat_size = roundup(sizeof(pg_data_t), PAGE_SIZE);
         int nid;
 
+        if (!end)
+                return;
+
         start = roundup(start, ZONE_ALIGN);
 
         printk(KERN_INFO "Bootmem setup node %d %016lx-%016lx\n", nodeid,
diff --git a/arch/x86/mm/srat_32.c b/arch/x86/mm/srat_32.c
index 16ae70fc57e..29a0e37114f 100644
--- a/arch/x86/mm/srat_32.c
+++ b/arch/x86/mm/srat_32.c
@@ -216,7 +216,7 @@ int __init get_memcfg_from_srat(void)
 
         if (num_memory_chunks == 0) {
                 printk(KERN_WARNING
-                        "could not finy any ACPI SRAT memory areas.\n");
+                        "could not find any ACPI SRAT memory areas.\n");
                 goto out_fail;
         }
 
diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c
index c7d272b8574..01765955baa 100644
--- a/arch/x86/mm/srat_64.c
+++ b/arch/x86/mm/srat_64.c
@@ -28,6 +28,7 @@ int acpi_numa __initdata;
 static struct acpi_table_slit *acpi_slit;
 
 static nodemask_t nodes_parsed __initdata;
+static nodemask_t cpu_nodes_parsed __initdata;
 static struct bootnode nodes[MAX_NUMNODES] __initdata;
 static struct bootnode nodes_add[MAX_NUMNODES];
 static int found_add_area __initdata;
@@ -141,6 +142,7 @@ acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa)
 
         apic_id = pa->apic_id;
         apicid_to_node[apic_id] = node;
+        node_set(node, cpu_nodes_parsed);
         acpi_numa = 1;
         printk(KERN_INFO "SRAT: PXM %u -> APIC %u -> Node %u\n",
                pxm, apic_id, node);
@@ -174,6 +176,7 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
         else
                 apic_id = pa->apic_id;
         apicid_to_node[apic_id] = node;
+        node_set(node, cpu_nodes_parsed);
         acpi_numa = 1;
         printk(KERN_INFO "SRAT: PXM %u -> APIC %u -> Node %u\n",
                pxm, apic_id, node);
@@ -358,6 +361,7 @@ static void __init unparse_node(int node)
 {
         int i;
         node_clear(node, nodes_parsed);
+        node_clear(node, cpu_nodes_parsed);
         for (i = 0; i < MAX_LOCAL_APIC; i++) {
                 if (apicid_to_node[i] == node)
                         apicid_to_node[i] = NUMA_NO_NODE;
@@ -402,7 +406,8 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end)
                 return -1;
         }
 
-        node_possible_map = nodes_parsed;
+        /* Account for nodes with cpus and no memory */
+        nodes_or(node_possible_map, nodes_parsed, cpu_nodes_parsed);
 
         /* Finally register nodes */
         for_each_node_mask(i, node_possible_map)
diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c
index 9bb09823b36..f893d6a6e80 100644
--- a/arch/x86/pci/amd_bus.c
+++ b/arch/x86/pci/amd_bus.c
@@ -94,12 +94,16 @@ struct pci_root_info {
 static int pci_root_num;
 static struct pci_root_info pci_root_info[PCI_ROOT_NR];
 
-void set_pci_bus_resources_arch_default(struct pci_bus *b)
+void x86_pci_root_bus_res_quirks(struct pci_bus *b)
 {
         int i;
         int j;
         struct pci_root_info *info;
 
+        /* don't go for it if _CRS is used */
+        if (pci_probe & PCI_USE__CRS)
+                return;
+
         /* if only one root bus, don't need to anything */
         if (pci_root_num < 2)
                 return;
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index 8c362b96b64..2202b6257b8 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -147,10 +147,13 @@ static void __devinit pcibios_fixup_device_resources(struct pci_dev *dev)
  *  are examined.
  */
 
-void __devinit  pcibios_fixup_bus(struct pci_bus *b)
+void __devinit pcibios_fixup_bus(struct pci_bus *b)
 {
         struct pci_dev *dev;
 
+        /* root bus? */
+        if (!b->parent)
+                x86_pci_root_bus_res_quirks(b);
         pci_read_bridge_bases(b);
         list_for_each_entry(dev, &b->devices, bus_list)
                 pcibios_fixup_device_resources(dev);
diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c
index f1817f71e00..a85bef20a3b 100644
--- a/arch/x86/pci/i386.c
+++ b/arch/x86/pci/i386.c
@@ -238,6 +238,10 @@ void __init pcibios_resource_survey(void)
  */
 fs_initcall(pcibios_assign_resources);
 
+void __weak x86_pci_root_bus_res_quirks(struct pci_bus *b)
+{
+}
+
 /*
  *  If we set up a device for bus mastering, we need to check the latency
  *  timer as certain crappy BIOSes forget to set it properly.
diff --git a/arch/x86/pci/mmconfig-shared.c b/arch/x86/pci/mmconfig-shared.c
index 905bb526b13..5fa10bb9604 100644
--- a/arch/x86/pci/mmconfig-shared.c
+++ b/arch/x86/pci/mmconfig-shared.c
@@ -375,7 +375,7 @@ static acpi_status __init check_mcfg_resource(struct acpi_resource *res,
                 if (!fixmem32)
                         return AE_OK;
                 if ((mcfg_res->start >= fixmem32->address) &&
-                    (mcfg_res->end < (fixmem32->address +
+                    (mcfg_res->end <= (fixmem32->address +
                                       fixmem32->address_length))) {
                         mcfg_res->flags = 1;
                         return AE_CTRL_TERMINATE;
@@ -392,7 +392,7 @@ static acpi_status __init check_mcfg_resource(struct acpi_resource *res,
                 return AE_OK;
 
         if ((mcfg_res->start >= address.minimum) &&
-            (mcfg_res->end < (address.minimum + address.address_length))) {
+            (mcfg_res->end <= (address.minimum + address.address_length))) {
                 mcfg_res->flags = 1;
                 return AE_CTRL_TERMINATE;
         }
@@ -439,7 +439,7 @@ static int __init is_mmconf_reserved(check_reserved_t is_reserved,
         u64 old_size = size;
         int valid = 0;
 
-        while (!is_reserved(addr, addr + size - 1, E820_RESERVED)) {
+        while (!is_reserved(addr, addr + size, E820_RESERVED)) {
                 size >>= 1;
                 if (size < (16UL<<20))
                         break;
diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c
index d9d35824c56..6a40b78b46a 100644
--- a/arch/x86/vdso/vclock_gettime.c
+++ b/arch/x86/vdso/vclock_gettime.c
@@ -104,11 +104,13 @@ notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
 {
         long ret;
         if (likely(gtod->sysctl_enabled && gtod->clock.vread)) {
-                BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
-                             offsetof(struct timespec, tv_nsec) ||
-                             sizeof(*tv) != sizeof(struct timespec));
-                do_realtime((struct timespec *)tv);
-                tv->tv_usec /= 1000;
+                if (likely(tv != NULL)) {
+                        BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
+                                     offsetof(struct timespec, tv_nsec) ||
+                                     sizeof(*tv) != sizeof(struct timespec));
+                        do_realtime((struct timespec *)tv);
+                        tv->tv_usec /= 1000;
+                }
                 if (unlikely(tz != NULL)) {
                         /* Avoid memcpy. Some old compilers fail to inline it */
                         tz->tz_minuteswest = gtod->sys_tz.tz_minuteswest;
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 9842b121240..e25a78e1113 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1794,6 +1794,11 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
 
         pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(swapper_pg_dir)));
 
+        reserve_early(__pa(xen_start_info->pt_base),
+                      __pa(xen_start_info->pt_base +
+                           xen_start_info->nr_pt_frames * PAGE_SIZE),
+                      "XEN PAGETABLES");
+
         return swapper_pg_dir;
 }
 #endif  /* CONFIG_X86_64 */
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index 14f24062349..0a5aa44299a 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -213,6 +213,11 @@ cycle_t xen_clocksource_read(void)
         return ret;
 }
 
+static cycle_t xen_clocksource_get_cycles(struct clocksource *cs)
+{
+        return xen_clocksource_read();
+}
+
 static void xen_read_wallclock(struct timespec *ts)
 {
         struct shared_info *s = HYPERVISOR_shared_info;
@@ -241,7 +246,7 @@ int xen_set_wallclock(unsigned long now)
 static struct clocksource xen_clocksource __read_mostly = {
         .name = "xen",
         .rating = 400,
-        .read = xen_clocksource_read,
+        .read = xen_clocksource_get_cycles,
         .mask = ~0,
         .mult = 1<<XEN_SHIFT,           /* time directly in nanoseconds */
         .shift = XEN_SHIFT,