Diffstat (limited to 'arch/x86')
70 files changed, 2462 insertions, 386 deletions
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index a12dbb2b93f..845ea2b2d48 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -25,6 +25,18 @@ config X86
 	select HAVE_KVM if ((X86_32 && !X86_VOYAGER && !X86_VISWS && !X86_NUMAQ) || X86_64)
 	select HAVE_ARCH_KGDB if !X86_VOYAGER
 
+config DEFCONFIG_LIST
+	string
+	depends on X86_32
+	option defconfig_list
+	default "arch/x86/configs/i386_defconfig"
+
+config DEFCONFIG_LIST
+	string
+	depends on X86_64
+	option defconfig_list
+	default "arch/x86/configs/x86_64_defconfig"
+
 config GENERIC_LOCKBREAK
 	def_bool n
 
@@ -180,7 +192,7 @@ config X86_HT
 
 config X86_BIOS_REBOOT
 	bool
-	depends on X86_32 && !(X86_VISWS || X86_VOYAGER)
+	depends on !X86_VISWS && !X86_VOYAGER
 	default y
 
 config X86_TRAMPOLINE
@@ -537,9 +549,6 @@ config CALGARY_IOMMU_ENABLED_BY_DEFAULT
 	  Calgary anyway, pass 'iommu=calgary' on the kernel command line.
 	  If unsure, say Y.
 
-config IOMMU_HELPER
-	def_bool (CALGARY_IOMMU || GART_IOMMU)
-
 # need this always selected by IOMMU for the VIA workaround
 config SWIOTLB
 	bool
@@ -550,6 +559,8 @@ config SWIOTLB
 	  access 32-bits of memory can be used on systems with more than 3 GB
 	  of memory. If unsure, say Y.
 
+config IOMMU_HELPER
+	def_bool (CALGARY_IOMMU || GART_IOMMU || SWIOTLB)
 
 config NR_CPUS
 	int "Maximum number of CPUs (2-255)"
@@ -1162,7 +1173,7 @@ source kernel/Kconfig.hz
 
 config KEXEC
 	bool "kexec system call"
-	depends on X86_64 || X86_BIOS_REBOOT
+	depends on X86_BIOS_REBOOT
 	help
 	  kexec is a system call that implements the ability to shutdown your
 	  current kernel, and to start another kernel. It is like a reboot
@@ -1505,6 +1516,10 @@ config PCI_GODIRECT
 config PCI_GOANY
 	bool "Any"
 
+config PCI_GOOLPC
+	bool "OLPC"
+	depends on OLPC
+
 endchoice
 
 config PCI_BIOS
@@ -1514,12 +1529,17 @@
 
 # x86-64 doesn't support PCI BIOS access from long mode so always go direct.
 config PCI_DIRECT
 	def_bool y
-	depends on PCI && (X86_64 || (PCI_GODIRECT || PCI_GOANY) || X86_VISWS)
+	depends on PCI && (X86_64 || (PCI_GODIRECT || PCI_GOANY || PCI_GOOLPC) || X86_VISWS)
 
 config PCI_MMCONFIG
 	def_bool y
 	depends on X86_32 && PCI && ACPI && (PCI_GOMMCONFIG || PCI_GOANY)
 
+config PCI_OLPC
+	bool
+	depends on PCI && PCI_GOOLPC
+	default y
+
 config PCI_DOMAINS
 	def_bool y
 	depends on PCI
@@ -1639,6 +1659,14 @@ config GEODE_MFGPT_TIMER
 	  MFGPTs have a better resolution and max interval than the
 	  generic PIT, and are suitable for use as high-res timers.
 
+config OLPC
+	bool "One Laptop Per Child support"
+	depends on MGEODE_LX
+	default n
+	help
+	  Add support for detecting the unique features of the OLPC
+	  XO hardware.
+
 endif # X86_32
 
 config K8_NB
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
index 7ef18b01f0b..2ad6301849a 100644
--- a/arch/x86/Kconfig.cpu
+++ b/arch/x86/Kconfig.cpu
@@ -278,11 +278,6 @@ config GENERIC_CPU
 
 endchoice
 
-config X86_CPU
-	def_bool y
-	select GENERIC_FIND_FIRST_BIT
-	select GENERIC_FIND_NEXT_BIT
-
 config X86_GENERIC
 	bool "Generic x86 support"
 	depends on X86_32
@@ -297,6 +292,11 @@ config X86_GENERIC
 
 endif
 
+config X86_CPU
+	def_bool y
+	select GENERIC_FIND_FIRST_BIT
+	select GENERIC_FIND_NEXT_BIT
+
 #
 # Define implied options from the CPU selection here
 config X86_L1_CACHE_BYTES
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index 5b1979a45a1..ac1e31ba479 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -118,7 +118,6 @@ config DEBUG_NX_TEST
 config 4KSTACKS
 	bool "Use 4Kb for kernel stacks instead of 8Kb"
 	depends on X86_32
-	default y
 	help
 	  If you say Y here the kernel will use a 4Kb stacksize for the
 	  kernel stack attached to each process/thread. This facilitates
@@ -256,11 +255,9 @@ config CPA_DEBUG
 	help
 	  Do change_page_attr() self-tests every 30 seconds.
 
-endmenu
-
 config OPTIMIZE_INLINING
 	bool "Allow gcc to uninline functions marked 'inline'"
-	default y
+	depends on BROKEN
 	help
 	  This option determines if the kernel forces gcc to inline the functions
 	  developers have marked 'inline'. Doing so takes away freedom from gcc to
@@ -270,3 +267,6 @@
 	  this algorithm is so good that allowing gcc4 to make the decision can
 	  become the default in the future, until then this option is there to
 	  test gcc for this.
+
+endmenu
+
diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/boot/compressed/relocs.c
index d01ea42187e..edaadea90aa 100644
--- a/arch/x86/boot/compressed/relocs.c
+++ b/arch/x86/boot/compressed/relocs.c
@@ -191,7 +191,7 @@ static void read_ehdr(FILE *fp)
 		die("Cannot read ELF header: %s\n", strerror(errno));
 	}
-	if (memcmp(ehdr.e_ident, ELFMAG, 4) != 0) {
+	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0) {
 		die("No ELF magic\n");
 	}
 	if (ehdr.e_ident[EI_CLASS] != ELFCLASS32) {
diff --git a/arch/x86/boot/edd.c b/arch/x86/boot/edd.c
index d84a48ece78..03399d64013 100644
--- a/arch/x86/boot/edd.c
+++ b/arch/x86/boot/edd.c
@@ -126,17 +126,25 @@ void query_edd(void)
 {
 	char eddarg[8];
 	int do_mbr = 1;
+#ifdef CONFIG_EDD_OFF
+	int do_edd = 0;
+#else
 	int do_edd = 1;
+#endif
 	int be_quiet;
 	int devno;
 	struct edd_info ei, *edp;
 	u32 *mbrptr;
 
 	if (cmdline_find_option("edd", eddarg, sizeof eddarg) > 0) {
-		if (!strcmp(eddarg, "skipmbr") || !strcmp(eddarg, "skip"))
+		if (!strcmp(eddarg, "skipmbr") || !strcmp(eddarg, "skip")) {
+			do_edd = 1;
 			do_mbr = 0;
+		}
 		else if (!strcmp(eddarg, "off"))
 			do_edd = 0;
+		else if (!strcmp(eddarg, "on"))
+			do_edd = 1;
 	}
 
 	be_quiet = cmdline_find_option_bool("quiet");
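The edd.c hunk above only changes which do_edd/do_mbr pair each `edd=` boot argument selects, and lets CONFIG_EDD_OFF flip the default. A minimal user-space sketch of the same decision table; the struct and function are mine for illustration, not boot-code API:

```c
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for the boot code's two flags. */
struct edd_opts { int do_edd, do_mbr; };

static struct edd_opts parse_edd_arg(const char *eddarg)
{
	struct edd_opts o = {
#ifdef CONFIG_EDD_OFF
		.do_edd = 0,	/* new default when CONFIG_EDD_OFF is set */
#else
		.do_edd = 1,
#endif
		.do_mbr = 1,
	};

	if (!strcmp(eddarg, "skipmbr") || !strcmp(eddarg, "skip")) {
		o.do_edd = 1;	/* "skip" now re-enables EDD explicitly */
		o.do_mbr = 0;
	} else if (!strcmp(eddarg, "off"))
		o.do_edd = 0;
	else if (!strcmp(eddarg, "on"))
		o.do_edd = 1;
	return o;
}

int main(void)
{
	const char *args[] = { "on", "off", "skipmbr", "skip" };
	for (unsigned i = 0; i < sizeof(args) / sizeof(args[0]); i++) {
		struct edd_opts o = parse_edd_arg(args[i]);
		printf("edd=%-7s -> do_edd=%d do_mbr=%d\n",
		       args[i], o.do_edd, o.do_mbr);
	}
	return 0;
}
```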
diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
index bbed3a26ce5..cb3856a18c8 100644
--- a/arch/x86/ia32/ia32_signal.c
+++ b/arch/x86/ia32/ia32_signal.c
@@ -128,7 +128,7 @@ asmlinkage long sys32_sigsuspend(int history0, int history1, old_sigset_t mask)
 	current->state = TASK_INTERRUPTIBLE;
 	schedule();
-	set_thread_flag(TIF_RESTORE_SIGMASK);
+	set_restore_sigmask();
 	return -ERESTARTNOHAND;
 }
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index fa19c381954..bbdacb398d4 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -40,7 +40,6 @@ obj-$(CONFIG_STACKTRACE)	+= stacktrace.o
 obj-y				+= cpu/
 obj-y				+= acpi/
 obj-$(CONFIG_X86_BIOS_REBOOT)	+= reboot.o
-obj-$(CONFIG_X86_64)		+= reboot.o
 obj-$(CONFIG_MCA)		+= mca_32.o
 obj-$(CONFIG_X86_MSR)		+= msr.o
 obj-$(CONFIG_X86_CPUID)		+= cpuid.o
@@ -91,6 +90,8 @@ endif
 obj-$(CONFIG_SCx200)		+= scx200.o
 scx200-y			+= scx200_32.o
 
+obj-$(CONFIG_OLPC)		+= olpc.o
+
 ###
 # 64 bit specific files
 ifeq ($(CONFIG_X86_64),y)
@@ -101,4 +102,6 @@ ifeq ($(CONFIG_X86_64),y)
         obj-$(CONFIG_GART_IOMMU)	+= pci-gart_64.o aperture_64.o
         obj-$(CONFIG_CALGARY_IOMMU)	+= pci-calgary_64.o tce_64.o
         obj-$(CONFIG_SWIOTLB)		+= pci-swiotlb_64.o
+
+        obj-$(CONFIG_PCI_MMCONFIG)	+= mmconf-fam10h_64.o
 endif
diff --git a/arch/x86/kernel/acpi/Makefile b/arch/x86/kernel/acpi/Makefile
index 7335959b6af..fd5ca97a2ad 100644
--- a/arch/x86/kernel/acpi/Makefile
+++ b/arch/x86/kernel/acpi/Makefile
@@ -10,5 +10,5 @@ endif
 $(obj)/wakeup_rm.o:    $(obj)/realmode/wakeup.bin
 
 $(obj)/realmode/wakeup.bin: FORCE
-	$(Q)$(MAKE) $(build)=$(obj)/realmode $@
+	$(Q)$(MAKE) $(build)=$(obj)/realmode
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 977ed5cdeaa..c49ebcc6c41 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -771,6 +771,32 @@ static void __init acpi_register_lapic_address(unsigned long address)
 	boot_cpu_physical_apicid = GET_APIC_ID(read_apic_id());
 }
 
+static int __init early_acpi_parse_madt_lapic_addr_ovr(void)
+{
+	int count;
+
+	if (!cpu_has_apic)
+		return -ENODEV;
+
+	/*
+	 * Note that the LAPIC address is obtained from the MADT (32-bit value)
+	 * and (optionally) overriden by a LAPIC_ADDR_OVR entry (64-bit value).
+	 */
+
+	count =
+	    acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE,
+				  acpi_parse_lapic_addr_ovr, 0);
+	if (count < 0) {
+		printk(KERN_ERR PREFIX
+		       "Error parsing LAPIC address override entry\n");
+		return count;
+	}
+
+	acpi_register_lapic_address(acpi_lapic_addr);
+
+	return count;
+}
+
 static int __init acpi_parse_madt_lapic_entries(void)
 {
 	int count;
@@ -901,6 +927,33 @@ static inline int acpi_parse_madt_ioapic_entries(void)
 }
 #endif	/* !CONFIG_X86_IO_APIC */
 
+static void __init early_acpi_process_madt(void)
+{
+#ifdef CONFIG_X86_LOCAL_APIC
+	int error;
+
+	if (!acpi_table_parse(ACPI_SIG_MADT, acpi_parse_madt)) {
+
+		/*
+		 * Parse MADT LAPIC entries
+		 */
+		error = early_acpi_parse_madt_lapic_addr_ovr();
+		if (!error) {
+			acpi_lapic = 1;
+			smp_found_config = 1;
+		}
+		if (error == -EINVAL) {
+			/*
+			 * Dell Precision Workstation 410, 610 come here.
+			 */
+			printk(KERN_ERR PREFIX
+			       "Invalid BIOS MADT, disabling ACPI\n");
+			disable_acpi();
+		}
+	}
+#endif
+}
+
 static void __init acpi_process_madt(void)
 {
 #ifdef CONFIG_X86_LOCAL_APIC
@@ -1233,6 +1286,23 @@ int __init acpi_boot_table_init(void)
 	return 0;
 }
 
+int __init early_acpi_boot_init(void)
+{
+	/*
+	 * If acpi_disabled, bail out
+	 * One exception: acpi=ht continues far enough to enumerate LAPICs
+	 */
+	if (acpi_disabled && !acpi_ht)
+		return 1;
+
+	/*
+	 * Process the Multiple APIC Description Table (MADT), if present
+	 */
+	early_acpi_process_madt();
+
+	return 0;
+}
+
 int __init acpi_boot_init(void)
 {
 	/*
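early_acpi_parse_madt_lapic_addr_ovr() above relies on the usual ACPI table-walk contract: the parser returns the number of matching subtables, or a negative error for a malformed one, so zero matches is still success. A self-contained sketch of that contract; the types and names here are hypothetical, not the kernel's:

```c
#include <stdio.h>

/* Hypothetical table walk modeled on acpi_table_parse_madt(): call a
 * handler for every subtable of the requested type and return how many
 * matched, or a negative errno-style value if a handler rejects one. */
enum subtable_type { LAPIC = 0, LAPIC_ADDR_OVR = 5 };

struct subtable { enum subtable_type type; unsigned long long payload; };

typedef int (*subtable_handler)(const struct subtable *st);

static int parse_subtables(const struct subtable *tbl, int n,
			   enum subtable_type want, subtable_handler fn)
{
	int count = 0;
	for (int i = 0; i < n; i++) {
		if (tbl[i].type != want)
			continue;
		int err = fn(&tbl[i]);
		if (err)
			return err;	/* e.g. -EINVAL for a bad entry */
		count++;
	}
	return count;	/* zero matches is success, not an error */
}

static unsigned long long lapic_addr = 0xfee00000ULL;	/* 32-bit MADT value */

static int handle_addr_ovr(const struct subtable *st)
{
	lapic_addr = st->payload;	/* 64-bit override entry wins */
	return 0;
}

int main(void)
{
	struct subtable madt[] = { { LAPIC, 0 }, { LAPIC_ADDR_OVR, 0xfee00000ULL } };
	int count = parse_subtables(madt, 2, LAPIC_ADDR_OVR, handle_addr_ovr);
	printf("overrides matched: %d, lapic addr: %#llx\n", count, lapic_addr);
	return count < 0;
}
```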
diff --git a/arch/x86/kernel/acpi/realmode/Makefile b/arch/x86/kernel/acpi/realmode/Makefile
index 092900854ac..1c31cc0e9de 100644
--- a/arch/x86/kernel/acpi/realmode/Makefile
+++ b/arch/x86/kernel/acpi/realmode/Makefile
@@ -6,7 +6,8 @@
 # for more details.
 #
 
-targets		:= wakeup.bin wakeup.elf
+always		:= wakeup.bin
+targets		:= wakeup.elf wakeup.lds
 
 wakeup-y	+= wakeup.o wakemain.o video-mode.o copy.o
@@ -48,7 +49,7 @@ LDFLAGS_wakeup.elf	:= -T
 
 CPPFLAGS_wakeup.lds += -P -C
 
-$(obj)/wakeup.elf: $(src)/wakeup.lds $(WAKEUP_OBJS) FORCE
+$(obj)/wakeup.elf: $(obj)/wakeup.lds $(WAKEUP_OBJS) FORCE
 	$(call if_changed,ld)
 
 OBJCOPYFLAGS_wakeup.bin	:= -O binary
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index e4ea362e848..bf9290e2901 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -1192,19 +1192,6 @@ static int suspend(int vetoable)
 	int err;
 	struct apm_user *as;
 
-	if (pm_send_all(PM_SUSPEND, (void *)3)) {
-		/* Vetoed */
-		if (vetoable) {
-			if (apm_info.connection_version > 0x100)
-				set_system_power_state(APM_STATE_REJECT);
-			err = -EBUSY;
-			ignore_sys_suspend = 0;
-			printk(KERN_WARNING "apm: suspend was vetoed.\n");
-			goto out;
-		}
-		printk(KERN_CRIT "apm: suspend was vetoed, but suspending anyway.\n");
-	}
-
 	device_suspend(PMSG_SUSPEND);
 	local_irq_disable();
 	device_power_down(PMSG_SUSPEND);
@@ -1227,9 +1214,7 @@ static int suspend(int vetoable)
 	device_power_up();
 	local_irq_enable();
 	device_resume();
-	pm_send_all(PM_RESUME, (void *)0);
 	queue_event(APM_NORMAL_RESUME, NULL);
- out:
 	spin_lock(&user_list_lock);
 	for (as = user_list; as != NULL; as = as->next) {
 		as->suspend_wait = 0;
@@ -1340,7 +1325,6 @@ static void check_events(void)
 			if ((event != APM_NORMAL_RESUME)
 			    || (ignore_normal_resume == 0)) {
 				device_resume();
-				pm_send_all(PM_RESUME, (void *)0);
 				queue_event(event, NULL);
 			}
 			ignore_normal_resume = 0;
diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c
index 670c3c31128..92588083950 100644
--- a/arch/x86/kernel/asm-offsets_32.c
+++ b/arch/x86/kernel/asm-offsets_32.c
@@ -9,6 +9,7 @@
 #include <linux/signal.h>
 #include <linux/personality.h>
 #include <linux/suspend.h>
+#include <linux/kbuild.h>
 #include <asm/ucontext.h>
 #include "sigframe.h"
 #include <asm/pgtable.h>
@@ -23,14 +24,6 @@
 #include <linux/lguest.h>
 #include "../../../drivers/lguest/lg.h"
 
-#define DEFINE(sym, val) \
-	asm volatile("\n->" #sym " %0 " #val : : "i" (val))
-
-#define BLANK() asm volatile("\n->" : : )
-
-#define OFFSET(sym, str, mem) \
-	DEFINE(sym, offsetof(struct str, mem));
-
 /* workaround for a warning with -Wmissing-prototypes */
 void foo(void);
diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
index 494e1e096ee..f126c05d617 100644
--- a/arch/x86/kernel/asm-offsets_64.c
+++ b/arch/x86/kernel/asm-offsets_64.c
@@ -10,6 +10,7 @@
 #include <linux/errno.h>
 #include <linux/hardirq.h>
 #include <linux/suspend.h>
+#include <linux/kbuild.h>
 #include <asm/pda.h>
 #include <asm/processor.h>
 #include <asm/segment.h>
@@ -17,14 +18,6 @@
 #include <asm/ia32.h>
 #include <asm/bootparam.h>
 
-#define DEFINE(sym, val) \
-	asm volatile("\n->" #sym " %0 " #val : : "i" (val))
-
-#define BLANK() asm volatile("\n->" : : )
-
-#define OFFSET(sym, str, mem) \
-	DEFINE(sym, offsetof(struct str, mem))
-
 #define __NO_STUBS 1
 #undef __SYSCALL
 #undef _ASM_X86_64_UNISTD_H_
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index 8db8f73503b..b0c8208df9f 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -601,6 +601,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	    policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
 		policy->cpus = perf->shared_cpu_map;
 	}
+	policy->related_cpus = perf->shared_cpu_map;
 
 #ifdef CONFIG_SMP
 	dmi_check_system(sw_any_bug_dmi_table);
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index 353efe4f501..5d241ce94a4 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -90,7 +90,7 @@ u8 mtrr_type_lookup(u64 start, u64 end)
 	 * Look of multiple ranges matching this address and pick type
 	 * as per MTRR precedence
 	 */
-	if (!mtrr_state.enabled & 2) {
+	if (!(mtrr_state.enabled & 2)) {
 		return mtrr_state.def_type;
 	}
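The mtrr/generic.c hunk fixes a classic C precedence bug: `!` binds tighter than `&`, so the old test computed `(!enabled) & 2`, which is 0 for every possible value of `enabled`, and the early return never fired. A tiny demonstration:

```c
#include <stdio.h>

int main(void)
{
	unsigned enabled = 3;	/* MTRRs enabled, bit 1 set */

	/* (!enabled) is 0 or 1, and (0 or 1) & 2 is always 0 */
	printf("!enabled & 2   -> %d\n", !enabled & 2);		/* 0 */
	printf("!(enabled & 2) -> %d\n", !(enabled & 2));	/* 0: bit 1 set */

	enabled = 0;	/* MTRRs disabled */
	printf("disabled: !enabled & 2   -> %d\n", !enabled & 2);	/* still 0: bug */
	printf("disabled: !(enabled & 2) -> %d\n", !(enabled & 2));	/* 1: intended */
	return 0;
}
```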
diff --git a/arch/x86/kernel/cpu/mtrr/if.c b/arch/x86/kernel/cpu/mtrr/if.c
index 1960f1985e5..84c480bb371 100644
--- a/arch/x86/kernel/cpu/mtrr/if.c
+++ b/arch/x86/kernel/cpu/mtrr/if.c
@@ -424,7 +424,7 @@ static int __init mtrr_if_init(void)
 		return -ENODEV;
 
 	proc_root_mtrr =
-	    proc_create("mtrr", S_IWUSR | S_IRUGO, &proc_root, &mtrr_fops);
+	    proc_create("mtrr", S_IWUSR | S_IRUGO, NULL, &mtrr_fops);
 
 	if (proc_root_mtrr)
 		proc_root_mtrr->owner = THIS_MODULE;
diff --git a/arch/x86/kernel/e820_64.c b/arch/x86/kernel/e820_64.c
index 645ee5e32a2..124480c0008 100644
--- a/arch/x86/kernel/e820_64.c
+++ b/arch/x86/kernel/e820_64.c
@@ -100,7 +100,7 @@ void __init free_early(unsigned long start, unsigned long end)
 	for (j = i + 1; j < MAX_EARLY_RES && early_res[j].end; j++)
 		;
 
-	memcpy(&early_res[i], &early_res[i + 1],
+	memmove(&early_res[i], &early_res[i + 1],
 	       (j - 1 - i) * sizeof(struct early_res));
 
 	early_res[j - 1].end = 0;
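free_early() compacts the early_res[] array in place, so source and destination overlap by all but one element; memcpy has undefined behavior for overlapping buffers, while memmove is specified to handle them. A runnable illustration of the same shift-down:

```c
#include <stdio.h>
#include <string.h>

struct res { unsigned long start, end; };

int main(void)
{
	struct res table[4] = { {1, 2}, {3, 4}, {5, 6}, {7, 8} };
	int i = 0, j = 4;	/* delete entry 0, shift the rest down */

	/* &table[0] and &table[1] overlap by three elements; memmove is
	 * defined for this, memcpy is not (it may copy in any order). */
	memmove(&table[i], &table[i + 1], (j - 1 - i) * sizeof(struct res));
	table[j - 1].end = 0;

	for (int k = 0; k < 4; k++)
		printf("[%d] %lu-%lu\n", k, table[k].start, table[k].end);
	return 0;
}
```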
diff --git a/arch/x86/kernel/genapic_64.c b/arch/x86/kernel/genapic_64.c
index 021624c8358..cbaaf69bedb 100644
--- a/arch/x86/kernel/genapic_64.c
+++ b/arch/x86/kernel/genapic_64.c
@@ -83,7 +83,7 @@ unsigned int read_apic_id(void)
 {
 	unsigned int id;
 
-	WARN_ON(preemptible());
+	WARN_ON(preemptible() && num_online_cpus() > 1);
 	id = apic_read(APIC_ID);
 	if (uv_system_type >= UV_X2APIC)
 		id |= __get_cpu_var(x2apic_extra_bits);
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index 90f038af3ad..b2cc73768a9 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -656,15 +656,16 @@ int_msg:
 	.asciz "Unknown interrupt or fault at EIP %p %p %p\n"
 
 fault_msg:
-	.asciz \
-/* fault info: */	"BUG: Int %d: CR2 %p\n" \
-/* pusha regs: */	" EDI %p ESI %p EBP %p ESP %p\n" \
-			" EBX %p EDX %p ECX %p EAX %p\n" \
-/* fault frame: */	" err %p EIP %p CS %p flg %p\n" \
-			\
-			"Stack: %p %p %p %p %p %p %p %p\n" \
-			" %p %p %p %p %p %p %p %p\n" \
-			" %p %p %p %p %p %p %p %p\n"
+/* fault info: */
+	.ascii "BUG: Int %d: CR2 %p\n"
+/* pusha regs: */
+	.ascii " EDI %p ESI %p EBP %p ESP %p\n"
+	.ascii " EBX %p EDX %p ECX %p EAX %p\n"
+/* fault frame: */
+	.ascii " err %p EIP %p CS %p flg %p\n"
+	.ascii "Stack: %p %p %p %p %p %p %p %p\n"
+	.ascii " %p %p %p %p %p %p %p %p\n"
+	.asciz " %p %p %p %p %p %p %p %p\n"
 
 #include "../../x86/xen/xen-head.S"
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 9007f9ea64e..9b5cfcdfc42 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -137,9 +137,10 @@ static void hpet_reserve_platform_timers(unsigned long id)
 	hd.hd_irq[0] = HPET_LEGACY_8254;
 	hd.hd_irq[1] = HPET_LEGACY_RTC;
 
-	for (i = 2; i < nrtimers; timer++, i++)
-		hd.hd_irq[i] = (timer->hpet_config & Tn_INT_ROUTE_CNF_MASK) >>
+	for (i = 2; i < nrtimers; timer++, i++) {
+		hd.hd_irq[i] = (readl(&timer->hpet_config) & Tn_INT_ROUTE_CNF_MASK) >>
 			Tn_INT_ROUTE_CNF_SHIFT;
+	}
 
 	hpet_alloc(&hd);
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index 00bda7bcda6..147352df28b 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -190,8 +190,6 @@ void irq_ctx_exit(int cpu)
 	hardirq_ctx[cpu] = NULL;
 }
 
-extern asmlinkage void __do_softirq(void);
-
 asmlinkage void do_softirq(void)
 {
 	unsigned long flags;
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index ddee04043ae..4bc1be5d547 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -133,6 +133,7 @@ static int kvm_register_clock(void)
 	return native_write_msr_safe(MSR_KVM_SYSTEM_TIME, low, high);
 }
 
+#ifdef CONFIG_X86_LOCAL_APIC
 static void kvm_setup_secondary_clock(void)
 {
 	/*
@@ -143,6 +144,7 @@ static void kvm_setup_secondary_clock(void)
 	/* ok, done with our trickery, call native */
 	setup_secondary_APIC_clock();
 }
+#endif
 
 /*
  * After the clock is registered, the host will keep writing to the
@@ -177,7 +179,9 @@ void __init kvmclock_init(void)
 	pv_time_ops.get_wallclock = kvm_get_wallclock;
 	pv_time_ops.set_wallclock = kvm_set_wallclock;
 	pv_time_ops.sched_clock = kvm_clock_read;
+#ifdef CONFIG_X86_LOCAL_APIC
 	pv_apic_ops.setup_secondary_clock = kvm_setup_secondary_clock;
+#endif
 	machine_ops.shutdown = kvm_shutdown;
 #ifdef CONFIG_KEXEC
 	machine_ops.crash_shutdown = kvm_crash_shutdown;
diff --git a/arch/x86/kernel/mmconf-fam10h_64.c b/arch/x86/kernel/mmconf-fam10h_64.c
new file mode 100644
index 00000000000..edc5fbfe85c
--- /dev/null
+++ b/arch/x86/kernel/mmconf-fam10h_64.c
@@ -0,0 +1,243 @@
+/*
+ * AMD Family 10h mmconfig enablement
+ */
+
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/pci.h>
+#include <linux/dmi.h>
+#include <asm/pci-direct.h>
+#include <linux/sort.h>
+#include <asm/io.h>
+#include <asm/msr.h>
+#include <asm/acpi.h>
+
+#include "../pci/pci.h"
+
+struct pci_hostbridge_probe {
+	u32 bus;
+	u32 slot;
+	u32 vendor;
+	u32 device;
+};
+
+static u64 __cpuinitdata fam10h_pci_mmconf_base;
+static int __cpuinitdata fam10h_pci_mmconf_base_status;
+
+static struct pci_hostbridge_probe pci_probes[] __cpuinitdata = {
+	{ 0, 0x18, PCI_VENDOR_ID_AMD, 0x1200 },
+	{ 0xff, 0, PCI_VENDOR_ID_AMD, 0x1200 },
+};
+
+struct range {
+	u64 start;
+	u64 end;
+};
+
+static int __cpuinit cmp_range(const void *x1, const void *x2)
+{
+	const struct range *r1 = x1;
+	const struct range *r2 = x2;
+	int start1, start2;
+
+	start1 = r1->start >> 32;
+	start2 = r2->start >> 32;
+
+	return start1 - start2;
+}
+
+/*[47:0] */
+/* need to avoid (0xfd<<32) and (0xfe<<32), ht used space */
+#define FAM10H_PCI_MMCONF_BASE (0xfcULL<<32)
+#define BASE_VALID(b) ((b != (0xfdULL << 32)) && (b != (0xfeULL << 32)))
+static void __cpuinit get_fam10h_pci_mmconf_base(void)
+{
+	int i;
+	unsigned bus;
+	unsigned slot;
+	int found;
+
+	u64 val;
+	u32 address;
+	u64 tom2;
+	u64 base = FAM10H_PCI_MMCONF_BASE;
+
+	int hi_mmio_num;
+	struct range range[8];
+
+	/* only try to get setting from BSP */
+	/* -1 or 1 */
+	if (fam10h_pci_mmconf_base_status)
+		return;
+
+	if (!early_pci_allowed())
+		goto fail;
+
+	found = 0;
+	for (i = 0; i < ARRAY_SIZE(pci_probes); i++) {
+		u32 id;
+		u16 device;
+		u16 vendor;
+
+		bus = pci_probes[i].bus;
+		slot = pci_probes[i].slot;
+		id = read_pci_config(bus, slot, 0, PCI_VENDOR_ID);
+
+		vendor = id & 0xffff;
+		device = (id>>16) & 0xffff;
+		if (pci_probes[i].vendor == vendor &&
+		    pci_probes[i].device == device) {
+			found = 1;
+			break;
+		}
+	}
+
+	if (!found)
+		goto fail;
+
+	/* SYS_CFG */
+	address = MSR_K8_SYSCFG;
+	rdmsrl(address, val);
+
+	/* TOP_MEM2 is not enabled? */
+	if (!(val & (1<<21))) {
+		tom2 = 0;
+	} else {
+		/* TOP_MEM2 */
+		address = MSR_K8_TOP_MEM2;
+		rdmsrl(address, val);
+		tom2 = val & (0xffffULL<<32);
+	}
+
+	if (base <= tom2)
+		base = tom2 + (1ULL<<32);
+
+	/*
+	 * need to check if the range is in the high mmio range that is
+	 * above 4G
+	 */
+	hi_mmio_num = 0;
+	for (i = 0; i < 8; i++) {
+		u32 reg;
+		u64 start;
+		u64 end;
+		reg = read_pci_config(bus, slot, 1, 0x80 + (i << 3));
+		if (!(reg & 3))
+			continue;
+
+		start = (((u64)reg) << 8) & (0xffULL << 32); /* 39:16 on 31:8*/
+		reg = read_pci_config(bus, slot, 1, 0x84 + (i << 3));
+		end = (((u64)reg) << 8) & (0xffULL << 32); /* 39:16 on 31:8*/
+
+		if (!end)
+			continue;
+
+		range[hi_mmio_num].start = start;
+		range[hi_mmio_num].end = end;
+		hi_mmio_num++;
+	}
+
+	if (!hi_mmio_num)
+		goto out;
+
+	/* sort the range */
+	sort(range, hi_mmio_num, sizeof(struct range), cmp_range, NULL);
+
+	if (range[hi_mmio_num - 1].end < base)
+		goto out;
+	if (range[0].start > base)
+		goto out;
+
+	/* need to find one window */
+	base = range[0].start - (1ULL << 32);
+	if ((base > tom2) && BASE_VALID(base))
+		goto out;
+	base = range[hi_mmio_num - 1].end + (1ULL << 32);
+	if ((base > tom2) && BASE_VALID(base))
+		goto out;
+	/* need to find window between ranges */
+	if (hi_mmio_num > 1)
+	for (i = 0; i < hi_mmio_num - 1; i++) {
+		if (range[i + 1].start > (range[i].end + (1ULL << 32))) {
+			base = range[i].end + (1ULL << 32);
+			if ((base > tom2) && BASE_VALID(base))
+				goto out;
+		}
+	}
+
+fail:
+	fam10h_pci_mmconf_base_status = -1;
+	return;
+out:
+	fam10h_pci_mmconf_base = base;
+	fam10h_pci_mmconf_base_status = 1;
+}
+
+void __cpuinit fam10h_check_enable_mmcfg(void)
+{
+	u64 val;
+	u32 address;
+
+	if (!(pci_probe & PCI_CHECK_ENABLE_AMD_MMCONF))
+		return;
+
+	address = MSR_FAM10H_MMIO_CONF_BASE;
+	rdmsrl(address, val);
+
+	/* try to make sure that AP's setting is identical to BSP setting */
+	if (val & FAM10H_MMIO_CONF_ENABLE) {
+		unsigned busnbits;
+		busnbits = (val >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
+			FAM10H_MMIO_CONF_BUSRANGE_MASK;
+
+		/* only trust the one handle 256 buses, if acpi=off */
+		if (!acpi_pci_disabled || busnbits >= 8) {
+			u64 base;
+			base = val & (0xffffULL << 32);
+			if (fam10h_pci_mmconf_base_status <= 0) {
+				fam10h_pci_mmconf_base = base;
+				fam10h_pci_mmconf_base_status = 1;
+				return;
+			} else if (fam10h_pci_mmconf_base == base)
+				return;
+		}
+	}
+
+	/*
+	 * if it is not enabled, try to enable it and assume only one segment
+	 * with 256 buses
+	 */
+	get_fam10h_pci_mmconf_base();
+	if (fam10h_pci_mmconf_base_status <= 0)
+		return;
+
+	printk(KERN_INFO "Enable MMCONFIG on AMD Family 10h\n");
+	val &= ~((FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT) |
+	     (FAM10H_MMIO_CONF_BUSRANGE_MASK<<FAM10H_MMIO_CONF_BUSRANGE_SHIFT));
+	val |= fam10h_pci_mmconf_base | (8 << FAM10H_MMIO_CONF_BUSRANGE_SHIFT) |
+	       FAM10H_MMIO_CONF_ENABLE;
+	wrmsrl(address, val);
+}
+
+static int __devinit set_check_enable_amd_mmconf(const struct dmi_system_id *d)
+{
+	pci_probe |= PCI_CHECK_ENABLE_AMD_MMCONF;
+	return 0;
+}
+
+static struct dmi_system_id __devinitdata mmconf_dmi_table[] = {
+	{
+		.callback = set_check_enable_amd_mmconf,
+		.ident = "Sun Microsystems Machine",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Sun Microsystems"),
+		},
+	},
+	{}
+};
+
+void __init check_enable_amd_mmconf_dmi(void)
+{
+	dmi_check_system(mmconf_dmi_table);
+}
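get_fam10h_pci_mmconf_base() above picks an MMCONFIG base by sorting the high-MMIO ranges and probing three candidates: below the lowest range, above the highest, and in any inter-range gap of at least 4 GB, while staying above TOM2 and off the HT-reserved 0xfd/0xfe segments. A user-space sketch of that search with made-up ranges (window handling simplified):

```c
#include <stdio.h>
#include <stdlib.h>

typedef unsigned long long u64;
struct range { u64 start, end; };

#define WINDOW	(1ULL << 32)	/* one 4 GB-aligned slot, as above */
#define BASE_VALID(b) ((b) != (0xfdULL << 32) && (b) != (0xfeULL << 32))

static int cmp_range(const void *a, const void *b)
{
	const struct range *r1 = a, *r2 = b;
	return (int)(r1->start >> 32) - (int)(r2->start >> 32);
}

/* Return a free base above tom2, or 0 if none: below the lowest range,
 * above the highest, or in a gap of at least WINDOW between two ranges. */
static u64 find_window(struct range *r, int n, u64 tom2)
{
	u64 base;

	qsort(r, n, sizeof(*r), cmp_range);

	base = r[0].start - WINDOW;
	if (base > tom2 && BASE_VALID(base))
		return base;
	base = r[n - 1].end + WINDOW;
	if (base > tom2 && BASE_VALID(base))
		return base;
	for (int i = 0; i < n - 1; i++)
		if (r[i + 1].start > r[i].end + WINDOW) {
			base = r[i].end + WINDOW;
			if (base > tom2 && BASE_VALID(base))
				return base;
		}
	return 0;
}

int main(void)
{
	struct range mmio[] = {
		{ 0xf8ULL << 32, 0xf9ULL << 32 },
		{ 0xf0ULL << 32, 0xf1ULL << 32 },
	};
	u64 base = find_window(mmio, 2, 4ULL << 30 /* TOM2 = 4 GB */);
	printf("mmconf base: %#llx\n", base);
	return 0;
}
```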
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index 3e2c54dc8b2..404683b94e7 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -794,6 +794,11 @@ void __init find_smp_config(void)
                             ACPI-based MP Configuration
    -------------------------------------------------------------------------- */
 
+/*
+ * Keep this outside and initialized to 0, for !CONFIG_ACPI builds:
+ */
+int es7000_plat;
+
 #ifdef CONFIG_ACPI
 
 #ifdef CONFIG_X86_IO_APIC
@@ -909,8 +914,6 @@ void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi)
 	MP_intsrc_info(&intsrc);
 }
 
-int es7000_plat;
-
 void __init mp_config_acpi_legacy_irqs(void)
 {
 	struct mpc_config_intsrc intsrc;
diff --git a/arch/x86/kernel/olpc.c b/arch/x86/kernel/olpc.c
new file mode 100644
index 00000000000..3e667227480
--- /dev/null
+++ b/arch/x86/kernel/olpc.c
@@ -0,0 +1,260 @@
+/*
+ * Support for the OLPC DCON and OLPC EC access
+ *
+ * Copyright © 2006  Advanced Micro Devices, Inc.
+ * Copyright © 2007-2008  Andres Salomon <dilinger@debian.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/io.h>
+#include <linux/string.h>
+#include <asm/geode.h>
+#include <asm/olpc.h>
+
+#ifdef CONFIG_OPEN_FIRMWARE
+#include <asm/ofw.h>
+#endif
+
+struct olpc_platform_t olpc_platform_info;
+EXPORT_SYMBOL_GPL(olpc_platform_info);
+
+static DEFINE_SPINLOCK(ec_lock);
+
+/* what the timeout *should* be (in ms) */
+#define EC_BASE_TIMEOUT 20
+
+/* the timeout that bugs in the EC might force us to actually use */
+static int ec_timeout = EC_BASE_TIMEOUT;
+
+static int __init olpc_ec_timeout_set(char *str)
+{
+	if (get_option(&str, &ec_timeout) != 1) {
+		ec_timeout = EC_BASE_TIMEOUT;
+		printk(KERN_ERR "olpc-ec: invalid argument to "
+				"'olpc_ec_timeout=', ignoring!\n");
+	}
+	printk(KERN_DEBUG "olpc-ec: using %d ms delay for EC commands.\n",
+			ec_timeout);
+	return 1;
+}
+__setup("olpc_ec_timeout=", olpc_ec_timeout_set);
+
+/*
+ * These {i,o}bf_status functions return whether the buffers are full or not.
+ */
+
+static inline unsigned int ibf_status(unsigned int port)
+{
+	return !!(inb(port) & 0x02);
+}
+
+static inline unsigned int obf_status(unsigned int port)
+{
+	return inb(port) & 0x01;
+}
+
+#define wait_on_ibf(p, d) __wait_on_ibf(__LINE__, (p), (d))
+static int __wait_on_ibf(unsigned int line, unsigned int port, int desired)
+{
+	unsigned int timeo;
+	int state = ibf_status(port);
+
+	for (timeo = ec_timeout; state != desired && timeo; timeo--) {
+		mdelay(1);
+		state = ibf_status(port);
+	}
+
+	if ((state == desired) && (ec_timeout > EC_BASE_TIMEOUT) &&
+			timeo < (ec_timeout - EC_BASE_TIMEOUT)) {
+		printk(KERN_WARNING "olpc-ec: %d: waited %u ms for IBF!\n",
+				line, ec_timeout - timeo);
+	}
+
+	return !(state == desired);
+}
+
+#define wait_on_obf(p, d) __wait_on_obf(__LINE__, (p), (d))
+static int __wait_on_obf(unsigned int line, unsigned int port, int desired)
+{
+	unsigned int timeo;
+	int state = obf_status(port);
+
+	for (timeo = ec_timeout; state != desired && timeo; timeo--) {
+		mdelay(1);
+		state = obf_status(port);
+	}
+
+	if ((state == desired) && (ec_timeout > EC_BASE_TIMEOUT) &&
+			timeo < (ec_timeout - EC_BASE_TIMEOUT)) {
+		printk(KERN_WARNING "olpc-ec: %d: waited %u ms for OBF!\n",
+				line, ec_timeout - timeo);
+	}
+
+	return !(state == desired);
+}
+
+/*
+ * This allows the kernel to run Embedded Controller commands.  The EC is
+ * documented at <http://wiki.laptop.org/go/Embedded_controller>, and the
+ * available EC commands are here:
+ * <http://wiki.laptop.org/go/Ec_specification>.  Unfortunately, while
+ * OpenFirmware's source is available, the EC's is not.
+ */
+int olpc_ec_cmd(unsigned char cmd, unsigned char *inbuf, size_t inlen,
+		unsigned char *outbuf, size_t outlen)
+{
+	unsigned long flags;
+	int ret = -EIO;
+	int i;
+
+	spin_lock_irqsave(&ec_lock, flags);
+
+	/* Clear OBF */
+	for (i = 0; i < 10 && (obf_status(0x6c) == 1); i++)
+		inb(0x68);
+	if (i == 10) {
+		printk(KERN_ERR "olpc-ec: timeout while attempting to "
+				"clear OBF flag!\n");
+		goto err;
+	}
+
+	if (wait_on_ibf(0x6c, 0)) {
+		printk(KERN_ERR "olpc-ec: timeout waiting for EC to "
+				"quiesce!\n");
+		goto err;
+	}
+
+restart:
+	/*
+	 * Note that if we time out during any IBF checks, that's a failure;
+	 * we have to return.  There's no way for the kernel to clear that.
+	 *
+	 * If we time out during an OBF check, we can restart the command;
+	 * reissuing it will clear the OBF flag, and we should be alright.
+	 * The OBF flag will sometimes misbehave due to what we believe
+	 * is a hardware quirk..
+	 */
+	printk(KERN_DEBUG "olpc-ec: running cmd 0x%x\n", cmd);
+	outb(cmd, 0x6c);
+
+	if (wait_on_ibf(0x6c, 0)) {
+		printk(KERN_ERR "olpc-ec: timeout waiting for EC to read "
+				"command!\n");
+		goto err;
+	}
+
+	if (inbuf && inlen) {
+		/* write data to EC */
+		for (i = 0; i < inlen; i++) {
+			if (wait_on_ibf(0x6c, 0)) {
+				printk(KERN_ERR "olpc-ec: timeout waiting for"
+						" EC accept data!\n");
+				goto err;
+			}
+			printk(KERN_DEBUG "olpc-ec: sending cmd arg 0x%x\n",
+					inbuf[i]);
+			outb(inbuf[i], 0x68);
+		}
+	}
+	if (outbuf && outlen) {
+		/* read data from EC */
+		for (i = 0; i < outlen; i++) {
+			if (wait_on_obf(0x6c, 1)) {
+				printk(KERN_ERR "olpc-ec: timeout waiting for"
+						" EC to provide data!\n");
+				goto restart;
+			}
+			outbuf[i] = inb(0x68);
+			printk(KERN_DEBUG "olpc-ec: received 0x%x\n",
+					outbuf[i]);
+		}
+	}
+
+	ret = 0;
+err:
+	spin_unlock_irqrestore(&ec_lock, flags);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(olpc_ec_cmd);
+
+#ifdef CONFIG_OPEN_FIRMWARE
+static void __init platform_detect(void)
+{
+	size_t propsize;
+	u32 rev;
+
+	if (ofw("getprop", 4, 1, NULL, "board-revision-int", &rev, 4,
+			&propsize) || propsize != 4) {
+		printk(KERN_ERR "ofw: getprop call failed!\n");
+		rev = 0;
+	}
+	olpc_platform_info.boardrev = be32_to_cpu(rev);
+}
+#else
+static void __init platform_detect(void)
+{
+	/* stopgap until OFW support is added to the kernel */
+	olpc_platform_info.boardrev = be32_to_cpu(0xc2);
+}
+#endif
+
+static int __init olpc_init(void)
+{
+	unsigned char *romsig;
+
+	/* The ioremap check is dangerous; limit what we run it on */
+	if (!is_geode() || geode_has_vsa2())
+		return 0;
+
+	spin_lock_init(&ec_lock);
+
+	romsig = ioremap(0xffffffc0, 16);
+	if (!romsig)
+		return 0;
+
+	if (strncmp(romsig, "CL1   Q", 7))
+		goto unmap;
+	if (strncmp(romsig+6, romsig+13, 3)) {
+		printk(KERN_INFO "OLPC BIOS signature looks invalid.  "
+				"Assuming not OLPC\n");
+		goto unmap;
+	}
+
+	printk(KERN_INFO "OLPC board with OpenFirmware %.16s\n", romsig);
+	olpc_platform_info.flags |= OLPC_F_PRESENT;
+
+	/* get the platform revision */
+	platform_detect();
+
+	/* assume B1 and above models always have a DCON */
+	if (olpc_board_at_least(olpc_board(0xb1)))
+		olpc_platform_info.flags |= OLPC_F_DCON;
+
+	/* get the EC revision */
+	olpc_ec_cmd(EC_FIRMWARE_REV, NULL, 0,
+			(unsigned char *) &olpc_platform_info.ecver, 1);
+
+	/* check to see if the VSA exists */
+	if (geode_has_vsa2())
+		olpc_platform_info.flags |= OLPC_F_VSA;
+
+	printk(KERN_INFO "OLPC board revision %s%X (EC=%x)\n",
+			((olpc_platform_info.boardrev & 0xf) < 8) ? "pre" : "",
+			olpc_platform_info.boardrev >> 4,
+			olpc_platform_info.ecver);
+
+unmap:
+	iounmap(romsig);
+	return 0;
+}
+
+postcore_initcall(olpc_init);
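The EC helpers above are an instance of a bounded poll loop: spin on a status predicate in 1 ms steps and report failure once the budget is spent. A stand-alone sketch of the pattern with a clock-based delay instead of mdelay(); names are mine, not kernel API:

```c
#include <stdio.h>
#include <time.h>

/* Poll status() until it returns `desired` or `timeout_ms` expires.
 * Returns 0 on success, nonzero on timeout - the same contract as the
 * wait_on_ibf()/wait_on_obf() helpers above. */
static int wait_for_status(int (*status)(void), int desired, int timeout_ms)
{
	struct timespec tick = { 0, 1000000 };	/* 1 ms, like mdelay(1) */

	for (int t = timeout_ms; t > 0; t--) {
		if (status() == desired)
			return 0;
		nanosleep(&tick, NULL);
	}
	return status() != desired;
}

static int calls;
static int fake_ready(void) { return ++calls >= 5; }	/* ready on 5th poll */

int main(void)
{
	int err = wait_for_status(fake_ready, 1, 20);
	printf("%s after %d polls\n", err ? "timed out" : "ready", calls);
	return err;
}
```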
"pre" : "", + olpc_platform_info.boardrev >> 4, + olpc_platform_info.ecver); + +unmap: + iounmap(romsig); + return 0; +} + +postcore_initcall(olpc_init); diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c index 388b113a7d8..0c37f16b695 100644 --- a/arch/x86/kernel/pci-dma.c +++ b/arch/x86/kernel/pci-dma.c @@ -14,7 +14,7 @@ EXPORT_SYMBOL(forbid_dac); const struct dma_mapping_ops *dma_ops; EXPORT_SYMBOL(dma_ops); -int iommu_sac_force __read_mostly = 0; +static int iommu_sac_force __read_mostly; #ifdef CONFIG_IOMMU_DEBUG int panic_on_overflow __read_mostly = 1; diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index a4a838306b2..f6be7d5f82f 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c @@ -9,6 +9,7 @@ #include <asm/desc.h> #include <asm/hpet.h> #include <asm/pgtable.h> +#include <asm/proto.h> #include <asm/reboot_fixups.h> #include <asm/reboot.h> @@ -148,7 +149,6 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = { .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 745"), - DMI_MATCH(DMI_BOARD_NAME, "0WF810"), }, }, { /* Handle problems with rebooting on Dell Optiplex 745's DFF*/ diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index c0c68c18a78..cc6f5eb20b2 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -95,7 +95,7 @@ void __init setup_per_cpu_areas(void) /* Copy section for each CPU (we discard the original) */ size = PERCPU_ENOUGH_ROOM; - printk(KERN_INFO "PERCPU: Allocating %lu bytes of per cpu data\n", + printk(KERN_INFO "PERCPU: Allocating %zd bytes of per cpu data\n", size); for_each_possible_cpu(i) { diff --git a/arch/x86/kernel/setup_64.c b/arch/x86/kernel/setup_64.c index a94fb959a87..22c14e21c97 100644 --- a/arch/x86/kernel/setup_64.c +++ b/arch/x86/kernel/setup_64.c @@ -29,6 +29,7 @@ #include <linux/crash_dump.h> #include <linux/root_dev.h> #include <linux/pci.h> +#include <asm/pci-direct.h> #include <linux/efi.h> #include <linux/acpi.h> #include <linux/kallsyms.h> @@ -40,6 +41,7 @@ #include <linux/dmi.h> #include <linux/dma-mapping.h> #include <linux/ctype.h> +#include <linux/sort.h> #include <linux/uaccess.h> #include <linux/init_ohci1394_dma.h> #include <linux/kvm_para.h> @@ -288,6 +290,18 @@ static void __init parse_setup_data(void) } } +#ifdef CONFIG_PCI_MMCONFIG +extern void __cpuinit fam10h_check_enable_mmcfg(void); +extern void __init check_enable_amd_mmconf_dmi(void); +#else +void __cpuinit fam10h_check_enable_mmcfg(void) +{ +} +void __init check_enable_amd_mmconf_dmi(void) +{ +} +#endif + /* * setup_arch - architecture-specific boot-time initializations * @@ -515,6 +529,9 @@ void __init setup_arch(char **cmdline_p) conswitchp = &dummy_con; #endif #endif + + /* do this before identify_cpu for boot cpu */ + check_enable_amd_mmconf_dmi(); } static int __cpuinit get_model_name(struct cpuinfo_x86 *c) @@ -767,6 +784,9 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c) /* MFENCE stops RDTSC speculation */ set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC); + if (c->x86 == 0x10) + fam10h_check_enable_mmcfg(); + if (amd_apic_timer_broken()) disable_apic_timer = 1; diff --git a/arch/x86/kernel/signal_32.c b/arch/x86/kernel/signal_32.c index 8e05e7f7bd4..d9237363096 100644 --- a/arch/x86/kernel/signal_32.c +++ b/arch/x86/kernel/signal_32.c @@ -57,7 +57,7 @@ sys_sigsuspend(int history0, int history1, old_sigset_t mask) current->state = TASK_INTERRUPTIBLE; schedule(); - set_thread_flag(TIF_RESTORE_SIGMASK); + set_restore_sigmask(); return 
diff --git a/arch/x86/kernel/signal_32.c b/arch/x86/kernel/signal_32.c
index 8e05e7f7bd4..d9237363096 100644
--- a/arch/x86/kernel/signal_32.c
+++ b/arch/x86/kernel/signal_32.c
@@ -57,7 +57,7 @@ sys_sigsuspend(int history0, int history1, old_sigset_t mask)
 	current->state = TASK_INTERRUPTIBLE;
 	schedule();
-	set_thread_flag(TIF_RESTORE_SIGMASK);
+	set_restore_sigmask();
 	return -ERESTARTNOHAND;
 }
 
@@ -593,7 +593,7 @@ static void do_signal(struct pt_regs *regs)
 	if (!user_mode(regs))
 		return;
 
-	if (test_thread_flag(TIF_RESTORE_SIGMASK))
+	if (current_thread_info()->status & TS_RESTORE_SIGMASK)
 		oldset = &current->saved_sigmask;
 	else
 		oldset = &current->blocked;
@@ -612,13 +612,12 @@ static void do_signal(struct pt_regs *regs)
 		/* Whee! Actually deliver the signal.  */
 		if (handle_signal(signr, &info, &ka, oldset, regs) == 0) {
 			/*
-			 * a signal was successfully delivered; the saved
+			 * A signal was successfully delivered; the saved
 			 * sigmask will have been stored in the signal frame,
 			 * and will be restored by sigreturn, so we can simply
-			 * clear the TIF_RESTORE_SIGMASK flag
+			 * clear the TS_RESTORE_SIGMASK flag.
 			 */
-			if (test_thread_flag(TIF_RESTORE_SIGMASK))
-				clear_thread_flag(TIF_RESTORE_SIGMASK);
+			current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
 		}
 		return;
 	}
@@ -645,8 +644,8 @@ static void do_signal(struct pt_regs *regs)
 	 * If there's no signal to deliver, we just put the saved sigmask
 	 * back.
 	 */
-	if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
-		clear_thread_flag(TIF_RESTORE_SIGMASK);
+	if (current_thread_info()->status & TS_RESTORE_SIGMASK) {
+		current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
 		sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
 	}
 }
@@ -665,7 +664,7 @@ do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags)
 	}
 
 	/* deal with pending signal delivery */
-	if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK))
+	if (thread_info_flags & _TIF_SIGPENDING)
 		do_signal(regs);
 
 	if (thread_info_flags & _TIF_HRTICK_RESCHED)
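These signal_32.c hunks (and the matching signal_64.c ones below) move RESTORE_SIGMASK out of the atomic TIF_* flags word into thread_info->status; since only the owning task reads or writes the TS_* word, plain read-modify-write can replace the locked bit operations. A sketch of the two-word layout with an illustrative bit value, not the kernel's definitions:

```c
#include <stdio.h>

/* Illustrative thread_info: `flags` stands in for the atomic TIF_* word,
 * `status` for the TS_* word that only the owning task touches. */
struct thread_info { unsigned long flags, status; };

#define TS_RESTORE_SIGMASK 0x0008	/* illustrative bit value */

static struct thread_info ti;

static void set_restore_sigmask_sketch(void)   { ti.status |= TS_RESTORE_SIGMASK; }
static int  test_restore_sigmask_sketch(void)  { return !!(ti.status & TS_RESTORE_SIGMASK); }
static void clear_restore_sigmask_sketch(void) { ti.status &= ~TS_RESTORE_SIGMASK; }

int main(void)
{
	set_restore_sigmask_sketch();
	printf("set:     %d\n", test_restore_sigmask_sketch());
	clear_restore_sigmask_sketch();	/* plain RMW: no LOCK prefix needed */
	printf("cleared: %d\n", test_restore_sigmask_sketch());
	return 0;
}
```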
diff --git a/arch/x86/kernel/signal_64.c b/arch/x86/kernel/signal_64.c
index ccb2a4560c2..e53b267662e 100644
--- a/arch/x86/kernel/signal_64.c
+++ b/arch/x86/kernel/signal_64.c
@@ -427,7 +427,7 @@ static void do_signal(struct pt_regs *regs)
 	if (!user_mode(regs))
 		return;
 
-	if (test_thread_flag(TIF_RESTORE_SIGMASK))
+	if (current_thread_info()->status & TS_RESTORE_SIGMASK)
 		oldset = &current->saved_sigmask;
 	else
 		oldset = &current->blocked;
@@ -444,11 +444,13 @@ static void do_signal(struct pt_regs *regs)
 
 		/* Whee! Actually deliver the signal.  */
 		if (handle_signal(signr, &info, &ka, oldset, regs) == 0) {
-			/* a signal was successfully delivered; the saved
+			/*
+			 * A signal was successfully delivered; the saved
 			 * sigmask will have been stored in the signal frame,
 			 * and will be restored by sigreturn, so we can simply
-			 * clear the TIF_RESTORE_SIGMASK flag */
-			clear_thread_flag(TIF_RESTORE_SIGMASK);
+			 * clear the TS_RESTORE_SIGMASK flag.
+			 */
+			current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
 		}
 		return;
 	}
@@ -476,8 +478,8 @@ static void do_signal(struct pt_regs *regs)
 	 * If there's no signal to deliver, we just put the saved sigmask
 	 * back.
 	 */
-	if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
-		clear_thread_flag(TIF_RESTORE_SIGMASK);
+	if (current_thread_info()->status & TS_RESTORE_SIGMASK) {
+		current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
 		sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
 	}
 }
@@ -498,7 +500,7 @@ void do_notify_resume(struct pt_regs *regs, void *unused,
 #endif /* CONFIG_X86_MCE */
 
 	/* deal with pending signal delivery */
-	if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK))
+	if (thread_info_flags & _TIF_SIGPENDING)
 		do_signal(regs);
 
 	if (thread_info_flags & _TIF_HRTICK_RESCHED)
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 04c662ba18f..6b087ab6cd8 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -299,7 +299,7 @@ static void __cpuinit smp_callin(void)
 /*
  * Activate a secondary processor.
  */
-void __cpuinit start_secondary(void *unused)
+static void __cpuinit start_secondary(void *unused)
 {
 	/*
 	 * Don't put *anything* before cpu_init(), SMP booting is too
@@ -1149,14 +1149,10 @@ static int __init smp_sanity_check(unsigned max_cpus)
 			    "forcing use of dummy APIC emulation.\n");
 		smpboot_clear_io_apic();
 #ifdef CONFIG_X86_32
-		if (nmi_watchdog == NMI_LOCAL_APIC) {
-			printk(KERN_INFO "activating minimal APIC for"
-					 "NMI watchdog use.\n");
-			connect_bsp_APIC();
-			setup_local_APIC();
-			end_local_APIC_setup();
-		}
+		connect_bsp_APIC();
 #endif
+		setup_local_APIC();
+		end_local_APIC_setup();
 		return -1;
 	}
 
@@ -1310,7 +1306,7 @@ static void remove_siblinginfo(int cpu)
 	cpu_clear(cpu, cpu_sibling_setup_map);
 }
 
-int additional_cpus __initdata = -1;
+static int additional_cpus __initdata = -1;
 
 static __init int setup_additional_cpus(char *s)
 {
diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
index a86d26f036e..d2ab52cc1d6 100644
--- a/arch/x86/kernel/sys_i386_32.c
+++ b/arch/x86/kernel/sys_i386_32.c
@@ -22,23 +22,6 @@
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
 
-/*
- * sys_pipe() is the normal C calling standard for creating
- * a pipe. It's not the way Unix traditionally does this, though.
- */
-asmlinkage int sys_pipe(unsigned long __user * fildes)
-{
-	int fd[2];
-	int error;
-
-	error = do_pipe(fd);
-	if (!error) {
-		if (copy_to_user(fildes, fd, 2*sizeof(int)))
-			error = -EFAULT;
-	}
-	return error;
-}
-
 asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
			  unsigned long prot, unsigned long flags,
			  unsigned long fd, unsigned long pgoff)
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
index bd802a5e1aa..3b360ef3381 100644
--- a/arch/x86/kernel/sys_x86_64.c
+++ b/arch/x86/kernel/sys_x86_64.c
@@ -17,23 +17,6 @@
 #include <asm/uaccess.h>
 #include <asm/ia32.h>
 
-/*
- * sys_pipe() is the normal C calling standard for creating
- * a pipe. It's not the way Unix traditionally does this, though.
- */
-asmlinkage long sys_pipe(int __user *fildes)
-{
-	int fd[2];
-	int error;
-
-	error = do_pipe(fd);
-	if (!error) {
-		if (copy_to_user(fildes, fd, 2*sizeof(int)))
-			error = -EFAULT;
-	}
-	return error;
-}
-
 asmlinkage long sys_mmap(unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags,
	unsigned long fd, unsigned long off)
 {
diff --git a/arch/x86/kernel/time_32.c b/arch/x86/kernel/time_32.c
index 1a89e93f3f1..2ff21f39893 100644
--- a/arch/x86/kernel/time_32.c
+++ b/arch/x86/kernel/time_32.c
@@ -115,7 +115,6 @@ irqreturn_t timer_interrupt(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-extern void (*late_time_init)(void);
 /* Duplicate of time_init() below, with hpet_enable part added */
 void __init hpet_time_init(void)
 {
diff --git a/arch/x86/kernel/vsmp_64.c b/arch/x86/kernel/vsmp_64.c
index caf2a26f5cf..ba8c0b75ab0 100644
--- a/arch/x86/kernel/vsmp_64.c
+++ b/arch/x86/kernel/vsmp_64.c
@@ -133,7 +133,7 @@ int is_vsmp_box(void)
 	}
 }
 #else
-static int __init detect_vsmp_box(void)
+static void __init detect_vsmp_box(void)
 {
 }
 int is_vsmp_box(void)
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index 361e3161127..3324d90038e 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -35,7 +35,7 @@
 #include "i8254.h"
 
 #ifndef CONFIG_X86_64
-#define mod_64(x, y) ((x) - (y) * div64_64(x, y))
+#define mod_64(x, y) ((x) - (y) * div64_u64(x, y))
 #else
 #define mod_64(x, y) ((x) % (y))
 #endif
@@ -60,8 +60,8 @@ static u64 muldiv64(u64 a, u32 b, u32 c)
 	rl = (u64)u.l.low * (u64)b;
 	rh = (u64)u.l.high * (u64)b;
 	rh += (rl >> 32);
-	res.l.high = div64_64(rh, c);
-	res.l.low = div64_64(((mod_64(rh, c) << 32) + (rl & 0xffffffff)), c);
+	res.l.high = div64_u64(rh, c);
+	res.l.low = div64_u64(((mod_64(rh, c) << 32) + (rl & 0xffffffff)), c);
 	return res.ll;
 }
 
@@ -288,6 +288,8 @@ static void pit_load_count(struct kvm *kvm, int channel, u32 val)
 	 * mode 1 is one shot, mode 2 is period, otherwise del timer */
 	switch (ps->channels[0].mode) {
 	case 1:
+		/* FIXME: enhance mode 4 precision */
+	case 4:
 		create_pit_timer(&ps->pit_timer, val, 0);
 		break;
 	case 2:
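On 32-bit builds the kernel cannot use `%` on two u64 values (that would pull in a libgcc helper it does not link), so mod_64() above derives the remainder from the renamed explicit 64÷64 helper div64_u64(). A user-space check of that identity, plus the same half-word split muldiv64() uses to compute (a*b)/c without a 128-bit intermediate; the div64_u64 here is a plain-division stand-in:

```c
#include <stdio.h>
#include <stdint.h>

/* Stand-in for the kernel's div64_u64(); user space can just divide. */
static uint64_t div64_u64(uint64_t a, uint64_t b) { return a / b; }

#define mod_64(x, y) ((x) - (y) * div64_u64((x), (y)))

/* Same structure as the i8254 muldiv64() above: split the 64-bit operand
 * into 32-bit halves so the intermediate product stays within 64 bits. */
static uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c)
{
	uint64_t lo = (a & 0xffffffffULL) * b;
	uint64_t hi = (a >> 32) * b + (lo >> 32);
	uint64_t high = div64_u64(hi, c);
	uint64_t low = div64_u64((mod_64(hi, c) << 32) + (lo & 0xffffffff), c);
	return (high << 32) | (low & 0xffffffffULL);
}

int main(void)
{
	uint64_t x = 0x123456789abcdefULL, y = 1000000007ULL;
	printf("mod_64:   %llu (expect %llu)\n",
	       (unsigned long long)mod_64(x, y), (unsigned long long)(x % y));
	printf("muldiv64: %llu (expect %llu)\n",
	       (unsigned long long)muldiv64(1193182, 1000000, 65536),
	       1193182ULL * 1000000 / 65536);
	return 0;
}
```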
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 57ac4e4c556..36809d79788 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -25,13 +25,13 @@
 #include <linux/hrtimer.h>
 #include <linux/io.h>
 #include <linux/module.h>
+#include <linux/math64.h>
 #include <asm/processor.h>
 #include <asm/msr.h>
 #include <asm/page.h>
 #include <asm/current.h>
 #include <asm/apicdef.h>
 #include <asm/atomic.h>
-#include <asm/div64.h>
 #include "irq.h"
 
 #define PRId64 "d"
@@ -526,8 +526,8 @@ static u32 apic_get_tmcct(struct kvm_lapic *apic)
 	} else
 		passed = ktime_sub(now, apic->timer.last_update);
 
-	counter_passed = div64_64(ktime_to_ns(passed),
-				  (APIC_BUS_CYCLE_NS * apic->timer.divide_count));
+	counter_passed = div64_u64(ktime_to_ns(passed),
+				   (APIC_BUS_CYCLE_NS * apic->timer.divide_count));
 
 	if (counter_passed > tmcct) {
 		if (unlikely(!apic_lvtt_period(apic))) {
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 2ad6f548167..36c5406b181 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -79,36 +79,6 @@ static int dbg = 1;
 }
 #endif
 
-#define PT64_PT_BITS 9
-#define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)
-#define PT32_PT_BITS 10
-#define PT32_ENT_PER_PAGE (1 << PT32_PT_BITS)
-
-#define PT_WRITABLE_SHIFT 1
-
-#define PT_PRESENT_MASK (1ULL << 0)
-#define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT)
-#define PT_USER_MASK (1ULL << 2)
-#define PT_PWT_MASK (1ULL << 3)
-#define PT_PCD_MASK (1ULL << 4)
-#define PT_ACCESSED_MASK (1ULL << 5)
-#define PT_DIRTY_MASK (1ULL << 6)
-#define PT_PAGE_SIZE_MASK (1ULL << 7)
-#define PT_PAT_MASK (1ULL << 7)
-#define PT_GLOBAL_MASK (1ULL << 8)
-#define PT64_NX_SHIFT 63
-#define PT64_NX_MASK (1ULL << PT64_NX_SHIFT)
-
-#define PT_PAT_SHIFT 7
-#define PT_DIR_PAT_SHIFT 12
-#define PT_DIR_PAT_MASK (1ULL << PT_DIR_PAT_SHIFT)
-
-#define PT32_DIR_PSE36_SIZE 4
-#define PT32_DIR_PSE36_SHIFT 13
-#define PT32_DIR_PSE36_MASK \
-	(((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)
-
-
 #define PT_FIRST_AVAIL_BITS_SHIFT 9
 #define PT64_SECOND_AVAIL_BITS_SHIFT 52
 
@@ -154,10 +124,6 @@ static int dbg = 1;
 #define PFERR_USER_MASK (1U << 2)
 #define PFERR_FETCH_MASK (1U << 4)
 
-#define PT64_ROOT_LEVEL 4
-#define PT32_ROOT_LEVEL 2
-#define PT32E_ROOT_LEVEL 3
-
 #define PT_DIRECTORY_LEVEL 2
 #define PT_PAGE_TABLE_LEVEL 1
 
@@ -186,6 +152,12 @@ static struct kmem_cache *mmu_page_header_cache;
 
 static u64 __read_mostly shadow_trap_nonpresent_pte;
 static u64 __read_mostly shadow_notrap_nonpresent_pte;
+static u64 __read_mostly shadow_base_present_pte;
+static u64 __read_mostly shadow_nx_mask;
+static u64 __read_mostly shadow_x_mask;	/* mutual exclusive with nx_mask */
+static u64 __read_mostly shadow_user_mask;
+static u64 __read_mostly shadow_accessed_mask;
+static u64 __read_mostly shadow_dirty_mask;
 
 void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte)
 {
@@ -194,6 +166,23 @@ void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte)
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_set_nonpresent_ptes);
 
+void kvm_mmu_set_base_ptes(u64 base_pte)
+{
+	shadow_base_present_pte = base_pte;
+}
+EXPORT_SYMBOL_GPL(kvm_mmu_set_base_ptes);
+
+void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
+		u64 dirty_mask, u64 nx_mask, u64 x_mask)
+{
+	shadow_user_mask = user_mask;
+	shadow_accessed_mask = accessed_mask;
+	shadow_dirty_mask = dirty_mask;
+	shadow_nx_mask = nx_mask;
+	shadow_x_mask = x_mask;
+}
+EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
+
 static int is_write_protection(struct kvm_vcpu *vcpu)
 {
 	return vcpu->arch.cr0 & X86_CR0_WP;
@@ -232,7 +221,7 @@ static int is_writeble_pte(unsigned long pte)
 
 static int is_dirty_pte(unsigned long pte)
 {
-	return pte & PT_DIRTY_MASK;
+	return pte & shadow_dirty_mask;
 }
 
 static int is_rmap_pte(u64 pte)
@@ -387,7 +376,6 @@ static void account_shadowed(struct kvm *kvm, gfn_t gfn)
 
 	write_count = slot_largepage_idx(gfn, gfn_to_memslot(kvm, gfn));
 	*write_count += 1;
-	WARN_ON(*write_count > KVM_PAGES_PER_HPAGE);
 }
 
 static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn)
@@ -547,7 +535,7 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
 		return;
 	sp = page_header(__pa(spte));
 	pfn = spte_to_pfn(*spte);
-	if (*spte & PT_ACCESSED_MASK)
+	if (*spte & shadow_accessed_mask)
 		kvm_set_pfn_accessed(pfn);
 	if (is_writeble_pte(*spte))
 		kvm_release_pfn_dirty(pfn);
@@ -1073,17 +1061,17 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 	 * whether the guest actually used the pte (in order to detect
 	 * demand paging).
 	 */
-	spte = PT_PRESENT_MASK | PT_DIRTY_MASK;
+	spte = shadow_base_present_pte | shadow_dirty_mask;
 	if (!speculative)
 		pte_access |= PT_ACCESSED_MASK;
 	if (!dirty)
 		pte_access &= ~ACC_WRITE_MASK;
-	if (!(pte_access & ACC_EXEC_MASK))
-		spte |= PT64_NX_MASK;
-
-	spte |= PT_PRESENT_MASK;
+	if (pte_access & ACC_EXEC_MASK)
+		spte |= shadow_x_mask;
+	else
+		spte |= shadow_nx_mask;
 	if (pte_access & ACC_USER_MASK)
-		spte |= PT_USER_MASK;
+		spte |= shadow_user_mask;
 	if (largepage)
 		spte |= PT_PAGE_SIZE_MASK;
 
@@ -1188,8 +1176,9 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
 			return -ENOMEM;
 		}
 
-		table[index] = __pa(new_table->spt) | PT_PRESENT_MASK
-			| PT_WRITABLE_MASK | PT_USER_MASK;
+		table[index] = __pa(new_table->spt)
+			| PT_PRESENT_MASK | PT_WRITABLE_MASK
+			| shadow_user_mask | shadow_x_mask;
 		}
 		table_addr = table[index] & PT64_BASE_ADDR_MASK;
 	}
@@ -1244,7 +1233,6 @@ static void mmu_free_roots(struct kvm_vcpu *vcpu)
 	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
 		return;
 	spin_lock(&vcpu->kvm->mmu_lock);
-#ifdef CONFIG_X86_64
 	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
 		hpa_t root = vcpu->arch.mmu.root_hpa;
 
@@ -1256,7 +1244,6 @@ static void mmu_free_roots(struct kvm_vcpu *vcpu)
 		spin_unlock(&vcpu->kvm->mmu_lock);
 		return;
 	}
-#endif
 	for (i = 0; i < 4; ++i) {
 		hpa_t root = vcpu->arch.mmu.pae_root[i];
 
@@ -1282,7 +1269,6 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
 
 	root_gfn = vcpu->arch.cr3 >> PAGE_SHIFT;
 
-#ifdef CONFIG_X86_64
 	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
 		hpa_t root = vcpu->arch.mmu.root_hpa;
 
@@ -1297,7 +1283,6 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
 		vcpu->arch.mmu.root_hpa = root;
 		return;
 	}
-#endif
 	metaphysical = !is_paging(vcpu);
 	if (tdp_enabled)
 		metaphysical = 1;
@@ -1377,7 +1362,7 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
 	spin_lock(&vcpu->kvm->mmu_lock);
 	kvm_mmu_free_some_pages(vcpu);
 	r = __direct_map(vcpu, gpa, error_code & PFERR_WRITE_MASK,
-			 largepage, gfn, pfn, TDP_ROOT_LEVEL);
+			 largepage, gfn, pfn, kvm_x86_ops->get_tdp_level());
 	spin_unlock(&vcpu->kvm->mmu_lock);
 
 	return r;
@@ -1484,7 +1469,7 @@ static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
 	context->page_fault = tdp_page_fault;
 	context->free = nonpaging_free;
 	context->prefetch_page = nonpaging_prefetch_page;
-	context->shadow_root_level = TDP_ROOT_LEVEL;
+	context->shadow_root_level = kvm_x86_ops->get_tdp_level();
 	context->root_hpa = INVALID_PAGE;
 
 	if (!is_paging(vcpu)) {
@@ -1633,7 +1618,7 @@ static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
 {
 	u64 *spte = vcpu->arch.last_pte_updated;
 
-	return !!(spte && (*spte & PT_ACCESSED_MASK));
+	return !!(spte && (*spte & shadow_accessed_mask));
 }
 
 static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
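The mmu.c changes above stop hard-wiring x86 PTE bits when building shadow PTEs; the masks become run-time variables so the same shadow-MMU code can also emit EPT-format entries, where R/W/X sit in the low bits and this EPT generation has no user/accessed/dirty bits to set. Each backend installs its values through kvm_mmu_set_mask_ptes()/kvm_mmu_set_base_ptes(). A self-contained sketch of the idea, with illustrative bit layouts:

```c
#include <stdio.h>
#include <stdint.h>

/* Configurable shadow-PTE masks, mirroring the mmu.c change above. */
static uint64_t shadow_base_present_pte, shadow_user_mask;
static uint64_t shadow_x_mask, shadow_nx_mask, shadow_dirty_mask;

#define ACC_EXEC_MASK  1u
#define ACC_WRITE_MASK 2u
#define ACC_USER_MASK  4u

static void set_mask_ptes(uint64_t base, uint64_t user, uint64_t dirty,
			  uint64_t nx, uint64_t x)
{
	shadow_base_present_pte = base;
	shadow_user_mask = user;
	shadow_dirty_mask = dirty;
	shadow_nx_mask = nx;
	shadow_x_mask = x;
}

static uint64_t build_spte(unsigned pte_access)
{
	uint64_t spte = shadow_base_present_pte | shadow_dirty_mask;

	/* exactly one of the X/NX encodings applies, as in mmu_set_spte() */
	spte |= (pte_access & ACC_EXEC_MASK) ? shadow_x_mask : shadow_nx_mask;
	if (pte_access & ACC_USER_MASK)
		spte |= shadow_user_mask;
	return spte;
}

int main(void)
{
	/* classic x86 page tables: P=bit0, U/S=bit2, D=bit6, NX=bit63 */
	set_mask_ptes(1ULL << 0, 1ULL << 2, 1ULL << 6, 1ULL << 63, 0);
	printf("x86 spte: %#llx\n", (unsigned long long)build_spte(ACC_USER_MASK));

	/* EPT-style (illustrative): R=bit0, X=bit2, no U/S or A/D bits */
	set_mask_ptes(1ULL << 0, 0, 0, 0, 1ULL << 2);
	printf("ept spte: %#llx\n", (unsigned long long)build_spte(ACC_EXEC_MASK));
	return 0;
}
```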
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index e64e9f56a65..1730757bbc7 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -3,11 +3,38 @@
 
 #include <linux/kvm_host.h>
 
-#ifdef CONFIG_X86_64
-#define TDP_ROOT_LEVEL PT64_ROOT_LEVEL
-#else
-#define TDP_ROOT_LEVEL PT32E_ROOT_LEVEL
-#endif
+#define PT64_PT_BITS 9
+#define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)
+#define PT32_PT_BITS 10
+#define PT32_ENT_PER_PAGE (1 << PT32_PT_BITS)
+
+#define PT_WRITABLE_SHIFT 1
+
+#define PT_PRESENT_MASK (1ULL << 0)
+#define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT)
+#define PT_USER_MASK (1ULL << 2)
+#define PT_PWT_MASK (1ULL << 3)
+#define PT_PCD_MASK (1ULL << 4)
+#define PT_ACCESSED_MASK (1ULL << 5)
+#define PT_DIRTY_MASK (1ULL << 6)
+#define PT_PAGE_SIZE_MASK (1ULL << 7)
+#define PT_PAT_MASK (1ULL << 7)
+#define PT_GLOBAL_MASK (1ULL << 8)
+#define PT64_NX_SHIFT 63
+#define PT64_NX_MASK (1ULL << PT64_NX_SHIFT)
+
+#define PT_PAT_SHIFT 7
+#define PT_DIR_PAT_SHIFT 12
+#define PT_DIR_PAT_MASK (1ULL << PT_DIR_PAT_SHIFT)
+
+#define PT32_DIR_PSE36_SIZE 4
+#define PT32_DIR_PSE36_SHIFT 13
+#define PT32_DIR_PSE36_MASK \
+	(((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)
+
+#define PT64_ROOT_LEVEL 4
+#define PT32_ROOT_LEVEL 2
+#define PT32E_ROOT_LEVEL 3
 
 static inline void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
 {
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 89e0be2c10d..ab22615eee8 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1863,6 +1863,15 @@ static bool svm_cpu_has_accelerated_tpr(void)
 	return false;
 }
 
+static int get_npt_level(void)
+{
+#ifdef CONFIG_X86_64
+	return PT64_ROOT_LEVEL;
+#else
+	return PT32E_ROOT_LEVEL;
+#endif
+}
+
 static struct kvm_x86_ops svm_x86_ops = {
 	.cpu_has_kvm_support = has_svm,
 	.disabled_by_bios = is_disabled,
@@ -1920,6 +1929,7 @@ static struct kvm_x86_ops svm_x86_ops = {
 	.inject_pending_vectors = do_interrupt_requests,
 
 	.set_tss_addr = svm_set_tss_addr,
+	.get_tdp_level = get_npt_level,
 };
 
 static int __init svm_init(void)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 8e5d6645b90..bfe4db11989 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -42,6 +42,9 @@ module_param(enable_vpid, bool, 0);
 static int flexpriority_enabled = 1;
 module_param(flexpriority_enabled, bool, 0);
 
+static int enable_ept = 1;
+module_param(enable_ept, bool, 0);
+
 struct vmcs {
 	u32 revision_id;
 	u32 abort;
@@ -84,7 +87,7 @@ static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
 	return container_of(vcpu, struct vcpu_vmx, vcpu);
 }
 
-static int init_rmode_tss(struct kvm *kvm);
+static int init_rmode(struct kvm *kvm);
 
 static DEFINE_PER_CPU(struct vmcs *, vmxarea);
 static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
@@ -107,6 +110,11 @@ static struct vmcs_config {
 	u32 vmentry_ctrl;
 } vmcs_config;
 
+struct vmx_capability {
+	u32 ept;
+	u32 vpid;
+} vmx_capability;
+
 #define VMX_SEGMENT_FIELD(seg)					\
 	[VCPU_SREG_##seg] = {					\
		.selector = GUEST_##seg##_SELECTOR,		\
@@ -214,6 +222,32 @@ static inline bool cpu_has_vmx_virtualize_apic_accesses(void)
 		SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES);
 }
 
+static inline int cpu_has_vmx_invept_individual_addr(void)
+{
+	return (!!(vmx_capability.ept & VMX_EPT_EXTENT_INDIVIDUAL_BIT));
+}
+
+static inline int cpu_has_vmx_invept_context(void)
+{
+	return (!!(vmx_capability.ept & VMX_EPT_EXTENT_CONTEXT_BIT));
+}
+
+static inline int cpu_has_vmx_invept_global(void)
+{
+	return (!!(vmx_capability.ept & VMX_EPT_EXTENT_GLOBAL_BIT));
+}
+
+static inline int cpu_has_vmx_ept(void)
+{
+	return (vmcs_config.cpu_based_2nd_exec_ctrl &
+		SECONDARY_EXEC_ENABLE_EPT);
+}
+
+static inline int vm_need_ept(void)
+{
+	return (cpu_has_vmx_ept() && enable_ept);
+}
+
 static inline int vm_need_virtualize_apic_accesses(struct kvm *kvm)
 {
 	return ((cpu_has_vmx_virtualize_apic_accesses()) &&
@@ -250,6 +284,18 @@ static inline void __invvpid(int ext, u16 vpid, gva_t gva)
 		  : : "a"(&operand), "c"(ext) : "cc", "memory");
 }
 
+static inline void __invept(int ext, u64 eptp, gpa_t gpa)
+{
+	struct {
+		u64 eptp, gpa;
+	} operand = {eptp, gpa};
+
+	asm volatile (ASM_VMX_INVEPT
+			/* CF==1 or ZF==1 --> rc = -1 */
+			"; ja 1f ; ud2 ; 1:\n"
+			: : "a" (&operand), "c" (ext) : "cc", "memory");
+}
+
 static struct kvm_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
 {
 	int i;
@@ -301,6 +347,33 @@ static inline void vpid_sync_vcpu_all(struct vcpu_vmx *vmx)
 	__invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vmx->vpid, 0);
 }
 
+static inline void ept_sync_global(void)
+{
+	if (cpu_has_vmx_invept_global())
+		__invept(VMX_EPT_EXTENT_GLOBAL, 0, 0);
+}
+
+static inline void ept_sync_context(u64 eptp)
+{
+	if (vm_need_ept()) {
+		if (cpu_has_vmx_invept_context())
+			__invept(VMX_EPT_EXTENT_CONTEXT, eptp, 0);
+		else
+			ept_sync_global();
+	}
+}
+
+static inline void ept_sync_individual_addr(u64 eptp, gpa_t gpa)
+{
+	if (vm_need_ept()) {
+		if (cpu_has_vmx_invept_individual_addr())
+			__invept(VMX_EPT_EXTENT_INDIVIDUAL_ADDR,
+					eptp, gpa);
+		else
+			ept_sync_context(eptp);
+	}
+}
+
 static unsigned long vmcs_readl(unsigned long field)
 {
 	unsigned long value;
@@ -388,6 +461,8 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu)
 		eb |= 1u << 1;
 	if (vcpu->arch.rmode.active)
 		eb = ~0;
+	if (vm_need_ept())
+		eb &= ~(1u << PF_VECTOR); /* bypass_guest_pf = 0 */
 	vmcs_write32(EXCEPTION_BITMAP, eb);
 }
 
@@ -985,7 +1060,7 @@ static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt,
 static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
 {
 	u32 vmx_msr_low, vmx_msr_high;
-	u32 min, opt;
+	u32 min, opt, min2, opt2;
 	u32 _pin_based_exec_control = 0;
 	u32 _cpu_based_exec_control = 0;
 	u32 _cpu_based_2nd_exec_control = 0;
@@ -1003,6 +1078,8 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
 	      CPU_BASED_CR8_LOAD_EXITING |
 	      CPU_BASED_CR8_STORE_EXITING |
 #endif
+	      CPU_BASED_CR3_LOAD_EXITING |
+	      CPU_BASED_CR3_STORE_EXITING |
 	      CPU_BASED_USE_IO_BITMAPS |
 	      CPU_BASED_MOV_DR_EXITING |
 	      CPU_BASED_USE_TSC_OFFSETING;
@@ -1018,11 +1095,13 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
 					~CPU_BASED_CR8_STORE_EXITING;
 #endif
 	if (_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) {
-		min = 0;
-		opt = SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
+		min2 = 0;
+		opt2 = SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
			SECONDARY_EXEC_WBINVD_EXITING |
-			SECONDARY_EXEC_ENABLE_VPID;
-		if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS2,
+			SECONDARY_EXEC_ENABLE_VPID |
+			SECONDARY_EXEC_ENABLE_EPT;
+		if (adjust_vmx_controls(min2, opt2,
+					MSR_IA32_VMX_PROCBASED_CTLS2,
					&_cpu_based_2nd_exec_control) < 0)
			return -EIO;
	}
@@ -1031,6 +1110,16 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
 				SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
 		_cpu_based_exec_control &= ~CPU_BASED_TPR_SHADOW;
 #endif
+	if (_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_EPT) {
+		/* CR3 accesses don't need to cause VM Exits when EPT enabled */
+		min &= ~(CPU_BASED_CR3_LOAD_EXITING |
+			 CPU_BASED_CR3_STORE_EXITING);
+		if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS,
+					&_cpu_based_exec_control) < 0)
+			return -EIO;
+		rdmsr(MSR_IA32_VMX_EPT_VPID_CAP,
+		      vmx_capability.ept, vmx_capability.vpid);
+	}
 
 	min = 0;
 #ifdef CONFIG_X86_64
@@ -1256,7 +1345,7 @@ static void enter_rmode(struct kvm_vcpu *vcpu)
 	fix_rmode_seg(VCPU_SREG_FS, &vcpu->arch.rmode.fs);
 
 	kvm_mmu_reset_context(vcpu);
-	init_rmode_tss(vcpu->kvm);
+	init_rmode(vcpu->kvm);
 }
 
 #ifdef CONFIG_X86_64
@@ -1304,8 +1393,64 @@ static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
 	vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & ~KVM_GUEST_CR4_MASK;
 }
 
+static void ept_load_pdptrs(struct kvm_vcpu *vcpu)
+{
+	if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
+		if (!load_pdptrs(vcpu, vcpu->arch.cr3)) {
+			printk(KERN_ERR "EPT: Fail to load pdptrs!\n");
+			return;
+		}
+		vmcs_write64(GUEST_PDPTR0, vcpu->arch.pdptrs[0]);
+		vmcs_write64(GUEST_PDPTR1, vcpu->arch.pdptrs[1]);
vcpu->arch.pdptrs[1]); + vmcs_write64(GUEST_PDPTR2, vcpu->arch.pdptrs[2]); + vmcs_write64(GUEST_PDPTR3, vcpu->arch.pdptrs[3]); + } +} + +static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4); + +static void ept_update_paging_mode_cr0(unsigned long *hw_cr0, + unsigned long cr0, + struct kvm_vcpu *vcpu) +{ + if (!(cr0 & X86_CR0_PG)) { + /* From paging/starting to nonpaging */ + vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, + vmcs_config.cpu_based_exec_ctrl | + (CPU_BASED_CR3_LOAD_EXITING | + CPU_BASED_CR3_STORE_EXITING)); + vcpu->arch.cr0 = cr0; + vmx_set_cr4(vcpu, vcpu->arch.cr4); + *hw_cr0 |= X86_CR0_PE | X86_CR0_PG; + *hw_cr0 &= ~X86_CR0_WP; + } else if (!is_paging(vcpu)) { + /* From nonpaging to paging */ + vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, + vmcs_config.cpu_based_exec_ctrl & + ~(CPU_BASED_CR3_LOAD_EXITING | + CPU_BASED_CR3_STORE_EXITING)); + vcpu->arch.cr0 = cr0; + vmx_set_cr4(vcpu, vcpu->arch.cr4); + if (!(vcpu->arch.cr0 & X86_CR0_WP)) + *hw_cr0 &= ~X86_CR0_WP; + } +} + +static void ept_update_paging_mode_cr4(unsigned long *hw_cr4, + struct kvm_vcpu *vcpu) +{ + if (!is_paging(vcpu)) { + *hw_cr4 &= ~X86_CR4_PAE; + *hw_cr4 |= X86_CR4_PSE; + } else if (!(vcpu->arch.cr4 & X86_CR4_PAE)) + *hw_cr4 &= ~X86_CR4_PAE; +} + static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) { + unsigned long hw_cr0 = (cr0 & ~KVM_GUEST_CR0_MASK) | + KVM_VM_CR0_ALWAYS_ON; + vmx_fpu_deactivate(vcpu); if (vcpu->arch.rmode.active && (cr0 & X86_CR0_PE)) @@ -1323,29 +1468,61 @@ static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) } #endif + if (vm_need_ept()) + ept_update_paging_mode_cr0(&hw_cr0, cr0, vcpu); + vmcs_writel(CR0_READ_SHADOW, cr0); - vmcs_writel(GUEST_CR0, - (cr0 & ~KVM_GUEST_CR0_MASK) | KVM_VM_CR0_ALWAYS_ON); + vmcs_writel(GUEST_CR0, hw_cr0); vcpu->arch.cr0 = cr0; if (!(cr0 & X86_CR0_TS) || !(cr0 & X86_CR0_PE)) vmx_fpu_activate(vcpu); } +static u64 construct_eptp(unsigned long root_hpa) +{ + u64 eptp; + + /* TODO write the value reading from MSR */ + eptp = VMX_EPT_DEFAULT_MT | + VMX_EPT_DEFAULT_GAW << VMX_EPT_GAW_EPTP_SHIFT; + eptp |= (root_hpa & PAGE_MASK); + + return eptp; +} + static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) { + unsigned long guest_cr3; + u64 eptp; + + guest_cr3 = cr3; + if (vm_need_ept()) { + eptp = construct_eptp(cr3); + vmcs_write64(EPT_POINTER, eptp); + ept_sync_context(eptp); + ept_load_pdptrs(vcpu); + guest_cr3 = is_paging(vcpu) ? vcpu->arch.cr3 : + VMX_EPT_IDENTITY_PAGETABLE_ADDR; + } + vmx_flush_tlb(vcpu); - vmcs_writel(GUEST_CR3, cr3); + vmcs_writel(GUEST_CR3, guest_cr3); if (vcpu->arch.cr0 & X86_CR0_PE) vmx_fpu_deactivate(vcpu); } static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) { - vmcs_writel(CR4_READ_SHADOW, cr4); - vmcs_writel(GUEST_CR4, cr4 | (vcpu->arch.rmode.active ? - KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON)); + unsigned long hw_cr4 = cr4 | (vcpu->arch.rmode.active ? 
+ KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON); + vcpu->arch.cr4 = cr4; + if (vm_need_ept()) + ept_update_paging_mode_cr4(&hw_cr4, vcpu); + + vmcs_writel(CR4_READ_SHADOW, cr4); + vmcs_writel(GUEST_CR4, hw_cr4); } static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer) @@ -1530,6 +1707,41 @@ out: return ret; } +static int init_rmode_identity_map(struct kvm *kvm) +{ + int i, r, ret; + pfn_t identity_map_pfn; + u32 tmp; + + if (!vm_need_ept()) + return 1; + if (unlikely(!kvm->arch.ept_identity_pagetable)) { + printk(KERN_ERR "EPT: identity-mapping pagetable " + "haven't been allocated!\n"); + return 0; + } + if (likely(kvm->arch.ept_identity_pagetable_done)) + return 1; + ret = 0; + identity_map_pfn = VMX_EPT_IDENTITY_PAGETABLE_ADDR >> PAGE_SHIFT; + r = kvm_clear_guest_page(kvm, identity_map_pfn, 0, PAGE_SIZE); + if (r < 0) + goto out; + /* Set up identity-mapping pagetable for EPT in real mode */ + for (i = 0; i < PT32_ENT_PER_PAGE; i++) { + tmp = (i << 22) + (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | + _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_PSE); + r = kvm_write_guest_page(kvm, identity_map_pfn, + &tmp, i * sizeof(tmp), sizeof(tmp)); + if (r < 0) + goto out; + } + kvm->arch.ept_identity_pagetable_done = true; + ret = 1; +out: + return ret; +} + static void seg_setup(int seg) { struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; @@ -1564,6 +1776,31 @@ out: return r; } +static int alloc_identity_pagetable(struct kvm *kvm) +{ + struct kvm_userspace_memory_region kvm_userspace_mem; + int r = 0; + + down_write(&kvm->slots_lock); + if (kvm->arch.ept_identity_pagetable) + goto out; + kvm_userspace_mem.slot = IDENTITY_PAGETABLE_PRIVATE_MEMSLOT; + kvm_userspace_mem.flags = 0; + kvm_userspace_mem.guest_phys_addr = VMX_EPT_IDENTITY_PAGETABLE_ADDR; + kvm_userspace_mem.memory_size = PAGE_SIZE; + r = __kvm_set_memory_region(kvm, &kvm_userspace_mem, 0); + if (r) + goto out; + + down_read(¤t->mm->mmap_sem); + kvm->arch.ept_identity_pagetable = gfn_to_page(kvm, + VMX_EPT_IDENTITY_PAGETABLE_ADDR >> PAGE_SHIFT); + up_read(¤t->mm->mmap_sem); +out: + up_write(&kvm->slots_lock); + return r; +} + static void allocate_vpid(struct vcpu_vmx *vmx) { int vpid; @@ -1638,6 +1875,9 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx) CPU_BASED_CR8_LOAD_EXITING; #endif } + if (!vm_need_ept()) + exec_control |= CPU_BASED_CR3_STORE_EXITING | + CPU_BASED_CR3_LOAD_EXITING; vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, exec_control); if (cpu_has_secondary_exec_ctrls()) { @@ -1647,6 +1887,8 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx) ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; if (vmx->vpid == 0) exec_control &= ~SECONDARY_EXEC_ENABLE_VPID; + if (!vm_need_ept()) + exec_control &= ~SECONDARY_EXEC_ENABLE_EPT; vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control); } @@ -1722,6 +1964,15 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx) return 0; } +static int init_rmode(struct kvm *kvm) +{ + if (!init_rmode_tss(kvm)) + return 0; + if (!init_rmode_identity_map(kvm)) + return 0; + return 1; +} + static int vmx_vcpu_reset(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); @@ -1729,7 +1980,7 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu) int ret; down_read(&vcpu->kvm->slots_lock); - if (!init_rmode_tss(vmx->vcpu.kvm)) { + if (!init_rmode(vmx->vcpu.kvm)) { ret = -ENOMEM; goto out; } @@ -1994,6 +2245,9 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) if (intr_info & INTR_INFO_DELIVER_CODE_MASK) error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE); if 
(is_page_fault(intr_info)) {
+		/* EPT doesn't cause page faults directly */
+		if (vm_need_ept())
+			BUG();
 		cr2 = vmcs_readl(EXIT_QUALIFICATION);
 		KVMTRACE_3D(PAGE_FAULT, vcpu, error_code, (u32)cr2,
 			    (u32)((u64)cr2 >> 32), handler);
@@ -2323,6 +2577,64 @@ static int handle_task_switch(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	return kvm_task_switch(vcpu, tss_selector, reason);
 }
 
+static int handle_ept_violation(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+{
+	u64 exit_qualification;
+	enum emulation_result er;
+	gpa_t gpa;
+	unsigned long hva;
+	int gla_validity;
+	int r;
+
+	exit_qualification = vmcs_read64(EXIT_QUALIFICATION);
+
+	if (exit_qualification & (1 << 6)) {
+		printk(KERN_ERR "EPT: GPA exceeds GAW!\n");
+		return -ENOTSUPP;
+	}
+
+	gla_validity = (exit_qualification >> 7) & 0x3;
+	if (gla_validity != 0x3 && gla_validity != 0x1 && gla_validity != 0) {
+		printk(KERN_ERR "EPT: Handling EPT violation failed!\n");
+		printk(KERN_ERR "EPT: GPA: 0x%lx, GVA: 0x%lx\n",
+			(long unsigned int)vmcs_read64(GUEST_PHYSICAL_ADDRESS),
+			(long unsigned int)vmcs_read64(GUEST_LINEAR_ADDRESS));
+		printk(KERN_ERR "EPT: Exit qualification is 0x%lx\n",
+			(long unsigned int)exit_qualification);
+		kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
+		kvm_run->hw.hardware_exit_reason = 0;
+		return -ENOTSUPP;
+	}
+
+	gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
+	hva = gfn_to_hva(vcpu->kvm, gpa >> PAGE_SHIFT);
+	if (!kvm_is_error_hva(hva)) {
+		r = kvm_mmu_page_fault(vcpu, gpa & PAGE_MASK, 0);
+		if (r < 0) {
+			printk(KERN_ERR "EPT: Not enough memory!\n");
+			return -ENOMEM;
+		}
+		return 1;
+	} else {
+		/* must be MMIO */
+		er = emulate_instruction(vcpu, kvm_run, 0, 0, 0);
+
+		if (er == EMULATE_FAIL) {
+			printk(KERN_ERR
+				"EPT: Failed to handle EPT violation vmexit! er = %d\n",
+				er);
+			printk(KERN_ERR "EPT: GPA: 0x%lx, GVA: 0x%lx\n",
+				(long unsigned int)vmcs_read64(GUEST_PHYSICAL_ADDRESS),
+				(long unsigned int)vmcs_read64(GUEST_LINEAR_ADDRESS));
+			printk(KERN_ERR "EPT: Exit qualification is 0x%lx\n",
+				(long unsigned int)exit_qualification);
+			return -ENOTSUPP;
+		} else if (er == EMULATE_DO_MMIO)
+			return 0;
+	}
+	return 1;
+}
+
 /*
  * The exit handlers return 1 if the exit was handled fully and guest execution
  * may resume.  Otherwise they set the kvm_run parameter to indicate what needs
@@ -2346,6 +2658,7 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu,
 	[EXIT_REASON_APIC_ACCESS]             = handle_apic_access,
 	[EXIT_REASON_WBINVD]                  = handle_wbinvd,
 	[EXIT_REASON_TASK_SWITCH]             = handle_task_switch,
+	[EXIT_REASON_EPT_VIOLATION]           = handle_ept_violation,
 };
 
 static const int kvm_vmx_max_exit_handlers =
@@ -2364,6 +2677,13 @@ static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	KVMTRACE_3D(VMEXIT, vcpu, exit_reason, (u32)vmcs_readl(GUEST_RIP),
 		    (u32)((u64)vmcs_readl(GUEST_RIP) >> 32), entryexit);
 
+	/* CR3 accesses don't cause a VM exit in paging mode, so we need
+	 * to sync with the guest's real CR3.
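+	 * (With EPT enabled the CR3 load/store exiting controls are cleared,
+	 * so the guest reloads CR3 without any exit; vcpu->arch.cr3 would go
+	 * stale if we did not refresh it from GUEST_CR3 here.)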
*/ + if (vm_need_ept() && is_paging(vcpu)) { + vcpu->arch.cr3 = vmcs_readl(GUEST_CR3); + ept_load_pdptrs(vcpu); + } + if (unlikely(vmx->fail)) { kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY; kvm_run->fail_entry.hardware_entry_failure_reason @@ -2372,7 +2692,8 @@ static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) } if ((vectoring_info & VECTORING_INFO_VALID_MASK) && - exit_reason != EXIT_REASON_EXCEPTION_NMI) + (exit_reason != EXIT_REASON_EXCEPTION_NMI && + exit_reason != EXIT_REASON_EPT_VIOLATION)) printk(KERN_WARNING "%s: unexpected, valid vectoring info and " "exit reason is 0x%x\n", __func__, exit_reason); if (exit_reason < kvm_vmx_max_exit_handlers @@ -2674,6 +2995,15 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id) return ERR_PTR(-ENOMEM); allocate_vpid(vmx); + if (id == 0 && vm_need_ept()) { + kvm_mmu_set_base_ptes(VMX_EPT_READABLE_MASK | + VMX_EPT_WRITABLE_MASK | + VMX_EPT_DEFAULT_MT << VMX_EPT_MT_EPTE_SHIFT); + kvm_mmu_set_mask_ptes(0ull, VMX_EPT_FAKE_ACCESSED_MASK, + VMX_EPT_FAKE_DIRTY_MASK, 0ull, + VMX_EPT_EXECUTABLE_MASK); + kvm_enable_tdp(); + } err = kvm_vcpu_init(&vmx->vcpu, kvm, id); if (err) @@ -2706,6 +3036,10 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id) if (alloc_apic_access_page(kvm) != 0) goto free_vmcs; + if (vm_need_ept()) + if (alloc_identity_pagetable(kvm) != 0) + goto free_vmcs; + return &vmx->vcpu; free_vmcs: @@ -2735,6 +3069,11 @@ static void __init vmx_check_processor_compat(void *rtn) } } +static int get_ept_level(void) +{ + return VMX_EPT_DEFAULT_GAW + 1; +} + static struct kvm_x86_ops vmx_x86_ops = { .cpu_has_kvm_support = cpu_has_kvm_support, .disabled_by_bios = vmx_disabled_by_bios, @@ -2791,6 +3130,7 @@ static struct kvm_x86_ops vmx_x86_ops = { .inject_pending_vectors = do_interrupt_requests, .set_tss_addr = vmx_set_tss_addr, + .get_tdp_level = get_ept_level, }; static int __init vmx_init(void) @@ -2843,9 +3183,14 @@ static int __init vmx_init(void) vmx_disable_intercept_for_msr(vmx_msr_bitmap, MSR_IA32_SYSENTER_ESP); vmx_disable_intercept_for_msr(vmx_msr_bitmap, MSR_IA32_SYSENTER_EIP); + if (cpu_has_vmx_ept()) + bypass_guest_pf = 0; + if (bypass_guest_pf) kvm_mmu_set_nonpresent_ptes(~0xffeull, 0ull); + ept_sync_global(); + return 0; out2: diff --git a/arch/x86/kvm/vmx.h b/arch/x86/kvm/vmx.h index 5dff4606b98..79d94c610df 100644 --- a/arch/x86/kvm/vmx.h +++ b/arch/x86/kvm/vmx.h @@ -35,6 +35,8 @@ #define CPU_BASED_MWAIT_EXITING 0x00000400 #define CPU_BASED_RDPMC_EXITING 0x00000800 #define CPU_BASED_RDTSC_EXITING 0x00001000 +#define CPU_BASED_CR3_LOAD_EXITING 0x00008000 +#define CPU_BASED_CR3_STORE_EXITING 0x00010000 #define CPU_BASED_CR8_LOAD_EXITING 0x00080000 #define CPU_BASED_CR8_STORE_EXITING 0x00100000 #define CPU_BASED_TPR_SHADOW 0x00200000 @@ -49,6 +51,7 @@ * Definitions of Secondary Processor-Based VM-Execution Controls. 
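 * Each of these is a single bit in the 32-bit control word; the new
 * SECONDARY_EXEC_ENABLE_EPT below is bit 1, i.e. 0x00000002.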
*/ #define SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES 0x00000001 +#define SECONDARY_EXEC_ENABLE_EPT 0x00000002 #define SECONDARY_EXEC_ENABLE_VPID 0x00000020 #define SECONDARY_EXEC_WBINVD_EXITING 0x00000040 @@ -100,10 +103,22 @@ enum vmcs_field { VIRTUAL_APIC_PAGE_ADDR_HIGH = 0x00002013, APIC_ACCESS_ADDR = 0x00002014, APIC_ACCESS_ADDR_HIGH = 0x00002015, + EPT_POINTER = 0x0000201a, + EPT_POINTER_HIGH = 0x0000201b, + GUEST_PHYSICAL_ADDRESS = 0x00002400, + GUEST_PHYSICAL_ADDRESS_HIGH = 0x00002401, VMCS_LINK_POINTER = 0x00002800, VMCS_LINK_POINTER_HIGH = 0x00002801, GUEST_IA32_DEBUGCTL = 0x00002802, GUEST_IA32_DEBUGCTL_HIGH = 0x00002803, + GUEST_PDPTR0 = 0x0000280a, + GUEST_PDPTR0_HIGH = 0x0000280b, + GUEST_PDPTR1 = 0x0000280c, + GUEST_PDPTR1_HIGH = 0x0000280d, + GUEST_PDPTR2 = 0x0000280e, + GUEST_PDPTR2_HIGH = 0x0000280f, + GUEST_PDPTR3 = 0x00002810, + GUEST_PDPTR3_HIGH = 0x00002811, PIN_BASED_VM_EXEC_CONTROL = 0x00004000, CPU_BASED_VM_EXEC_CONTROL = 0x00004002, EXCEPTION_BITMAP = 0x00004004, @@ -226,6 +241,8 @@ enum vmcs_field { #define EXIT_REASON_MWAIT_INSTRUCTION 36 #define EXIT_REASON_TPR_BELOW_THRESHOLD 43 #define EXIT_REASON_APIC_ACCESS 44 +#define EXIT_REASON_EPT_VIOLATION 48 +#define EXIT_REASON_EPT_MISCONFIG 49 #define EXIT_REASON_WBINVD 54 /* @@ -316,15 +333,36 @@ enum vmcs_field { #define MSR_IA32_VMX_CR4_FIXED1 0x489 #define MSR_IA32_VMX_VMCS_ENUM 0x48a #define MSR_IA32_VMX_PROCBASED_CTLS2 0x48b +#define MSR_IA32_VMX_EPT_VPID_CAP 0x48c #define MSR_IA32_FEATURE_CONTROL 0x3a #define MSR_IA32_FEATURE_CONTROL_LOCKED 0x1 #define MSR_IA32_FEATURE_CONTROL_VMXON_ENABLED 0x4 #define APIC_ACCESS_PAGE_PRIVATE_MEMSLOT 9 +#define IDENTITY_PAGETABLE_PRIVATE_MEMSLOT 10 #define VMX_NR_VPIDS (1 << 16) #define VMX_VPID_EXTENT_SINGLE_CONTEXT 1 #define VMX_VPID_EXTENT_ALL_CONTEXT 2 +#define VMX_EPT_EXTENT_INDIVIDUAL_ADDR 0 +#define VMX_EPT_EXTENT_CONTEXT 1 +#define VMX_EPT_EXTENT_GLOBAL 2 +#define VMX_EPT_EXTENT_INDIVIDUAL_BIT (1ull << 24) +#define VMX_EPT_EXTENT_CONTEXT_BIT (1ull << 25) +#define VMX_EPT_EXTENT_GLOBAL_BIT (1ull << 26) +#define VMX_EPT_DEFAULT_GAW 3 +#define VMX_EPT_MAX_GAW 0x4 +#define VMX_EPT_MT_EPTE_SHIFT 3 +#define VMX_EPT_GAW_EPTP_SHIFT 3 +#define VMX_EPT_DEFAULT_MT 0x6ull +#define VMX_EPT_READABLE_MASK 0x1ull +#define VMX_EPT_WRITABLE_MASK 0x2ull +#define VMX_EPT_EXECUTABLE_MASK 0x4ull +#define VMX_EPT_FAKE_ACCESSED_MASK (1ull << 62) +#define VMX_EPT_FAKE_DIRTY_MASK (1ull << 63) + +#define VMX_EPT_IDENTITY_PAGETABLE_ADDR 0xfffbc000ul + #endif diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 0ce556372a4..21338bdb28f 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -2417,6 +2417,9 @@ int kvm_arch_init(void *opaque) kvm_x86_ops = ops; kvm_mmu_set_nonpresent_ptes(0ull, 0ull); + kvm_mmu_set_base_ptes(PT_PRESENT_MASK); + kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK, + PT_DIRTY_MASK, PT64_NX_MASK, 0); return 0; out: @@ -3019,6 +3022,8 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) kvm_x86_ops->decache_regs(vcpu); + vcpu->arch.exception.pending = false; + vcpu_put(vcpu); return 0; @@ -3481,7 +3486,7 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason) } if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) { - cseg_desc.type &= ~(1 << 8); //clear the B flag + cseg_desc.type &= ~(1 << 1); //clear the B flag save_guest_segment_descriptor(vcpu, tr_seg.selector, &cseg_desc); } @@ -3507,7 +3512,7 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason) } if (reason != 
TASK_SWITCH_IRET) { - nseg_desc.type |= (1 << 8); + nseg_desc.type |= (1 << 1); save_guest_segment_descriptor(vcpu, tss_selector, &nseg_desc); } @@ -3698,10 +3703,19 @@ void fx_init(struct kvm_vcpu *vcpu) { unsigned after_mxcsr_mask; + /* + * Touch the fpu the first time in non atomic context as if + * this is the first fpu instruction the exception handler + * will fire before the instruction returns and it'll have to + * allocate ram with GFP_KERNEL. + */ + if (!used_math()) + fx_save(&vcpu->arch.host_fx_image); + /* Initialize guest FPU by resetting ours and saving into guest's */ preempt_disable(); fx_save(&vcpu->arch.host_fx_image); - fpu_init(); + fx_finit(); fx_save(&vcpu->arch.guest_fx_image); fx_restore(&vcpu->arch.host_fx_image); preempt_enable(); @@ -3906,6 +3920,8 @@ void kvm_arch_destroy_vm(struct kvm *kvm) kvm_free_physmem(kvm); if (kvm->arch.apic_access_page) put_page(kvm->arch.apic_access_page); + if (kvm->arch.ept_identity_pagetable) + put_page(kvm->arch.ept_identity_pagetable); kfree(kvm); } diff --git a/arch/x86/kvm/x86_emulate.c b/arch/x86/kvm/x86_emulate.c index 2ca08386f99..f2a696d6a24 100644 --- a/arch/x86/kvm/x86_emulate.c +++ b/arch/x86/kvm/x86_emulate.c @@ -1761,6 +1761,7 @@ twobyte_insn: case 6: /* lmsw */ realmode_lmsw(ctxt->vcpu, (u16)c->src.val, &ctxt->eflags); + c->dst.type = OP_NONE; break; case 7: /* invlpg*/ emulate_invlpg(ctxt->vcpu, memop); diff --git a/arch/x86/mach-voyager/voyager_cat.c b/arch/x86/mach-voyager/voyager_cat.c index ecab9fff0fd..2ad598c104a 100644 --- a/arch/x86/mach-voyager/voyager_cat.c +++ b/arch/x86/mach-voyager/voyager_cat.c @@ -877,7 +877,7 @@ void __init voyager_cat_init(void) request_resource(&iomem_resource, res); } - qic_addr = (unsigned long)ioremap(qic_addr, 0x400); + qic_addr = (unsigned long)ioremap_cache(qic_addr, 0x400); for (j = 0; j < 4; j++) { __u8 cpu; diff --git a/arch/x86/mm/discontig_32.c b/arch/x86/mm/discontig_32.c index 18378850e25..914ccf98368 100644 --- a/arch/x86/mm/discontig_32.c +++ b/arch/x86/mm/discontig_32.c @@ -476,29 +476,3 @@ int memory_add_physaddr_to_nid(u64 addr) EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid); #endif - -#ifndef CONFIG_HAVE_ARCH_PARSE_SRAT -/* - * XXX FIXME: Make SLIT table parsing available to 32-bit NUMA - * - * These stub functions are needed to compile 32-bit NUMA when SRAT is - * not set. There are functions in srat_64.c for parsing this table - * and it may be possible to make them common functions. - */ -void acpi_numa_slit_init (struct acpi_table_slit *slit) -{ - printk(KERN_INFO "ACPI: No support for parsing SLIT table\n"); -} - -void acpi_numa_processor_affinity_init (struct acpi_srat_cpu_affinity *pa) -{ -} - -void acpi_numa_memory_affinity_init (struct acpi_srat_mem_affinity *ma) -{ -} - -void acpi_numa_arch_fixup(void) -{ -} -#endif /* CONFIG_HAVE_ARCH_PARSE_SRAT */ diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c index 9cf33d3ee5b..165c871ba9a 100644 --- a/arch/x86/mm/highmem_32.c +++ b/arch/x86/mm/highmem_32.c @@ -155,4 +155,3 @@ EXPORT_SYMBOL(kmap); EXPORT_SYMBOL(kunmap); EXPORT_SYMBOL(kmap_atomic); EXPORT_SYMBOL(kunmap_atomic); -EXPORT_SYMBOL(kmap_atomic_to_page); diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index 804de18abcc..71bb3159031 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c @@ -149,7 +149,8 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr, * Don't allow anybody to remap normal RAM that we're using.. 
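 * (Note that the loop below compares pages against last_addr & PAGE_MASK,
 * the page-aligned start of the final page, rather than the raw last_addr.)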
*/ for (pfn = phys_addr >> PAGE_SHIFT; - (pfn << PAGE_SHIFT) < last_addr; pfn++) { + (pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK); + pfn++) { int is_ram = page_is_ram(pfn); @@ -176,11 +177,11 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr, /* * Do not fallback to certain memory types with certain * requested type: - * - request is uncached, return cannot be write-back - * - request is uncached, return cannot be write-combine + * - request is uc-, return cannot be write-back + * - request is uc-, return cannot be write-combine * - request is write-combine, return cannot be write-back */ - if ((prot_val == _PAGE_CACHE_UC && + if ((prot_val == _PAGE_CACHE_UC_MINUS && (new_prot_val == _PAGE_CACHE_WB || new_prot_val == _PAGE_CACHE_WC)) || (prot_val == _PAGE_CACHE_WC && @@ -201,6 +202,9 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr, default: prot = PAGE_KERNEL_NOCACHE; break; + case _PAGE_CACHE_UC_MINUS: + prot = PAGE_KERNEL_UC_MINUS; + break; case _PAGE_CACHE_WC: prot = PAGE_KERNEL_WC; break; @@ -255,7 +259,16 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr, */ void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size) { - return __ioremap_caller(phys_addr, size, _PAGE_CACHE_UC, + /* + * Ideally, this should be: + * pat_wc_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS; + * + * Till we fix all X drivers to use ioremap_wc(), we will use + * UC MINUS. + */ + unsigned long val = _PAGE_CACHE_UC_MINUS; + + return __ioremap_caller(phys_addr, size, val, __builtin_return_address(0)); } EXPORT_SYMBOL(ioremap_nocache); diff --git a/arch/x86/mm/k8topology_64.c b/arch/x86/mm/k8topology_64.c index 86808e666f9..1f476e47784 100644 --- a/arch/x86/mm/k8topology_64.c +++ b/arch/x86/mm/k8topology_64.c @@ -13,12 +13,15 @@ #include <linux/nodemask.h> #include <asm/io.h> #include <linux/pci_ids.h> +#include <linux/acpi.h> #include <asm/types.h> #include <asm/mmzone.h> #include <asm/proto.h> #include <asm/e820.h> #include <asm/pci-direct.h> #include <asm/numa.h> +#include <asm/mpspec.h> +#include <asm/apic.h> static __init int find_northbridge(void) { @@ -44,6 +47,30 @@ static __init int find_northbridge(void) return -1; } +static __init void early_get_boot_cpu_id(void) +{ + /* + * need to get boot_cpu_id so can use that to create apicid_to_node + * in k8_scan_nodes() + */ + /* + * Find possible boot-time SMP configuration: + */ + early_find_smp_config(); +#ifdef CONFIG_ACPI + /* + * Read APIC information from ACPI tables. 
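+	 * This is done this early so that the BSP's APIC ID is already
+	 * known when k8_scan_nodes() fills in apicid_to_node.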
+	 */
+	early_acpi_boot_init();
+#endif
+	/*
+	 * get boot-time SMP configuration:
+	 */
+	if (smp_found_config)
+		early_get_smp_config();
+	early_init_lapic_mapping();
+}
+
 int __init k8_scan_nodes(unsigned long start, unsigned long end)
 {
 	unsigned long prevbase;
@@ -56,6 +83,7 @@ int __init k8_scan_nodes(unsigned long start, unsigned long end)
 	unsigned cores;
 	unsigned bits;
 	int j;
+	unsigned apicid_base;
 
 	if (!early_pci_allowed())
 		return -1;
@@ -174,11 +202,19 @@
 	/* use the coreid bits from early_identify_cpu */
 	bits = boot_cpu_data.x86_coreid_bits;
 	cores = (1<<bits);
+	apicid_base = 0;
+	/* need to get boot_cpu_id early for systems with apicid lifting */
+	early_get_boot_cpu_id();
+	if (boot_cpu_physical_apicid > 0) {
+		printk(KERN_INFO "BSP APIC ID: %02x\n",
+		       boot_cpu_physical_apicid);
+		apicid_base = boot_cpu_physical_apicid;
+	}
 	for (i = 0; i < 8; i++) {
 		if (nodes[i].start != nodes[i].end) {
 			nodeid = nodeids[i];
-			for (j = 0; j < cores; j++)
+			for (j = apicid_base; j < cores + apicid_base; j++)
 				apicid_to_node[(nodeid << bits) + j] = i;
 			setup_node_bootmem(i, nodes[i].start, nodes[i].end);
 		}
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index bd5e05c654d..60bcb5b6a37 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -777,14 +777,20 @@ static inline int change_page_attr_clear(unsigned long addr, int numpages,
 
 int _set_memory_uc(unsigned long addr, int numpages)
 {
+	/*
+	 * For now use UC MINUS; see comments in ioremap_nocache().
+	 */
 	return change_page_attr_set(addr, numpages,
-				    __pgprot(_PAGE_CACHE_UC));
+				    __pgprot(_PAGE_CACHE_UC_MINUS));
 }
 
 int set_memory_uc(unsigned long addr, int numpages)
 {
+	/*
+	 * For now use UC MINUS; see comments in ioremap_nocache().
+	 */
 	if (reserve_memtype(addr, addr + numpages * PAGE_SIZE,
-			    _PAGE_CACHE_UC, NULL))
+			    _PAGE_CACHE_UC_MINUS, NULL))
 		return -EINVAL;
 
 	return _set_memory_uc(addr, numpages);
diff --git a/arch/x86/pci/Makefile_32 b/arch/x86/pci/Makefile_32
index cdd6828b5ab..89ec35d00ef 100644
--- a/arch/x86/pci/Makefile_32
+++ b/arch/x86/pci/Makefile_32
@@ -3,12 +3,22 @@ obj-y				:= i386.o init.o
 obj-$(CONFIG_PCI_BIOS)		+= pcbios.o
 obj-$(CONFIG_PCI_MMCONFIG)	+= mmconfig_32.o direct.o mmconfig-shared.o
 obj-$(CONFIG_PCI_DIRECT)	+= direct.o
+obj-$(CONFIG_PCI_OLPC)		+= olpc.o
 
 pci-y				:= fixup.o
+
+# Do not change the ordering here. There is a nasty init function
+# ordering dependency which breaks when you move acpi.o below
+# legacy/irq.o
 pci-$(CONFIG_ACPI)		+= acpi.o
 pci-y				+= legacy.o irq.o
 
+# Careful: VISWS and NUMAQ overrule the pci-y above. The colons are
+# therefore correct. This needs a proper fix by disentangling the code.
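+# (The ':=' is what makes the two lines below *replace* the object list
+# built up above instead of appending to it.)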
pci-$(CONFIG_X86_VISWS) := visws.o fixup.o pci-$(CONFIG_X86_NUMAQ) := numa.o irq.o +# Necessary for NUMAQ as well +pci-$(CONFIG_NUMA) += mp_bus_to_node.o + obj-y += $(pci-y) common.o early.o diff --git a/arch/x86/pci/Makefile_64 b/arch/x86/pci/Makefile_64 index 7d8c467bf14..8fbd19832cf 100644 --- a/arch/x86/pci/Makefile_64 +++ b/arch/x86/pci/Makefile_64 @@ -13,5 +13,5 @@ obj-y += legacy.o irq.o common.o early.o # mmconfig has a 64bit special obj-$(CONFIG_PCI_MMCONFIG) += mmconfig_64.o direct.o mmconfig-shared.o -obj-$(CONFIG_NUMA) += k8-bus_64.o +obj-y += k8-bus_64.o diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c index 2664cb3fc96..1a9c0c6a1a1 100644 --- a/arch/x86/pci/acpi.c +++ b/arch/x86/pci/acpi.c @@ -191,7 +191,10 @@ struct pci_bus * __devinit pci_acpi_scan_root(struct acpi_device *device, int do { struct pci_bus *bus; struct pci_sysdata *sd; + int node; +#ifdef CONFIG_ACPI_NUMA int pxm; +#endif dmi_check_system(acpi_pciprobe_dmi_table); @@ -201,6 +204,17 @@ struct pci_bus * __devinit pci_acpi_scan_root(struct acpi_device *device, int do return NULL; } + node = -1; +#ifdef CONFIG_ACPI_NUMA + pxm = acpi_get_pxm(device->handle); + if (pxm >= 0) + node = pxm_to_node(pxm); + if (node != -1) + set_mp_bus_to_node(busnum, node); + else + node = get_mp_bus_to_node(busnum); +#endif + /* Allocate per-root-bus (not per bus) arch-specific data. * TODO: leak; this memory is never freed. * It's arguable whether it's worth the trouble to care. @@ -212,13 +226,7 @@ struct pci_bus * __devinit pci_acpi_scan_root(struct acpi_device *device, int do } sd->domain = domain; - sd->node = -1; - - pxm = acpi_get_pxm(device->handle); -#ifdef CONFIG_ACPI_NUMA - if (pxm >= 0) - sd->node = pxm_to_node(pxm); -#endif + sd->node = node; /* * Maybe the desired pci bus has been already scanned. In such case * it is unnecessary to scan the pci bus with the given domain,busnum. 
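The net effect of the hunks above is that every ACPI-discovered root bus records its NUMA node both in the per-bus sysdata and in the global bus-to-node table. A minimal usage sketch of what that table then enables (hypothetical driver code, not part of this patch; alloc_near_dev is an invented helper and assumes <linux/pci.h> and <linux/slab.h>):

	/* Hypothetical: allocate a buffer on the NUMA node of dev's root bus. */
	static void *alloc_near_dev(struct pci_dev *dev, size_t size)
	{
		int node = get_mp_bus_to_node(dev->bus->number);

		/* get_mp_bus_to_node() returns -1 when the node is unknown */
		return kmalloc_node(size, GFP_KERNEL,
				    node >= 0 ? node : numa_node_id());
	}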
@@ -238,9 +246,9 @@ struct pci_bus * __devinit pci_acpi_scan_root(struct acpi_device *device, int do kfree(sd); #ifdef CONFIG_ACPI_NUMA - if (bus != NULL) { + if (bus) { if (pxm >= 0) { - printk("bus %d -> pxm %d -> node %d\n", + printk(KERN_DEBUG "bus %02x -> pxm %d -> node %d\n", busnum, pxm, pxm_to_node(pxm)); } } @@ -248,7 +256,6 @@ struct pci_bus * __devinit pci_acpi_scan_root(struct acpi_device *device, int do if (bus && (pci_probe & PCI_USE__CRS)) get_current_resources(device, busnum, domain, bus); - return bus; } diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c index 75fcc29ecf5..2a4d751818b 100644 --- a/arch/x86/pci/common.c +++ b/arch/x86/pci/common.c @@ -342,9 +342,14 @@ struct pci_bus * __devinit pcibios_scan_root(int busnum) return NULL; } + sd->node = get_mp_bus_to_node(busnum); + printk(KERN_DEBUG "PCI: Probing PCI hardware (bus %02x)\n", busnum); + bus = pci_scan_bus_parented(NULL, busnum, &pci_root_ops, sd); + if (!bus) + kfree(sd); - return pci_scan_bus_parented(NULL, busnum, &pci_root_ops, sd); + return bus; } extern u8 pci_cache_line_size; @@ -420,6 +425,10 @@ char * __devinit pcibios_setup(char *str) pci_probe &= ~PCI_PROBE_MMCONF; return NULL; } + else if (!strcmp(str, "check_enable_amd_mmconf")) { + pci_probe |= PCI_CHECK_ENABLE_AMD_MMCONF; + return NULL; + } #endif else if (!strcmp(str, "noacpi")) { acpi_noirq_set(); @@ -480,7 +489,7 @@ void pcibios_disable_device (struct pci_dev *dev) pcibios_disable_irq(dev); } -struct pci_bus *__devinit pci_scan_bus_with_sysdata(int busno) +struct pci_bus *pci_scan_bus_on_node(int busno, struct pci_ops *ops, int node) { struct pci_bus *bus = NULL; struct pci_sysdata *sd; @@ -495,10 +504,15 @@ struct pci_bus *__devinit pci_scan_bus_with_sysdata(int busno) printk(KERN_ERR "PCI: OOM, skipping PCI bus %02x\n", busno); return NULL; } - sd->node = -1; - bus = pci_scan_bus(busno, &pci_root_ops, sd); + sd->node = node; + bus = pci_scan_bus(busno, ops, sd); if (!bus) kfree(sd); return bus; } + +struct pci_bus *pci_scan_bus_with_sysdata(int busno) +{ + return pci_scan_bus_on_node(busno, &pci_root_ops, -1); +} diff --git a/arch/x86/pci/direct.c b/arch/x86/pci/direct.c index 42f3e4cad17..21d1e0e0d53 100644 --- a/arch/x86/pci/direct.c +++ b/arch/x86/pci/direct.c @@ -258,7 +258,8 @@ void __init pci_direct_init(int type) { if (type == 0) return; - printk(KERN_INFO "PCI: Using configuration type %d\n", type); + printk(KERN_INFO "PCI: Using configuration type %d for base access\n", + type); if (type == 1) raw_pci_ops = &pci_direct_conf1; else @@ -275,8 +276,10 @@ int __init pci_direct_probe(void) if (!region) goto type2; - if (pci_check_type1()) + if (pci_check_type1()) { + raw_pci_ops = &pci_direct_conf1; return 1; + } release_resource(region); type2: @@ -290,7 +293,6 @@ int __init pci_direct_probe(void) goto fail2; if (pci_check_type2()) { - printk(KERN_INFO "PCI: Using configuration type 2\n"); raw_pci_ops = &pci_direct_conf2; return 2; } diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c index a5ef5f55137..b60b2abd480 100644 --- a/arch/x86/pci/fixup.c +++ b/arch/x86/pci/fixup.c @@ -493,3 +493,20 @@ static void __devinit pci_siemens_interrupt_controller(struct pci_dev *dev) } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SIEMENS, 0x0015, pci_siemens_interrupt_controller); + +/* + * Regular PCI devices have 256 bytes, but AMD Family 10h Opteron ext config + * have 4096 bytes. Even if the device is capable, that doesn't mean we can + * access it. Maybe we don't have a way to generate extended config space + * accesses. 
So check it + */ +static void fam10h_pci_cfg_space_size(struct pci_dev *dev) +{ + dev->cfg_size = pci_cfg_space_size_ext(dev, 0); +} + +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1200, fam10h_pci_cfg_space_size); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1201, fam10h_pci_cfg_space_size); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1202, fam10h_pci_cfg_space_size); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1203, fam10h_pci_cfg_space_size); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1204, fam10h_pci_cfg_space_size); diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c index 94f6c73a53d..8af0f0bae2a 100644 --- a/arch/x86/pci/i386.c +++ b/arch/x86/pci/i386.c @@ -301,6 +301,13 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, prot = pgprot_val(vma->vm_page_prot); if (pat_wc_enabled && write_combine) prot |= _PAGE_CACHE_WC; + else if (pat_wc_enabled) + /* + * ioremap() and ioremap_nocache() defaults to UC MINUS for now. + * To avoid attribute conflicts, request UC MINUS here + * aswell. + */ + prot |= _PAGE_CACHE_UC_MINUS; else if (boot_cpu_data.x86 > 3) prot |= _PAGE_CACHE_UC; @@ -319,9 +326,8 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, * - request is uncached, return cannot be write-combine * - request is write-combine, return cannot be write-back */ - if ((flags == _PAGE_CACHE_UC && - (new_flags == _PAGE_CACHE_WB || - new_flags == _PAGE_CACHE_WC)) || + if ((flags == _PAGE_CACHE_UC_MINUS && + (new_flags == _PAGE_CACHE_WB)) || (flags == _PAGE_CACHE_WC && new_flags == _PAGE_CACHE_WB)) { free_memtype(addr, addr+len); diff --git a/arch/x86/pci/init.c b/arch/x86/pci/init.c index 3de9f9ba2da..dd30c6076b5 100644 --- a/arch/x86/pci/init.c +++ b/arch/x86/pci/init.c @@ -6,16 +6,17 @@ in the right sequence from here. */ static __init int pci_access_init(void) { - int type __maybe_unused = 0; - #ifdef CONFIG_PCI_DIRECT + int type = 0; + type = pci_direct_probe(); #endif -#ifdef CONFIG_PCI_MMCONFIG - pci_mmcfg_init(type); + + pci_mmcfg_early_init(); + +#ifdef CONFIG_PCI_OLPC + pci_olpc_init(); #endif - if (raw_pci_ops) - return 0; #ifdef CONFIG_PCI_BIOS pci_pcbios_init(); #endif @@ -28,7 +29,7 @@ static __init int pci_access_init(void) #ifdef CONFIG_PCI_DIRECT pci_direct_init(type); #endif - if (!raw_pci_ops) + if (!raw_pci_ops && !raw_pci_ext_ops) printk(KERN_ERR "PCI: Fatal: No config space access function found\n"); diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c index 579745ca6b6..0908fca901b 100644 --- a/arch/x86/pci/irq.c +++ b/arch/x86/pci/irq.c @@ -136,9 +136,11 @@ static void __init pirq_peer_trick(void) busmap[e->bus] = 1; } for(i = 1; i < 256; i++) { + int node; if (!busmap[i] || pci_find_bus(0, i)) continue; - if (pci_scan_bus_with_sysdata(i)) + node = get_mp_bus_to_node(i); + if (pci_scan_bus_on_node(i, &pci_root_ops, node)) printk(KERN_INFO "PCI: Discovered primary peer " "bus %02x [IRQ]\n", i); } diff --git a/arch/x86/pci/k8-bus_64.c b/arch/x86/pci/k8-bus_64.c index 9cc813e2970..ab6d4b18a88 100644 --- a/arch/x86/pci/k8-bus_64.c +++ b/arch/x86/pci/k8-bus_64.c @@ -1,83 +1,536 @@ #include <linux/init.h> #include <linux/pci.h> +#include <asm/pci-direct.h> #include <asm/mpspec.h> #include <linux/cpumask.h> +#include <linux/topology.h> /* * This discovers the pcibus <-> node mapping on AMD K8. 
- *
- * RED-PEN need to call this again on PCI hotplug
- * RED-PEN empty cpus get reported wrong
+ * It also gets the peer root bus resources for I/O and MMIO.
 */
-#define NODE_ID_REGISTER 0x60
-#define NODE_ID(dword) (dword & 0x07)
-#define LDT_BUS_NUMBER_REGISTER_0 0x94
-#define LDT_BUS_NUMBER_REGISTER_1 0xB4
-#define LDT_BUS_NUMBER_REGISTER_2 0xD4
-#define NR_LDT_BUS_NUMBER_REGISTERS 3
-#define SECONDARY_LDT_BUS_NUMBER(dword) ((dword >> 8) & 0xFF)
-#define SUBORDINATE_LDT_BUS_NUMBER(dword) ((dword >> 16) & 0xFF)
-#define PCI_DEVICE_ID_K8HTCONFIG 0x1100
+
+/*
+ * A transparent sub bus will use entries from 3 onward to store the extra
+ * ranges inherited from the root, so make sure there are enough slots;
+ * increase PCI_BUS_NUM_RESOURCES?
+ */
+#define RES_NUM 16
+struct pci_root_info {
+	char name[12];
+	unsigned int res_num;
+	struct resource res[RES_NUM];
+	int bus_min;
+	int bus_max;
+	int node;
+	int link;
+};
+
+/* 4 for now; it may grow to 32 */
+#define PCI_ROOT_NR 4
+static int pci_root_num;
+static struct pci_root_info pci_root_info[PCI_ROOT_NR];
+
+#ifdef CONFIG_NUMA
+
+#define BUS_NR 256
+
+static int mp_bus_to_node[BUS_NR];
+
+void set_mp_bus_to_node(int busnum, int node)
+{
+	if (busnum >= 0 && busnum < BUS_NR)
+		mp_bus_to_node[busnum] = node;
+}
+
+int get_mp_bus_to_node(int busnum)
+{
+	int node = -1;
+
+	if (busnum < 0 || busnum > (BUS_NR - 1))
+		return node;
+
+	node = mp_bus_to_node[busnum];
+
+	/*
+	 * let numa_node_id() decide later in dma_alloc_pages()
+	 * if there is no RAM on that node
+	 */
+	if (node != -1 && !node_online(node))
+		node = -1;
+
+	return node;
+}
+#endif
+
+void set_pci_bus_resources_arch_default(struct pci_bus *b)
+{
+	int i;
+	int j;
+	struct pci_root_info *info;
+
+	/* with only one root bus there is nothing to do */
+	if (pci_root_num < 2)
+		return;
+
+	for (i = 0; i < pci_root_num; i++) {
+		if (pci_root_info[i].bus_min == b->number)
+			break;
+	}
+
+	if (i == pci_root_num)
+		return;
+
+	info = &pci_root_info[i];
+	for (j = 0; j < info->res_num; j++) {
+		struct resource *res;
+		struct resource *root;
+
+		res = &info->res[j];
+		b->resource[j] = res;
+		if (res->flags & IORESOURCE_IO)
+			root = &ioport_resource;
+		else
+			root = &iomem_resource;
+		insert_resource(root, res);
+	}
+}
+
+#define RANGE_NUM 16
+
+struct res_range {
+	size_t start;
+	size_t end;
+};
+
+static void __init update_range(struct res_range *range, size_t start,
+				size_t end)
+{
+	int i;
+	int j;
+
+	for (j = 0; j < RANGE_NUM; j++) {
+		if (!range[j].end)
+			continue;
+
+		if (start <= range[j].start && end >= range[j].end) {
+			range[j].start = 0;
+			range[j].end = 0;
+			continue;
+		}
+
+		if (start <= range[j].start && end < range[j].end && range[j].start < end + 1) {
+			range[j].start = end + 1;
+			continue;
+		}
+
+		if (start > range[j].start && end >= range[j].end && range[j].end > start - 1) {
+			range[j].end = start - 1;
+			continue;
+		}
+
+		if (start > range[j].start && end < range[j].end) {
+			/* find a spare entry for the split-off piece */
+			for (i = 0; i < RANGE_NUM; i++) {
+				if (range[i].end == 0)
+					break;
+			}
+			if (i < RANGE_NUM) {
+				range[i].end = range[j].end;
+				range[i].start = end + 1;
+			} else {
+				printk(KERN_ERR "ran out of slots in ranges\n");
+			}
+			range[j].end = start - 1;
+			continue;
+		}
+	}
+}
+
+static void __init update_res(struct pci_root_info *info, size_t start,
+			      size_t end, unsigned long flags, int merge)
+{
+	int i;
+	struct resource *res;
+
+	if (!merge)
+		goto addit;
+
+	/* try to merge it with an existing entry */
+	for (i = 0; i < info->res_num; i++) {
+		size_t final_start, final_end;
+		size_t common_start, common_end;
+
res = &info->res[i]; + if (res->flags != flags) + continue; + + common_start = max((size_t)res->start, start); + common_end = min((size_t)res->end, end); + if (common_start > common_end + 1) + continue; + + final_start = min((size_t)res->start, start); + final_end = max((size_t)res->end, end); + + res->start = final_start; + res->end = final_end; + return; + } + +addit: + + /* need to add that */ + if (info->res_num >= RES_NUM) + return; + + res = &info->res[info->res_num]; + res->name = info->name; + res->flags = flags; + res->start = start; + res->end = end; + res->child = NULL; + info->res_num++; +} + +struct pci_hostbridge_probe { + u32 bus; + u32 slot; + u32 vendor; + u32 device; +}; + +static struct pci_hostbridge_probe pci_probes[] __initdata = { + { 0, 0x18, PCI_VENDOR_ID_AMD, 0x1100 }, + { 0, 0x18, PCI_VENDOR_ID_AMD, 0x1200 }, + { 0xff, 0, PCI_VENDOR_ID_AMD, 0x1200 }, + { 0, 0x18, PCI_VENDOR_ID_AMD, 0x1300 }, +}; + +static u64 __initdata fam10h_mmconf_start; +static u64 __initdata fam10h_mmconf_end; +static void __init get_pci_mmcfg_amd_fam10h_range(void) +{ + u32 address; + u64 base, msr; + unsigned segn_busn_bits; + + /* assume all cpus from fam10h have mmconf */ + if (boot_cpu_data.x86 < 0x10) + return; + + address = MSR_FAM10H_MMIO_CONF_BASE; + rdmsrl(address, msr); + + /* mmconfig is not enable */ + if (!(msr & FAM10H_MMIO_CONF_ENABLE)) + return; + + base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT); + + segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) & + FAM10H_MMIO_CONF_BUSRANGE_MASK; + + fam10h_mmconf_start = base; + fam10h_mmconf_end = base + (1ULL<<(segn_busn_bits + 20)) - 1; +} /** - * fill_mp_bus_to_cpumask() + * early_fill_mp_bus_to_node() + * called before pcibios_scan_root and pci_scan_bus * fills the mp_bus_to_cpumask array based according to the LDT Bus Number * Registers found in the K8 northbridge */ -__init static int -fill_mp_bus_to_cpumask(void) +static int __init early_fill_mp_bus_info(void) { - struct pci_dev *nb_dev = NULL; - int i, j; - u32 ldtbus, nid; - static int lbnr[3] = { - LDT_BUS_NUMBER_REGISTER_0, - LDT_BUS_NUMBER_REGISTER_1, - LDT_BUS_NUMBER_REGISTER_2 - }; - - while ((nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, - PCI_DEVICE_ID_K8HTCONFIG, nb_dev))) { - pci_read_config_dword(nb_dev, NODE_ID_REGISTER, &nid); - - for (i = 0; i < NR_LDT_BUS_NUMBER_REGISTERS; i++) { - pci_read_config_dword(nb_dev, lbnr[i], &ldtbus); - /* - * if there are no busses hanging off of the current - * ldt link then both the secondary and subordinate - * bus number fields are set to 0. - * - * RED-PEN - * This is slightly broken because it assumes - * HT node IDs == Linux node ids, which is not always - * true. However it is probably mostly true. 
- */ - if (!(SECONDARY_LDT_BUS_NUMBER(ldtbus) == 0 - && SUBORDINATE_LDT_BUS_NUMBER(ldtbus) == 0)) { - for (j = SECONDARY_LDT_BUS_NUMBER(ldtbus); - j <= SUBORDINATE_LDT_BUS_NUMBER(ldtbus); - j++) { - struct pci_bus *bus; - struct pci_sysdata *sd; - - long node = NODE_ID(nid); - /* Algorithm a bit dumb, but - it shouldn't matter here */ - bus = pci_find_bus(0, j); - if (!bus) - continue; - if (!node_online(node)) - node = 0; - - sd = bus->sysdata; - sd->node = node; - } + int i; + int j; + unsigned bus; + unsigned slot; + int found; + int node; + int link; + int def_node; + int def_link; + struct pci_root_info *info; + u32 reg; + struct resource *res; + size_t start; + size_t end; + struct res_range range[RANGE_NUM]; + u64 val; + u32 address; + +#ifdef CONFIG_NUMA + for (i = 0; i < BUS_NR; i++) + mp_bus_to_node[i] = -1; +#endif + + if (!early_pci_allowed()) + return -1; + + found = 0; + for (i = 0; i < ARRAY_SIZE(pci_probes); i++) { + u32 id; + u16 device; + u16 vendor; + + bus = pci_probes[i].bus; + slot = pci_probes[i].slot; + id = read_pci_config(bus, slot, 0, PCI_VENDOR_ID); + + vendor = id & 0xffff; + device = (id>>16) & 0xffff; + if (pci_probes[i].vendor == vendor && + pci_probes[i].device == device) { + found = 1; + break; + } + } + + if (!found) + return 0; + + pci_root_num = 0; + for (i = 0; i < 4; i++) { + int min_bus; + int max_bus; + reg = read_pci_config(bus, slot, 1, 0xe0 + (i << 2)); + + /* Check if that register is enabled for bus range */ + if ((reg & 7) != 3) + continue; + + min_bus = (reg >> 16) & 0xff; + max_bus = (reg >> 24) & 0xff; + node = (reg >> 4) & 0x07; +#ifdef CONFIG_NUMA + for (j = min_bus; j <= max_bus; j++) + mp_bus_to_node[j] = (unsigned char) node; +#endif + link = (reg >> 8) & 0x03; + + info = &pci_root_info[pci_root_num]; + info->bus_min = min_bus; + info->bus_max = max_bus; + info->node = node; + info->link = link; + sprintf(info->name, "PCI Bus #%02x", min_bus); + pci_root_num++; + } + + /* get the default node and link for left over res */ + reg = read_pci_config(bus, slot, 0, 0x60); + def_node = (reg >> 8) & 0x07; + reg = read_pci_config(bus, slot, 0, 0x64); + def_link = (reg >> 8) & 0x03; + + memset(range, 0, sizeof(range)); + range[0].end = 0xffff; + /* io port resource */ + for (i = 0; i < 4; i++) { + reg = read_pci_config(bus, slot, 1, 0xc0 + (i << 3)); + if (!(reg & 3)) + continue; + + start = reg & 0xfff000; + reg = read_pci_config(bus, slot, 1, 0xc4 + (i << 3)); + node = reg & 0x07; + link = (reg >> 4) & 0x03; + end = (reg & 0xfff000) | 0xfff; + + /* find the position */ + for (j = 0; j < pci_root_num; j++) { + info = &pci_root_info[j]; + if (info->node == node && info->link == link) + break; + } + if (j == pci_root_num) + continue; /* not found */ + + info = &pci_root_info[j]; + printk(KERN_DEBUG "node %d link %d: io port [%llx, %llx]\n", + node, link, (u64)start, (u64)end); + + /* kernel only handle 16 bit only */ + if (end > 0xffff) + end = 0xffff; + update_res(info, start, end, IORESOURCE_IO, 1); + update_range(range, start, end); + } + /* add left over io port range to def node/link, [0, 0xffff] */ + /* find the position */ + for (j = 0; j < pci_root_num; j++) { + info = &pci_root_info[j]; + if (info->node == def_node && info->link == def_link) + break; + } + if (j < pci_root_num) { + info = &pci_root_info[j]; + for (i = 0; i < RANGE_NUM; i++) { + if (!range[i].end) + continue; + + update_res(info, range[i].start, range[i].end, + IORESOURCE_IO, 1); + } + } + + memset(range, 0, sizeof(range)); + /* 0xfd00000000-0xffffffffff for HT */ + 
range[0].end = (0xfdULL<<32) - 1; + + /* need to take out [0, TOM) for RAM*/ + address = MSR_K8_TOP_MEM1; + rdmsrl(address, val); + end = (val & 0xffffff8000000ULL); + printk(KERN_INFO "TOM: %016lx aka %ldM\n", end, end>>20); + if (end < (1ULL<<32)) + update_range(range, 0, end - 1); + + /* get mmconfig */ + get_pci_mmcfg_amd_fam10h_range(); + /* need to take out mmconf range */ + if (fam10h_mmconf_end) { + printk(KERN_DEBUG "Fam 10h mmconf [%llx, %llx]\n", fam10h_mmconf_start, fam10h_mmconf_end); + update_range(range, fam10h_mmconf_start, fam10h_mmconf_end); + } + + /* mmio resource */ + for (i = 0; i < 8; i++) { + reg = read_pci_config(bus, slot, 1, 0x80 + (i << 3)); + if (!(reg & 3)) + continue; + + start = reg & 0xffffff00; /* 39:16 on 31:8*/ + start <<= 8; + reg = read_pci_config(bus, slot, 1, 0x84 + (i << 3)); + node = reg & 0x07; + link = (reg >> 4) & 0x03; + end = (reg & 0xffffff00); + end <<= 8; + end |= 0xffff; + + /* find the position */ + for (j = 0; j < pci_root_num; j++) { + info = &pci_root_info[j]; + if (info->node == node && info->link == link) + break; + } + if (j == pci_root_num) + continue; /* not found */ + + info = &pci_root_info[j]; + + printk(KERN_DEBUG "node %d link %d: mmio [%llx, %llx]", + node, link, (u64)start, (u64)end); + /* + * some sick allocation would have range overlap with fam10h + * mmconf range, so need to update start and end. + */ + if (fam10h_mmconf_end) { + int changed = 0; + u64 endx = 0; + if (start >= fam10h_mmconf_start && + start <= fam10h_mmconf_end) { + start = fam10h_mmconf_end + 1; + changed = 1; + } + + if (end >= fam10h_mmconf_start && + end <= fam10h_mmconf_end) { + end = fam10h_mmconf_start - 1; + changed = 1; + } + + if (start < fam10h_mmconf_start && + end > fam10h_mmconf_end) { + /* we got a hole */ + endx = fam10h_mmconf_start - 1; + update_res(info, start, endx, IORESOURCE_MEM, 0); + update_range(range, start, endx); + printk(KERN_CONT " ==> [%llx, %llx]", (u64)start, endx); + start = fam10h_mmconf_end + 1; + changed = 1; + } + if (changed) { + if (start <= end) { + printk(KERN_CONT " %s [%llx, %llx]", endx?"and":"==>", (u64)start, (u64)end); + } else { + printk(KERN_CONT "%s\n", endx?"":" ==> none"); + continue; + } } } + + update_res(info, start, end, IORESOURCE_MEM, 1); + update_range(range, start, end); + printk(KERN_CONT "\n"); + } + + /* need to take out [4G, TOM2) for RAM*/ + /* SYS_CFG */ + address = MSR_K8_SYSCFG; + rdmsrl(address, val); + /* TOP_MEM2 is enabled? */ + if (val & (1<<21)) { + /* TOP_MEM2 */ + address = MSR_K8_TOP_MEM2; + rdmsrl(address, val); + end = (val & 0xffffff8000000ULL); + printk(KERN_INFO "TOM2: %016lx aka %ldM\n", end, end>>20); + update_range(range, 1ULL<<32, end - 1); + } + + /* + * add left over mmio range to def node/link ? 
+ * that is tricky, just record range in from start_min to 4G + */ + for (j = 0; j < pci_root_num; j++) { + info = &pci_root_info[j]; + if (info->node == def_node && info->link == def_link) + break; + } + if (j < pci_root_num) { + info = &pci_root_info[j]; + + for (i = 0; i < RANGE_NUM; i++) { + if (!range[i].end) + continue; + + update_res(info, range[i].start, range[i].end, + IORESOURCE_MEM, 1); + } + } + +#ifdef CONFIG_NUMA + for (i = 0; i < BUS_NR; i++) { + node = mp_bus_to_node[i]; + if (node >= 0) + printk(KERN_DEBUG "bus: %02x to node: %02x\n", i, node); + } +#endif + + for (i = 0; i < pci_root_num; i++) { + int res_num; + int busnum; + + info = &pci_root_info[i]; + res_num = info->res_num; + busnum = info->bus_min; + printk(KERN_DEBUG "bus: [%02x,%02x] on node %x link %x\n", + info->bus_min, info->bus_max, info->node, info->link); + for (j = 0; j < res_num; j++) { + res = &info->res[j]; + printk(KERN_DEBUG "bus: %02x index %x %s: [%llx, %llx]\n", + busnum, j, + (res->flags & IORESOURCE_IO)?"io port":"mmio", + res->start, res->end); + } } return 0; } -fs_initcall(fill_mp_bus_to_cpumask); +postcore_initcall(early_fill_mp_bus_info); diff --git a/arch/x86/pci/legacy.c b/arch/x86/pci/legacy.c index e041ced0ce1..a67921ce60a 100644 --- a/arch/x86/pci/legacy.c +++ b/arch/x86/pci/legacy.c @@ -12,6 +12,7 @@ static void __devinit pcibios_fixup_peer_bridges(void) { int n, devfn; + long node; if (pcibios_last_bus <= 0 || pcibios_last_bus >= 0xff) return; @@ -21,12 +22,13 @@ static void __devinit pcibios_fixup_peer_bridges(void) u32 l; if (pci_find_bus(0, n)) continue; + node = get_mp_bus_to_node(n); for (devfn = 0; devfn < 256; devfn += 8) { if (!raw_pci_read(0, n, devfn, PCI_VENDOR_ID, 2, &l) && l != 0x0000 && l != 0xffff) { DBG("Found device at %02x:%02x [%04x]\n", n, devfn, l); printk(KERN_INFO "PCI: Discovered peer bus %02x\n", n); - pci_scan_bus_with_sysdata(n); + pci_scan_bus_on_node(n, &pci_root_ops, node); break; } } diff --git a/arch/x86/pci/mmconfig-shared.c b/arch/x86/pci/mmconfig-shared.c index 8d54df4dfaa..0cfebecf2a8 100644 --- a/arch/x86/pci/mmconfig-shared.c +++ b/arch/x86/pci/mmconfig-shared.c @@ -28,7 +28,7 @@ static int __initdata pci_mmcfg_resources_inserted; static const char __init *pci_mmcfg_e7520(void) { u32 win; - pci_direct_conf1.read(0, 0, PCI_DEVFN(0,0), 0xce, 2, &win); + raw_pci_ops->read(0, 0, PCI_DEVFN(0, 0), 0xce, 2, &win); win = win & 0xf000; if(win == 0x0000 || win == 0xf000) @@ -53,7 +53,7 @@ static const char __init *pci_mmcfg_intel_945(void) pci_mmcfg_config_num = 1; - pci_direct_conf1.read(0, 0, PCI_DEVFN(0,0), 0x48, 4, &pciexbar); + raw_pci_ops->read(0, 0, PCI_DEVFN(0, 0), 0x48, 4, &pciexbar); /* Enable bit */ if (!(pciexbar & 1)) @@ -100,33 +100,102 @@ static const char __init *pci_mmcfg_intel_945(void) return "Intel Corporation 945G/GZ/P/PL Express Memory Controller Hub"; } +static const char __init *pci_mmcfg_amd_fam10h(void) +{ + u32 low, high, address; + u64 base, msr; + int i; + unsigned segnbits = 0, busnbits; + + if (!(pci_probe & PCI_CHECK_ENABLE_AMD_MMCONF)) + return NULL; + + address = MSR_FAM10H_MMIO_CONF_BASE; + if (rdmsr_safe(address, &low, &high)) + return NULL; + + msr = high; + msr <<= 32; + msr |= low; + + /* mmconfig is not enable */ + if (!(msr & FAM10H_MMIO_CONF_ENABLE)) + return NULL; + + base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT); + + busnbits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) & + FAM10H_MMIO_CONF_BUSRANGE_MASK; + + /* + * only handle bus 0 ? 
+ * need to skip it + */ + if (!busnbits) + return NULL; + + if (busnbits > 8) { + segnbits = busnbits - 8; + busnbits = 8; + } + + pci_mmcfg_config_num = (1 << segnbits); + pci_mmcfg_config = kzalloc(sizeof(pci_mmcfg_config[0]) * + pci_mmcfg_config_num, GFP_KERNEL); + if (!pci_mmcfg_config) + return NULL; + + for (i = 0; i < (1 << segnbits); i++) { + pci_mmcfg_config[i].address = base + (1<<28) * i; + pci_mmcfg_config[i].pci_segment = i; + pci_mmcfg_config[i].start_bus_number = 0; + pci_mmcfg_config[i].end_bus_number = (1 << busnbits) - 1; + } + + return "AMD Family 10h NB"; +} + struct pci_mmcfg_hostbridge_probe { + u32 bus; + u32 devfn; u32 vendor; u32 device; const char *(*probe)(void); }; static struct pci_mmcfg_hostbridge_probe pci_mmcfg_probes[] __initdata = { - { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH, pci_mmcfg_e7520 }, - { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82945G_HB, pci_mmcfg_intel_945 }, + { 0, PCI_DEVFN(0, 0), PCI_VENDOR_ID_INTEL, + PCI_DEVICE_ID_INTEL_E7520_MCH, pci_mmcfg_e7520 }, + { 0, PCI_DEVFN(0, 0), PCI_VENDOR_ID_INTEL, + PCI_DEVICE_ID_INTEL_82945G_HB, pci_mmcfg_intel_945 }, + { 0, PCI_DEVFN(0x18, 0), PCI_VENDOR_ID_AMD, + 0x1200, pci_mmcfg_amd_fam10h }, + { 0xff, PCI_DEVFN(0, 0), PCI_VENDOR_ID_AMD, + 0x1200, pci_mmcfg_amd_fam10h }, }; static int __init pci_mmcfg_check_hostbridge(void) { u32 l; + u32 bus, devfn; u16 vendor, device; int i; const char *name; - pci_direct_conf1.read(0, 0, PCI_DEVFN(0,0), 0, 4, &l); - vendor = l & 0xffff; - device = (l >> 16) & 0xffff; + if (!raw_pci_ops) + return 0; pci_mmcfg_config_num = 0; pci_mmcfg_config = NULL; name = NULL; for (i = 0; !name && i < ARRAY_SIZE(pci_mmcfg_probes); i++) { + bus = pci_mmcfg_probes[i].bus; + devfn = pci_mmcfg_probes[i].devfn; + raw_pci_ops->read(0, bus, devfn, 0, 4, &l); + vendor = l & 0xffff; + device = (l >> 16) & 0xffff; + if (pci_mmcfg_probes[i].vendor == vendor && pci_mmcfg_probes[i].device == device) name = pci_mmcfg_probes[i].probe(); @@ -173,9 +242,78 @@ static void __init pci_mmcfg_insert_resources(unsigned long resource_flags) pci_mmcfg_resources_inserted = 1; } -static void __init pci_mmcfg_reject_broken(int type) +static acpi_status __init check_mcfg_resource(struct acpi_resource *res, + void *data) +{ + struct resource *mcfg_res = data; + struct acpi_resource_address64 address; + acpi_status status; + + if (res->type == ACPI_RESOURCE_TYPE_FIXED_MEMORY32) { + struct acpi_resource_fixed_memory32 *fixmem32 = + &res->data.fixed_memory32; + if (!fixmem32) + return AE_OK; + if ((mcfg_res->start >= fixmem32->address) && + (mcfg_res->end < (fixmem32->address + + fixmem32->address_length))) { + mcfg_res->flags = 1; + return AE_CTRL_TERMINATE; + } + } + if ((res->type != ACPI_RESOURCE_TYPE_ADDRESS32) && + (res->type != ACPI_RESOURCE_TYPE_ADDRESS64)) + return AE_OK; + + status = acpi_resource_to_address64(res, &address); + if (ACPI_FAILURE(status) || + (address.address_length <= 0) || + (address.resource_type != ACPI_MEMORY_RANGE)) + return AE_OK; + + if ((mcfg_res->start >= address.minimum) && + (mcfg_res->end < (address.minimum + address.address_length))) { + mcfg_res->flags = 1; + return AE_CTRL_TERMINATE; + } + return AE_OK; +} + +static acpi_status __init find_mboard_resource(acpi_handle handle, u32 lvl, + void *context, void **rv) +{ + struct resource *mcfg_res = context; + + acpi_walk_resources(handle, METHOD_NAME__CRS, + check_mcfg_resource, context); + + if (mcfg_res->flags) + return AE_CTRL_TERMINATE; + + return AE_OK; +} + +static int __init is_acpi_reserved(unsigned long 
start, unsigned long end) +{ + struct resource mcfg_res; + + mcfg_res.start = start; + mcfg_res.end = end; + mcfg_res.flags = 0; + + acpi_get_devices("PNP0C01", find_mboard_resource, &mcfg_res, NULL); + + if (!mcfg_res.flags) + acpi_get_devices("PNP0C02", find_mboard_resource, &mcfg_res, + NULL); + + return mcfg_res.flags; +} + +static void __init pci_mmcfg_reject_broken(int early) { typeof(pci_mmcfg_config[0]) *cfg; + int i; if ((pci_mmcfg_config_num == 0) || (pci_mmcfg_config == NULL) || @@ -184,51 +322,80 @@ static void __init pci_mmcfg_reject_broken(int type) cfg = &pci_mmcfg_config[0]; - /* - * Handle more broken MCFG tables on Asus etc. - * They only contain a single entry for bus 0-0. - */ - if (pci_mmcfg_config_num == 1 && - cfg->pci_segment == 0 && - (cfg->start_bus_number | cfg->end_bus_number) == 0) { - printk(KERN_ERR "PCI: start and end of bus number is 0. " - "Rejected as broken MCFG.\n"); - goto reject; + for (i = 0; i < pci_mmcfg_config_num; i++) { + int valid = 0; + u32 size = (cfg->end_bus_number + 1) << 20; + cfg = &pci_mmcfg_config[i]; + printk(KERN_NOTICE "PCI: MCFG configuration %d: base %lx " + "segment %hu buses %u - %u\n", + i, (unsigned long)cfg->address, cfg->pci_segment, + (unsigned int)cfg->start_bus_number, + (unsigned int)cfg->end_bus_number); + + if (!early && + is_acpi_reserved(cfg->address, cfg->address + size - 1)) { + printk(KERN_NOTICE "PCI: MCFG area at %Lx reserved " + "in ACPI motherboard resources\n", + cfg->address); + valid = 1; + } + + if (valid) + continue; + + if (!early) + printk(KERN_ERR "PCI: BIOS Bug: MCFG area at %Lx is not" + " reserved in ACPI motherboard resources\n", + cfg->address); + /* Don't try to do this check unless configuration + type 1 is available. how about type 2 ?*/ + if (raw_pci_ops && e820_all_mapped(cfg->address, + cfg->address + size - 1, + E820_RESERVED)) { + printk(KERN_NOTICE + "PCI: MCFG area at %Lx reserved in E820\n", + cfg->address); + valid = 1; + } + + if (!valid) + goto reject; } - /* - * Only do this check when type 1 works. 
If it doesn't work - * assume we run on a Mac and always use MCFG - */ - if (type == 1 && !e820_all_mapped(cfg->address, - cfg->address + MMCONFIG_APER_MIN, - E820_RESERVED)) { - printk(KERN_ERR "PCI: BIOS Bug: MCFG area at %Lx is not" - " E820-reserved\n", cfg->address); - goto reject; - } return; reject: printk(KERN_ERR "PCI: Not using MMCONFIG.\n"); + pci_mmcfg_arch_free(); kfree(pci_mmcfg_config); pci_mmcfg_config = NULL; pci_mmcfg_config_num = 0; } -void __init pci_mmcfg_init(int type) -{ - int known_bridge = 0; +static int __initdata known_bridge; +void __init __pci_mmcfg_init(int early) +{ + /* MMCONFIG disabled */ if ((pci_probe & PCI_PROBE_MMCONF) == 0) return; - if (type == 1 && pci_mmcfg_check_hostbridge()) - known_bridge = 1; + /* MMCONFIG already enabled */ + if (!early && !(pci_probe & PCI_PROBE_MASK & ~PCI_PROBE_MMCONF)) + return; + + /* for late to exit */ + if (known_bridge) + return; + + if (early) { + if (pci_mmcfg_check_hostbridge()) + known_bridge = 1; + } if (!known_bridge) { acpi_table_parse(ACPI_SIG_MCFG, acpi_parse_mcfg); - pci_mmcfg_reject_broken(type); + pci_mmcfg_reject_broken(early); } if ((pci_mmcfg_config_num == 0) || @@ -249,6 +416,16 @@ void __init pci_mmcfg_init(int type) } } +void __init pci_mmcfg_early_init(void) +{ + __pci_mmcfg_init(1); +} + +void __init pci_mmcfg_late_init(void) +{ + __pci_mmcfg_init(0); +} + static int __init pci_mmcfg_late_insert_resources(void) { /* diff --git a/arch/x86/pci/mmconfig_32.c b/arch/x86/pci/mmconfig_32.c index 081816ada05..f3c761dce69 100644 --- a/arch/x86/pci/mmconfig_32.c +++ b/arch/x86/pci/mmconfig_32.c @@ -136,3 +136,7 @@ int __init pci_mmcfg_arch_init(void) raw_pci_ext_ops = &pci_mmcfg; return 1; } + +void __init pci_mmcfg_arch_free(void) +{ +} diff --git a/arch/x86/pci/mmconfig_64.c b/arch/x86/pci/mmconfig_64.c index 9207fd49233..a1994163c99 100644 --- a/arch/x86/pci/mmconfig_64.c +++ b/arch/x86/pci/mmconfig_64.c @@ -127,7 +127,7 @@ static void __iomem * __init mcfg_ioremap(struct acpi_mcfg_allocation *cfg) int __init pci_mmcfg_arch_init(void) { int i; - pci_mmcfg_virt = kmalloc(sizeof(*pci_mmcfg_virt) * + pci_mmcfg_virt = kzalloc(sizeof(*pci_mmcfg_virt) * pci_mmcfg_config_num, GFP_KERNEL); if (pci_mmcfg_virt == NULL) { printk(KERN_ERR "PCI: Can not allocate memory for mmconfig structures\n"); @@ -141,9 +141,29 @@ int __init pci_mmcfg_arch_init(void) printk(KERN_ERR "PCI: Cannot map mmconfig aperture for " "segment %d\n", pci_mmcfg_config[i].pci_segment); + pci_mmcfg_arch_free(); return 0; } } raw_pci_ext_ops = &pci_mmcfg; return 1; } + +void __init pci_mmcfg_arch_free(void) +{ + int i; + + if (pci_mmcfg_virt == NULL) + return; + + for (i = 0; i < pci_mmcfg_config_num; ++i) { + if (pci_mmcfg_virt[i].virt) { + iounmap(pci_mmcfg_virt[i].virt); + pci_mmcfg_virt[i].virt = NULL; + pci_mmcfg_virt[i].cfg = NULL; + } + } + + kfree(pci_mmcfg_virt); + pci_mmcfg_virt = NULL; +} diff --git a/arch/x86/pci/mp_bus_to_node.c b/arch/x86/pci/mp_bus_to_node.c new file mode 100644 index 00000000000..022943999b8 --- /dev/null +++ b/arch/x86/pci/mp_bus_to_node.c @@ -0,0 +1,23 @@ +#include <linux/pci.h> +#include <linux/init.h> +#include <linux/topology.h> + +#define BUS_NR 256 + +static unsigned char mp_bus_to_node[BUS_NR]; + +void set_mp_bus_to_node(int busnum, int node) +{ + if (busnum >= 0 && busnum < BUS_NR) + mp_bus_to_node[busnum] = (unsigned char) node; +} + +int get_mp_bus_to_node(int busnum) +{ + int node; + + if (busnum < 0 || busnum > (BUS_NR - 1)) + return 0; + node = mp_bus_to_node[busnum]; + return node; +} diff --git 
diff --git a/arch/x86/pci/olpc.c b/arch/x86/pci/olpc.c
new file mode 100644
index 00000000000..5e7636558c0
--- /dev/null
+++ b/arch/x86/pci/olpc.c
@@ -0,0 +1,313 @@
+/*
+ * Low-level PCI config space access for OLPC systems that lack the VSA
+ * PCI virtualization software.
+ *
+ * Copyright © 2006 Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The AMD Geode chipset (i.e. the GX2 processor and CS5536 I/O companion
+ * device) has some I/O functions (display, southbridge, sound, USB HCIs,
+ * etc.) that more or less behave like PCI devices, but the hardware
+ * doesn't directly implement the PCI configuration space headers.  AMD
+ * provides "VSA" (Virtual System Architecture) software that emulates
+ * PCI config space for these devices, by trapping I/O accesses to the
+ * PCI config registers (CF8/CFC) and running some code in System
+ * Management Mode interrupt state.  On the OLPC platform, we don't want
+ * to use that VSA code because (a) it slows down suspend/resume, and
+ * (b) recompiling it requires special compilers that are hard to get.
+ * So instead of letting the complex VSA code simulate the PCI config
+ * registers for the on-chip devices, we just simulate them the easy way,
+ * by inserting the code into the pci_write_config and pci_read_config
+ * path.  Most of the config registers are read-only anyway, so the bulk
+ * of the simulation is just table lookup.
+ */
+
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <asm/olpc.h>
+#include <asm/geode.h>
+#include "pci.h"
+
+/*
+ * In the tables below, the first two lines (8 longwords) are the
+ * size masks that are used when the higher-level PCI code determines
+ * the size of the region by writing ~0 to a base address register
+ * and reading back the result.
+ *
+ * The following lines are the values that are read during normal
+ * PCI config access cycles, i.e. not after just having written
+ * ~0 to a base address register.
+ */
+
+static const uint32_t lxnb_hdr[] = {  /* dev 1 function 0 - devfn = 8 */
+        0x0, 0x0, 0x0, 0x0,
+        0x0, 0x0, 0x0, 0x0,
+
+        0x281022, 0x2200005, 0x6000021, 0x80f808,       /* AMD Vendor ID */
+        0x0, 0x0, 0x0, 0x0,     /* No virtual registers, hence no BAR */
+        0x0, 0x0, 0x0, 0x28100b,
+        0x0, 0x0, 0x0, 0x0,
+        0x0, 0x0, 0x0, 0x0,
+        0x0, 0x0, 0x0, 0x0,
+        0x0, 0x0, 0x0, 0x0,
+};
+
+static const uint32_t gxnb_hdr[] = {  /* dev 1 function 0 - devfn = 8 */
+        0xfffffffd, 0x0, 0x0, 0x0,
+        0x0, 0x0, 0x0, 0x0,
+
+        0x28100b, 0x2200005, 0x6000021, 0x80f808,       /* NSC Vendor ID */
+        0xac1d, 0x0, 0x0, 0x0,  /* I/O BAR - base of virtual registers */
+        0x0, 0x0, 0x0, 0x28100b,
+        0x0, 0x0, 0x0, 0x0,
+        0x0, 0x0, 0x0, 0x0,
+        0x0, 0x0, 0x0, 0x0,
+        0x0, 0x0, 0x0, 0x0,
+};
+
+static const uint32_t lxfb_hdr[] = {  /* dev 1 function 1 - devfn = 9 */
+        0xff000008, 0xffffc000, 0xffffc000, 0xffffc000,
+        0xffffc000, 0x0, 0x0, 0x0,
+
+        0x20811022, 0x2200003, 0x3000000, 0x0,          /* AMD Vendor ID */
+        0xfd000000, 0xfe000000, 0xfe004000, 0xfe008000, /* FB, GP, VG, DF */
+        0xfe00c000, 0x0, 0x0, 0x30100b, /* VIP */
+        0x0, 0x0, 0x0, 0x10e,   /* INTA, IRQ14 for graphics accel */
+        0x0, 0x0, 0x0, 0x0,
+        0x3d0, 0x3c0, 0xa0000, 0x0,     /* VG IO, VG IO, EGA FB, MONO FB */
+        0x0, 0x0, 0x0, 0x0,
+};
+
+static const uint32_t gxfb_hdr[] = {  /* dev 1 function 1 - devfn = 9 */
+        0xff800008, 0xffffc000, 0xffffc000, 0xffffc000,
+        0x0, 0x0, 0x0, 0x0,
+
+        0x30100b, 0x2200003, 0x3000000, 0x0,            /* NSC Vendor ID */
+        0xfd000000, 0xfe000000, 0xfe004000, 0xfe008000, /* FB, GP, VG, DF */
+        0x0, 0x0, 0x0, 0x30100b,
+        0x0, 0x0, 0x0, 0x0,
+        0x0, 0x0, 0x0, 0x0,
+        0x3d0, 0x3c0, 0xa0000, 0x0,     /* VG IO, VG IO, EGA FB, MONO FB */
+        0x0, 0x0, 0x0, 0x0,
+};
+
+static const uint32_t aes_hdr[] = {   /* dev 1 function 2 - devfn = 0xa */
+        0xffffc000, 0x0, 0x0, 0x0,
+        0x0, 0x0, 0x0, 0x0,
+
+        0x20821022, 0x2a00006, 0x10100000, 0x8,         /* AMD Vendor ID */
+        0xfe010000, 0x0, 0x0, 0x0,      /* AES registers */
+        0x0, 0x0, 0x0, 0x20821022,
+        0x0, 0x0, 0x0, 0x0,
+        0x0, 0x0, 0x0, 0x0,
+        0x0, 0x0, 0x0, 0x0,
+        0x0, 0x0, 0x0, 0x0,
+};
+
+
+static const uint32_t isa_hdr[] = {   /* dev f function 0 - devfn = 78 */
+        0xfffffff9, 0xffffff01, 0xffffffc1, 0xffffffe1,
+        0xffffff81, 0xffffffc1, 0x0, 0x0,
+
+        0x20901022, 0x2a00049, 0x6010003, 0x802000,
+        0x18b1, 0x1001, 0x1801, 0x1881, /* SMB-8 GPIO-256 MFGPT-64 IRQ-32 */
+        0x1401, 0x1841, 0x0, 0x20901022,        /* PMS-128 ACPI-64 */
+        0x0, 0x0, 0x0, 0x0,
+        0x0, 0x0, 0x0, 0x0,
+        0x0, 0x0, 0x0, 0xaa5b,  /* IRQ steering */
+        0x0, 0x0, 0x0, 0x0,
+};
+
+static const uint32_t ac97_hdr[] = {  /* dev f function 3 - devfn = 7b */
+        0xffffff81, 0x0, 0x0, 0x0,
+        0x0, 0x0, 0x0, 0x0,
+
+        0x20931022, 0x2a00041, 0x4010001, 0x0,
+        0x1481, 0x0, 0x0, 0x0,  /* I/O BAR-128 */
+        0x0, 0x0, 0x0, 0x20931022,
+        0x0, 0x0, 0x0, 0x205,   /* IntB, IRQ5 */
+        0x0, 0x0, 0x0, 0x0,
+        0x0, 0x0, 0x0, 0x0,
+        0x0, 0x0, 0x0, 0x0,
+};
+
+static const uint32_t ohci_hdr[] = {  /* dev f function 4 - devfn = 7c */
+        0xfffff000, 0x0, 0x0, 0x0,
+        0x0, 0x0, 0x0, 0x0,
+
+        0x20941022, 0x2300006, 0xc031002, 0x0,
+        0xfe01a000, 0x0, 0x0, 0x0,      /* MEMBAR-1000 */
+        0x0, 0x0, 0x0, 0x20941022,
+        0x0, 0x40, 0x0, 0x40a,  /* CapPtr INT-D, IRQA */
+        0xc8020001, 0x0, 0x0, 0x0,      /* Capabilities - 40 is R/O,
+                                           44 is mask 8103 (power control) */
+        0x0, 0x0, 0x0, 0x0,
+        0x0, 0x0, 0x0, 0x0,
+};
+
+static const uint32_t ehci_hdr[] = {  /* dev f function 5 - devfn = 7d */
+        0xfffff000, 0x0, 0x0, 0x0,
+        0x0, 0x0, 0x0, 0x0,
+
+        0x20951022, 0x2300006, 0xc032002, 0x0,
+        0xfe01b000, 0x0, 0x0, 0x0,      /* MEMBAR-1000 */
+        0x0, 0x0, 0x0, 0x20951022,
+        0x0, 0x40, 0x0, 0x40a,  /* CapPtr INT-D, IRQA */
+        0xc8020001, 0x0, 0x0, 0x0,      /* Capabilities - 40 is R/O, 44 is
+                                           mask 8103 (power control) */
+#if 0
+        0x1, 0x40080000, 0x0, 0x0,      /* EECP - see EHCI spec section 2.1.7 */
+#endif
+        0x01000001, 0x0, 0x0, 0x0,      /* EECP - see EHCI spec section 2.1.7 */
+        0x2020, 0x0, 0x0, 0x0,  /* (EHCI page 8) 60 SBRN (R/O),
+                                   61 FLADJ (R/W), PORTWAKECAP */
+};
+
+static uint32_t ff_loc = ~0;
+static uint32_t zero_loc;
+static int bar_probing;         /* Set after a write of ~0 to a BAR */
+static int is_lx;
+
+#define NB_SLOT 0x1     /* Northbridge - GX chip - Device 1 */
+#define SB_SLOT 0xf     /* Southbridge - CS5536 chip - Device F */
+
+static int is_simulated(unsigned int bus, unsigned int devfn)
+{
+        return (!bus && ((PCI_SLOT(devfn) == NB_SLOT) ||
+                         (PCI_SLOT(devfn) == SB_SLOT)));
+}
+
+static uint32_t *hdr_addr(const uint32_t *hdr, int reg)
+{
+        uint32_t addr;
+
+        /*
+         * This is a little bit tricky.  The header maps consist of
+         * 0x20 bytes of size masks, followed by 0x70 bytes of header data.
+         * In the normal case, when not probing a BAR's size, we want
+         * to access the header data, so we add 0x20 to the reg offset,
+         * thus skipping the size mask area.
+         * In the BAR probing case, we want to access the size mask for
+         * the BAR, so we subtract 0x10 (the config header offset for
+         * BAR0), and don't skip the size mask area.
+         */
+
+        addr = (uint32_t)hdr + reg + (bar_probing ? -0x10 : 0x20);
+
+        bar_probing = 0;
+        return (uint32_t *)addr;
+}
+
+static int pci_olpc_read(unsigned int seg, unsigned int bus,
+                unsigned int devfn, int reg, int len, uint32_t *value)
+{
+        uint32_t *addr;
+
+        /* Use the hardware mechanism for non-simulated devices */
+        if (!is_simulated(bus, devfn))
+                return pci_direct_conf1.read(seg, bus, devfn, reg, len, value);
+
+        /*
+         * No device has config registers past 0x70, so we save table space
+         * by not storing entries for the nonexistent registers
+         */
+        if (reg >= 0x70)
+                addr = &zero_loc;
+        else {
+                switch (devfn) {
+                case 0x8:
+                        addr = hdr_addr(is_lx ? lxnb_hdr : gxnb_hdr, reg);
+                        break;
+                case 0x9:
+                        addr = hdr_addr(is_lx ? lxfb_hdr : gxfb_hdr, reg);
+                        break;
+                case 0xa:
+                        addr = is_lx ? hdr_addr(aes_hdr, reg) : &ff_loc;
+                        break;
+                case 0x78:
+                        addr = hdr_addr(isa_hdr, reg);
+                        break;
+                case 0x7b:
+                        addr = hdr_addr(ac97_hdr, reg);
+                        break;
+                case 0x7c:
+                        addr = hdr_addr(ohci_hdr, reg);
+                        break;
+                case 0x7d:
+                        addr = hdr_addr(ehci_hdr, reg);
+                        break;
+                default:
+                        addr = &ff_loc;
+                        break;
+                }
+        }
+        switch (len) {
+        case 1:
+                *value = *(uint8_t *)addr;
+                break;
+        case 2:
+                *value = *(uint16_t *)addr;
+                break;
+        case 4:
+                *value = *addr;
+                break;
+        default:
+                BUG();
+        }
+
+        return 0;
+}
+
+static int pci_olpc_write(unsigned int seg, unsigned int bus,
+                unsigned int devfn, int reg, int len, uint32_t value)
+{
+        /* Use the hardware mechanism for non-simulated devices */
+        if (!is_simulated(bus, devfn))
+                return pci_direct_conf1.write(seg, bus, devfn, reg, len, value);
+
+        /* XXX we may want to extend this to simulate EHCI power management */
+
+        /*
+         * Mostly we just discard writes, but if the write is a size probe
+         * (i.e. writing ~0 to a BAR), we remember it and arrange to return
+         * the appropriate size mask on the next read.  This is cheating
+         * to some extent, because it depends on the fact that the next
+         * access after such a write will always be a read to the same BAR.
+         */
+
+        if ((reg >= 0x10) && (reg < 0x2c)) {
+                /* write is to a BAR */
+                if (value == ~0)
+                        bar_probing = 1;
+        } else {
+                /*
+                 * No warning on writes to ROM BAR, CMD, LATENCY_TIMER,
+                 * CACHE_LINE_SIZE, or PM registers.
+                 */
+                if ((reg != PCI_ROM_ADDRESS) && (reg != PCI_COMMAND) &&
+                    (reg != PCI_LATENCY_TIMER) &&
+                    (reg != PCI_CACHE_LINE_SIZE) && (reg != 0x44))
+                        printk(KERN_WARNING "OLPC PCI: Config write to devfn"
+                               " %x reg %x value %x\n", devfn, reg, value);
+        }
+
+        return 0;
+}
+
+static struct pci_raw_ops pci_olpc_conf = {
+        .read = pci_olpc_read,
+        .write = pci_olpc_write,
+};
+
+void __init pci_olpc_init(void)
+{
+        if (!machine_is_olpc() || olpc_has_vsa())
+                return;
+
+        printk(KERN_INFO "PCI: Using configuration type OLPC\n");
+        raw_pci_ops = &pci_olpc_conf;
+        is_lx = is_geode_lx();
+}
diff --git a/arch/x86/pci/pci.h b/arch/x86/pci/pci.h
index c4bddaeff61..c58805a92db 100644
--- a/arch/x86/pci/pci.h
+++ b/arch/x86/pci/pci.h
@@ -26,6 +26,7 @@
 #define PCI_ASSIGN_ALL_BUSSES   0x4000
 #define PCI_CAN_SKIP_ISA_ALIGN  0x8000
 #define PCI_USE__CRS            0x10000
+#define PCI_CHECK_ENABLE_AMD_MMCONF     0x20000
 
 extern unsigned int pci_probe;
 extern unsigned long pirq_table_addr;
@@ -97,11 +98,12 @@ extern struct pci_raw_ops pci_direct_conf1;
 extern int pci_direct_probe(void);
 extern void pci_direct_init(int type);
 extern void pci_pcbios_init(void);
-extern void pci_mmcfg_init(int type);
+extern void pci_olpc_init(void);
 
 /* pci-mmconfig.c */
 
 extern int __init pci_mmcfg_arch_init(void);
+extern void __init pci_mmcfg_arch_free(void);
 
 /*
  * AMD Fam10h CPUs are buggy, and cannot access MMIO config space
diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
index e2af8eee80e..cf058fecfce 100644
--- a/arch/x86/vdso/vdso32-setup.c
+++ b/arch/x86/vdso/vdso32-setup.c
@@ -162,7 +162,7 @@ static __init void relocate_vdso(Elf32_Ehdr *ehdr)
         Elf32_Shdr *shdr;
         int i;
 
-        BUG_ON(memcmp(ehdr->e_ident, ELFMAG, 4) != 0 ||
+        BUG_ON(memcmp(ehdr->e_ident, ELFMAG, SELFMAG) != 0 ||
                !elf_check_arch_ia32(ehdr) ||
                ehdr->e_type != ET_DYN);
 
@@ -303,8 +303,6 @@ int __init sysenter_setup(void)
 
 #ifdef CONFIG_X86_32
         gate_vma_init();
-
-        printk("Compat vDSO mapped to %08lx.\n", __fix_to_virt(FIX_VDSO));
 #endif
 
         if (!vdso32_sysenter()) {
diff --git a/arch/x86/video/fbdev.c b/arch/x86/video/fbdev.c
index 4db42bff8c6..69527688f79 100644
--- a/arch/x86/video/fbdev.c
+++ b/arch/x86/video/fbdev.c
@@ -1,5 +1,4 @@
 /*
- *
  * Copyright (C) 2007 Antonino Daplas <adaplas@gmail.com>
  *
  * This file is subject to the terms and conditions of the GNU General Public
@@ -29,3 +28,4 @@ int fb_is_primary_device(struct fb_info *info)
         return retval;
 }
 EXPORT_SYMBOL(fb_is_primary_device);
+MODULE_LICENSE("GPL");
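A note on the size-mask rows at the top of each header table in olpc.c above: they exist to satisfy the standard BAR sizing handshake. Generic PCI code writes ~0 to a base address register, reads back a mask, and recovers the region size from the low zero bits; that handshake is what the bar_probing path simulates. A standalone sketch of the decoding arithmetic, using mask values taken from the tables above (illustrative user-space C, not kernel code):

        #include <stdint.h>
        #include <stdio.h>

        /* Decode a 32-bit memory BAR size mask (the value read back after
         * writing ~0): clear the low four type/prefetch bits, then take the
         * two's complement to isolate the lowest set bit, i.e. the size. */
        static uint32_t mem_bar_size(uint32_t mask)
        {
                mask &= ~(uint32_t)0xf;
                return ~mask + 1;
        }

        int main(void)
        {
                /* 0xffffc000 (aes_hdr BAR0 mask) -> 0x4000: 16 KiB region */
                printf("0x%x\n", (unsigned)mem_bar_size(0xffffc000));
                /* 0xfffff000 (ohci_hdr BAR0 mask) -> 0x1000: 4 KiB region */
                printf("0x%x\n", (unsigned)mem_bar_size(0xfffff000));
                return 0;
        }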