| field     | value                                               | date                      |
|-----------|-----------------------------------------------------|---------------------------|
| author    | Kumar Gala <galak@kernel.crashing.org>              | 2006-03-20 11:58:02 -0600 |
| committer | Kumar Gala <galak@kernel.crashing.org>              | 2006-03-20 11:58:02 -0600 |
| commit    | 1a02e59a2970f9ed28ab51d3b08624b79e54d848 (patch)    |                           |
| tree      | 470cce472be3b08c160e0c569648e7228651b12a /arch/i386 |                           |
| parent    | ebcff3c773b42bce6182ec16485abca4e53fba97 (diff)     |                           |
| parent    | 2c276603c3e5ebf38155a9d1fbbda656d52d138e (diff)     |                           |
Merge branch 'master'
Diffstat (limited to 'arch/i386')
31 files changed, 180 insertions(+), 69 deletions(-)
diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig index 0afec8566e7..5b1a7d46d1d 100644 --- a/arch/i386/Kconfig +++ b/arch/i386/Kconfig @@ -733,7 +733,7 @@ config PHYSICAL_START config HOTPLUG_CPU bool "Support for hot-pluggable CPUs (EXPERIMENTAL)" - depends on SMP && HOTPLUG && EXPERIMENTAL + depends on SMP && HOTPLUG && EXPERIMENTAL && !X86_VOYAGER ---help--- Say Y here to experiment with turning CPUs off and on. CPUs can be controlled through /sys/devices/system/cpu. @@ -1060,6 +1060,7 @@ source "arch/i386/oprofile/Kconfig" config KPROBES bool "Kprobes (EXPERIMENTAL)" + depends on EXPERIMENTAL && MODULES help Kprobes allows you to trap at almost any kernel address and execute a callback function. register_kprobe() establishes diff --git a/arch/i386/boot/.gitignore b/arch/i386/boot/.gitignore new file mode 100644 index 00000000000..495f20c085d --- /dev/null +++ b/arch/i386/boot/.gitignore @@ -0,0 +1,3 @@ +bootsect +bzImage +setup diff --git a/arch/i386/boot/tools/.gitignore b/arch/i386/boot/tools/.gitignore new file mode 100644 index 00000000000..378eac25d31 --- /dev/null +++ b/arch/i386/boot/tools/.gitignore @@ -0,0 +1 @@ +build diff --git a/arch/i386/kernel/.gitignore b/arch/i386/kernel/.gitignore new file mode 100644 index 00000000000..40836ad9079 --- /dev/null +++ b/arch/i386/kernel/.gitignore @@ -0,0 +1 @@ +vsyscall.lds diff --git a/arch/i386/kernel/Makefile b/arch/i386/kernel/Makefile index 60c3f76dfca..65656c033d7 100644 --- a/arch/i386/kernel/Makefile +++ b/arch/i386/kernel/Makefile @@ -7,11 +7,11 @@ extra-y := head.o init_task.o vmlinux.lds obj-y := process.o semaphore.o signal.o entry.o traps.o irq.o \ ptrace.o time.o ioport.o ldt.o setup.o i8259.o sys_i386.o \ pci-dma.o i386_ksyms.o i387.o dmi_scan.o bootflag.o \ - quirks.o i8237.o + quirks.o i8237.o topology.o obj-y += cpu/ obj-y += timers/ -obj-$(CONFIG_ACPI) += acpi/ +obj-y += acpi/ obj-$(CONFIG_X86_BIOS_REBOOT) += reboot.o obj-$(CONFIG_MCA) += mca.o obj-$(CONFIG_X86_MSR) += msr.o diff --git a/arch/i386/kernel/acpi/Makefile b/arch/i386/kernel/acpi/Makefile index d51c7313cae..7e9ac99354f 100644 --- a/arch/i386/kernel/acpi/Makefile +++ b/arch/i386/kernel/acpi/Makefile @@ -1,4 +1,4 @@ -obj-y := boot.o +obj-$(CONFIG_ACPI) += boot.o obj-$(CONFIG_X86_IO_APIC) += earlyquirk.o obj-$(CONFIG_ACPI_SLEEP) += sleep.o wakeup.o diff --git a/arch/i386/kernel/acpi/boot.c b/arch/i386/kernel/acpi/boot.c index 79577f0ace9..f1a21945963 100644 --- a/arch/i386/kernel/acpi/boot.c +++ b/arch/i386/kernel/acpi/boot.c @@ -1111,9 +1111,6 @@ int __init acpi_boot_table_init(void) disable_acpi(); return error; } -#ifdef __i386__ - check_acpi_pci(); -#endif acpi_table_parse(ACPI_BOOT, acpi_parse_sbf); diff --git a/arch/i386/kernel/acpi/earlyquirk.c b/arch/i386/kernel/acpi/earlyquirk.c index f1b9d2a46da..2e3b643a4dc 100644 --- a/arch/i386/kernel/acpi/earlyquirk.c +++ b/arch/i386/kernel/acpi/earlyquirk.c @@ -7,14 +7,22 @@ #include <linux/pci.h> #include <asm/pci-direct.h> #include <asm/acpi.h> +#include <asm/apic.h> static int __init check_bridge(int vendor, int device) { +#ifdef CONFIG_ACPI /* According to Nvidia all timer overrides are bogus. Just ignore them all. */ if (vendor == PCI_VENDOR_ID_NVIDIA) { acpi_skip_timer_override = 1; } +#endif + if (vendor == PCI_VENDOR_ID_ATI && timer_over_8254 == 1) { + timer_over_8254 = 0; + printk(KERN_INFO "ATI board detected. 
Disabling timer routing " + "over 8254.\n"); + } return 0; } diff --git a/arch/i386/kernel/apic.c b/arch/i386/kernel/apic.c index f39e09ef64e..776c90989e0 100644 --- a/arch/i386/kernel/apic.c +++ b/arch/i386/kernel/apic.c @@ -570,16 +570,18 @@ void __devinit setup_local_APIC(void) */ void lapic_shutdown(void) { + unsigned long flags; + if (!cpu_has_apic) return; - local_irq_disable(); + local_irq_save(flags); clear_local_APIC(); if (enabled_via_apicbase) disable_local_APIC(); - local_irq_enable(); + local_irq_restore(flags); } #ifdef CONFIG_PM diff --git a/arch/i386/kernel/cpu/common.c b/arch/i386/kernel/cpu/common.c index 7eb9213734a..e6bd095ae10 100644 --- a/arch/i386/kernel/cpu/common.c +++ b/arch/i386/kernel/cpu/common.c @@ -4,6 +4,7 @@ #include <linux/smp.h> #include <linux/module.h> #include <linux/percpu.h> +#include <linux/bootmem.h> #include <asm/semaphore.h> #include <asm/processor.h> #include <asm/i387.h> @@ -18,6 +19,9 @@ #include "cpu.h" +DEFINE_PER_CPU(struct Xgt_desc_struct, cpu_gdt_descr); +EXPORT_PER_CPU_SYMBOL(cpu_gdt_descr); + DEFINE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]); EXPORT_PER_CPU_SYMBOL(cpu_16bit_stack); @@ -274,10 +278,10 @@ void __devinit generic_identify(struct cpuinfo_x86 * c) c->x86_capability[4] = excap; c->x86 = (tfms >> 8) & 15; c->x86_model = (tfms >> 4) & 15; - if (c->x86 == 0xf) { + if (c->x86 == 0xf) c->x86 += (tfms >> 20) & 0xff; + if (c->x86 >= 0x6) c->x86_model += ((tfms >> 16) & 0xF) << 4; - } c->x86_mask = tfms & 15; } else { /* Have CPUID level 0 only - unheard of */ @@ -571,8 +575,9 @@ void __devinit cpu_init(void) int cpu = smp_processor_id(); struct tss_struct * t = &per_cpu(init_tss, cpu); struct thread_struct *thread = ¤t->thread; - struct desc_struct *gdt = get_cpu_gdt_table(cpu); + struct desc_struct *gdt; __u32 stk16_off = (__u32)&per_cpu(cpu_16bit_stack, cpu); + struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu); if (cpu_test_and_set(cpu, cpu_initialized)) { printk(KERN_WARNING "CPU#%d already initialized!\n", cpu); @@ -590,6 +595,25 @@ void __devinit cpu_init(void) } /* + * This is a horrible hack to allocate the GDT. 
The problem + * is that cpu_init() is called really early for the boot CPU + * (and hence needs bootmem) but much later for the secondary + * CPUs, when bootmem will have gone away + */ + if (NODE_DATA(0)->bdata->node_bootmem_map) { + gdt = (struct desc_struct *)alloc_bootmem_pages(PAGE_SIZE); + /* alloc_bootmem_pages panics on failure, so no check */ + memset(gdt, 0, PAGE_SIZE); + } else { + gdt = (struct desc_struct *)get_zeroed_page(GFP_KERNEL); + if (unlikely(!gdt)) { + printk(KERN_CRIT "CPU%d failed to allocate GDT\n", cpu); + for (;;) + local_irq_enable(); + } + } + + /* * Initialize the per-CPU GDT with the boot GDT, * and set up the GDT descriptor: */ @@ -601,10 +625,10 @@ void __devinit cpu_init(void) ((((__u64)stk16_off) << 32) & 0xff00000000000000ULL) | (CPU_16BIT_STACK_SIZE - 1); - cpu_gdt_descr[cpu].size = GDT_SIZE - 1; - cpu_gdt_descr[cpu].address = (unsigned long)gdt; + cpu_gdt_descr->size = GDT_SIZE - 1; + cpu_gdt_descr->address = (unsigned long)gdt; - load_gdt(&cpu_gdt_descr[cpu]); + load_gdt(cpu_gdt_descr); load_idt(&idt_descr); /* diff --git a/arch/i386/kernel/cpu/transmeta.c b/arch/i386/kernel/cpu/transmeta.c index bdbeb77f4e2..7214c9b577a 100644 --- a/arch/i386/kernel/cpu/transmeta.c +++ b/arch/i386/kernel/cpu/transmeta.c @@ -1,4 +1,5 @@ #include <linux/kernel.h> +#include <linux/mm.h> #include <linux/init.h> #include <asm/processor.h> #include <asm/msr.h> diff --git a/arch/i386/kernel/efi.c b/arch/i386/kernel/efi.c index ecad519fd39..c9cad7ba0d2 100644 --- a/arch/i386/kernel/efi.c +++ b/arch/i386/kernel/efi.c @@ -70,10 +70,13 @@ static void efi_call_phys_prelog(void) { unsigned long cr4; unsigned long temp; + struct Xgt_desc_struct *cpu_gdt_descr; spin_lock(&efi_rt_lock); local_irq_save(efi_rt_eflags); + cpu_gdt_descr = &per_cpu(cpu_gdt_descr, 0); + /* * If I don't have PSE, I should just duplicate two entries in page * directory. 
If I have PSE, I just need to duplicate one entry in @@ -103,17 +106,18 @@ static void efi_call_phys_prelog(void) */ local_flush_tlb(); - cpu_gdt_descr[0].address = __pa(cpu_gdt_descr[0].address); - load_gdt((struct Xgt_desc_struct *) __pa(&cpu_gdt_descr[0])); + cpu_gdt_descr->address = __pa(cpu_gdt_descr->address); + load_gdt(cpu_gdt_descr); } static void efi_call_phys_epilog(void) { unsigned long cr4; + struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, 0); + + cpu_gdt_descr->address = __va(cpu_gdt_descr->address); + load_gdt(cpu_gdt_descr); - cpu_gdt_descr[0].address = - (unsigned long) __va(cpu_gdt_descr[0].address); - load_gdt(&cpu_gdt_descr[0]); cr4 = read_cr4(); if (cr4 & X86_CR4_PSE) { diff --git a/arch/i386/kernel/head.S b/arch/i386/kernel/head.S index 5884469f6bf..e0b7c632efb 100644 --- a/arch/i386/kernel/head.S +++ b/arch/i386/kernel/head.S @@ -398,7 +398,11 @@ ignore_int: pushl 32(%esp) pushl 40(%esp) pushl $int_msg +#ifdef CONFIG_EARLY_PRINTK + call early_printk +#else call printk +#endif addl $(5*4),%esp popl %ds popl %es @@ -530,5 +534,3 @@ ENTRY(cpu_gdt_table) .quad 0x0000000000000000 /* 0xf0 - unused */ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */ - /* Be sure this is zeroed to avoid false validations in Xen */ - .fill PAGE_SIZE_asm / 8 - GDT_ENTRIES,8,0 diff --git a/arch/i386/kernel/i386_ksyms.c b/arch/i386/kernel/i386_ksyms.c index 3999bec50c3..055325056a7 100644 --- a/arch/i386/kernel/i386_ksyms.c +++ b/arch/i386/kernel/i386_ksyms.c @@ -3,8 +3,6 @@ #include <asm/checksum.h> #include <asm/desc.h> -EXPORT_SYMBOL_GPL(cpu_gdt_descr); - EXPORT_SYMBOL(__down_failed); EXPORT_SYMBOL(__down_failed_interruptible); EXPORT_SYMBOL(__down_failed_trylock); diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c index f2dd218d88c..39d9a5fa907 100644 --- a/arch/i386/kernel/io_apic.c +++ b/arch/i386/kernel/io_apic.c @@ -51,6 +51,8 @@ static struct { int pin, apic; } ioapic_i8259 = { -1, -1 }; static DEFINE_SPINLOCK(ioapic_lock); +int timer_over_8254 __initdata = 1; + /* * Is the SiS APIC rmw bug present ? * -1 = don't know, 0 = no, 1 = yes @@ -2267,7 +2269,8 @@ static inline void check_timer(void) apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT); init_8259A(1); timer_ack = 1; - enable_8259A_irq(0); + if (timer_over_8254 > 0) + enable_8259A_irq(0); pin1 = find_isa_irq_pin(0, mp_INT); apic1 = find_isa_irq_apic(0, mp_INT); @@ -2392,6 +2395,20 @@ void __init setup_IO_APIC(void) print_IO_APIC(); } +static int __init setup_disable_8254_timer(char *s) +{ + timer_over_8254 = -1; + return 1; +} +static int __init setup_enable_8254_timer(char *s) +{ + timer_over_8254 = 2; + return 1; +} + +__setup("disable_8254_timer", setup_disable_8254_timer); +__setup("enable_8254_timer", setup_enable_8254_timer); + /* * Called after all the initialization is done. 
If we didnt find any * APIC bugs then we can allow the modify fast path @@ -2566,8 +2583,10 @@ int __init io_apic_get_unique_id (int ioapic, int apic_id) spin_unlock_irqrestore(&ioapic_lock, flags); /* Sanity check */ - if (reg_00.bits.ID != apic_id) - panic("IOAPIC[%d]: Unable change apic_id!\n", ioapic); + if (reg_00.bits.ID != apic_id) { + printk("IOAPIC[%d]: Unable to change apic_id!\n", ioapic); + return -1; + } } apic_printk(APIC_VERBOSE, KERN_INFO diff --git a/arch/i386/kernel/kprobes.c b/arch/i386/kernel/kprobes.c index 6483eeb1a4e..694a1399763 100644 --- a/arch/i386/kernel/kprobes.c +++ b/arch/i386/kernel/kprobes.c @@ -58,6 +58,11 @@ static inline int is_IF_modifier(kprobe_opcode_t opcode) int __kprobes arch_prepare_kprobe(struct kprobe *p) { + /* insn: must be on special executable page on i386. */ + p->ainsn.insn = get_insn_slot(); + if (!p->ainsn.insn) + return -ENOMEM; + memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t)); p->opcode = *p->addr; return 0; @@ -77,6 +82,13 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p) (unsigned long) p->addr + sizeof(kprobe_opcode_t)); } +void __kprobes arch_remove_kprobe(struct kprobe *p) +{ + down(&kprobe_mutex); + free_insn_slot(p->ainsn.insn); + up(&kprobe_mutex); +} + static inline void save_previous_kprobe(struct kprobe_ctlblk *kcb) { kcb->prev_kprobe.kp = kprobe_running(); @@ -111,7 +123,7 @@ static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs) if (p->opcode == BREAKPOINT_INSTRUCTION) regs->eip = (unsigned long)p->addr; else - regs->eip = (unsigned long)&p->ainsn.insn; + regs->eip = (unsigned long)p->ainsn.insn; } /* Called with kretprobe_lock held */ @@ -351,7 +363,7 @@ static void __kprobes resume_execution(struct kprobe *p, { unsigned long *tos = (unsigned long *)®s->esp; unsigned long next_eip = 0; - unsigned long copy_eip = (unsigned long)&p->ainsn.insn; + unsigned long copy_eip = (unsigned long)p->ainsn.insn; unsigned long orig_eip = (unsigned long)p->addr; switch (p->ainsn.insn[0]) { diff --git a/arch/i386/kernel/machine_kexec.c b/arch/i386/kernel/machine_kexec.c index a912fed4848..f73d7374a2b 100644 --- a/arch/i386/kernel/machine_kexec.c +++ b/arch/i386/kernel/machine_kexec.c @@ -116,13 +116,13 @@ static void load_segments(void) __asm__ __volatile__ ( "\tljmp $"STR(__KERNEL_CS)",$1f\n" "\t1:\n" - "\tmovl $"STR(__KERNEL_DS)",%eax\n" - "\tmovl %eax,%ds\n" - "\tmovl %eax,%es\n" - "\tmovl %eax,%fs\n" - "\tmovl %eax,%gs\n" - "\tmovl %eax,%ss\n" - ); + "\tmovl $"STR(__KERNEL_DS)",%%eax\n" + "\tmovl %%eax,%%ds\n" + "\tmovl %%eax,%%es\n" + "\tmovl %%eax,%%fs\n" + "\tmovl %%eax,%%gs\n" + "\tmovl %%eax,%%ss\n" + ::: "eax", "memory"); #undef STR #undef __STR } diff --git a/arch/i386/kernel/microcode.c b/arch/i386/kernel/microcode.c index d3fdf0057d8..5390b521aca 100644 --- a/arch/i386/kernel/microcode.c +++ b/arch/i386/kernel/microcode.c @@ -74,6 +74,7 @@ #include <linux/kernel.h> #include <linux/init.h> #include <linux/sched.h> +#include <linux/cpumask.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/vmalloc.h> @@ -250,8 +251,8 @@ static int find_matching_ucodes (void) error = -EINVAL; goto out; } - - for (cpu_num = 0; cpu_num < num_online_cpus(); cpu_num++) { + + for_each_online_cpu(cpu_num) { struct ucode_cpu_info *uci = ucode_cpu_info + cpu_num; if (uci->err != MC_NOTFOUND) /* already found a match or not an online cpu*/ continue; @@ -293,7 +294,7 @@ static int find_matching_ucodes (void) error = -EFAULT; goto out; } - for (cpu_num = 0; cpu_num < num_online_cpus(); 
cpu_num++) { + for_each_online_cpu(cpu_num) { struct ucode_cpu_info *uci = ucode_cpu_info + cpu_num; if (uci->err != MC_NOTFOUND) /* already found a match or not an online cpu*/ continue; @@ -304,7 +305,9 @@ static int find_matching_ucodes (void) } } /* now check if any cpu has matched */ - for (cpu_num = 0, allocated_flag = 0, sum = 0; cpu_num < num_online_cpus(); cpu_num++) { + allocated_flag = 0; + sum = 0; + for_each_online_cpu(cpu_num) { if (ucode_cpu_info[cpu_num].err == MC_MARKED) { struct ucode_cpu_info *uci = ucode_cpu_info + cpu_num; if (!allocated_flag) { @@ -415,12 +418,12 @@ static int do_microcode_update (void) } out_free: - for (i = 0; i < num_online_cpus(); i++) { + for_each_online_cpu(i) { if (ucode_cpu_info[i].mc) { int j; void *tmp = ucode_cpu_info[i].mc; vfree(tmp); - for (j = i; j < num_online_cpus(); j++) { + for_each_online_cpu(j) { if (ucode_cpu_info[j].mc == tmp) ucode_cpu_info[j].mc = NULL; } diff --git a/arch/i386/kernel/mpparse.c b/arch/i386/kernel/mpparse.c index 0102f3d50e5..e6e2f43db85 100644 --- a/arch/i386/kernel/mpparse.c +++ b/arch/i386/kernel/mpparse.c @@ -710,7 +710,7 @@ void __init get_smp_config (void) * Read the physical hardware table. Anything here will * override the defaults. */ - if (!smp_read_mpc((void *)mpf->mpf_physptr)) { + if (!smp_read_mpc(phys_to_virt(mpf->mpf_physptr))) { smp_found_config = 0; printk(KERN_ERR "BIOS bug, MP table errors detected!...\n"); printk(KERN_ERR "... disabling SMP support. (tell your hw vendor)\n"); @@ -915,6 +915,7 @@ void __init mp_register_ioapic ( u32 gsi_base) { int idx = 0; + int tmpid; if (nr_ioapics >= MAX_IO_APICS) { printk(KERN_ERR "ERROR: Max # of I/O APICs (%d) exceeded " @@ -935,9 +936,14 @@ void __init mp_register_ioapic ( set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address); if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && (boot_cpu_data.x86 < 15)) - mp_ioapics[idx].mpc_apicid = io_apic_get_unique_id(idx, id); + tmpid = io_apic_get_unique_id(idx, id); else - mp_ioapics[idx].mpc_apicid = id; + tmpid = id; + if (tmpid == -1) { + nr_ioapics--; + return; + } + mp_ioapics[idx].mpc_apicid = tmpid; mp_ioapics[idx].mpc_apicver = io_apic_get_version(idx); /* diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c index 63f39a7e2c9..be87c5e2ee9 100644 --- a/arch/i386/kernel/nmi.c +++ b/arch/i386/kernel/nmi.c @@ -357,7 +357,7 @@ static void clear_msr_range(unsigned int base, unsigned int n) wrmsr(base+i, 0, 0); } -static inline void write_watchdog_counter(const char *descr) +static void write_watchdog_counter(const char *descr) { u64 count = (u64)cpu_khz * 1000; @@ -544,7 +544,7 @@ void nmi_watchdog_tick (struct pt_regs * regs) * die_nmi will return ONLY if NOTIFY_STOP happens.. */ die_nmi(regs, "NMI Watchdog detected LOCKUP"); - + } else { last_irq_sums[cpu] = sum; alert_counter[cpu] = 0; } diff --git a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c index 51e513b4f72..ab62a9f4701 100644 --- a/arch/i386/kernel/setup.c +++ b/arch/i386/kernel/setup.c @@ -1599,6 +1599,10 @@ void __init setup_arch(char **cmdline_p) if (efi_enabled) efi_map_memmap(); +#ifdef CONFIG_X86_IO_APIC + check_acpi_pci(); /* Checks more than just ACPI actually */ +#endif + #ifdef CONFIG_ACPI /* * Parse the ACPI tables for possible boot-time SMP configuration. 
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c index 255adb49826..eba7f53f8b4 100644 --- a/arch/i386/kernel/smpboot.c +++ b/arch/i386/kernel/smpboot.c @@ -87,11 +87,7 @@ EXPORT_SYMBOL(cpu_online_map); cpumask_t cpu_callin_map; cpumask_t cpu_callout_map; EXPORT_SYMBOL(cpu_callout_map); -#ifdef CONFIG_HOTPLUG_CPU -cpumask_t cpu_possible_map = CPU_MASK_ALL; -#else cpumask_t cpu_possible_map; -#endif EXPORT_SYMBOL(cpu_possible_map); static cpumask_t smp_commenced_mask; @@ -902,12 +898,6 @@ static int __devinit do_boot_cpu(int apicid, int cpu) unsigned long start_eip; unsigned short nmi_high = 0, nmi_low = 0; - if (!cpu_gdt_descr[cpu].address && - !(cpu_gdt_descr[cpu].address = get_zeroed_page(GFP_KERNEL))) { - printk("Failed to allocate GDT for CPU %d\n", cpu); - return 1; - } - ++cpucount; /* diff --git a/arch/i386/kernel/syscall_table.S b/arch/i386/kernel/syscall_table.S index 5a8b3fb6d27..ac687d00a1c 100644 --- a/arch/i386/kernel/syscall_table.S +++ b/arch/i386/kernel/syscall_table.S @@ -299,7 +299,7 @@ ENTRY(sys_call_table) .long sys_mknodat .long sys_fchownat .long sys_futimesat - .long sys_newfstatat /* 300 */ + .long sys_fstatat64 /* 300 */ .long sys_unlinkat .long sys_renameat .long sys_linkat diff --git a/arch/i386/kernel/time.c b/arch/i386/kernel/time.c index a14d594bfbe..9d307475985 100644 --- a/arch/i386/kernel/time.c +++ b/arch/i386/kernel/time.c @@ -412,9 +412,9 @@ static int timer_resume(struct sys_device *dev) write_seqlock_irqsave(&xtime_lock, flags); xtime.tv_sec = sec; xtime.tv_nsec = 0; - write_sequnlock_irqrestore(&xtime_lock, flags); - jiffies += sleep_length; + jiffies_64 += sleep_length; wall_jiffies += sleep_length; + write_sequnlock_irqrestore(&xtime_lock, flags); if (last_timer->resume) last_timer->resume(); cur_timer = last_timer; diff --git a/arch/i386/kernel/timers/timer_tsc.c b/arch/i386/kernel/timers/timer_tsc.c index 7c86e3c5f1c..a7f5a2aceba 100644 --- a/arch/i386/kernel/timers/timer_tsc.c +++ b/arch/i386/kernel/timers/timer_tsc.c @@ -282,6 +282,10 @@ time_cpufreq_notifier(struct notifier_block *nb, unsigned long val, if (val != CPUFREQ_RESUMECHANGE) write_seqlock_irq(&xtime_lock); if (!ref_freq) { + if (!freq->old){ + ref_freq = freq->new; + goto end; + } ref_freq = freq->old; loops_per_jiffy_ref = cpu_data[freq->cpu].loops_per_jiffy; #ifndef CONFIG_SMP @@ -307,6 +311,7 @@ time_cpufreq_notifier(struct notifier_block *nb, unsigned long val, #endif } +end: if (val != CPUFREQ_RESUMECHANGE) write_sequnlock_irq(&xtime_lock); diff --git a/arch/i386/mach-default/topology.c b/arch/i386/kernel/topology.c index b64314069e7..67a0e1baa28 100644 --- a/arch/i386/mach-default/topology.c +++ b/arch/i386/kernel/topology.c @@ -1,12 +1,12 @@ /* - * arch/i386/mach-generic/topology.c - Populate driverfs with topology information + * arch/i386/kernel/topology.c - Populate driverfs with topology information * * Written by: Matthew Dobson, IBM Corporation * Original Code: Paul Dorwin, IBM Corporation, Patrick Mochel, OSDL * * Copyright (C) 2002, IBM Corp. * - * All rights reserved. + * All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -34,7 +34,7 @@ static struct i386_cpu cpu_devices[NR_CPUS]; int arch_register_cpu(int num){ struct node *parent = NULL; - + #ifdef CONFIG_NUMA int node = cpu_to_node(num); if (node_online(node)) diff --git a/arch/i386/kernel/vsyscall-sysenter.S b/arch/i386/kernel/vsyscall-sysenter.S index 4daefb2ec1b..76b72815940 100644 --- a/arch/i386/kernel/vsyscall-sysenter.S +++ b/arch/i386/kernel/vsyscall-sysenter.S @@ -7,6 +7,21 @@ * for details. */ +/* + * The caller puts arg2 in %ecx, which gets pushed. The kernel will use + * %ecx itself for arg2. The pushing is because the sysexit instruction + * (found in entry.S) requires that we clobber %ecx with the desired %esp. + * User code might expect that %ecx is unclobbered though, as it would be + * for returning via the iret instruction, so we must push and pop. + * + * The caller puts arg3 in %edx, which the sysexit instruction requires + * for %eip. Thus, exactly as for arg2, we must push and pop. + * + * Arg6 is different. The caller puts arg6 in %ebp. Since the sysenter + * instruction clobbers %esp, the user's %esp won't even survive entry + * into the kernel. We store %esp in %ebp. Code in entry.S must fetch + * arg6 from the stack. + */ .text .globl __kernel_vsyscall .type __kernel_vsyscall,@function diff --git a/arch/i386/mach-default/Makefile b/arch/i386/mach-default/Makefile index e95bb023792..012fe34459e 100644 --- a/arch/i386/mach-default/Makefile +++ b/arch/i386/mach-default/Makefile @@ -2,4 +2,4 @@ # Makefile for the linux kernel. # -obj-y := setup.o topology.o +obj-y := setup.o diff --git a/arch/i386/mach-voyager/voyager_basic.c b/arch/i386/mach-voyager/voyager_basic.c index aa49a33a572..b584060ec00 100644 --- a/arch/i386/mach-voyager/voyager_basic.c +++ b/arch/i386/mach-voyager/voyager_basic.c @@ -23,6 +23,8 @@ #include <linux/delay.h> #include <linux/reboot.h> #include <linux/sysrq.h> +#include <linux/smp.h> +#include <linux/nodemask.h> #include <asm/io.h> #include <asm/voyager.h> #include <asm/vic.h> @@ -328,4 +330,3 @@ void machine_power_off(void) if (pm_power_off) pm_power_off(); } - diff --git a/arch/i386/mach-voyager/voyager_smp.c b/arch/i386/mach-voyager/voyager_smp.c index 72a1b9cae2e..8165626a5c3 100644 --- a/arch/i386/mach-voyager/voyager_smp.c +++ b/arch/i386/mach-voyager/voyager_smp.c @@ -240,7 +240,7 @@ static cpumask_t smp_commenced_mask = CPU_MASK_NONE; cpumask_t cpu_callin_map = CPU_MASK_NONE; cpumask_t cpu_callout_map = CPU_MASK_NONE; EXPORT_SYMBOL(cpu_callout_map); -cpumask_t cpu_possible_map = CPU_MASK_ALL; +cpumask_t cpu_possible_map = CPU_MASK_NONE; EXPORT_SYMBOL(cpu_possible_map); /* The per processor IRQ masks (these are usually kept in sync) */ @@ -402,6 +402,7 @@ find_smp_config(void) cpus_addr(phys_cpu_present_map)[0] |= voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + 1) << 8; cpus_addr(phys_cpu_present_map)[0] |= voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + 2) << 16; cpus_addr(phys_cpu_present_map)[0] |= voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + 3) << 24; + cpu_possible_map = phys_cpu_present_map; printk("VOYAGER SMP: phys_cpu_present_map = 0x%lx\n", cpus_addr(phys_cpu_present_map)[0]); /* Here we set up the VIC to enable SMP */ /* enable the CPIs by writing the base vector to their register */ diff --git a/arch/i386/oprofile/backtrace.c b/arch/i386/oprofile/backtrace.c index acc18138fb2..c049ce414f0 100644 --- 
a/arch/i386/oprofile/backtrace.c +++ b/arch/i386/oprofile/backtrace.c @@ -20,7 +20,20 @@ struct frame_head { } __attribute__((packed)); static struct frame_head * -dump_backtrace(struct frame_head * head) +dump_kernel_backtrace(struct frame_head * head) +{ + oprofile_add_trace(head->ret); + + /* frame pointers should strictly progress back up the stack + * (towards higher addresses) */ + if (head >= head->ebp) + return NULL; + + return head->ebp; +} + +static struct frame_head * +dump_user_backtrace(struct frame_head * head) { struct frame_head bufhead[2]; @@ -105,10 +118,10 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth) if (!user_mode_vm(regs)) { while (depth-- && valid_kernel_stack(head, regs)) - head = dump_backtrace(head); + head = dump_kernel_backtrace(head); return; } while (depth-- && head) - head = dump_backtrace(head); + head = dump_user_backtrace(head); } |