Diffstat (limited to 'arch/i386/kernel')
-rw-r--r--  arch/i386/kernel/alternative.c            |  6
-rw-r--r--  arch/i386/kernel/cpu/intel_cacheinfo.c    |  4
-rw-r--r--  arch/i386/kernel/cpu/perfctr-watchdog.c   | 28
-rw-r--r--  arch/i386/kernel/nmi.c                    |  2
-rw-r--r--  arch/i386/kernel/ptrace.c                 |  1
-rw-r--r--  arch/i386/kernel/traps.c                  | 33
6 files changed, 49 insertions(+), 25 deletions(-)
diff --git a/arch/i386/kernel/alternative.c b/arch/i386/kernel/alternative.c
index 9f4ac8b02de..bd72d94e713 100644
--- a/arch/i386/kernel/alternative.c
+++ b/arch/i386/kernel/alternative.c
@@ -445,8 +445,6 @@ void __kprobes text_poke(void *addr, unsigned char *opcode, int len)
 {
 	memcpy(addr, opcode, len);
 	sync_core();
-	/* Not strictly needed, but can speed CPU recovery up. Ignore cross cacheline
-	   case. */
-	if (cpu_has_clflush)
-		asm("clflush (%0) " :: "r" (addr) : "memory");
+	/* Could also do a CLFLUSH here to speed up CPU recovery; but
+	   that causes hangs on some VIA CPUs. */
 }
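
A note on the text_poke() change above: the removed CLFLUSH was only an optimization to speed up CPU recovery after patching, and it hangs some VIA CPUs. For illustration, a minimal sketch of how a caller such as kprobes might drive text_poke() to plant a breakpoint byte; the helper name below is hypothetical and not part of this patch:

static const unsigned char int3_insn = 0xcc;	/* x86 breakpoint opcode */

/* Hypothetical caller: patch one byte of kernel text. text_poke()
 * copies the bytes and then serializes the local CPU with
 * sync_core(); the dropped CLFLUSH was never required for
 * correctness here. */
static void example_plant_breakpoint(void *addr)
{
	text_poke(addr, (unsigned char *)&int3_insn, 1);
}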
diff --git a/arch/i386/kernel/cpu/intel_cacheinfo.c b/arch/i386/kernel/cpu/intel_cacheinfo.c
index d5a456d27d8..db6c25aa577 100644
--- a/arch/i386/kernel/cpu/intel_cacheinfo.c
+++ b/arch/i386/kernel/cpu/intel_cacheinfo.c
@@ -515,7 +515,7 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
 
 	cpuid4_info[cpu] = kzalloc(
 	    sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
-	if (unlikely(cpuid4_info[cpu] == NULL))
+	if (cpuid4_info[cpu] == NULL)
 		return -ENOMEM;
 
 	oldmask = current->cpus_allowed;
@@ -748,6 +748,8 @@ static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)
 	unsigned int cpu = sys_dev->id;
 	unsigned long i;
 
+	if (cpuid4_info[cpu] == NULL)
+		return;
 	for (i = 0; i < num_cache_leaves; i++) {
 		cache_remove_shared_cpu_map(cpu, i);
 		kobject_unregister(&(INDEX_KOBJECT_PTR(cpu,i)->kobj));
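
The second intel_cacheinfo.c hunk makes CPU teardown tolerate a setup that failed or never ran: if the kzalloc() in detect_cache_attributes() fails, the per-CPU slot stays NULL, and cache_remove_dev() must not touch it. A minimal model of that pairing (simplified; the helper is hypothetical):

static struct _cpuid4_info *cpuid4_info[NR_CPUS];	/* NULL until setup succeeds */

static void example_cache_remove(unsigned int cpu)
{
	if (cpuid4_info[cpu] == NULL)
		return;		/* setup failed or never ran: nothing to undo */
	/* ... unregister the per-index kobjects, then drop the array ... */
	kfree(cpuid4_info[cpu]);
	cpuid4_info[cpu] = NULL;
}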
diff --git a/arch/i386/kernel/cpu/perfctr-watchdog.c b/arch/i386/kernel/cpu/perfctr-watchdog.c
index 4be488e73be..93fecd4b03d 100644
--- a/arch/i386/kernel/cpu/perfctr-watchdog.c
+++ b/arch/i386/kernel/cpu/perfctr-watchdog.c
@@ -263,8 +263,8 @@ static int setup_k7_watchdog(unsigned nmi_hz)
 	unsigned int evntsel;
 	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
 
-	perfctr_msr = MSR_K7_PERFCTR0;
-	evntsel_msr = MSR_K7_EVNTSEL0;
+	perfctr_msr = wd_ops->perfctr;
+	evntsel_msr = wd_ops->evntsel;
 
 	wrmsrl(perfctr_msr, 0UL);
 
@@ -343,8 +343,8 @@ static int setup_p6_watchdog(unsigned nmi_hz)
 	unsigned int evntsel;
 	struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
 
-	perfctr_msr = MSR_P6_PERFCTR0;
-	evntsel_msr = MSR_P6_EVNTSEL0;
+	perfctr_msr = wd_ops->perfctr;
+	evntsel_msr = wd_ops->evntsel;
 
 	/* KVM doesn't implement this MSR */
 	if (wrmsr_safe(perfctr_msr, 0, 0) < 0)
@@ -569,8 +569,8 @@ static int setup_intel_arch_watchdog(unsigned nmi_hz)
 	    (ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
 		return 0;
 
-	perfctr_msr = MSR_ARCH_PERFMON_PERFCTR1;
-	evntsel_msr = MSR_ARCH_PERFMON_EVENTSEL1;
+	perfctr_msr = wd_ops->perfctr;
+	evntsel_msr = wd_ops->evntsel;
 
 	wrmsrl(perfctr_msr, 0UL);
 
@@ -605,6 +605,16 @@ static struct wd_ops intel_arch_wd_ops = {
 	.evntsel = MSR_ARCH_PERFMON_EVENTSEL1,
 };
 
+static struct wd_ops coreduo_wd_ops = {
+	.reserve = single_msr_reserve,
+	.unreserve = single_msr_unreserve,
+	.setup = setup_intel_arch_watchdog,
+	.rearm = p6_rearm,
+	.stop = single_msr_stop_watchdog,
+	.perfctr = MSR_ARCH_PERFMON_PERFCTR0,
+	.evntsel = MSR_ARCH_PERFMON_EVENTSEL0,
+};
+
 static void probe_nmi_watchdog(void)
 {
 	switch (boot_cpu_data.x86_vendor) {
@@ -615,6 +625,12 @@ static void probe_nmi_watchdog(void)
 		wd_ops = &k7_wd_ops;
 		break;
 	case X86_VENDOR_INTEL:
+		/* Work around Core Duo (Yonah) errata AE49 where perfctr1
+		   doesn't have a working enable bit. */
+		if (boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 14) {
+			wd_ops = &coreduo_wd_ops;
+			break;
+		}
 		if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
 			wd_ops = &intel_arch_wd_ops;
 			break;
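
The mechanical part of the perfctr-watchdog.c change is that the setup routines now read their MSR numbers from the active wd_ops instead of hardcoding them. That indirection is what lets coreduo_wd_ops reuse setup_intel_arch_watchdog() unchanged while steering it to counter 0, sidestepping the broken enable bit of counter 1 (errata AE49). An abridged view of the operations table (field names are taken from the diff; the function-pointer signatures are sketched from how the fields are used, and other members are elided):

struct wd_ops {
	int (*reserve)(void);		/* reserve the counter for the watchdog */
	void (*unreserve)(void);
	int (*setup)(unsigned nmi_hz);
	void (*rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
	void (*stop)(void);
	unsigned perfctr;		/* which performance-counter MSR to program */
	unsigned evntsel;		/* matching event-select MSR */
	/* ... */
};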
diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c
index 8c1c965eb2a..c7227e2180f 100644
--- a/arch/i386/kernel/nmi.c
+++ b/arch/i386/kernel/nmi.c
@@ -115,12 +115,12 @@ static int __init check_nmi_watchdog(void)
 			atomic_dec(&nmi_active);
 		}
 	}
+	endflag = 1;
 	if (!atomic_read(&nmi_active)) {
 		kfree(prev_nmi_count);
 		atomic_set(&nmi_active, -1);
 		return -1;
 	}
-	endflag = 1;
 	printk("OK.\n");
 
 	/* now that we know it works we can reduce NMI frequency to
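
The nmi.c hunk fixes a hang on the failure path of check_nmi_watchdog(): on SMP the other CPUs spin in nmi_cpu_busy() until endflag becomes nonzero, and the old code set the flag only on the success path, so a failed check left them spinning forever. A simplified model of that rendezvous (the function below is a hypothetical stand-in for nmi_cpu_busy()):

static int endflag;	/* set by the boot CPU once the check is done */

/* Keep the CPU busy so the watchdog's performance counter keeps
 * ticking during the test, until the boot CPU signals completion.
 * The real code deliberately spins on mb() rather than cpu_relax(),
 * so the counter ticks even under simulators that catch PAUSE. */
static void example_busy_wait(void *unused)
{
	while (endflag == 0)
		mb();
}

With the patch, endflag is set before the early return, so this loop terminates on every exit path of the check.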
diff --git a/arch/i386/kernel/ptrace.c b/arch/i386/kernel/ptrace.c
index 0c8f00e69c4..7c1b92522e9 100644
--- a/arch/i386/kernel/ptrace.c
+++ b/arch/i386/kernel/ptrace.c
@@ -274,7 +274,6 @@ static void clear_singlestep(struct task_struct *child)
 
 void ptrace_disable(struct task_struct *child)
 {
 	clear_singlestep(child);
-	clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
 	clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
 }
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c
index cfffe3dd9e8..47b0bef335b 100644
--- a/arch/i386/kernel/traps.c
+++ b/arch/i386/kernel/traps.c
@@ -100,36 +100,45 @@ asmlinkage void machine_check(void);
 int kstack_depth_to_print = 24;
 static unsigned int code_bytes = 64;
 
-static inline int valid_stack_ptr(struct thread_info *tinfo, void *p)
+static inline int valid_stack_ptr(struct thread_info *tinfo, void *p, unsigned size)
 {
 	return	p > (void *)tinfo &&
-		p < (void *)tinfo + THREAD_SIZE - 3;
+		p <= (void *)tinfo + THREAD_SIZE - size;
 }
 
+/* The form of the top of the frame on the stack */
+struct stack_frame {
+	struct stack_frame *next_frame;
+	unsigned long return_address;
+};
+
 static inline unsigned long print_context_stack(struct thread_info *tinfo,
 				unsigned long *stack, unsigned long ebp,
 				struct stacktrace_ops *ops, void *data)
 {
-	unsigned long addr;
-
 #ifdef CONFIG_FRAME_POINTER
-	while (valid_stack_ptr(tinfo, (void *)ebp)) {
-		unsigned long new_ebp;
-		addr = *(unsigned long *)(ebp + 4);
+	struct stack_frame *frame = (struct stack_frame *)ebp;
+	while (valid_stack_ptr(tinfo, frame, sizeof(*frame))) {
+		struct stack_frame *next;
+		unsigned long addr;
+
+		addr = frame->return_address;
 		ops->address(data, addr);
 		/*
 		 * break out of recursive entries (such as
 		 * end_of_stack_stop_unwind_function). Also,
 		 * we can never allow a frame pointer to
 		 * move downwards!
-		 */
-		new_ebp = *(unsigned long *)ebp;
-		if (new_ebp <= ebp)
+		 */
+		next = frame->next_frame;
+		if (next <= frame)
 			break;
-		ebp = new_ebp;
+		frame = next;
 	}
 #else
-	while (valid_stack_ptr(tinfo, stack)) {
+	while (valid_stack_ptr(tinfo, stack, sizeof(*stack))) {
+		unsigned long addr;
+
 		addr = *stack++;
 		if (__kernel_text_address(addr))
 			ops->address(data, addr);
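
The traps.c rework replaces raw pointer arithmetic such as *(unsigned long *)(ebp + 4) with a typed view of the i386 call frame, and widens valid_stack_ptr() to check that the whole object fits within the thread stack instead of assuming a 4-byte word. A standalone illustration of the resulting walk (bounds checking omitted here; in the kernel each step is validated with valid_stack_ptr()):

/* Each frame on an i386 stack built with frame pointers starts with
 * the caller's saved %ebp, immediately followed by the return
 * address: exactly what struct stack_frame models. */
struct stack_frame {
	struct stack_frame *next_frame;		/* saved %ebp */
	unsigned long return_address;		/* saved %eip */
};

static void example_walk(struct stack_frame *frame)
{
	while (frame) {
		printk("return address %08lx\n", frame->return_address);
		/* a legitimate frame pointer can only move up the stack */
		if (frame->next_frame <= frame)
			break;
		frame = frame->next_frame;
	}
}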