path: root/arch/x86/kernel
author    Thomas Gleixner <tglx@linutronix.de>    2009-02-12 21:30:48 +0100
committer Thomas Gleixner <tglx@linutronix.de>    2009-02-12 23:11:58 +0100
commit    34b0900d323122113683685b200aae9f9b75e63b (patch)
tree      a7ebd50755ae7321e89a4e5a0b64e4a5a7de0dc4 /arch/x86/kernel
parent    d85cf93da66977dbc645352be1b2084a659d8a0b (diff)
x86: warn if arch_flush_lazy_mmu_cpu is called in preemptible context
Impact: Catch cases where lazy MMU state is active in a preemptible context

arch_flush_lazy_mmu_cpu() has been changed to disable preemption, so the checks in enter/leave will never trigger. Put the preemptible() check into arch_flush_lazy_mmu_cpu() to catch such cases.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--  arch/x86/kernel/paravirt.c  2
1 file changed, 2 insertions, 0 deletions
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index dcba6c567a2..c6520a4e85d 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -273,6 +273,7 @@ void arch_flush_lazy_mmu_mode(void)
 	preempt_disable();
 
 	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
+		WARN_ON(preempt_count() == 1);
 		arch_leave_lazy_mmu_mode();
 		arch_enter_lazy_mmu_mode();
 	}
@@ -285,6 +286,7 @@ void arch_flush_lazy_cpu_mode(void)
 	preempt_disable();
 
 	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU) {
+		WARN_ON(preempt_count() == 1);
 		arch_leave_lazy_cpu_mode();
 		arch_enter_lazy_cpu_mode();
 	}
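
For readers reconstructing the reasoning behind WARN_ON(preempt_count() == 1): the flush functions call preempt_disable() themselves, which raises the preempt count by one, so a count of exactly 1 at the check means the caller arrived with a count of 0, i.e. from a preemptible context. Below is a minimal userspace sketch (not kernel code; the mock_* names and the plain integer counter are illustrative stand-ins for the real per-CPU preempt machinery) that models this accounting:

#include <stdio.h>

static int preempt_count_mock;                   /* stand-in for the per-CPU preempt count */

static void mock_preempt_disable(void) { preempt_count_mock++; }
static void mock_preempt_enable(void)  { preempt_count_mock--; }

static void mock_flush_lazy_mmu_mode(void)
{
        mock_preempt_disable();

        /*
         * A count of 1 here means the caller entered with a count of 0,
         * i.e. the flush was requested from a preemptible context.
         */
        if (preempt_count_mock == 1)
                fprintf(stderr, "WARN: lazy MMU flush in preemptible context\n");

        mock_preempt_enable();
}

int main(void)
{
        /* caller that did not disable preemption: triggers the warning */
        mock_flush_lazy_mmu_mode();

        /* well-behaved caller: no warning */
        mock_preempt_disable();
        mock_flush_lazy_mmu_mode();
        mock_preempt_enable();

        return 0;
}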