author      Stas Sergeev <stsp@aknet.ru>            2009-02-23 19:13:07 +0300
committer   H. Peter Anvin <hpa@linux.intel.com>    2009-02-23 11:34:04 -0800
commit      bda3a89745d7bb88d3e735046c0cdc3d0eb2ac24 (patch)
tree        7213ea6999ff7dc6120c7d31fffe8b875a3ee1b8 /arch/x86
parent      2366c298b5afe52e635afd5604b69ce9fd4471fc (diff)
x86: minor cleanup in the espfix code
Impact: Cleanup

Checkin be44d2aabce2d62f72d5751d1871b6212bf7a1c7 eliminates the use of a
16-bit stack for espfix.  However, at least one instruction remained that
only operated on the low 16 bits of %esp.

This is not a bug per se, because the kernel stack is always an aligned 4K
or 8K block.  Therefore it cannot cross 64K boundaries; this code, in fact,
relies strictly on that fact.

However, it's a lot cleaner (and, for that matter, smaller) to operate on
the entire 32-bit register.

Signed-off-by: Stas Sergeev <stsp@aknet.ru>
CC: Zachary Amsden <zach@vmware.com>
CC: Chuck Ebbert <cebbert@redhat.com>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/kernel/entry_32.S  |  2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index 999e827ef9c..899e8938e79 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -1359,7 +1359,7 @@ nmi_espfix_stack:
CFI_ADJUST_CFA_OFFSET 4
pushl %esp
CFI_ADJUST_CFA_OFFSET 4
- addw $4, (%esp)
+ addl $4, (%esp)
/* copy the iret frame of 12 bytes */
.rept 3
pushl 16(%esp)
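
The commit message's justification is that the kernel stack is an aligned 4K
or 8K block and therefore cannot straddle a 64K boundary, so the old 16-bit
add on the saved stack pointer could never lose a carry.  Below is a minimal,
self-contained C sketch (not from the kernel; STACK_SIZE, the addresses, and
the variable names are illustrative assumptions) that checks this equivalence
for the worst-case placement of a 4K stack directly below a 64K boundary:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define STACK_SIZE 4096u	/* illustrative: a 4K kernel stack */

int main(void)
{
	/* Worst case: a 4K-aligned stack sitting directly below a 64K
	 * boundary at 0x10000.  Because 64K is a multiple of the stack
	 * alignment, the block can never straddle such a boundary. */
	uint32_t stack_base = 0x10000u - STACK_SIZE;

	/* For every slot whose adjusted pointer still lands inside the
	 * stack (as the saved %esp in the espfix path does, since the
	 * iret frame lies above it), a 16-bit add of 4 (addw) and a
	 * 32-bit add of 4 (addl) produce the same value. */
	for (uint32_t p = stack_base; p + 4 < stack_base + STACK_SIZE; p += 4) {
		uint32_t addl_result = p + 4;
		uint32_t addw_result = (p & 0xFFFF0000u) |
				       (uint16_t)((p & 0xFFFFu) + 4u);
		assert(addl_result == addw_result);
	}
	printf("addw and addl agree for every in-stack pointer\n");
	return 0;
}

An 8K stack behaves the same way, since 64K is a multiple of either
alignment; as the commit message says, the switch from addw to addl is about
clarity and code size, not correctness.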