author     Linus Torvalds <torvalds@linux-foundation.org>   2008-02-04 09:16:03 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>   2008-02-04 09:16:03 -0800
commit     d2fc0bacd5c438cb459fdf531eff00ab18422a00 (patch)
tree       d0ea52e4d2ad2fac12e19eaf6891c6af98353cfc /include/asm-x86
parent     93890b71a34f9490673a6edd56b61c2124215e46 (diff)
parent     795d45b22c079946332bf3825afefe5a981a97b6 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86
* git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86: (78 commits)
  x86: fix RTC lockdep warning: potential hardirq recursion
  x86: cpa, micro-optimization
  x86: cpa, clean up code flow
  x86: cpa, eliminate CPA_ enum
  x86: cpa, cleanups
  x86: implement gbpages support in change_page_attr()
  x86: support gbpages in pagetable dump
  x86: add gbpages support to lookup_address
  x86: add pgtable accessor functions for gbpages
  x86: add PUD_PAGE_SIZE
  x86: add feature macros for the gbpages cpuid bit
  x86: switch direct mapping setup over to set_pte
  x86: fix page-present check in cpa_flush_range
  x86: remove cpa warning
  x86: remove now unused clear_kernel_mapping
  x86: switch pci-gart over to using set_memory_np() instead of clear_kernel_mapping()
  x86: cpa selftest, skip non present entries
  x86: CPA fix pagetable split
  x86: rename LARGE_PAGE_SIZE to PMD_PAGE_SIZE
  x86: cpa, fix lookup_address
  ...
Diffstat (limited to 'include/asm-x86')
-rw-r--r--  include/asm-x86/asm.h                    |  7
-rw-r--r--  include/asm-x86/bugs.h                   |  2
-rw-r--r--  include/asm-x86/cpufeature.h             | 14
-rw-r--r--  include/asm-x86/efi.h                    |  4
-rw-r--r--  include/asm-x86/futex.h                  | 23
-rw-r--r--  include/asm-x86/highmem.h                |  4
-rw-r--r--  include/asm-x86/hw_irq_32.h              |  2
-rw-r--r--  include/asm-x86/i387.h                   | 16
-rw-r--r--  include/asm-x86/io_32.h                  | 25
-rw-r--r--  include/asm-x86/mach-numaq/mach_apic.h   |  2
-rw-r--r--  include/asm-x86/msr.h                    | 10
-rw-r--r--  include/asm-x86/page.h                   |  4
-rw-r--r--  include/asm-x86/page_64.h                |  3
-rw-r--r--  include/asm-x86/pgalloc_32.h             |  6
-rw-r--r--  include/asm-x86/pgtable-3level.h         | 26
-rw-r--r--  include/asm-x86/pgtable.h                |  4
-rw-r--r--  include/asm-x86/pgtable_32.h             |  2
-rw-r--r--  include/asm-x86/pgtable_64.h             |  7
-rw-r--r--  include/asm-x86/string_32.h              |  8
-rw-r--r--  include/asm-x86/system.h                 | 23
-rw-r--r--  include/asm-x86/uaccess_32.h             | 18
-rw-r--r--  include/asm-x86/uaccess_64.h             | 10
-rw-r--r--  include/asm-x86/vm86.h                   |  1
23 files changed, 91 insertions(+), 130 deletions(-)
diff --git a/include/asm-x86/asm.h b/include/asm-x86/asm.h
index 1a6980a60fc..90dec0c2364 100644
--- a/include/asm-x86/asm.h
+++ b/include/asm-x86/asm.h
@@ -29,4 +29,11 @@
#endif /* CONFIG_X86_32 */
+/* Exception table entry */
+# define _ASM_EXTABLE(from,to) \
+ " .section __ex_table,\"a\"\n" \
+ _ASM_ALIGN "\n" \
+ _ASM_PTR #from "," #to "\n" \
+ " .previous\n"
+
#endif /* _ASM_X86_ASM_H */
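
The hunks further down convert open-coded __ex_table fragments to this macro. As a rough sketch of the pattern (illustrative only, not part of the patch; the function name and labels are made up): a fixup-protected load emits one exception-table entry per faulting instruction.

/* Illustrative sketch, not from this patch: read one int from a
 * possibly-faulting address.  A fault at label 1 jumps to the fixup at
 * label 3 via the exception-table entry emitted by _ASM_EXTABLE. */
static inline int example_get_int(int *val, const int *addr)
{
	int err = 0;

	asm volatile("1:	movl %2,%1\n"
		     "2:\n"
		     ".section .fixup,\"ax\"\n"
		     "3:	movl %3,%0\n"
		     "	jmp 2b\n"
		     ".previous\n"
		     _ASM_EXTABLE(1b,3b)
		     : "=r" (err), "=r" (*val)
		     : "m" (*addr), "i" (-EFAULT), "0" (err));
	return err;
}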
diff --git a/include/asm-x86/bugs.h b/include/asm-x86/bugs.h
index 3fcc30dc073..021cbdd5f25 100644
--- a/include/asm-x86/bugs.h
+++ b/include/asm-x86/bugs.h
@@ -2,6 +2,6 @@
#define _ASM_X86_BUGS_H
extern void check_bugs(void);
-extern int ppro_with_ram_bug(void);
+int ppro_with_ram_bug(void);
#endif /* _ASM_X86_BUGS_H */
diff --git a/include/asm-x86/cpufeature.h b/include/asm-x86/cpufeature.h
index 3fb7dfa7fc9..065e92966c7 100644
--- a/include/asm-x86/cpufeature.h
+++ b/include/asm-x86/cpufeature.h
@@ -4,9 +4,6 @@
#ifndef _ASM_X86_CPUFEATURE_H
#define _ASM_X86_CPUFEATURE_H
-#ifndef __ASSEMBLY__
-#include <linux/bitops.h>
-#endif
#include <asm/required-features.h>
#define NCAPINTS 8 /* N 32-bit words worth of info */
@@ -49,6 +46,7 @@
#define X86_FEATURE_MP (1*32+19) /* MP Capable. */
#define X86_FEATURE_NX (1*32+20) /* Execute Disable */
#define X86_FEATURE_MMXEXT (1*32+22) /* AMD MMX extensions */
+#define X86_FEATURE_GBPAGES (1*32+26) /* GB pages */
#define X86_FEATURE_RDTSCP (1*32+27) /* RDTSCP */
#define X86_FEATURE_LM (1*32+29) /* Long Mode (x86-64) */
#define X86_FEATURE_3DNOWEXT (1*32+30) /* AMD 3DNow! extensions */
@@ -115,6 +113,13 @@
*/
#define X86_FEATURE_IDA (7*32+ 0) /* Intel Dynamic Acceleration */
+#if defined(__KERNEL__) && !defined(__ASSEMBLY__)
+
+#include <linux/bitops.h>
+
+extern const char * const x86_cap_flags[NCAPINTS*32];
+extern const char * const x86_power_flags[32];
+
#define cpu_has(c, bit) \
(__builtin_constant_p(bit) && \
( (((bit)>>5)==0 && (1UL<<((bit)&31) & REQUIRED_MASK0)) || \
@@ -175,6 +180,7 @@
#define cpu_has_pebs boot_cpu_has(X86_FEATURE_PEBS)
#define cpu_has_clflush boot_cpu_has(X86_FEATURE_CLFLSH)
#define cpu_has_bts boot_cpu_has(X86_FEATURE_BTS)
+#define cpu_has_gbpages boot_cpu_has(X86_FEATURE_GBPAGES)
#if defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_64)
# define cpu_has_invlpg 1
@@ -204,4 +210,6 @@
#endif /* CONFIG_X86_64 */
+#endif /* defined(__KERNEL__) && !defined(__ASSEMBLY__) */
+
#endif /* _ASM_X86_CPUFEATURE_H */
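
A hedged sketch of how the new feature bit might be consumed (the helper below is illustrative, not from this series): code that wants 1GB mappings gates on the CPUID-derived flag before picking a page size.

/* Illustrative only: fall back to 2MB mappings when the CPU does not
 * advertise 1GB pages (CPUID 0x80000001 EDX bit 26). */
static unsigned long example_pick_mapping_size(void)
{
	if (cpu_has_gbpages)
		return PUD_PAGE_SIZE;	/* 1GB, added in page_64.h below */
	return PMD_PAGE_SIZE;		/* 2MB large page */
}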
diff --git a/include/asm-x86/efi.h b/include/asm-x86/efi.h
index 9c68a1f098d..ea9734b74ac 100644
--- a/include/asm-x86/efi.h
+++ b/include/asm-x86/efi.h
@@ -33,7 +33,7 @@ extern unsigned long asmlinkage efi_call_phys(void *, ...);
#define efi_call_virt6(f, a1, a2, a3, a4, a5, a6) \
efi_call_virt(f, a1, a2, a3, a4, a5, a6)
-#define efi_ioremap(addr, size) ioremap(addr, size)
+#define efi_ioremap(addr, size) ioremap_cache(addr, size)
#else /* !CONFIG_X86_32 */
@@ -86,7 +86,7 @@ extern u64 efi_call6(void *fp, u64 arg1, u64 arg2, u64 arg3,
efi_call6((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \
(u64)(a3), (u64)(a4), (u64)(a5), (u64)(a6))
-extern void *efi_ioremap(unsigned long offset, unsigned long size);
+extern void *efi_ioremap(unsigned long addr, unsigned long size);
#endif /* CONFIG_X86_32 */
diff --git a/include/asm-x86/futex.h b/include/asm-x86/futex.h
index 9d919264923..cd9f894dd2d 100644
--- a/include/asm-x86/futex.h
+++ b/include/asm-x86/futex.h
@@ -17,11 +17,8 @@
"2: .section .fixup,\"ax\"\n \
3: mov %3, %1\n \
jmp 2b\n \
- .previous\n \
- .section __ex_table,\"a\"\n \
- .align 8\n" \
- _ASM_PTR "1b,3b\n \
- .previous" \
+ .previous\n" \
+ _ASM_EXTABLE(1b,3b) \
: "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
: "i" (-EFAULT), "0" (oparg), "1" (0))
@@ -35,11 +32,9 @@
3: .section .fixup,\"ax\"\n \
4: mov %5, %1\n \
jmp 3b\n \
- .previous\n \
- .section __ex_table,\"a\"\n \
- .align 8\n" \
- _ASM_PTR "1b,4b,2b,4b\n \
- .previous" \
+ .previous\n" \
+ _ASM_EXTABLE(1b,4b) \
+ _ASM_EXTABLE(2b,4b) \
: "=&a" (oldval), "=&r" (ret), "+m" (*uaddr), \
"=&r" (tem) \
: "r" (oparg), "i" (-EFAULT), "1" (0))
@@ -111,18 +106,12 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
return -EFAULT;
__asm__ __volatile__(
-
"1: lock; cmpxchgl %3, %1 \n"
"2: .section .fixup, \"ax\" \n"
"3: mov %2, %0 \n"
" jmp 2b \n"
" .previous \n"
-
- " .section __ex_table, \"a\" \n"
- " .align 8 \n"
- _ASM_PTR " 1b,3b \n"
- " .previous \n"
-
+ _ASM_EXTABLE(1b,3b)
: "=a" (oldval), "+m" (*uaddr)
: "i" (-EFAULT), "r" (newval), "0" (oldval)
: "memory"
diff --git a/include/asm-x86/highmem.h b/include/asm-x86/highmem.h
index 13cdcd66fff..c25cfcaab58 100644
--- a/include/asm-x86/highmem.h
+++ b/include/asm-x86/highmem.h
@@ -63,8 +63,8 @@ extern pte_t *pkmap_page_table;
#define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT)
#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
-extern void * FASTCALL(kmap_high(struct page *page));
-extern void FASTCALL(kunmap_high(struct page *page));
+extern void *kmap_high(struct page *page);
+extern void kunmap_high(struct page *page);
void *kmap(struct page *page);
void kunmap(struct page *page);
diff --git a/include/asm-x86/hw_irq_32.h b/include/asm-x86/hw_irq_32.h
index 6d65fbb6358..ea88054e03f 100644
--- a/include/asm-x86/hw_irq_32.h
+++ b/include/asm-x86/hw_irq_32.h
@@ -47,7 +47,7 @@ void enable_8259A_irq(unsigned int irq);
int i8259A_irq_pending(unsigned int irq);
void make_8259A_irq(unsigned int irq);
void init_8259A(int aeoi);
-void FASTCALL(send_IPI_self(int vector));
+void send_IPI_self(int vector);
void init_VISWS_APIC_irqs(void);
void setup_IO_APIC(void);
void disable_IO_APIC(void);
diff --git a/include/asm-x86/i387.h b/include/asm-x86/i387.h
index ba8105ca822..6b1895ccd6b 100644
--- a/include/asm-x86/i387.h
+++ b/include/asm-x86/i387.h
@@ -13,6 +13,7 @@
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/regset.h>
+#include <asm/asm.h>
#include <asm/processor.h>
#include <asm/sigcontext.h>
#include <asm/user.h>
@@ -41,10 +42,7 @@ static inline void tolerant_fwait(void)
{
asm volatile("1: fwait\n"
"2:\n"
- " .section __ex_table,\"a\"\n"
- " .align 8\n"
- " .quad 1b,2b\n"
- " .previous\n");
+ _ASM_EXTABLE(1b,2b));
}
static inline int restore_fpu_checking(struct i387_fxsave_struct *fx)
@@ -57,10 +55,7 @@ static inline int restore_fpu_checking(struct i387_fxsave_struct *fx)
"3: movl $-1,%[err]\n"
" jmp 2b\n"
".previous\n"
- ".section __ex_table,\"a\"\n"
- " .align 8\n"
- " .quad 1b,3b\n"
- ".previous"
+ _ASM_EXTABLE(1b,3b)
: [err] "=r" (err)
#if 0 /* See comment in __save_init_fpu() below. */
: [fx] "r" (fx), "m" (*fx), "0" (0));
@@ -99,10 +94,7 @@ static inline int save_i387_checking(struct i387_fxsave_struct __user *fx)
"3: movl $-1,%[err]\n"
" jmp 2b\n"
".previous\n"
- ".section __ex_table,\"a\"\n"
- " .align 8\n"
- " .quad 1b,3b\n"
- ".previous"
+ _ASM_EXTABLE(1b,3b)
: [err] "=r" (err), "=m" (*fx)
#if 0 /* See comment in __fxsave_clear() below. */
: [fx] "r" (fx), "0" (0));
diff --git a/include/asm-x86/io_32.h b/include/asm-x86/io_32.h
index 586d7aa54ce..58d2c45cd0b 100644
--- a/include/asm-x86/io_32.h
+++ b/include/asm-x86/io_32.h
@@ -275,29 +275,6 @@ static inline void slow_down_io(void) {
#endif
-#ifdef CONFIG_X86_NUMAQ
-extern void *xquad_portio; /* Where the IO area was mapped */
-#define XQUAD_PORT_ADDR(port, quad) (xquad_portio + (XQUAD_PORTIO_QUAD*quad) + port)
-#define __BUILDIO(bwl,bw,type) \
-static inline void out##bwl##_quad(unsigned type value, int port, int quad) { \
- if (xquad_portio) \
- write##bwl(value, XQUAD_PORT_ADDR(port, quad)); \
- else \
- out##bwl##_local(value, port); \
-} \
-static inline void out##bwl(unsigned type value, int port) { \
- out##bwl##_quad(value, port, 0); \
-} \
-static inline unsigned type in##bwl##_quad(int port, int quad) { \
- if (xquad_portio) \
- return read##bwl(XQUAD_PORT_ADDR(port, quad)); \
- else \
- return in##bwl##_local(port); \
-} \
-static inline unsigned type in##bwl(int port) { \
- return in##bwl##_quad(port, 0); \
-}
-#else
#define __BUILDIO(bwl,bw,type) \
static inline void out##bwl(unsigned type value, int port) { \
out##bwl##_local(value, port); \
@@ -305,8 +282,6 @@ static inline void out##bwl(unsigned type value, int port) { \
static inline unsigned type in##bwl(int port) { \
return in##bwl##_local(port); \
}
-#endif
-
#define BUILDIO(bwl,bw,type) \
static inline void out##bwl##_local(unsigned type value, int port) { \
diff --git a/include/asm-x86/mach-numaq/mach_apic.h b/include/asm-x86/mach-numaq/mach_apic.h
index 17e183bd39c..3b637fac890 100644
--- a/include/asm-x86/mach-numaq/mach_apic.h
+++ b/include/asm-x86/mach-numaq/mach_apic.h
@@ -109,6 +109,8 @@ static inline int mpc_apic_id(struct mpc_config_processor *m,
return logical_apicid;
}
+extern void *xquad_portio;
+
static inline void setup_portio_remap(void)
{
int num_quads = num_online_nodes();
diff --git a/include/asm-x86/msr.h b/include/asm-x86/msr.h
index 204a8a30fec..3ca29ebebbb 100644
--- a/include/asm-x86/msr.h
+++ b/include/asm-x86/msr.h
@@ -57,10 +57,7 @@ static inline unsigned long long native_read_msr_safe(unsigned int msr,
".section .fixup,\"ax\"\n\t"
"3: mov %3,%0 ; jmp 1b\n\t"
".previous\n\t"
- ".section __ex_table,\"a\"\n"
- _ASM_ALIGN "\n\t"
- _ASM_PTR " 2b,3b\n\t"
- ".previous"
+ _ASM_EXTABLE(2b,3b)
: "=r" (*err), EAX_EDX_RET(val, low, high)
: "c" (msr), "i" (-EFAULT));
return EAX_EDX_VAL(val, low, high);
@@ -81,10 +78,7 @@ static inline int native_write_msr_safe(unsigned int msr,
".section .fixup,\"ax\"\n\t"
"3: mov %4,%0 ; jmp 1b\n\t"
".previous\n\t"
- ".section __ex_table,\"a\"\n"
- _ASM_ALIGN "\n\t"
- _ASM_PTR " 2b,3b\n\t"
- ".previous"
+ _ASM_EXTABLE(2b,3b)
: "=a" (err)
: "c" (msr), "0" (low), "d" (high),
"i" (-EFAULT));
diff --git a/include/asm-x86/page.h b/include/asm-x86/page.h
index c8b30efeed8..1cb7c51bc29 100644
--- a/include/asm-x86/page.h
+++ b/include/asm-x86/page.h
@@ -13,8 +13,8 @@
#define PHYSICAL_PAGE_MASK (PAGE_MASK & __PHYSICAL_MASK)
#define PTE_MASK (_AT(long, PHYSICAL_PAGE_MASK))
-#define LARGE_PAGE_SIZE (_AC(1,UL) << PMD_SHIFT)
-#define LARGE_PAGE_MASK (~(LARGE_PAGE_SIZE-1))
+#define PMD_PAGE_SIZE (_AC(1, UL) << PMD_SHIFT)
+#define PMD_PAGE_MASK (~(PMD_PAGE_SIZE-1))
#define HPAGE_SHIFT PMD_SHIFT
#define HPAGE_SIZE (_AC(1,UL) << HPAGE_SHIFT)
diff --git a/include/asm-x86/page_64.h b/include/asm-x86/page_64.h
index c1ac42d8707..dcf0c074607 100644
--- a/include/asm-x86/page_64.h
+++ b/include/asm-x86/page_64.h
@@ -23,6 +23,9 @@
#define MCE_STACK 5
#define N_EXCEPTION_STACKS 5 /* hw limit: 7 */
+#define PUD_PAGE_SIZE (_AC(1, UL) << PUD_SHIFT)
+#define PUD_PAGE_MASK (~(PUD_PAGE_SIZE-1))
+
#define __PAGE_OFFSET _AC(0xffff810000000000, UL)
#define __PHYSICAL_START CONFIG_PHYSICAL_START
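
The new PUD_PAGE_* constants mirror the renamed PMD_PAGE_* ones in page.h. A small sketch of the usual mask arithmetic (hypothetical helper names, not from the patch):

/* Illustrative only: round an address down/up to a 2MB (PMD) boundary. */
static inline unsigned long example_pmd_align_down(unsigned long addr)
{
	return addr & PMD_PAGE_MASK;
}

static inline unsigned long example_pmd_align_up(unsigned long addr)
{
	return (addr + PMD_PAGE_SIZE - 1) & PMD_PAGE_MASK;
}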
diff --git a/include/asm-x86/pgalloc_32.h b/include/asm-x86/pgalloc_32.h
index 7641e7b5d93..6c21ef951da 100644
--- a/include/asm-x86/pgalloc_32.h
+++ b/include/asm-x86/pgalloc_32.h
@@ -80,8 +80,10 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT));
/*
- * Pentium-II erratum A13: in PAE mode we explicitly have to flush
- * the TLB via cr3 if the top-level pgd is changed...
+ * According to Intel App note "TLBs, Paging-Structure Caches,
+ * and Their Invalidation", April 2007, document 317080-001,
+ * section 8.1: in PAE mode we explicitly have to flush the
+ * TLB via cr3 if the top-level pgd is changed...
*/
if (mm == current->active_mm)
write_cr3(read_cr3());
diff --git a/include/asm-x86/pgtable-3level.h b/include/asm-x86/pgtable-3level.h
index a195c3e757b..1d763eec740 100644
--- a/include/asm-x86/pgtable-3level.h
+++ b/include/asm-x86/pgtable-3level.h
@@ -93,26 +93,22 @@ static inline void native_pmd_clear(pmd_t *pmd)
static inline void pud_clear(pud_t *pudp)
{
+ unsigned long pgd;
+
set_pud(pudp, __pud(0));
/*
- * In principle we need to do a cr3 reload here to make sure
- * the processor recognizes the changed pgd. In practice, all
- * the places where pud_clear() gets called are followed by
- * full tlb flushes anyway, so we can defer the cost here.
- *
- * Specifically:
- *
- * mm/memory.c:free_pmd_range() - immediately after the
- * pud_clear() it does a pmd_free_tlb(). We change the
- * mmu_gather structure to do a full tlb flush (which has the
- * effect of reloading cr3) when the pagetable free is
- * complete.
+ * According to Intel App note "TLBs, Paging-Structure Caches,
+ * and Their Invalidation", April 2007, document 317080-001,
+ * section 8.1: in PAE mode we explicitly have to flush the
+ * TLB via cr3 if the top-level pgd is changed...
*
- * arch/x86/mm/hugetlbpage.c:huge_pmd_unshare() - the call to
- * this is followed by a flush_tlb_range, which on x86 does a
- * full tlb flush.
+ * Make sure the pud entry we're updating is within the
+ * current pgd to avoid unnecessary TLB flushes.
*/
+ pgd = read_cr3();
+ if (__pa(pudp) >= pgd && __pa(pudp) < (pgd + sizeof(pgd_t)*PTRS_PER_PGD))
+ write_cr3(pgd);
}
#define pud_page(pud) \
diff --git a/include/asm-x86/pgtable.h b/include/asm-x86/pgtable.h
index cd2524f0745..44c0a4f1b1e 100644
--- a/include/asm-x86/pgtable.h
+++ b/include/asm-x86/pgtable.h
@@ -13,10 +13,12 @@
#define _PAGE_BIT_DIRTY 6
#define _PAGE_BIT_FILE 6
#define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
+#define _PAGE_BIT_PAT 7 /* on 4KB pages */
#define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
#define _PAGE_BIT_UNUSED2 10
#define _PAGE_BIT_UNUSED3 11
+#define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
#define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
/*
@@ -36,6 +38,8 @@
#define _PAGE_UNUSED1 (_AC(1, L)<<_PAGE_BIT_UNUSED1)
#define _PAGE_UNUSED2 (_AC(1, L)<<_PAGE_BIT_UNUSED2)
#define _PAGE_UNUSED3 (_AC(1, L)<<_PAGE_BIT_UNUSED3)
+#define _PAGE_PAT (_AC(1, L)<<_PAGE_BIT_PAT)
+#define _PAGE_PAT_LARGE (_AC(1, L)<<_PAGE_BIT_PAT_LARGE)
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
#define _PAGE_NX (_AC(1, ULL) << _PAGE_BIT_NX)
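
For context (my summary, not text from the patch): the PAT index of a mapping is built from the PAT, PCD and PWT bits, and because bit 7 doubles as PSE on 2MB/1GB entries, the PAT bit moves to bit 12 there, which is what _PAGE_PAT_LARGE captures. A hypothetical sketch:

/* Illustrative only: compute the 3-bit PAT index for an entry.  PAT sits
 * at bit 7 on 4KB PTEs and at bit 12 on 2MB/1GB entries (bit 7 is PSE). */
static inline unsigned example_pat_index(unsigned long flags, int large)
{
	unsigned long pat = large ? (flags & _PAGE_PAT_LARGE)
				  : (flags & _PAGE_PAT);

	return (pat ? 4 : 0) |
	       ((flags & _PAGE_PCD) ? 2 : 0) |
	       ((flags & _PAGE_PWT) ? 1 : 0);
}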
diff --git a/include/asm-x86/pgtable_32.h b/include/asm-x86/pgtable_32.h
index 21e70fbf1da..935630d1730 100644
--- a/include/asm-x86/pgtable_32.h
+++ b/include/asm-x86/pgtable_32.h
@@ -148,6 +148,8 @@ static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
*/
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
+static inline int pud_large(pud_t pud) { return 0; }
+
/*
* the pmd page can be thought of an array like this: pmd_t[PTRS_PER_PMD]
*
diff --git a/include/asm-x86/pgtable_64.h b/include/asm-x86/pgtable_64.h
index 6e615a103c2..bd4740a60f2 100644
--- a/include/asm-x86/pgtable_64.h
+++ b/include/asm-x86/pgtable_64.h
@@ -21,7 +21,6 @@ extern pgd_t init_level4_pgt[];
#define swapper_pg_dir init_level4_pgt
extern void paging_init(void);
-extern void clear_kernel_mapping(unsigned long addr, unsigned long size);
#endif /* !__ASSEMBLY__ */
@@ -199,6 +198,12 @@ static inline unsigned long pmd_bad(pmd_t pmd)
#define pud_offset(pgd, address) ((pud_t *) pgd_page_vaddr(*(pgd)) + pud_index(address))
#define pud_present(pud) (pud_val(pud) & _PAGE_PRESENT)
+static inline int pud_large(pud_t pte)
+{
+ return (pud_val(pte) & (_PAGE_PSE|_PAGE_PRESENT)) ==
+ (_PAGE_PSE|_PAGE_PRESENT);
+}
+
/* PMD - Level 2 access */
#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val(pmd) & PTE_MASK))
#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
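
pud_large() is what lets lookup_address() and the pagetable dumper stop at a 1GB entry. A hedged sketch of such a walk (simplified pseudocode using the usual accessor names; the real lookup_address in this series also reports the level):

/* Illustrative only: stop descending when a PUD or PMD maps a large page. */
static pte_t *example_walk(pgd_t *pgd, unsigned long address)
{
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd))
		return NULL;
	pud = pud_offset(pgd, address);
	if (pud_none(*pud))
		return NULL;
	if (pud_large(*pud))		/* 1GB mapping: the pud is the leaf */
		return (pte_t *)pud;
	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return NULL;
	if (pmd_large(*pmd))		/* 2MB mapping: the pmd is the leaf */
		return (pte_t *)pmd;
	return pte_offset_kernel(pmd, address);
}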
diff --git a/include/asm-x86/string_32.h b/include/asm-x86/string_32.h
index 55bfa308f90..c5d13a86dea 100644
--- a/include/asm-x86/string_32.h
+++ b/include/asm-x86/string_32.h
@@ -213,14 +213,14 @@ static __always_inline void * __constant_c_and_count_memset(void * s, unsigned l
case 0:
return s;
case 1:
- *(unsigned char *)s = pattern;
+ *(unsigned char *)s = pattern & 0xff;
return s;
case 2:
- *(unsigned short *)s = pattern;
+ *(unsigned short *)s = pattern & 0xffff;
return s;
case 3:
- *(unsigned short *)s = pattern;
- *(2+(unsigned char *)s) = pattern;
+ *(unsigned short *)s = pattern & 0xffff;
+ *(2+(unsigned char *)s) = pattern & 0xff;
return s;
case 4:
*(unsigned long *)s = pattern;
diff --git a/include/asm-x86/system.h b/include/asm-x86/system.h
index ee32ef9367f..9cff02ffe6c 100644
--- a/include/asm-x86/system.h
+++ b/include/asm-x86/system.h
@@ -20,8 +20,8 @@
#ifdef CONFIG_X86_32
struct task_struct; /* one of the stranger aspects of C forward declarations */
-extern struct task_struct *FASTCALL(__switch_to(struct task_struct *prev,
- struct task_struct *next));
+struct task_struct *__switch_to(struct task_struct *prev,
+ struct task_struct *next);
/*
* Saving eflags is important. It switches not only IOPL between tasks,
@@ -130,10 +130,7 @@ extern void load_gs_index(unsigned);
"movl %k1, %%" #seg "\n\t" \
"jmp 2b\n" \
".previous\n" \
- ".section __ex_table,\"a\"\n\t" \
- _ASM_ALIGN "\n\t" \
- _ASM_PTR " 1b,3b\n" \
- ".previous" \
+ _ASM_EXTABLE(1b,3b) \
: :"r" (value), "r" (0))
@@ -214,12 +211,10 @@ static inline unsigned long native_read_cr4_safe(void)
/* This could fault if %cr4 does not exist. In x86_64, a cr4 always
* exists, so it will never fail. */
#ifdef CONFIG_X86_32
- asm volatile("1: mov %%cr4, %0 \n"
- "2: \n"
- ".section __ex_table,\"a\" \n"
- ".long 1b,2b \n"
- ".previous \n"
- : "=r" (val), "=m" (__force_order) : "0" (0));
+ asm volatile("1: mov %%cr4, %0\n"
+ "2:\n"
+ _ASM_EXTABLE(1b,2b)
+ : "=r" (val), "=m" (__force_order) : "0" (0));
#else
val = native_read_cr4();
#endif
@@ -276,9 +271,9 @@ static inline void native_wbinvd(void)
#endif /* __KERNEL__ */
-static inline void clflush(void *__p)
+static inline void clflush(volatile void *__p)
{
- asm volatile("clflush %0" : "+m" (*(char __force *)__p));
+ asm volatile("clflush %0" : "+m" (*(volatile char __force *)__p));
}
#define nop() __asm__ __volatile__ ("nop")
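
Making clflush() take a volatile pointer lets callers flush volatile-qualified buffers without casts. A hedged sketch of the typical range-flush loop (similar in spirit to the kernel's clflush_cache_range, simplified here; the 64-byte line size is an assumption):

/* Illustrative only: flush every cache line covering [vaddr, vaddr+size).
 * Assumes a 64-byte line; real code derives the size from CPUID. */
static void example_clflush_range(void *vaddr, unsigned int size)
{
	char *p = (char *)((unsigned long)vaddr & ~63UL);
	char *end = (char *)vaddr + size;

	mb();				/* order prior stores before flushing */
	for (; p < end; p += 64)
		clflush(p);
	mb();				/* clflush is only ordered by mfence */
}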
diff --git a/include/asm-x86/uaccess_32.h b/include/asm-x86/uaccess_32.h
index d2a4f7be9c2..fcc570ec4fe 100644
--- a/include/asm-x86/uaccess_32.h
+++ b/include/asm-x86/uaccess_32.h
@@ -8,6 +8,7 @@
#include <linux/thread_info.h>
#include <linux/prefetch.h>
#include <linux/string.h>
+#include <asm/asm.h>
#include <asm/page.h>
#define VERIFY_READ 0
@@ -287,11 +288,8 @@ extern void __put_user_8(void);
"4: movl %3,%0\n" \
" jmp 3b\n" \
".previous\n" \
- ".section __ex_table,\"a\"\n" \
- " .align 4\n" \
- " .long 1b,4b\n" \
- " .long 2b,4b\n" \
- ".previous" \
+ _ASM_EXTABLE(1b,4b) \
+ _ASM_EXTABLE(2b,4b) \
: "=r"(err) \
: "A" (x), "r" (addr), "i"(-EFAULT), "0"(err))
@@ -338,10 +336,7 @@ struct __large_struct { unsigned long buf[100]; };
"3: movl %3,%0\n" \
" jmp 2b\n" \
".previous\n" \
- ".section __ex_table,\"a\"\n" \
- " .align 4\n" \
- " .long 1b,3b\n" \
- ".previous" \
+ _ASM_EXTABLE(1b,3b) \
: "=r"(err) \
: ltype (x), "m"(__m(addr)), "i"(errret), "0"(err))
@@ -378,10 +373,7 @@ do { \
" xor"itype" %"rtype"1,%"rtype"1\n" \
" jmp 2b\n" \
".previous\n" \
- ".section __ex_table,\"a\"\n" \
- " .align 4\n" \
- " .long 1b,3b\n" \
- ".previous" \
+ _ASM_EXTABLE(1b,3b) \
: "=r"(err), ltype (x) \
: "m"(__m(addr)), "i"(errret), "0"(err))
diff --git a/include/asm-x86/uaccess_64.h b/include/asm-x86/uaccess_64.h
index 31d79470271..b87eb4ba8f9 100644
--- a/include/asm-x86/uaccess_64.h
+++ b/include/asm-x86/uaccess_64.h
@@ -181,10 +181,7 @@ struct __large_struct { unsigned long buf[100]; };
"3: mov %3,%0\n" \
" jmp 2b\n" \
".previous\n" \
- ".section __ex_table,\"a\"\n" \
- " .align 8\n" \
- " .quad 1b,3b\n" \
- ".previous" \
+ _ASM_EXTABLE(1b,3b) \
: "=r"(err) \
: ltype (x), "m"(__m(addr)), "i"(errno), "0"(err))
@@ -226,10 +223,7 @@ do { \
" xor"itype" %"rtype"1,%"rtype"1\n" \
" jmp 2b\n" \
".previous\n" \
- ".section __ex_table,\"a\"\n" \
- " .align 8\n" \
- " .quad 1b,3b\n" \
- ".previous" \
+ _ASM_EXTABLE(1b,3b) \
: "=r"(err), ltype (x) \
: "m"(__m(addr)), "i"(errno), "0"(err))
diff --git a/include/asm-x86/vm86.h b/include/asm-x86/vm86.h
index a5edf517b99..c92fe4af52e 100644
--- a/include/asm-x86/vm86.h
+++ b/include/asm-x86/vm86.h
@@ -195,6 +195,7 @@ struct kernel_vm86_struct {
void handle_vm86_fault(struct kernel_vm86_regs *, long);
int handle_vm86_trap(struct kernel_vm86_regs *, long, int);
+struct pt_regs *save_v86_state(struct kernel_vm86_regs *);
struct task_struct;
void release_vm86_irqs(struct task_struct *);